| id | content | max_stars_repo_path |
|---|---|---|
crossvul-cpp_data_good_3399_0 | /* radare - LGPL - Copyright 2006-2016 - pancake */
#include "r_config.h"
#include "r_util.h" // r_str_hash, r_str_chop, ...
#ifdef _MSC_VER
#define strcasecmp stricmp
#endif
R_API RConfigNode* r_config_node_new(const char *name, const char *value) {
RConfigNode *node;
if (STRNULL (name)) {
return NULL;
}
node = R_NEW0 (RConfigNode);
if (!node) {
return NULL;
}
node->name = strdup (name);
node->value = strdup (value? value: "");
node->flags = CN_RW | CN_STR;
node->i_value = r_num_get (NULL, value);
node->options = r_list_new ();
return node;
}
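/*
Usage sketch (illustrative, not part of the original file): the
constructor duplicates both strings, so the caller keeps ownership
of its own buffers.
RConfigNode *n = r_config_node_new ("asm.arch", "x86");
if (n) {
// n->i_value stays 0 for non-numeric values
r_config_node_free (n);
}
*/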
R_API RConfigNode* r_config_node_clone(RConfigNode *n) {
if (!n) {
return NULL;
}
RConfigNode *cn = R_NEW0 (RConfigNode);
if (!cn) {
return NULL;
}
cn->name = strdup (n->name);
cn->desc = n->desc? strdup (n->desc): NULL;
cn->value = strdup (n->value? n->value: "");
cn->i_value = n->i_value;
cn->flags = n->flags;
cn->setter = n->setter;
cn->options = r_list_clone (n->options);
return cn;
}
R_API void r_config_node_free(void *n) {
RConfigNode *node = (RConfigNode *)n;
if (!node) {
return;
}
free (node->name);
free (node->desc);
free (node->value);
r_list_free (node->options);
free (node);
}
static void config_print_value_json(RConfig *cfg, RConfigNode *node) {
const char *val = node->value;
if (!val) {
val = "0";
}
char *sval = r_str_escape (val);
if (node->flags & CN_BOOL || node->flags & CN_INT || node->flags & CN_OFFT) {
if (!strncmp (val, "0x", 2)) {
ut64 n = r_num_get (NULL, val);
cfg->cb_printf ("%"PFMT64d, n);
} else if (r_str_isnumber (val) || !strcmp (val, "true") || !strcmp (val, "false")) {
cfg->cb_printf ("%s", val);
} else {
cfg->cb_printf ("\"%s\"", sval);
}
} else {
cfg->cb_printf ("\"%s\"", sval);
}
free (sval);
}
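/*
Example outputs of config_print_value_json() (illustrative):
value "0x100" with CN_INT  -> 256
value "true"  with CN_BOOL -> true
value "x86"   with CN_STR  -> "x86"
*/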
static void config_print_node(RConfig *cfg, RConfigNode *node, const char *pfx, const char *sfx, bool verbose, bool json) {
char *option;
bool isFirst;
RListIter *iter;
char *es = NULL;
if (json) {
if (verbose) {
cfg->cb_printf ("{");
cfg->cb_printf ("\"name\":\"%s\",", node->name);
cfg->cb_printf ("\"value\":");
config_print_value_json (cfg, node);
cfg->cb_printf (",\"type\":\"%s\",", r_config_node_type (node));
es = r_str_escape (node->desc);
if (es) {
cfg->cb_printf ("\"desc\":\"%s\",", es);
free (es);
}
cfg->cb_printf ("\"ro\":%s", node->flags & CN_RO ? "true" : "false");
if (!r_list_empty (node->options)) {
isFirst = true;
cfg->cb_printf (",\"options\":[");
r_list_foreach (node->options, iter, option) {
es = r_str_escape (option);
if (es) {
if (isFirst) {
isFirst = false;
} else {
cfg->cb_printf (",");
}
cfg->cb_printf ("\"%s\"", es);
free (es);
}
}
cfg->cb_printf ("]");
}
cfg->cb_printf ("}");
} else {
cfg->cb_printf ("\"%s\":", node->name);
config_print_value_json (cfg, node);
}
} else {
if (verbose) {
cfg->cb_printf ("%s%s = %s%s %s; %s", pfx,
node->name, node->value, sfx,
node->flags & CN_RO ? "(ro)" : "",
node->desc);
if (!r_list_empty (node->options)) {
isFirst = true;
cfg->cb_printf(" [");
r_list_foreach (node->options, iter, option) {
if (isFirst) {
isFirst = false;
} else {
cfg->cb_printf(", ");
}
cfg->cb_printf("%s", option);
}
cfg->cb_printf("]");
}
cfg->cb_printf ("\n");
} else {
cfg->cb_printf ("%s%s = %s%s\n", pfx,
node->name, node->value, sfx);
}
}
}
R_API void r_config_list(RConfig *cfg, const char *str, int rad) {
RConfigNode *node;
RListIter *iter;
const char *sfx = "";
const char *pfx = "";
int len = 0;
bool verbose = false;
bool json = false;
bool isFirst = false;
if (!STRNULL (str)) {
str = r_str_chop_ro (str);
len = strlen (str);
if (len > 0 && str[0] == 'j') {
str++;
len--;
json = true;
rad = 'J';
}
if (len > 0 && str[0] == ' ') {
str++;
len--;
}
if (strlen (str) == 0) {
str = NULL;
len = 0;
}
}
switch (rad) {
case 1:
pfx = "\"e ";
sfx = "\"";
/* fallthrough */
case 0:
r_list_foreach (cfg->nodes, iter, node) {
if (!str || !strncmp (str, node->name, len)) {
config_print_node (cfg, node, pfx, sfx, verbose, json);
}
}
break;
case 2:
r_list_foreach (cfg->nodes, iter, node) {
if (!str || !strncmp (str, node->name, len)) {
cfg->cb_printf ("%20s: %s\n", node->name,
node->desc? node->desc: "");
}
}
break;
case 'v':
verbose = true;
r_list_foreach (cfg->nodes, iter, node) {
if (!str || !strncmp (str, node->name, len)) {
config_print_node (cfg, node, pfx, sfx, verbose, json);
}
}
break;
case 'q':
r_list_foreach (cfg->nodes, iter, node) {
cfg->cb_printf ("%s\n", node->name);
}
break;
case 'J':
verbose = true;
/* fallthrough */
case 'j':
json = true;
isFirst = true;
if (verbose) {
cfg->cb_printf ("[");
} else {
cfg->cb_printf ("{");
}
r_list_foreach (cfg->nodes, iter, node) {
if (!str || !strncmp (str, node->name, len)) {
if (isFirst) {
isFirst = false;
} else {
cfg->cb_printf (",");
}
config_print_node (cfg, node, pfx, sfx, verbose, json);
}
}
if (verbose) {
cfg->cb_printf ("]\n");
} else {
cfg->cb_printf ("}\n");
}
break;
}
}
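/*
The 'rad' argument selects the output format (summary of the switch
above, plus an illustrative call):
0   plain "key = value" lines
1   radare commands: "e key = value"
2   "key: description" help listing
'v' verbose: value, flags, description and options
'q' key names only
'j' JSON object, 'J' verbose JSON array
r_config_list (cfg, "j asm.", 0); // leading 'j' requests JSON
*/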
R_API RConfigNode* r_config_node_get(RConfig *cfg, const char *name) {
if (STRNULL (name)) {
return NULL;
}
return ht_find (cfg->ht, name, NULL);
}
R_API int r_config_set_getter(RConfig *cfg, const char *key, RConfigCallback cb) {
RConfigNode *node = r_config_node_get (cfg, key);
if (node) {
node->getter = cb;
return 1;
}
return 0;
}
R_API int r_config_set_setter(RConfig *cfg, const char *key, RConfigCallback cb) {
RConfigNode *node = r_config_node_get (cfg, key);
if (node) {
node->setter = cb;
return 1;
}
return 0;
}
static bool is_true(const char *s) {
return !strcasecmp ("true", s) || !strcasecmp ("1", s);
}
static bool is_bool(const char *s) {
return !strcasecmp ("true", s) || !strcasecmp ("false", s);
}
R_API const char* r_config_get(RConfig *cfg, const char *name) {
RConfigNode *node = r_config_node_get (cfg, name);
if (node) {
if (node->getter) {
node->getter (cfg->user, node);
}
cfg->last_notfound = 0;
if (node->flags & CN_BOOL) {
return r_str_bool (is_true (node->value));
}
return node->value;
} else {
eprintf ("r_config_get: variable '%s' not found\n", name);
}
cfg->last_notfound = 1;
return NULL;
}
R_API int r_config_toggle(RConfig *cfg, const char *name) {
RConfigNode *node = r_config_node_get (cfg, name);
if (node && node->flags & CN_BOOL) {
(void)r_config_set_i (cfg, name, !node->i_value);
return true;
}
return false;
}
R_API ut64 r_config_get_i(RConfig *cfg, const char *name) {
RConfigNode *node = r_config_node_get (cfg, name);
if (node) {
if (node->getter) {
node->getter (cfg->user, node);
}
if (node->i_value || !strcmp (node->value, "false")) {
return node->i_value;
}
return (ut64) r_num_math (cfg->num, node->value);
}
return (ut64) 0LL;
}
R_API const char* r_config_node_type(RConfigNode *node) {
if (node) {
int f = node->flags;
if (f & CN_BOOL) {
return "bool";
}
if (f & CN_STR) {
return "str";
}
if (f & CN_OFFT || f & CN_INT) {
if (!strncmp (node->value, "0x", 2)) {
return "addr";
}
return "int";
}
}
return "";
}
R_API RConfigNode* r_config_set_cb(RConfig *cfg, const char *name, const char *value, RConfigCallback cb) {
RConfigNode *node = r_config_set (cfg, name, value);
if (node && (node->setter = cb)) { /* note: '=' is intentional; install cb, then invoke it */
if (!cb (cfg->user, node)) {
return NULL;
}
}
return node;
}
R_API RConfigNode* r_config_set_i_cb(RConfig *cfg, const char *name, int ivalue, RConfigCallback cb) {
RConfigNode *node = r_config_set_i (cfg, name, ivalue);
if (node && (node->setter = cb)) { /* note: '=' is intentional; install cb, then invoke it */
if (!node->setter (cfg->user, node)) {
return NULL;
}
}
return node;
}
/* TODO: reduce number of strdups here */
R_API RConfigNode* r_config_set(RConfig *cfg, const char *name, const char *value) {
RConfigNode *node = NULL;
char *ov = NULL;
ut64 oi;
if (!cfg || STRNULL (name)) {
return NULL;
}
node = r_config_node_get (cfg, name);
if (node) {
if (node->flags & CN_RO) {
eprintf ("(error: '%s' config key is read only)\n", name);
return node;
}
oi = node->i_value;
if (node->value) {
ov = strdup (node->value);
if (!ov) {
goto beach;
}
} else {
free (node->value);
node->value = strdup ("");
}
if (node->flags & CN_BOOL) {
bool b = is_true (value);
node->i_value = b? 1: 0;
char *sb = strdup (r_str_bool (b));
if (sb) {
free (node->value);
node->value = sb;
}
} else {
if (!value) {
free (node->value);
node->value = strdup ("");
node->i_value = 0;
} else {
if (node->value == value) {
goto beach;
}
char *tmp = node->value;
node->value = strdup (value);
free (tmp);
if (IS_DIGIT (*value)) {
if (strchr (value, '/')) {
node->i_value = r_num_get (cfg->num, value);
} else {
node->i_value = r_num_math (cfg->num, value);
}
} else {
node->i_value = 0;
}
node->flags |= CN_INT;
}
}
} else { // Create a new RConfigNode
oi = UT64_MAX;
if (!cfg->lock) {
node = r_config_node_new (name, value);
if (node) {
if (value && is_bool (value)) {
node->flags |= CN_BOOL;
node->i_value = is_true (value)? 1: 0;
}
if (cfg->ht) {
ht_insert (cfg->ht, node->name, node);
r_list_append (cfg->nodes, node);
cfg->n_nodes++;
}
} else {
eprintf ("r_config_set: unable to create a new RConfigNode\n");
}
} else {
eprintf ("r_config_set: variable '%s' not found\n", name);
}
}
if (node && node->setter) {
int ret = node->setter (cfg->user, node);
if (ret == false) {
if (oi != UT64_MAX) {
node->i_value = oi;
}
free (node->value);
node->value = strdup (ov? ov: "");
}
}
beach:
free (ov);
return node;
}
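/*
Usage sketch (illustrative): while the config is unlocked, setting an
unknown key creates it, and boolean-looking values are typed CN_BOOL.
r_config_set (cfg, "cfg.debug", "true"); // created as a boolean
r_config_set (cfg, "asm.bits", "64");    // i_value becomes 64
const char *v = r_config_get (cfg, "asm.bits"); // -> "64"
*/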
/* r_config_desc takes a RConfig and a name,
* r_config_node_desc takes a RConfigNode
* Both set and return node->desc */
R_API const char* r_config_desc(RConfig *cfg, const char *name, const char *desc) {
RConfigNode *node = r_config_node_get (cfg, name);
return r_config_node_desc (node, desc);
}
R_API const char* r_config_node_desc(RConfigNode *node, const char *desc) {
if (node) {
if (desc) {
free (node->desc);
node->desc = strdup (desc);
}
return node->desc;
}
return NULL;
}
R_API int r_config_rm(RConfig *cfg, const char *name) {
RConfigNode *node = r_config_node_get (cfg, name);
if (node) {
ht_delete (cfg->ht, node->name);
r_list_delete_data (cfg->nodes, node);
cfg->n_nodes--;
return true;
}
return false;
}
R_API RConfigNode* r_config_set_i(RConfig *cfg, const char *name, const ut64 i) {
char buf[128], *ov = NULL;
if (!cfg || !name) {
return NULL;
}
RConfigNode *node = r_config_node_get (cfg, name);
if (node) {
if (node->flags & CN_RO) {
node = NULL;
goto beach;
}
if (node->value) {
ov = strdup (node->value);
if (!ov) {
node = NULL;
goto beach;
}
free (node->value);
}
if (node->flags & CN_BOOL) {
node->value = strdup (r_str_bool (i));
} else {
snprintf (buf, sizeof (buf), "%" PFMT64d, i);
node->value = strdup (buf);
}
if (!node->value) {
node = NULL;
goto beach;
}
//node->flags = CN_RW | CN_INT;
node->i_value = i;
} else {
if (!cfg->lock) {
if (i < 1024) {
snprintf (buf, sizeof (buf), "%" PFMT64d "", i);
} else {
snprintf (buf, sizeof (buf), "0x%08" PFMT64x "", i);
}
node = r_config_node_new (name, buf);
if (!node) {
goto beach;
}
node->flags = CN_RW | CN_OFFT;
node->i_value = i;
if (cfg->ht) {
ht_insert (cfg->ht, node->name, node);
}
if (cfg->nodes) {
r_list_append (cfg->nodes, node);
cfg->n_nodes++;
}
} else {
eprintf ("(locked: no new keys can be created (%s))\n", name);
}
}
if (node && node->setter) {
ut64 oi = node->i_value;
int ret = node->setter (cfg->user, node);
if (!ret) {
node->i_value = oi;
free (node->value);
node->value = strdup (ov? ov: "");
}
}
beach:
free (ov);
return node;
}
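/*
Usage sketch (illustrative): when a key is created through the
numeric setter, values below 1024 are stored in decimal and larger
ones in hexadecimal form.
r_config_set_i (cfg, "asm.bits", 32);        // stored as "32"
r_config_set_i (cfg, "io.vaddr", 0x8048000); // stored as "0x08048000"
*/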
R_API int r_config_eval(RConfig *cfg, const char *str) {
char *ptr, *a, *b, name[1024];
unsigned int len;
if (!str || !cfg) {
return false;
}
len = strlen (str) + 1;
if (len >= sizeof (name)) {
return false;
}
memcpy (name, str, len);
str = r_str_chop (name);
if (!str) {
return false;
}
if (str[0] == '\0' || !strcmp (str, "help")) {
r_config_list (cfg, NULL, 0);
return false;
}
if (str[0] == '-') {
r_config_rm (cfg, str + 1);
return false;
}
ptr = strchr (str, '=');
if (ptr) {
/* set */
ptr[0] = '\0';
a = r_str_chop (name);
b = r_str_chop (ptr + 1);
(void) r_config_set (cfg, a, b);
} else {
char *foo = r_str_chop (name);
if (foo[strlen (foo) - 1] == '.') {
r_config_list (cfg, name, 0);
return false;
} else {
/* get */
const char *str = r_config_get (cfg, foo);
if (str) {
cfg->cb_printf ("%s\n",
(((int) (size_t) str) == 1)? "true": str);
}
}
}
return true;
}
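/*
r_config_eval() accepts one "e"-style expression per call
(illustrative):
r_config_eval (cfg, "asm.bits=64"); // set
r_config_eval (cfg, "asm.bits");    // get: prints the value
r_config_eval (cfg, "asm.");        // trailing dot lists the group
r_config_eval (cfg, "-asm.bits");   // leading dash removes the key
*/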
static int cmp(RConfigNode *a, RConfigNode *b) {
return strcmp (a->name, b->name);
}
R_API void r_config_lock(RConfig *cfg, int l) {
r_list_sort (cfg->nodes, (RListComparator) cmp);
cfg->lock = l;
}
R_API int r_config_readonly(RConfig *cfg, const char *key) {
RConfigNode *n = r_config_node_get (cfg, key);
if (!n) {
return false;
}
n->flags |= CN_RO;
return true;
}
static void _ht_node_free_kv(HtKv *kv) {
free (kv->key);
//we do not free kv->value because there is other reference
free (kv);
}
R_API RConfig* r_config_new(void *user) {
RConfig *cfg = R_NEW0 (RConfig);
if (!cfg) {
return NULL;
}
cfg->ht = ht_new (NULL, _ht_node_free_kv, NULL);
cfg->nodes = r_list_newf ((RListFree)r_config_node_free);
if (!cfg->nodes) {
R_FREE (cfg);
return NULL;
}
cfg->user = user;
cfg->num = NULL;
cfg->n_nodes = 0;
cfg->lock = 0;
cfg->cb_printf = (void *) printf;
return cfg;
}
R_API RConfig* r_config_clone(RConfig *cfg) {
RListIter *iter;
RConfigNode *node;
RConfig *c = r_config_new (cfg->user);
if (!c) {
return NULL;
}
r_list_foreach (cfg->nodes, iter, node) {
RConfigNode *nn = r_config_node_clone (node);
ht_insert (c->ht, node->name, nn);
r_list_append (c->nodes, nn);
c->n_nodes++;
}
c->lock = cfg->lock;
c->cb_printf = cfg->cb_printf;
return c;
}
R_API int r_config_free(RConfig *cfg) {
if (!cfg) {
return 0;
}
cfg->nodes->free = r_config_node_free; /* make sure the node destructor is set */
r_list_free (cfg->nodes);
ht_free (cfg->ht);
free (cfg);
return 0;
}
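/*
Typical lifecycle (illustrative):
RConfig *cfg = r_config_new (NULL);
r_config_set (cfg, "scr.color", "true");
r_config_lock (cfg, 1); // sorts the list and blocks new keys
// ... use the config ...
r_config_free (cfg);
*/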
R_API void r_config_visual_hit_i(RConfig *cfg, const char *name, int delta) {
RConfigNode *node = r_config_node_get (cfg, name);
if (node && (node->flags & CN_INT || node->flags & CN_OFFT)) {
(void)r_config_set_i (cfg, name, r_config_get_i (cfg, name) + delta);
}
}
R_API void r_config_bump(RConfig *cfg, const char *key) {
const char *val = r_config_get (cfg, key);
char *orig = val? strdup (val): NULL;
if (orig) {
r_config_set (cfg, key, orig);
free (orig);
}
}
R_API bool r_config_save_char(RConfigHold *h, ...) {
va_list ap;
char *key;
if (!h->list_char) {
h->list_char = r_list_newf ((RListFree) free);
if (!h->list_char) {
return false;
}
}
va_start (ap, h);
while ((key = va_arg (ap, char *))) {
RConfigHoldChar *hc = R_NEW0 (RConfigHoldChar);
if (!hc) {
continue;
}
hc->key = key;
hc->value = r_config_get (h->cfg, key);
r_list_append (h->list_char, hc);
}
va_end (ap);
return true;
}
R_API bool r_config_save_num(RConfigHold *h, ...) {
va_list ap;
char *key;
if (!h->list_num) {
h->list_num = r_list_newf ((RListFree) free);
if (!h->list_num) {
return false;
}
}
va_start (ap, h);
while ((key = va_arg (ap, char *))) {
RConfigHoldNum *hc = R_NEW0 (RConfigHoldNum);
if (!hc) {
continue;
}
hc->key = key;
hc->value = r_config_get_i (h->cfg, key);
r_list_append (h->list_num, hc);
}
va_end (ap);
return true;
}
R_API RConfigHold* r_config_hold_new(RConfig *cfg) {
if (cfg) {
RConfigHold *hold = R_NEW0 (RConfigHold);
if (hold) {
hold->cfg = cfg;
return hold;
}
}
return NULL;
}
R_API void r_config_restore(RConfigHold *h) {
RListIter *iter;
RConfigHoldChar *hchar;
RConfigHoldNum *hnum;
if (h) {
r_list_foreach (h->list_num, iter, hnum) {
(void)r_config_set_i (h->cfg, hnum->key, hnum->value);
}
r_list_foreach (h->list_char, iter, hchar) {
(void)r_config_set (h->cfg, hchar->key, hchar->value);
}
}
}
R_API void r_config_hold_free(RConfigHold *h) {
if (h) {
r_list_free (h->list_num);
r_list_free (h->list_char);
R_FREE (h);
}
}
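/*
Save/restore sketch (illustrative): temporarily override settings,
then roll them back. The key list must be NULL-terminated.
RConfigHold *h = r_config_hold_new (cfg);
r_config_save_num (h, "asm.bits", "asm.bytes", NULL);
r_config_set_i (cfg, "asm.bits", 16);
// ... work with the temporary values ...
r_config_restore (h);
r_config_hold_free (h);
*/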
| ./CrossVul/dataset_final_sorted/CWE-416/c/good_3399_0 |
crossvul-cpp_data_bad_4902_0 | /*
* DBD::mysql - DBI driver for the mysql database
*
* Copyright (c) 2004-2014 Patrick Galbraith
* Copyright (c) 2013-2014 Michiel Beijen
* Copyright (c) 2004-2007 Alexey Stroganov
* Copyright (c) 2003-2005 Rudolf Lippan
* Copyright (c) 1997-2003 Jochen Wiedmann
*
* You may distribute this under the terms of either the GNU General Public
* License or the Artistic License, as specified in the Perl README file.
*/
#ifdef WIN32
#include "windows.h"
#include "winsock.h"
#endif
#include "dbdimp.h"
#if defined(WIN32) && defined(WORD)
#undef WORD
typedef short WORD;
#endif
#ifdef WIN32
#define MIN min
#else
#ifndef MIN
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#endif
#endif
#if MYSQL_ASYNC
# include <poll.h>
# include <errno.h>
# define ASYNC_CHECK_RETURN(h, value)\
if(imp_dbh->async_query_in_flight) {\
do_error(h, 2000, "Calling a synchronous function on an asynchronous handle", "HY000");\
return (value);\
}
#else
# define ASYNC_CHECK_RETURN(h, value)
#endif
static int parse_number(char *string, STRLEN len, char **end);
DBISTATE_DECLARE;
typedef struct sql_type_info_s
{
const char *type_name;
int data_type;
int column_size;
const char *literal_prefix;
const char *literal_suffix;
const char *create_params;
int nullable;
int case_sensitive;
int searchable;
int unsigned_attribute;
int fixed_prec_scale;
int auto_unique_value;
const char *local_type_name;
int minimum_scale;
int maximum_scale;
int num_prec_radix;
int sql_datatype;
int sql_datetime_sub;
int interval_precision;
int native_type;
int is_num;
} sql_type_info_t;
/*
This function manually counts the number of placeholders in an SQL statement,
used for emulated prepared statements (servers older than 4.1.3)
*/
static int
count_params(imp_xxh_t *imp_xxh, pTHX_ char *statement, bool bind_comment_placeholders)
{
bool comment_end= false;
char* ptr= statement;
int num_params= 0;
int comment_length= 0;
char c;
if (DBIc_DBISTATE(imp_xxh)->debug >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh), ">count_params statement %s\n", statement);
while ( (c = *ptr++) )
{
switch (c) {
/* so, this is a -- comment, so let's burn up characters */
case '-':
{
if (bind_comment_placeholders)
{
c = *ptr++;
break;
}
else
{
comment_length= 1;
/* let's see if the next one is a dash */
c = *ptr++;
if (c == '-') {
/* if two dashes, ignore everything until newline */
while ((c = *ptr))
{
if (DBIc_DBISTATE(imp_xxh)->debug >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh), "%c\n", c);
ptr++;
comment_length++;
if (c == '\n')
{
comment_end= true;
break;
}
}
/*
if not comment_end, the comment never ended and we need to iterate
back to the beginning of where we started and let the database
handle whatever is in the statement
*/
if (! comment_end)
ptr-= comment_length;
}
/* otherwise, only one dash/hyphen, backtrack by one */
else
ptr--;
break;
}
}
/* c-type comments */
case '/':
{
if (bind_comment_placeholders)
{
c = *ptr++;
break;
}
else
{
c = *ptr++;
/* let's check if the next one is an asterisk */
if (c == '*')
{
comment_length= 0;
comment_end= false;
/* ignore everything until closing comment */
while ((c= *ptr))
{
ptr++;
comment_length++;
if (c == '*')
{
c = *ptr++;
/* alas, end of comment */
if (c == '/')
{
comment_end= true;
break;
}
/*
nope, just an asterisk, not so fast, not
end of comment, go back one
*/
else
ptr--;
}
}
/*
if the end of the comment was never found, we have
to backtrack to wherever we first started skipping
over the possible comment.
This means we will pass the statement to the database
to see its own fate and issue the error
*/
if (!comment_end)
ptr -= comment_length;
}
else
ptr--;
break;
}
}
case '`':
case '"':
case '\'':
/* Skip string */
{
char end_token = c;
while ((c = *ptr) && c != end_token)
{
if (c == '\\')
if (! *(++ptr))
continue;
++ptr;
}
if (c)
++ptr;
break;
}
case '?':
++num_params;
break;
default:
break;
}
}
return num_params;
}
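/*
Illustrative inputs/outputs for count_params() with
bind_comment_placeholders == false (not from the original source):
"SELECT * FROM t WHERE a=? AND b=?"      -> 2
"SELECT '?' FROM t WHERE a=?"            -> 1 (quoted '?' is skipped)
"SELECT 1 -- a ? here\nFROM t WHERE b=?" -> 1 (comment is skipped)
*/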
/*
allocate memory in statement handle per number of placeholders
*/
static imp_sth_ph_t *alloc_param(int num_params)
{
imp_sth_ph_t *params;
if (num_params)
Newz(908, params, (unsigned int) num_params, imp_sth_ph_t);
else
params= NULL;
return params;
}
#if MYSQL_VERSION_ID >= SERVER_PREPARE_VERSION
/*
allocate memory in MYSQL_BIND bind structure per
number of placeholders
*/
static MYSQL_BIND *alloc_bind(int num_params)
{
MYSQL_BIND *bind;
if (num_params)
Newz(908, bind, (unsigned int) num_params, MYSQL_BIND);
else
bind= NULL;
return bind;
}
/*
allocate memory in fbind imp_sth_phb_t structure per
number of placeholders
*/
static imp_sth_phb_t *alloc_fbind(int num_params)
{
imp_sth_phb_t *fbind;
if (num_params)
Newz(908, fbind, (unsigned int) num_params, imp_sth_phb_t);
else
fbind= NULL;
return fbind;
}
/*
alloc memory for imp_sth_fbh_t fbuffer per number of fields
*/
static imp_sth_fbh_t *alloc_fbuffer(int num_fields)
{
imp_sth_fbh_t *fbh;
if (num_fields)
Newz(908, fbh, (unsigned int) num_fields, imp_sth_fbh_t);
else
fbh= NULL;
return fbh;
}
/*
free MYSQL_BIND bind struct
*/
static void free_bind(MYSQL_BIND *bind)
{
if (bind)
Safefree(bind);
}
/*
free imp_sth_phb_t fbind structure
*/
static void free_fbind(imp_sth_phb_t *fbind)
{
if (fbind)
Safefree(fbind);
}
/*
free imp_sth_fbh_t fbh structure
*/
static void free_fbuffer(imp_sth_fbh_t *fbh)
{
if (fbh)
Safefree(fbh);
}
#endif
/*
free statement param structure per num_params
*/
static void
free_param(pTHX_ imp_sth_ph_t *params, int num_params)
{
if (params)
{
int i;
for (i= 0; i < num_params; i++)
{
imp_sth_ph_t *ph= params+i;
if (ph->value)
{
(void) SvREFCNT_dec(ph->value);
ph->value= NULL;
}
}
Safefree(params);
}
}
/*
Convert a MySQL type to a type that perl can handle
NOTE: In the future we may want to return a struct with a lot of
information for each type
*/
static enum enum_field_types mysql_to_perl_type(enum enum_field_types type)
{
enum enum_field_types enum_type;
switch (type) {
case MYSQL_TYPE_DOUBLE:
case MYSQL_TYPE_FLOAT:
enum_type= MYSQL_TYPE_DOUBLE;
break;
case MYSQL_TYPE_SHORT:
case MYSQL_TYPE_TINY:
case MYSQL_TYPE_LONG:
case MYSQL_TYPE_INT24:
case MYSQL_TYPE_YEAR:
#if IVSIZE >= 8
case MYSQL_TYPE_LONGLONG:
enum_type= MYSQL_TYPE_LONGLONG;
#else
enum_type= MYSQL_TYPE_LONG;
#endif
break;
#if MYSQL_VERSION_ID > NEW_DATATYPE_VERSION
case MYSQL_TYPE_BIT:
enum_type= MYSQL_TYPE_BIT;
break;
#endif
#if MYSQL_VERSION_ID > NEW_DATATYPE_VERSION
case MYSQL_TYPE_NEWDECIMAL:
#endif
case MYSQL_TYPE_DECIMAL:
enum_type= MYSQL_TYPE_DECIMAL;
break;
#if IVSIZE < 8
case MYSQL_TYPE_LONGLONG:
#endif
case MYSQL_TYPE_DATE:
case MYSQL_TYPE_TIME:
case MYSQL_TYPE_DATETIME:
case MYSQL_TYPE_NEWDATE:
case MYSQL_TYPE_TIMESTAMP:
case MYSQL_TYPE_VAR_STRING:
#if MYSQL_VERSION_ID > NEW_DATATYPE_VERSION
case MYSQL_TYPE_VARCHAR:
#endif
case MYSQL_TYPE_STRING:
enum_type= MYSQL_TYPE_STRING;
break;
#if MYSQL_VERSION_ID > GEO_DATATYPE_VERSION
case MYSQL_TYPE_GEOMETRY:
#endif
case MYSQL_TYPE_BLOB:
case MYSQL_TYPE_TINY_BLOB:
enum_type= MYSQL_TYPE_BLOB;
break;
default:
enum_type= MYSQL_TYPE_STRING; /* MySQL can handle all types as strings */
}
return(enum_type);
}
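/*
Illustrative mappings (sketch, derived from the switch above):
MYSQL_TYPE_FLOAT    -> MYSQL_TYPE_DOUBLE
MYSQL_TYPE_DATETIME -> MYSQL_TYPE_STRING
MYSQL_TYPE_GEOMETRY -> MYSQL_TYPE_BLOB (when compiled in)
Anything unrecognised falls back to MYSQL_TYPE_STRING, since MySQL
can always deliver a value as a string.
*/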
#if defined(DBD_MYSQL_EMBEDDED)
/*
count embedded options
*/
int count_embedded_options(char *st)
{
int rc;
char c;
char *ptr;
ptr= st;
rc= 0;
if (st)
{
while ((c= *ptr++))
{
if (c == ',')
rc++;
}
rc++;
}
return rc;
}
/*
Free embedded options
*/
int free_embedded_options(char ** options_list, int options_count)
{
int i;
for (i= 0; i < options_count; i++)
{
if (options_list[i])
free(options_list[i]);
}
free(options_list);
return 1;
}
/*
Print out embedded option settings
*/
int print_embedded_options(PerlIO *stream, char ** options_list, int options_count)
{
int i;
for (i=0; i<options_count; i++)
{
if (options_list[i])
PerlIO_printf(stream,
"Embedded server, parameter[%d]=%s\n",
i, options_list[i]);
}
return 1;
}
/*
Split a comma-separated option string into a newly allocated list
*/
char **fill_out_embedded_options(PerlIO *stream,
char *options,
int options_type,
int slen, int cnt)
{
int ind, len;
char c;
char *ptr;
char **options_list= NULL;
if (!(options_list= (char **) calloc(cnt, sizeof(char *))))
{
PerlIO_printf(stream,
"Initialize embedded server. Out of memory \n");
return NULL;
}
ptr= options;
ind= 0;
if (options_type == 0)
{
/* server_groups list must be NULL terminated (last slot) */
options_list[cnt - 1]= (char *) NULL;
}
if (options_type == 1)
{
/* first item in server_options list is ignored. fill it with \0 */
if (!(options_list[0]= calloc(1,sizeof(char))))
return NULL;
ind++;
}
while ((c= *ptr++))
{
slen--;
if (c == ',' || !slen)
{
len= ptr - options;
if (c == ',')
len--;
if (!(options_list[ind]=calloc(len+1,sizeof(char))))
return NULL;
strncpy(options_list[ind], options, len);
ind++;
options= ptr;
}
}
return options_list;
}
#endif
/*
constructs an SQL statement previously prepared with
actual values replacing placeholders
*/
static char *parse_params(
imp_xxh_t *imp_xxh,
pTHX_ MYSQL *sock,
char *statement,
STRLEN *slen_ptr,
imp_sth_ph_t* params,
int num_params,
bool bind_type_guessing,
bool bind_comment_placeholders)
{
bool comment_end= false;
char *salloc, *statement_ptr;
char *statement_ptr_end, *ptr, *valbuf;
char *cp, *end;
int alen, i;
int slen= *slen_ptr;
int limit_flag= 0;
int comment_length=0;
STRLEN vallen;
imp_sth_ph_t *ph;
if (DBIc_DBISTATE(imp_xxh)->debug >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh), ">parse_params statement %s\n", statement);
if (num_params == 0)
return NULL;
while (isspace((unsigned char) *statement))
{
++statement;
--slen;
}
/* Calculate the number of bytes being allocated for the statement */
alen= slen;
for (i= 0, ph= params; i < num_params; i++, ph++)
{
int defined= 0;
if (ph->value)
{
if (SvMAGICAL(ph->value))
mg_get(ph->value);
if (SvOK(ph->value))
defined=1;
}
if (!defined)
alen+= 3; /* Erase '?', insert 'NULL' */
else
{
valbuf= SvPV(ph->value, vallen);
alen+= 2+vallen+1;
/* this will most likely not happen since line 214 */
/* of mysql.xs hardcodes all types to SQL_VARCHAR */
if (!ph->type)
{
if (bind_type_guessing)
{
valbuf= SvPV(ph->value, vallen);
ph->type= SQL_INTEGER;
if (parse_number(valbuf, vallen, &end) != 0)
{
ph->type= SQL_VARCHAR;
}
}
else
ph->type= SQL_VARCHAR;
}
}
}
/* Allocate memory; the *2 leaves headroom for quoting and escaping,
since mysql_real_escape_string() can at most double a value's length */
New(908, salloc, alen*2, char);
ptr= salloc;
i= 0;
/* Now create the statement string; compare count_params above */
statement_ptr_end= (statement_ptr= statement)+ slen;
while (statement_ptr < statement_ptr_end)
{
/* LIMIT should be the last part of the query, in most cases */
if (! limit_flag)
{
/*
it would be good to be able to handle any number of cases and orders
*/
if ((*statement_ptr == 'l' || *statement_ptr == 'L') &&
(!strncmp(statement_ptr+1, "imit ?", 6) ||
!strncmp(statement_ptr+1, "IMIT ?", 6)))
{
limit_flag = 1;
}
}
switch (*statement_ptr)
{
/* comment detection. Anything goes in a comment */
case '-':
{
if (bind_comment_placeholders)
{
*ptr++= *statement_ptr++;
break;
}
else
{
comment_length= 1;
comment_end= false;
*ptr++ = *statement_ptr++;
if (*statement_ptr == '-')
{
/* ignore everything until newline or end of string */
while (*statement_ptr)
{
comment_length++;
*ptr++ = *statement_ptr++;
if (!*statement_ptr || *statement_ptr == '\n')
{
comment_end= true;
break;
}
}
/* if not end of comment, go back to where we started, no end found */
if (! comment_end)
{
statement_ptr -= comment_length;
ptr -= comment_length;
}
}
break;
}
}
/* c-type comments */
case '/':
{
if (bind_comment_placeholders)
{
*ptr++= *statement_ptr++;
break;
}
else
{
comment_length= 1;
comment_end= false;
*ptr++ = *statement_ptr++;
if (*statement_ptr == '*')
{
/* consume everything until the closing comment marker */
while (*statement_ptr)
{
*ptr++ = *statement_ptr++;
comment_length++;
if (!strncmp(statement_ptr, "*/", 2))
{
comment_length += 2;
comment_end= true;
break;
}
}
/* Go back to where started if comment end not found */
if (! comment_end)
{
statement_ptr -= comment_length;
ptr -= comment_length;
}
}
break;
}
}
case '`':
case '\'':
case '"':
/* Skip string */
{
char endToken = *statement_ptr++;
*ptr++ = endToken;
while (statement_ptr != statement_ptr_end &&
*statement_ptr != endToken)
{
if (*statement_ptr == '\\')
{
*ptr++ = *statement_ptr++;
if (statement_ptr == statement_ptr_end)
break;
}
*ptr++= *statement_ptr++;
}
if (statement_ptr != statement_ptr_end)
*ptr++= *statement_ptr++;
}
break;
case '?':
/* Insert parameter */
statement_ptr++;
if (i >= num_params)
{
break;
}
ph = params+ (i++);
if (!ph->value || !SvOK(ph->value))
{
*ptr++ = 'N';
*ptr++ = 'U';
*ptr++ = 'L';
*ptr++ = 'L';
}
else
{
int is_num = FALSE;
valbuf= SvPV(ph->value, vallen);
if (valbuf)
{
switch (ph->type)
{
case SQL_NUMERIC:
case SQL_DECIMAL:
case SQL_INTEGER:
case SQL_SMALLINT:
case SQL_FLOAT:
case SQL_REAL:
case SQL_DOUBLE:
case SQL_BIGINT:
case SQL_TINYINT:
is_num = TRUE;
break;
}
/* (note this sets *end, which we use if is_num) */
if ( parse_number(valbuf, vallen, &end) != 0 && is_num)
{
if (bind_type_guessing) {
/* .. not a number, so apparently we guessed wrong */
is_num = 0;
ph->type = SQL_VARCHAR;
}
}
/* we're at the end of the query, so any placeholders if */
/* after a LIMIT clause will be numbers and should not be quoted */
if (limit_flag == 1)
is_num = TRUE;
if (!is_num)
{
*ptr++ = '\'';
ptr += mysql_real_escape_string(sock, ptr, valbuf, vallen);
*ptr++ = '\'';
}
else
{
for (cp= valbuf; cp < end; cp++)
*ptr++= *cp;
}
}
}
break;
/* in case this is a nested LIMIT */
case ')':
limit_flag = 0;
*ptr++ = *statement_ptr++;
break;
default:
*ptr++ = *statement_ptr++;
break;
}
}
*slen_ptr = ptr - salloc;
*ptr++ = '\0';
return(salloc);
}
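/*
Illustrative transformation performed by parse_params() on the
emulated (client-side) prepare path; names and values are made up:
statement: "SELECT * FROM t WHERE name=? AND id=?"
params:    "O'Brien" (SQL_VARCHAR), "42" (SQL_INTEGER)
result:    "SELECT * FROM t WHERE name='O\'Brien' AND id=42"
String parameters are quoted and passed through
mysql_real_escape_string(); numeric parameters (and any placeholder
after a LIMIT clause) are interpolated unquoted.
*/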
int bind_param(imp_sth_ph_t *ph, SV *value, IV sql_type)
{
dTHX;
if (ph->value)
{
if (SvMAGICAL(ph->value))
mg_get(ph->value);
(void) SvREFCNT_dec(ph->value);
}
ph->value= newSVsv(value);
if (sql_type)
ph->type = sql_type;
return TRUE;
}
static const sql_type_info_t SQL_GET_TYPE_INFO_values[]= {
{ "varchar", SQL_VARCHAR, 255, "'", "'", "max length",
1, 0, 3, 0, 0, 0, "variable length string",
0, 0, 0,
SQL_VARCHAR, 0, 0,
#if MYSQL_VERSION_ID < MYSQL_VERSION_5_0
FIELD_TYPE_VAR_STRING, 0,
#else
MYSQL_TYPE_STRING, 0,
#endif
},
{ "decimal", SQL_DECIMAL, 15, NULL, NULL, "precision,scale",
1, 0, 3, 0, 0, 0, "double",
0, 6, 2,
SQL_DECIMAL, 0, 0,
#if MYSQL_VERSION_ID < MYSQL_VERSION_5_0
FIELD_TYPE_DECIMAL, 1
#else
MYSQL_TYPE_DECIMAL, 1
#endif
},
{ "tinyint", SQL_TINYINT, 3, NULL, NULL, NULL,
1, 0, 3, 0, 0, 0, "Tiny integer",
0, 0, 10,
SQL_TINYINT, 0, 0,
#if MYSQL_VERSION_ID < MYSQL_VERSION_5_0
FIELD_TYPE_TINY, 1
#else
MYSQL_TYPE_TINY, 1
#endif
},
{ "smallint", SQL_SMALLINT, 5, NULL, NULL, NULL,
1, 0, 3, 0, 0, 0, "Short integer",
0, 0, 10,
SQL_SMALLINT, 0, 0,
#if MYSQL_VERSION_ID < MYSQL_VERSION_5_0
FIELD_TYPE_SHORT, 1
#else
MYSQL_TYPE_SHORT, 1
#endif
},
{ "integer", SQL_INTEGER, 10, NULL, NULL, NULL,
1, 0, 3, 0, 0, 0, "integer",
0, 0, 10,
SQL_INTEGER, 0, 0,
#if MYSQL_VERSION_ID < MYSQL_VERSION_5_0
FIELD_TYPE_LONG, 1
#else
MYSQL_TYPE_LONG, 1
#endif
},
{ "float", SQL_REAL, 7, NULL, NULL, NULL,
1, 0, 0, 0, 0, 0, "float",
0, 2, 10,
SQL_FLOAT, 0, 0,
#if MYSQL_VERSION_ID < MYSQL_VERSION_5_0
FIELD_TYPE_FLOAT, 1
#else
MYSQL_TYPE_FLOAT, 1
#endif
},
{ "double", SQL_FLOAT, 15, NULL, NULL, NULL,
1, 0, 3, 0, 0, 0, "double",
0, 4, 2,
SQL_FLOAT, 0, 0,
#if MYSQL_VERSION_ID < MYSQL_VERSION_5_0
FIELD_TYPE_DOUBLE, 1
#else
MYSQL_TYPE_DOUBLE, 1
#endif
},
{ "double", SQL_DOUBLE, 15, NULL, NULL, NULL,
1, 0, 3, 0, 0, 0, "double",
0, 4, 10,
SQL_DOUBLE, 0, 0,
#if MYSQL_VERSION_ID < MYSQL_VERSION_5_0
FIELD_TYPE_DOUBLE, 1
#else
MYSQL_TYPE_DOUBLE, 1
#endif
},
/*
FIELD_TYPE_NULL ?
*/
{ "timestamp", SQL_TIMESTAMP, 14, "'", "'", NULL,
0, 0, 3, 0, 0, 0, "timestamp",
0, 0, 0,
SQL_TIMESTAMP, 0, 0,
#if MYSQL_VERSION_ID < MYSQL_VERSION_5_0
FIELD_TYPE_TIMESTAMP, 0
#else
MYSQL_TYPE_TIMESTAMP, 0
#endif
},
{ "bigint", SQL_BIGINT, 19, NULL, NULL, NULL,
1, 0, 3, 0, 0, 0, "Longlong integer",
0, 0, 10,
SQL_BIGINT, 0, 0,
#if MYSQL_VERSION_ID < MYSQL_VERSION_5_0
FIELD_TYPE_LONGLONG, 1
#else
MYSQL_TYPE_LONGLONG, 1
#endif
},
{ "mediumint", SQL_INTEGER, 8, NULL, NULL, NULL,
1, 0, 3, 0, 0, 0, "Medium integer",
0, 0, 10,
SQL_INTEGER, 0, 0,
#if MYSQL_VERSION_ID < MYSQL_VERSION_5_0
FIELD_TYPE_INT24, 1
#else
MYSQL_TYPE_INT24, 1
#endif
},
{ "date", SQL_DATE, 10, "'", "'", NULL,
1, 0, 3, 0, 0, 0, "date",
0, 0, 0,
SQL_DATE, 0, 0,
#if MYSQL_VERSION_ID < MYSQL_VERSION_5_0
FIELD_TYPE_DATE, 0
#else
MYSQL_TYPE_DATE, 0
#endif
},
{ "time", SQL_TIME, 6, "'", "'", NULL,
1, 0, 3, 0, 0, 0, "time",
0, 0, 0,
SQL_TIME, 0, 0,
#if MYSQL_VERSION_ID < MYSQL_VERSION_5_0
FIELD_TYPE_TIME, 0
#else
MYSQL_TYPE_TIME, 0
#endif
},
{ "datetime", SQL_TIMESTAMP, 21, "'", "'", NULL,
1, 0, 3, 0, 0, 0, "datetime",
0, 0, 0,
SQL_TIMESTAMP, 0, 0,
#if MYSQL_VERSION_ID < MYSQL_VERSION_5_0
FIELD_TYPE_DATETIME, 0
#else
MYSQL_TYPE_DATETIME, 0
#endif
},
{ "year", SQL_SMALLINT, 4, NULL, NULL, NULL,
1, 0, 3, 0, 0, 0, "year",
0, 0, 10,
SQL_SMALLINT, 0, 0,
#if MYSQL_VERSION_ID < MYSQL_VERSION_5_0
FIELD_TYPE_YEAR, 0
#else
MYSQL_TYPE_YEAR, 0
#endif
},
{ "date", SQL_DATE, 10, "'", "'", NULL,
1, 0, 3, 0, 0, 0, "date",
0, 0, 0,
SQL_DATE, 0, 0,
#if MYSQL_VERSION_ID < MYSQL_VERSION_5_0
FIELD_TYPE_NEWDATE, 0
#else
MYSQL_TYPE_NEWDATE, 0
#endif
},
{ "enum", SQL_VARCHAR, 255, "'", "'", NULL,
1, 0, 1, 0, 0, 0, "enum(value1,value2,value3...)",
0, 0, 0,
0, 0, 0,
#if MYSQL_VERSION_ID < MYSQL_VERSION_5_0
FIELD_TYPE_ENUM, 0
#else
MYSQL_TYPE_ENUM, 0
#endif
},
{ "set", SQL_VARCHAR, 255, "'", "'", NULL,
1, 0, 1, 0, 0, 0, "set(value1,value2,value3...)",
0, 0, 0,
0, 0, 0,
#if MYSQL_VERSION_ID < MYSQL_VERSION_5_0
FIELD_TYPE_SET, 0
#else
MYSQL_TYPE_SET, 0
#endif
},
{ "blob", SQL_LONGVARBINARY, 65535, "'", "'", NULL,
1, 0, 3, 0, 0, 0, "binary large object (0-65535)",
0, 0, 0,
SQL_LONGVARBINARY, 0, 0,
#if MYSQL_VERSION_ID < MYSQL_VERSION_5_0
FIELD_TYPE_BLOB, 0
#else
MYSQL_TYPE_BLOB, 0
#endif
},
{ "tinyblob", SQL_VARBINARY, 255, "'", "'", NULL,
1, 0, 3, 0, 0, 0, "binary large object (0-255) ",
0, 0, 0,
SQL_VARBINARY, 0, 0,
#if MYSQL_VERSION_ID < MYSQL_VERSION_5_0
FIELD_TYPE_TINY_BLOB, 0
#else
MYSQL_TYPE_TINY_BLOB, 0
#endif
},
{ "mediumblob", SQL_LONGVARBINARY, 16777215, "'", "'", NULL,
1, 0, 3, 0, 0, 0, "binary large object",
0, 0, 0,
SQL_LONGVARBINARY, 0, 0,
#if MYSQL_VERSION_ID < MYSQL_VERSION_5_0
FIELD_TYPE_MEDIUM_BLOB, 0
#else
MYSQL_TYPE_MEDIUM_BLOB, 0
#endif
},
{ "longblob", SQL_LONGVARBINARY, 2147483647, "'", "'", NULL,
1, 0, 3, 0, 0, 0, "binary large object, use mediumblob instead",
0, 0, 0,
SQL_LONGVARBINARY, 0, 0,
#if MYSQL_VERSION_ID < MYSQL_VERSION_5_0
FIELD_TYPE_LONG_BLOB, 0
#else
MYSQL_TYPE_LONG_BLOB, 0
#endif
},
{ "char", SQL_CHAR, 255, "'", "'", "max length",
1, 0, 3, 0, 0, 0, "string",
0, 0, 0,
SQL_CHAR, 0, 0,
#if MYSQL_VERSION_ID < MYSQL_VERSION_5_0
FIELD_TYPE_STRING, 0
#else
MYSQL_TYPE_STRING, 0
#endif
},
{ "decimal", SQL_NUMERIC, 15, NULL, NULL, "precision,scale",
1, 0, 3, 0, 0, 0, "double",
0, 6, 2,
SQL_NUMERIC, 0, 0,
#if MYSQL_VERSION_ID < MYSQL_VERSION_5_0
FIELD_TYPE_DECIMAL, 1
#else
MYSQL_TYPE_DECIMAL, 1
#endif
},
{ "tinyint unsigned", SQL_TINYINT, 3, NULL, NULL, NULL,
1, 0, 3, 1, 0, 0, "Tiny integer unsigned",
0, 0, 10,
SQL_TINYINT, 0, 0,
#if MYSQL_VERSION_ID < MYSQL_VERSION_5_0
FIELD_TYPE_TINY, 1
#else
MYSQL_TYPE_TINY, 1
#endif
},
{ "smallint unsigned", SQL_SMALLINT, 5, NULL, NULL, NULL,
1, 0, 3, 1, 0, 0, "Short integer unsigned",
0, 0, 10,
SQL_SMALLINT, 0, 0,
#if MYSQL_VERSION_ID < MYSQL_VERSION_5_0
FIELD_TYPE_SHORT, 1
#else
MYSQL_TYPE_SHORT, 1
#endif
},
{ "mediumint unsigned", SQL_INTEGER, 8, NULL, NULL, NULL,
1, 0, 3, 1, 0, 0, "Medium integer unsigned",
0, 0, 10,
SQL_INTEGER, 0, 0,
#if MYSQL_VERSION_ID < MYSQL_VERSION_5_0
FIELD_TYPE_INT24, 1
#else
MYSQL_TYPE_INT24, 1
#endif
},
{ "int unsigned", SQL_INTEGER, 10, NULL, NULL, NULL,
1, 0, 3, 1, 0, 0, "integer unsigned",
0, 0, 10,
SQL_INTEGER, 0, 0,
#if MYSQL_VERSION_ID < MYSQL_VERSION_5_0
FIELD_TYPE_LONG, 1
#else
MYSQL_TYPE_LONG, 1
#endif
},
{ "int", SQL_INTEGER, 10, NULL, NULL, NULL,
1, 0, 3, 0, 0, 0, "integer",
0, 0, 10,
SQL_INTEGER, 0, 0,
#if MYSQL_VERSION_ID < MYSQL_VERSION_5_0
FIELD_TYPE_LONG, 1
#else
MYSQL_TYPE_LONG, 1
#endif
},
{ "integer unsigned", SQL_INTEGER, 10, NULL, NULL, NULL,
1, 0, 3, 1, 0, 0, "integer",
0, 0, 10,
SQL_INTEGER, 0, 0,
#if MYSQL_VERSION_ID < MYSQL_VERSION_5_0
FIELD_TYPE_LONG, 1
#else
MYSQL_TYPE_LONG, 1
#endif
},
{ "bigint unsigned", SQL_BIGINT, 20, NULL, NULL, NULL,
1, 0, 3, 1, 0, 0, "Longlong integer unsigned",
0, 0, 10,
SQL_BIGINT, 0, 0,
#if MYSQL_VERSION_ID < MYSQL_VERSION_5_0
FIELD_TYPE_LONGLONG, 1
#else
MYSQL_TYPE_LONGLONG, 1
#endif
},
{ "text", SQL_LONGVARCHAR, 65535, "'", "'", NULL,
1, 0, 3, 0, 0, 0, "large text object (0-65535)",
0, 0, 0,
SQL_LONGVARCHAR, 0, 0,
#if MYSQL_VERSION_ID < MYSQL_VERSION_5_0
FIELD_TYPE_BLOB, 0
#else
MYSQL_TYPE_BLOB, 0
#endif
},
{ "mediumtext", SQL_LONGVARCHAR, 16777215, "'", "'", NULL,
1, 0, 3, 0, 0, 0, "large text object",
0, 0, 0,
SQL_LONGVARCHAR, 0, 0,
#if MYSQL_VERSION_ID < MYSQL_VERSION_5_0
FIELD_TYPE_MEDIUM_BLOB, 0
#else
MYSQL_TYPE_MEDIUM_BLOB, 0
#endif
},
{ "mediumint unsigned auto_increment", SQL_INTEGER, 8, NULL, NULL, NULL,
0, 0, 3, 1, 0, 1, "Medium integer unsigned auto_increment", 0, 0, 10,
#if MYSQL_VERSION_ID < MYSQL_VERSION_5_0
SQL_INTEGER, 0, 0, FIELD_TYPE_INT24, 1,
#else
SQL_INTEGER, 0, 0, MYSQL_TYPE_INT24, 1,
#endif
},
{ "tinyint unsigned auto_increment", SQL_TINYINT, 3, NULL, NULL, NULL,
0, 0, 3, 1, 0, 1, "tinyint unsigned auto_increment", 0, 0, 10,
#if MYSQL_VERSION_ID < MYSQL_VERSION_5_0
SQL_TINYINT, 0, 0, FIELD_TYPE_TINY, 1
#else
SQL_TINYINT, 0, 0, MYSQL_TYPE_TINY, 1
#endif
},
{ "smallint auto_increment", SQL_SMALLINT, 5, NULL, NULL, NULL,
0, 0, 3, 0, 0, 1, "smallint auto_increment", 0, 0, 10,
#if MYSQL_VERSION_ID < MYSQL_VERSION_5_0
SQL_SMALLINT, 0, 0, FIELD_TYPE_SHORT, 1
#else
SQL_SMALLINT, 0, 0, MYSQL_TYPE_SHORT, 1
#endif
},
{ "int unsigned auto_increment", SQL_INTEGER, 10, NULL, NULL, NULL,
0, 0, 3, 1, 0, 1, "integer unsigned auto_increment", 0, 0, 10,
#if MYSQL_VERSION_ID < MYSQL_VERSION_5_0
SQL_INTEGER, 0, 0, FIELD_TYPE_LONG, 1
#else
SQL_INTEGER, 0, 0, MYSQL_TYPE_LONG, 1
#endif
},
{ "mediumint", SQL_INTEGER, 7, NULL, NULL, NULL,
1, 0, 3, 0, 0, 0, "Medium integer", 0, 0, 10,
#if MYSQL_VERSION_ID < MYSQL_VERSION_5_0
SQL_INTEGER, 0, 0, FIELD_TYPE_INT24, 1
#else
SQL_INTEGER, 0, 0, MYSQL_TYPE_INT24, 1
#endif
},
{ "bit", SQL_BIT, 1, NULL, NULL, NULL,
1, 0, 3, 0, 0, 0, "char(1)", 0, 0, 0,
#if MYSQL_VERSION_ID < MYSQL_VERSION_5_0
SQL_BIT, 0, 0, FIELD_TYPE_TINY, 0
#else
SQL_BIT, 0, 0, MYSQL_TYPE_TINY, 0
#endif
},
{ "numeric", SQL_NUMERIC, 19, NULL, NULL, "precision,scale",
1, 0, 3, 0, 0, 0, "numeric", 0, 19, 10,
#if MYSQL_VERSION_ID < MYSQL_VERSION_5_0
SQL_NUMERIC, 0, 0, FIELD_TYPE_DECIMAL, 1,
#else
SQL_NUMERIC, 0, 0, MYSQL_TYPE_DECIMAL, 1,
#endif
},
{ "integer unsigned auto_increment", SQL_INTEGER, 10, NULL, NULL, NULL,
0, 0, 3, 1, 0, 1, "integer unsigned auto_increment", 0, 0, 10,
#if MYSQL_VERSION_ID < MYSQL_VERSION_5_0
SQL_INTEGER, 0, 0, FIELD_TYPE_LONG, 1,
#else
SQL_INTEGER, 0, 0, MYSQL_TYPE_LONG, 1,
#endif
},
{ "mediumint unsigned", SQL_INTEGER, 8, NULL, NULL, NULL,
1, 0, 3, 1, 0, 0, "Medium integer unsigned", 0, 0, 10,
#if MYSQL_VERSION_ID < MYSQL_VERSION_5_0
SQL_INTEGER, 0, 0, FIELD_TYPE_INT24, 1
#else
SQL_INTEGER, 0, 0, MYSQL_TYPE_INT24, 1
#endif
},
{ "smallint unsigned auto_increment", SQL_SMALLINT, 5, NULL, NULL, NULL,
0, 0, 3, 1, 0, 1, "smallint unsigned auto_increment", 0, 0, 10,
#if MYSQL_VERSION_ID < MYSQL_VERSION_5_0
SQL_SMALLINT, 0, 0, FIELD_TYPE_SHORT, 1
#else
SQL_SMALLINT, 0, 0, MYSQL_TYPE_SHORT, 1
#endif
},
{ "int auto_increment", SQL_INTEGER, 10, NULL, NULL, NULL,
0, 0, 3, 0, 0, 1, "integer auto_increment", 0, 0, 10,
#if MYSQL_VERSION_ID < MYSQL_VERSION_5_0
SQL_INTEGER, 0, 0, FIELD_TYPE_LONG, 1
#else
SQL_INTEGER, 0, 0, MYSQL_TYPE_LONG, 1
#endif
},
{ "long varbinary", SQL_LONGVARBINARY, 16777215, "0x", NULL, NULL,
1, 0, 3, 0, 0, 0, "mediumblob", 0, 0, 0,
#if MYSQL_VERSION_ID < MYSQL_VERSION_5_0
SQL_LONGVARBINARY, 0, 0, FIELD_TYPE_LONG_BLOB, 0
#else
SQL_LONGVARBINARY, 0, 0, MYSQL_TYPE_LONG_BLOB, 0
#endif
},
{ "double auto_increment", SQL_FLOAT, 15, NULL, NULL, NULL,
0, 0, 3, 0, 0, 1, "double auto_increment", 0, 4, 2,
#if MYSQL_VERSION_ID < MYSQL_VERSION_5_0
SQL_FLOAT, 0, 0, FIELD_TYPE_DOUBLE, 1
#else
SQL_FLOAT, 0, 0, MYSQL_TYPE_DOUBLE, 1
#endif
},
{ "double auto_increment", SQL_DOUBLE, 15, NULL, NULL, NULL,
0, 0, 3, 0, 0, 1, "double auto_increment", 0, 4, 10,
#if MYSQL_VERSION_ID < MYSQL_VERSION_5_0
SQL_DOUBLE, 0, 0, FIELD_TYPE_DOUBLE, 1
#else
SQL_DOUBLE, 0, 0, MYSQL_TYPE_DOUBLE, 1
#endif
},
{ "integer auto_increment", SQL_INTEGER, 10, NULL, NULL, NULL,
0, 0, 3, 0, 0, 1, "integer auto_increment", 0, 0, 10,
#if MYSQL_VERSION_ID < MYSQL_VERSION_5_0
SQL_INTEGER, 0, 0, FIELD_TYPE_LONG, 1,
#else
SQL_INTEGER, 0, 0, MYSQL_TYPE_LONG, 1,
#endif
},
{ "bigint auto_increment", SQL_BIGINT, 19, NULL, NULL, NULL,
0, 0, 3, 0, 0, 1, "bigint auto_increment", 0, 0, 10,
#if MYSQL_VERSION_ID < MYSQL_VERSION_5_0
SQL_BIGINT, 0, 0, FIELD_TYPE_LONGLONG, 1
#else
SQL_BIGINT, 0, 0, MYSQL_TYPE_LONGLONG, 1
#endif
},
{ "bit auto_increment", SQL_BIT, 1, NULL, NULL, NULL,
0, 0, 3, 0, 0, 1, "char(1) auto_increment", 0, 0, 0,
#if MYSQL_VERSION_ID < MYSQL_VERSION_5_0
SQL_BIT, 0, 0, FIELD_TYPE_TINY, 1
#else
SQL_BIT, 0, 0, MYSQL_TYPE_TINY, 1
#endif
},
{ "mediumint auto_increment", SQL_INTEGER, 7, NULL, NULL, NULL,
0, 0, 3, 0, 0, 1, "Medium integer auto_increment", 0, 0, 10,
#if MYSQL_VERSION_ID < MYSQL_VERSION_5_0
SQL_INTEGER, 0, 0, FIELD_TYPE_INT24, 1
#else
SQL_INTEGER, 0, 0, MYSQL_TYPE_INT24, 1
#endif
},
{ "float auto_increment", SQL_REAL, 7, NULL, NULL, NULL,
0, 0, 0, 0, 0, 1, "float auto_increment", 0, 2, 10,
#if MYSQL_VERSION_ID < MYSQL_VERSION_5_0
SQL_FLOAT, 0, 0, FIELD_TYPE_FLOAT, 1
#else
SQL_FLOAT, 0, 0, MYSQL_TYPE_FLOAT, 1
#endif
},
{ "long varchar", SQL_LONGVARCHAR, 16777215, "'", "'", NULL,
1, 0, 3, 0, 0, 0, "mediumtext", 0, 0, 0,
#if MYSQL_VERSION_ID < MYSQL_VERSION_5_0
SQL_LONGVARCHAR, 0, 0, FIELD_TYPE_MEDIUM_BLOB, 1
#else
SQL_LONGVARCHAR, 0, 0, MYSQL_TYPE_MEDIUM_BLOB, 1
#endif
},
{ "tinyint auto_increment", SQL_TINYINT, 3, NULL, NULL, NULL,
0, 0, 3, 0, 0, 1, "tinyint auto_increment", 0, 0, 10,
#if MYSQL_VERSION_ID < MYSQL_VERSION_5_0
SQL_TINYINT, 0, 0, FIELD_TYPE_TINY, 1
#else
SQL_TINYINT, 0, 0, MYSQL_TYPE_TINY, 1
#endif
},
{ "bigint unsigned auto_increment", SQL_BIGINT, 20, NULL, NULL, NULL,
0, 0, 3, 1, 0, 1, "bigint unsigned auto_increment", 0, 0, 10,
#if MYSQL_VERSION_ID < MYSQL_VERSION_5_0
SQL_BIGINT, 0, 0, FIELD_TYPE_LONGLONG, 1
#else
SQL_BIGINT, 0, 0, MYSQL_TYPE_LONGLONG, 1
#endif
},
/* END MORE STUFF */
};
/*
static const sql_type_info_t* native2sql (int t)
*/
static const sql_type_info_t *native2sql(int t)
{
switch (t) {
case FIELD_TYPE_VAR_STRING: return &SQL_GET_TYPE_INFO_values[0];
case FIELD_TYPE_DECIMAL: return &SQL_GET_TYPE_INFO_values[1];
#ifdef FIELD_TYPE_NEWDECIMAL
case FIELD_TYPE_NEWDECIMAL: return &SQL_GET_TYPE_INFO_values[1];
#endif
case FIELD_TYPE_TINY: return &SQL_GET_TYPE_INFO_values[2];
case FIELD_TYPE_SHORT: return &SQL_GET_TYPE_INFO_values[3];
case FIELD_TYPE_LONG: return &SQL_GET_TYPE_INFO_values[4];
case FIELD_TYPE_FLOAT: return &SQL_GET_TYPE_INFO_values[5];
/* 6 */
case FIELD_TYPE_DOUBLE: return &SQL_GET_TYPE_INFO_values[7];
case FIELD_TYPE_TIMESTAMP: return &SQL_GET_TYPE_INFO_values[8];
case FIELD_TYPE_LONGLONG: return &SQL_GET_TYPE_INFO_values[9];
case FIELD_TYPE_INT24: return &SQL_GET_TYPE_INFO_values[10];
case FIELD_TYPE_DATE: return &SQL_GET_TYPE_INFO_values[11];
case FIELD_TYPE_TIME: return &SQL_GET_TYPE_INFO_values[12];
case FIELD_TYPE_DATETIME: return &SQL_GET_TYPE_INFO_values[13];
case FIELD_TYPE_YEAR: return &SQL_GET_TYPE_INFO_values[14];
case FIELD_TYPE_NEWDATE: return &SQL_GET_TYPE_INFO_values[15];
case FIELD_TYPE_ENUM: return &SQL_GET_TYPE_INFO_values[16];
case FIELD_TYPE_SET: return &SQL_GET_TYPE_INFO_values[17];
case FIELD_TYPE_BLOB: return &SQL_GET_TYPE_INFO_values[18];
case FIELD_TYPE_TINY_BLOB: return &SQL_GET_TYPE_INFO_values[19];
case FIELD_TYPE_MEDIUM_BLOB: return &SQL_GET_TYPE_INFO_values[20];
case FIELD_TYPE_LONG_BLOB: return &SQL_GET_TYPE_INFO_values[21];
case FIELD_TYPE_STRING: return &SQL_GET_TYPE_INFO_values[22];
default: return &SQL_GET_TYPE_INFO_values[0];
}
}
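/*
Illustrative lookups (sketch): native2sql(FIELD_TYPE_BLOB)->data_type
is SQL_LONGVARBINARY, and any unlisted native type falls back to the
first (varchar) row of SQL_GET_TYPE_INFO_values.
*/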
#define SQL_GET_TYPE_INFO_num \
(sizeof(SQL_GET_TYPE_INFO_values)/sizeof(sql_type_info_t))
/***************************************************************************
*
* Name: dbd_init
*
* Purpose: Called when the driver is installed by DBI
*
* Input: dbistate - pointer to the DBI state variable, used for some
* DBI internal things
*
* Returns: Nothing
*
**************************************************************************/
void dbd_init(dbistate_t* dbistate)
{
dTHX;
DBISTATE_INIT;
PERL_UNUSED_ARG(dbistate);
}
/**************************************************************************
*
* Name: do_error, do_warn
*
* Purpose: Called to associate an error code and an error message
* to some handle
*
* Input: h - the handle in error condition
* rc - the error code
* what - the error message
*
* Returns: Nothing
*
**************************************************************************/
void do_error(SV* h, int rc, const char* what, const char* sqlstate)
{
dTHX;
D_imp_xxh(h);
SV *errstr;
SV *errstate;
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t\t--> do_error\n");
errstr= DBIc_ERRSTR(imp_xxh);
sv_setiv(DBIc_ERR(imp_xxh), (IV)rc); /* set err early */
sv_setpv(errstr, what);
#if MYSQL_VERSION_ID >= SQL_STATE_VERSION
if (sqlstate)
{
errstate= DBIc_STATE(imp_xxh);
sv_setpvn(errstate, sqlstate, 5);
}
#endif
/* NO EFFECT DBIh_EVENT2(h, ERROR_event, DBIc_ERR(imp_xxh), errstr); */
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh), "%s error %d recorded: %s\n",
what, rc, SvPV_nolen(errstr));
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t\t<-- do_error\n");
}
/*
void do_warn(SV* h, int rc, char* what)
*/
void do_warn(SV* h, int rc, char* what)
{
dTHX;
D_imp_xxh(h);
SV *errstr = DBIc_ERRSTR(imp_xxh);
sv_setiv(DBIc_ERR(imp_xxh), (IV)rc); /* set err early */
sv_setpv(errstr, what);
/* NO EFFECT DBIh_EVENT2(h, WARN_event, DBIc_ERR(imp_xxh), errstr);*/
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh), "%s warning %d recorded: %s\n",
what, rc, SvPV_nolen(errstr));
warn("%s", what);
}
#if defined(DBD_MYSQL_EMBEDDED)
#define DBD_MYSQL_NAMESPACE "DBD::mysqlEmb::QUIET"
#else
#define DBD_MYSQL_NAMESPACE "DBD::mysql::QUIET"
#endif
#define doquietwarn(s) \
{ \
SV* sv = perl_get_sv(DBD_MYSQL_NAMESPACE, FALSE); \
if (!sv || !SvTRUE(sv)) { \
warn s; \
} \
}
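/*
Usage sketch (illustrative): doquietwarn() takes a parenthesized
warn() argument list and is suppressed when the Perl variable
$DBD::mysql::QUIET is set to a true value:
doquietwarn (("unsupported attribute %s", name));
*/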
/***************************************************************************
*
* Name: mysql_dr_connect
*
* Purpose: Replacement for mysql_connect
*
* Input: MYSQL* sock - Pointer to a MYSQL structure being
* initialized
* char* mysql_socket - Name of a UNIX socket being used
* or NULL
* char* host - Host name being used or NULL for localhost
* char* port - Port number being used or NULL for default
* char* user - User name being used or NULL
* char* password - Password being used or NULL
* char* dbname - Database name being used or NULL
* char* imp_dbh - Pointer to internal dbh structure
*
* Returns: The sock argument for success, NULL otherwise;
* you have to call do_error in the latter case.
*
**************************************************************************/
MYSQL *mysql_dr_connect(
SV* dbh,
MYSQL* sock,
char* mysql_socket,
char* host,
char* port,
char* user,
char* password,
char* dbname,
imp_dbh_t *imp_dbh)
{
int portNr;
unsigned int client_flag;
MYSQL* result;
dTHX;
D_imp_xxh(dbh);
/* per Monty, already in client.c in API */
/* but still not exist in libmysqld.c */
#if defined(DBD_MYSQL_EMBEDDED)
if (host && !*host) host = NULL;
#endif
portNr= (port && *port) ? atoi(port) : 0;
/* already in client.c in API */
/* if (user && !*user) user = NULL; */
/* if (password && !*password) password = NULL; */
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh),
"imp_dbh->mysql_dr_connect: host = |%s|, port = %d," \
" uid = %s, pwd = %s\n",
host ? host : "NULL", portNr,
user ? user : "NULL",
password ? password : "NULL");
{
#if defined(DBD_MYSQL_EMBEDDED)
if (imp_dbh)
{
D_imp_drh_from_dbh;
SV* sv = DBIc_IMP_DATA(imp_dbh);
if (sv && SvROK(sv))
{
SV** svp;
STRLEN lna;
char * options;
int server_args_cnt= 0;
int server_groups_cnt= 0;
int rc= 0;
char ** server_args = NULL;
char ** server_groups = NULL;
HV* hv = (HV*) SvRV(sv);
if (SvTYPE(hv) != SVt_PVHV)
return NULL;
if (!imp_drh->embedded.state)
{
/* Init embedded server */
if ((svp = hv_fetch(hv, "mysql_embedded_groups", 21, FALSE)) &&
*svp && SvTRUE(*svp))
{
options = SvPV(*svp, lna);
imp_drh->embedded.groups=newSVsv(*svp);
if ((server_groups_cnt=count_embedded_options(options)))
{
/* number of server_groups always server_groups+1 */
server_groups=fill_out_embedded_options(DBIc_LOGPIO(imp_xxh), options, 0,
(int)lna, ++server_groups_cnt);
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
{
PerlIO_printf(DBIc_LOGPIO(imp_xxh),
"Groups names passed to embedded server:\n");
print_embedded_options(DBIc_LOGPIO(imp_xxh), server_groups, server_groups_cnt);
}
}
}
if ((svp = hv_fetch(hv, "mysql_embedded_options", 22, FALSE)) &&
*svp && SvTRUE(*svp))
{
options = SvPV(*svp, lna);
imp_drh->embedded.args=newSVsv(*svp);
if ((server_args_cnt=count_embedded_options(options)))
{
/* number of server_options always server_options+1 */
server_args=fill_out_embedded_options(DBIc_LOGPIO(imp_xxh), options, 1, (int)lna, ++server_args_cnt);
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
{
PerlIO_printf(DBIc_LOGPIO(imp_xxh), "Server options passed to embedded server:\n");
print_embedded_options(DBIc_LOGPIO(imp_xxh), server_args, server_args_cnt);
}
}
}
if (mysql_server_init(server_args_cnt, server_args, server_groups))
{
do_warn(dbh, AS_ERR_EMBEDDED, "Embedded server was not started. "
"Could not initialize environment.");
return NULL;
}
imp_drh->embedded.state=1;
if (server_args_cnt)
free_embedded_options(server_args, server_args_cnt);
if (server_groups_cnt)
free_embedded_options(server_groups, server_groups_cnt);
}
else
{
/*
* Check if embedded parameters passed to connect() differ from
* first ones
*/
if ( ((svp = hv_fetch(hv, "mysql_embedded_groups", 21, FALSE)) &&
*svp && SvTRUE(*svp)))
rc += abs(sv_cmp(*svp, imp_drh->embedded.groups));
if ( ((svp = hv_fetch(hv, "mysql_embedded_options", 22, FALSE)) &&
*svp && SvTRUE(*svp)) )
rc += abs(sv_cmp(*svp, imp_drh->embedded.args));
if (rc)
{
do_warn(dbh, AS_ERR_EMBEDDED,
"Embedded server was already started. You cannot pass init "
"parameters to an already running embedded server");
return NULL;
}
}
}
}
#endif
#ifdef MYSQL_NO_CLIENT_FOUND_ROWS
client_flag = 0;
#else
client_flag = CLIENT_FOUND_ROWS;
#endif
mysql_init(sock);
if (imp_dbh)
{
SV* sv = DBIc_IMP_DATA(imp_dbh);
DBIc_set(imp_dbh, DBIcf_AutoCommit, TRUE);
if (sv && SvROK(sv))
{
HV* hv = (HV*) SvRV(sv);
SV** svp;
STRLEN lna;
/* thanks to Peter John Edwards for mysql_init_command */
if ((svp = hv_fetch(hv, "mysql_init_command", 18, FALSE)) &&
*svp && SvTRUE(*svp))
{
char* df = SvPV(*svp, lna);
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh),
"imp_dbh->mysql_dr_connect: Setting" \
" init command (%s).\n", df);
mysql_options(sock, MYSQL_INIT_COMMAND, df);
}
if ((svp = hv_fetch(hv, "mysql_compression", 17, FALSE)) &&
*svp && SvTRUE(*svp))
{
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh),
"imp_dbh->mysql_dr_connect: Enabling" \
" compression.\n");
mysql_options(sock, MYSQL_OPT_COMPRESS, NULL);
}
if ((svp = hv_fetch(hv, "mysql_connect_timeout", 21, FALSE))
&& *svp && SvTRUE(*svp))
{
int to = SvIV(*svp);
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh),
"imp_dbh->mysql_dr_connect: Setting" \
" connect timeout (%d).\n",to);
mysql_options(sock, MYSQL_OPT_CONNECT_TIMEOUT,
(const char *)&to);
}
if ((svp = hv_fetch(hv, "mysql_write_timeout", 19, FALSE))
&& *svp && SvTRUE(*svp))
{
int to = SvIV(*svp);
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh),
"imp_dbh->mysql_dr_connect: Setting" \
" write timeout (%d).\n",to);
mysql_options(sock, MYSQL_OPT_WRITE_TIMEOUT,
(const char *)&to);
}
if ((svp = hv_fetch(hv, "mysql_read_timeout", 18, FALSE))
&& *svp && SvTRUE(*svp))
{
int to = SvIV(*svp);
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh),
"imp_dbh->mysql_dr_connect: Setting" \
" read timeout (%d).\n",to);
mysql_options(sock, MYSQL_OPT_READ_TIMEOUT,
(const char *)&to);
}
if ((svp = hv_fetch(hv, "mysql_skip_secure_auth", 22, FALSE)) &&
*svp && SvTRUE(*svp))
{
my_bool secauth = 0;
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh),
"imp_dbh->mysql_dr_connect: Skipping" \
" secure auth\n");
mysql_options(sock, MYSQL_SECURE_AUTH, &secauth);
}
if ((svp = hv_fetch(hv, "mysql_read_default_file", 23, FALSE)) &&
*svp && SvTRUE(*svp))
{
char* df = SvPV(*svp, lna);
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh),
"imp_dbh->mysql_dr_connect: Reading" \
" default file %s.\n", df);
mysql_options(sock, MYSQL_READ_DEFAULT_FILE, df);
}
if ((svp = hv_fetch(hv, "mysql_read_default_group", 24,
FALSE)) &&
*svp && SvTRUE(*svp)) {
char* gr = SvPV(*svp, lna);
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh),
"imp_dbh->mysql_dr_connect: Using" \
" default group %s.\n", gr);
mysql_options(sock, MYSQL_READ_DEFAULT_GROUP, gr);
}
#if (MYSQL_VERSION_ID >= 50606)
if ((svp = hv_fetch(hv, "mysql_conn_attrs", 16, FALSE)) && *svp) {
HV* attrs = (HV*) SvRV(*svp);
HE* entry = NULL;
I32 num_entries = hv_iterinit(attrs);
while (num_entries && (entry = hv_iternext(attrs))) {
I32 retlen = 0;
char *attr_name = hv_iterkey(entry, &retlen);
SV *sv_attr_val = hv_iterval(attrs, entry);
char *attr_val = SvPV(sv_attr_val, lna);
mysql_options4(sock, MYSQL_OPT_CONNECT_ATTR_ADD, attr_name, attr_val);
}
}
#endif
if ((svp = hv_fetch(hv, "mysql_client_found_rows", 23, FALSE)) && *svp)
{
if (SvTRUE(*svp))
client_flag |= CLIENT_FOUND_ROWS;
else
client_flag &= ~CLIENT_FOUND_ROWS;
}
if ((svp = hv_fetch(hv, "mysql_use_result", 16, FALSE)) && *svp)
{
imp_dbh->use_mysql_use_result = SvTRUE(*svp);
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh),
"imp_dbh->use_mysql_use_result: %d\n",
imp_dbh->use_mysql_use_result);
}
if ((svp = hv_fetch(hv, "mysql_bind_type_guessing", 24, TRUE)) && *svp)
{
imp_dbh->bind_type_guessing= SvTRUE(*svp);
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh),
"imp_dbh->bind_type_guessing: %d\n",
imp_dbh->bind_type_guessing);
}
if ((svp = hv_fetch(hv, "mysql_bind_comment_placeholders", 31, FALSE)) && *svp)
{
imp_dbh->bind_comment_placeholders = SvTRUE(*svp);
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh),
"imp_dbh->bind_comment_placeholders: %d\n",
imp_dbh->bind_comment_placeholders);
}
if ((svp = hv_fetch(hv, "mysql_no_autocommit_cmd", 23, FALSE)) && *svp)
{
imp_dbh->no_autocommit_cmd= SvTRUE(*svp);
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh),
"imp_dbh->no_autocommit_cmd: %d\n",
imp_dbh->no_autocommit_cmd);
}
#if FABRIC_SUPPORT
if ((svp = hv_fetch(hv, "mysql_use_fabric", 16, FALSE)) &&
*svp && SvTRUE(*svp))
{
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh),
"imp_dbh->use_fabric: Enabling use of" \
" MySQL Fabric.\n");
mysql_options(sock, MYSQL_OPT_USE_FABRIC, NULL);
}
#endif
#if defined(CLIENT_MULTI_STATEMENTS)
if ((svp = hv_fetch(hv, "mysql_multi_statements", 22, FALSE)) && *svp)
{
if (SvTRUE(*svp))
client_flag |= CLIENT_MULTI_STATEMENTS;
else
client_flag &= ~CLIENT_MULTI_STATEMENTS;
}
#endif
#if MYSQL_VERSION_ID >=SERVER_PREPARE_VERSION
/* took out client_flag |= CLIENT_PROTOCOL_41; */
/* because libmysql.c already sets this no matter what */
if ((svp = hv_fetch(hv, "mysql_server_prepare", 20, FALSE))
&& *svp)
{
if (SvTRUE(*svp))
{
client_flag |= CLIENT_PROTOCOL_41;
imp_dbh->use_server_side_prepare = TRUE;
}
else
{
client_flag &= ~CLIENT_PROTOCOL_41;
imp_dbh->use_server_side_prepare = FALSE;
}
}
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh),
"imp_dbh->use_server_side_prepare: %d\n",
imp_dbh->use_server_side_prepare);
if ((svp = hv_fetch(hv, "mysql_server_prepare_disable_fallback", 37, FALSE)) && *svp)
imp_dbh->disable_fallback_for_server_prepare = SvTRUE(*svp);
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh),
"imp_dbh->disable_fallback_for_server_prepare: %d\n",
imp_dbh->disable_fallback_for_server_prepare);
#endif
/* HELMUT */
#if defined(sv_utf8_decode) && MYSQL_VERSION_ID >=SERVER_PREPARE_VERSION
if ((svp = hv_fetch(hv, "mysql_enable_utf8mb4", 20, FALSE)) && *svp && SvTRUE(*svp)) {
mysql_options(sock, MYSQL_SET_CHARSET_NAME, "utf8mb4");
}
else if ((svp = hv_fetch(hv, "mysql_enable_utf8", 17, FALSE)) && *svp) {
/* Do not touch imp_dbh->enable_utf8 as we are called earlier
* than it is set and mysql_options() must be before:
* mysql_real_connect()
*/
mysql_options(sock, MYSQL_SET_CHARSET_NAME,
(SvTRUE(*svp) ? "utf8" : "latin1"));
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh),
"mysql_options: MYSQL_SET_CHARSET_NAME=%s\n",
(SvTRUE(*svp) ? "utf8" : "latin1"));
}
#endif
#if defined(DBD_MYSQL_WITH_SSL) && !defined(DBD_MYSQL_EMBEDDED) && \
(defined(CLIENT_SSL) || (MYSQL_VERSION_ID >= 40000))
if ((svp = hv_fetch(hv, "mysql_ssl", 9, FALSE)) && *svp)
{
if (SvTRUE(*svp))
{
char *client_key = NULL;
char *client_cert = NULL;
char *ca_file = NULL;
char *ca_path = NULL;
char *cipher = NULL;
STRLEN lna;
#if MYSQL_VERSION_ID >= SSL_VERIFY_VERSION && MYSQL_VERSION_ID <= SSL_LAST_VERIFY_VERSION
      /*
        Use MySQL's option that verifies that the hostname the client
        connects to matches the hostname in the server's certificate.
      */
my_bool ssl_verify_true = 0;
if ((svp = hv_fetch(hv, "mysql_ssl_verify_server_cert", 28, FALSE)) && *svp)
ssl_verify_true = SvTRUE(*svp);
#endif
if ((svp = hv_fetch(hv, "mysql_ssl_client_key", 20, FALSE)) && *svp)
client_key = SvPV(*svp, lna);
if ((svp = hv_fetch(hv, "mysql_ssl_client_cert", 21, FALSE)) &&
*svp)
client_cert = SvPV(*svp, lna);
if ((svp = hv_fetch(hv, "mysql_ssl_ca_file", 17, FALSE)) &&
*svp)
ca_file = SvPV(*svp, lna);
if ((svp = hv_fetch(hv, "mysql_ssl_ca_path", 17, FALSE)) &&
*svp)
ca_path = SvPV(*svp, lna);
if ((svp = hv_fetch(hv, "mysql_ssl_cipher", 16, FALSE)) &&
*svp)
cipher = SvPV(*svp, lna);
mysql_ssl_set(sock, client_key, client_cert, ca_file,
ca_path, cipher);
#if MYSQL_VERSION_ID >= SSL_VERIFY_VERSION && MYSQL_VERSION_ID <= SSL_LAST_VERIFY_VERSION
mysql_options(sock, MYSQL_OPT_SSL_VERIFY_SERVER_CERT, &ssl_verify_true);
#endif
client_flag |= CLIENT_SSL;
}
}
#endif
#if (MYSQL_VERSION_ID >= 32349)
/*
* MySQL 3.23.49 disables LOAD DATA LOCAL by default. Use
* mysql_local_infile=1 in the DSN to enable it.
*/
if ((svp = hv_fetch( hv, "mysql_local_infile", 18, FALSE)) && *svp)
{
unsigned int flag = SvTRUE(*svp);
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh),
"imp_dbh->mysql_dr_connect: Using" \
" local infile %u.\n", flag);
mysql_options(sock, MYSQL_OPT_LOCAL_INFILE, (const char *) &flag);
}
#endif
}
}
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh), "imp_dbh->mysql_dr_connect: client_flags = %d\n",
client_flag);
#if MYSQL_VERSION_ID >= MULTIPLE_RESULT_SET_VERSION
client_flag|= CLIENT_MULTI_RESULTS;
#endif
result = mysql_real_connect(sock, host, user, password, dbname,
portNr, mysql_socket, client_flag);
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh), "imp_dbh->mysql_dr_connect: <-");
if (result)
{
#if MYSQL_VERSION_ID >=SERVER_PREPARE_VERSION
/* connection succeeded. */
/* imp_dbh == NULL when mysql_dr_connect() is called from mysql.xs
functions (_admin_internal(),_ListDBs()). */
if (!(result->client_flag & CLIENT_PROTOCOL_41) && imp_dbh)
imp_dbh->use_server_side_prepare = FALSE;
#endif
#if MYSQL_ASYNC
if(imp_dbh) {
imp_dbh->async_query_in_flight = NULL;
}
#endif
/*
we turn off Mysql's auto reconnect and handle re-connecting ourselves
so that we can keep track of when this happens.
*/
result->reconnect=0;
}
else {
      /*
        sock was allocated with mysql_init().
        An earlier fix for https://rt.cpan.org/Ticket/Display.html?id=86153
        called Safefree(sock) here.
        rurban: No, we still need this handle later in mysql_dr_error().
        RT #97625. It will be freed as imp_dbh->pmysql in dbd_db_destroy(),
        which is called by the DESTROY handler.
      */
}
return result;
}
}
/*
safe_hv_fetch
*/
static char *safe_hv_fetch(pTHX_ HV *hv, const char *name, int name_length)
{
SV** svp;
STRLEN len;
char *res= NULL;
if ((svp= hv_fetch(hv, name, name_length, FALSE)))
{
res= SvPV(*svp, len);
if (!len)
res= NULL;
}
return res;
}
/*
Frontend for mysql_dr_connect
*/
static int my_login(pTHX_ SV* dbh, imp_dbh_t *imp_dbh)
{
SV* sv;
HV* hv;
char* dbname;
char* host;
char* port;
char* user;
char* password;
char* mysql_socket;
int result;
D_imp_xxh(dbh);
/* TODO- resolve this so that it is set only if DBI is 1.607 */
#define TAKE_IMP_DATA_VERSION 1
#if TAKE_IMP_DATA_VERSION
if (DBIc_has(imp_dbh, DBIcf_IMPSET))
{ /* eg from take_imp_data() */
if (DBIc_has(imp_dbh, DBIcf_ACTIVE))
{
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh), "my_login skip connect\n");
/* tell our parent we've adopted an active child */
++DBIc_ACTIVE_KIDS(DBIc_PARENT_COM(imp_dbh));
return TRUE;
}
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh),
"my_login IMPSET but not ACTIVE so connect not skipped\n");
}
#endif
sv = DBIc_IMP_DATA(imp_dbh);
if (!sv || !SvROK(sv))
return FALSE;
hv = (HV*) SvRV(sv);
if (SvTYPE(hv) != SVt_PVHV)
return FALSE;
host= safe_hv_fetch(aTHX_ hv, "host", 4);
port= safe_hv_fetch(aTHX_ hv, "port", 4);
user= safe_hv_fetch(aTHX_ hv, "user", 4);
password= safe_hv_fetch(aTHX_ hv, "password", 8);
dbname= safe_hv_fetch(aTHX_ hv, "database", 8);
mysql_socket= safe_hv_fetch(aTHX_ hv, "mysql_socket", 12);
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh),
"imp_dbh->my_login : dbname = %s, uid = %s, pwd = %s," \
"host = %s, port = %s\n",
dbname ? dbname : "NULL",
user ? user : "NULL",
password ? password : "NULL",
host ? host : "NULL",
port ? port : "NULL");
if (!imp_dbh->pmysql) {
Newz(908, imp_dbh->pmysql, 1, MYSQL);
}
result = mysql_dr_connect(dbh, imp_dbh->pmysql, mysql_socket, host, port, user,
password, dbname, imp_dbh) ? TRUE : FALSE;
return result;
}
/**************************************************************************
*
* Name: dbd_db_login
*
* Purpose: Called for connecting to a database and logging in.
*
* Input: dbh - database handle being initialized
* imp_dbh - drivers private database handle data
* dbname - the database we want to log into; may be like
* "dbname:host" or "dbname:host:port"
* user - user name to connect as
* password - password to connect with
*
* Returns: TRUE for success, FALSE otherwise; do_error has already
* been called in the latter case
*
**************************************************************************/
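/*
 * Illustrative usage from the Perl side (not part of this file):
 *
 *   my $dbh = DBI->connect(
 *       "DBI:mysql:database=test;host=localhost;port=3306",
 *       $user, $password, { RaiseError => 1 });
 *
 * DBI parses the DSN and hands the pieces down to this function via the
 * handle's IMP_DATA hash (see my_login() above).
 */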
int dbd_db_login(SV* dbh, imp_dbh_t* imp_dbh, char* dbname, char* user,
char* password) {
#ifdef dTHR
dTHR;
#endif
dTHX;
D_imp_xxh(dbh);
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh),
"imp_dbh->connect: dsn = %s, uid = %s, pwd = %s\n",
dbname ? dbname : "NULL",
user ? user : "NULL",
password ? password : "NULL");
imp_dbh->stats.auto_reconnects_ok= 0;
imp_dbh->stats.auto_reconnects_failed= 0;
imp_dbh->bind_type_guessing= FALSE;
imp_dbh->bind_comment_placeholders= FALSE;
imp_dbh->has_transactions= TRUE;
  /* It is safer to flip this to TRUE on the Perl side if we detect a mod_perl environment. */
imp_dbh->auto_reconnect = FALSE;
/* HELMUT */
#if defined(sv_utf8_decode) && MYSQL_VERSION_ID >=SERVER_PREPARE_VERSION
imp_dbh->enable_utf8 = FALSE; /* initialize mysql_enable_utf8 */
imp_dbh->enable_utf8mb4 = FALSE; /* initialize mysql_enable_utf8mb4 */
#endif
if (!my_login(aTHX_ dbh, imp_dbh))
{
if(imp_dbh->pmysql) {
do_error(dbh, mysql_errno(imp_dbh->pmysql),
mysql_error(imp_dbh->pmysql) ,mysql_sqlstate(imp_dbh->pmysql));
Safefree(imp_dbh->pmysql);
}
return FALSE;
}
/*
* Tell DBI, that dbh->disconnect should be called for this handle
*/
DBIc_ACTIVE_on(imp_dbh);
/* Tell DBI, that dbh->destroy should be called for this handle */
DBIc_on(imp_dbh, DBIcf_IMPSET);
return TRUE;
}
/***************************************************************************
*
* Name: dbd_db_commit
* dbd_db_rollback
*
 *  Purpose: Commit or roll back the current transaction.
*
* Input: dbh - database handle being committed or rolled back
* imp_dbh - drivers private database handle data
*
* Returns: TRUE for success, FALSE otherwise; do_error has already
* been called in the latter case
*
**************************************************************************/
int
dbd_db_commit(SV* dbh, imp_dbh_t* imp_dbh)
{
if (DBIc_has(imp_dbh, DBIcf_AutoCommit))
return FALSE;
ASYNC_CHECK_RETURN(dbh, FALSE);
if (imp_dbh->has_transactions)
{
#if MYSQL_VERSION_ID < SERVER_PREPARE_VERSION
if (mysql_real_query(imp_dbh->pmysql, "COMMIT", 6))
#else
if (mysql_commit(imp_dbh->pmysql))
#endif
{
do_error(dbh, mysql_errno(imp_dbh->pmysql), mysql_error(imp_dbh->pmysql)
,mysql_sqlstate(imp_dbh->pmysql));
return FALSE;
}
}
else
do_warn(dbh, JW_ERR_NOT_IMPLEMENTED,
"Commit ineffective because transactions are not available");
return TRUE;
}
/*
dbd_db_rollback
*/
int
dbd_db_rollback(SV* dbh, imp_dbh_t* imp_dbh) {
  /* Return FALSE if AutoCommit is on: there is no transaction to roll back */
if (DBIc_has(imp_dbh, DBIcf_AutoCommit))
return FALSE;
ASYNC_CHECK_RETURN(dbh, FALSE);
if (imp_dbh->has_transactions)
{
#if MYSQL_VERSION_ID < SERVER_PREPARE_VERSION
if (mysql_real_query(imp_dbh->pmysql, "ROLLBACK", 8))
#else
if (mysql_rollback(imp_dbh->pmysql))
#endif
{
do_error(dbh, mysql_errno(imp_dbh->pmysql),
mysql_error(imp_dbh->pmysql) ,mysql_sqlstate(imp_dbh->pmysql));
return FALSE;
}
}
else
do_error(dbh, JW_ERR_NOT_IMPLEMENTED,
"Rollback ineffective because transactions are not available" ,NULL);
return TRUE;
}
/*
***************************************************************************
*
* Name: dbd_db_disconnect
*
* Purpose: Disconnect a database handle from its database
*
* Input: dbh - database handle being disconnected
* imp_dbh - drivers private database handle data
*
* Returns: TRUE for success, FALSE otherwise; do_error has already
* been called in the latter case
*
**************************************************************************/
int dbd_db_disconnect(SV* dbh, imp_dbh_t* imp_dbh)
{
#ifdef dTHR
dTHR;
#endif
dTHX;
D_imp_xxh(dbh);
/* We assume that disconnect will always work */
/* since most errors imply already disconnected. */
DBIc_ACTIVE_off(imp_dbh);
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh), "imp_dbh->pmysql: %p\n",
imp_dbh->pmysql);
mysql_close(imp_dbh->pmysql );
/* We don't free imp_dbh since a reference still exists */
/* The DESTROY method is the only one to 'free' memory. */
return TRUE;
}
/***************************************************************************
*
* Name: dbd_discon_all
*
* Purpose: Disconnect all database handles at shutdown time
*
* Input: dbh - database handle being disconnected
* imp_dbh - drivers private database handle data
*
* Returns: TRUE for success, FALSE otherwise; do_error has already
* been called in the latter case
*
**************************************************************************/
int dbd_discon_all (SV *drh, imp_drh_t *imp_drh) {
#if defined(dTHR)
dTHR;
#endif
dTHX;
#if defined(DBD_MYSQL_EMBEDDED)
D_imp_xxh(drh);
#else
PERL_UNUSED_ARG(drh);
#endif
#if defined(DBD_MYSQL_EMBEDDED)
if (imp_drh->embedded.state)
{
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh), "Stop embedded server\n");
mysql_server_end();
if (imp_drh->embedded.groups)
{
(void) SvREFCNT_dec(imp_drh->embedded.groups);
imp_drh->embedded.groups = NULL;
}
if (imp_drh->embedded.args)
{
(void) SvREFCNT_dec(imp_drh->embedded.args);
imp_drh->embedded.args = NULL;
}
}
#else
mysql_server_end();
#endif
/* The disconnect_all concept is flawed and needs more work */
if (!PL_dirty && !SvTRUE(perl_get_sv("DBI::PERL_ENDING",0))) {
sv_setiv(DBIc_ERR(imp_drh), (IV)1);
sv_setpv(DBIc_ERRSTR(imp_drh),
(char*)"disconnect_all not implemented");
/* NO EFFECT DBIh_EVENT2(drh, ERROR_event,
DBIc_ERR(imp_drh), DBIc_ERRSTR(imp_drh)); */
return FALSE;
}
PL_perl_destruct_level = 0;
return FALSE;
}
/****************************************************************************
*
* Name: dbd_db_destroy
*
* Purpose: Our part of the dbh destructor
*
* Input: dbh - database handle being destroyed
* imp_dbh - drivers private database handle data
*
* Returns: Nothing
*
**************************************************************************/
void dbd_db_destroy(SV* dbh, imp_dbh_t* imp_dbh) {
/*
* Being on the safe side never hurts ...
*/
if (DBIc_ACTIVE(imp_dbh))
{
if (imp_dbh->has_transactions)
{
if (!DBIc_has(imp_dbh, DBIcf_AutoCommit))
#if MYSQL_VERSION_ID < SERVER_PREPARE_VERSION
if ( mysql_real_query(imp_dbh->pmysql, "ROLLBACK", 8))
#else
if (mysql_rollback(imp_dbh->pmysql))
#endif
do_error(dbh, TX_ERR_ROLLBACK,"ROLLBACK failed" ,NULL);
}
dbd_db_disconnect(dbh, imp_dbh);
}
Safefree(imp_dbh->pmysql);
/* Tell DBI, that dbh->destroy must no longer be called */
DBIc_off(imp_dbh, DBIcf_IMPSET);
}
/*
***************************************************************************
*
* Name: dbd_db_STORE_attrib
*
 *  Purpose: Function for storing dbh attributes (AutoCommit and the
 *           various mysql_* options)
*
* Input: dbh - database handle being modified
* imp_dbh - drivers private database handle data
* keysv - the attribute name
* valuesv - the attribute value
*
* Returns: TRUE for success, FALSE otherwise
*
**************************************************************************/
int
dbd_db_STORE_attrib(
SV* dbh,
imp_dbh_t* imp_dbh,
SV* keysv,
SV* valuesv
)
{
dTHX;
STRLEN kl;
char *key = SvPV(keysv, kl);
SV *cachesv = Nullsv;
int cacheit = FALSE;
const bool bool_value = SvTRUE(valuesv);
if (kl==10 && strEQ(key, "AutoCommit"))
{
if (imp_dbh->has_transactions)
{
bool oldval = DBIc_has(imp_dbh,DBIcf_AutoCommit) ? 1 : 0;
if (bool_value == oldval)
return TRUE;
      /* issue the server-side AutoCommit change unless suppressed via mysql_no_autocommit_cmd */
if (!imp_dbh->no_autocommit_cmd)
{
if (
#if MYSQL_VERSION_ID >=SERVER_PREPARE_VERSION
mysql_autocommit(imp_dbh->pmysql, bool_value)
#else
mysql_real_query(imp_dbh->pmysql,
bool_value ? "SET AUTOCOMMIT=1" : "SET AUTOCOMMIT=0",
16)
#endif
)
{
do_error(dbh, TX_ERR_AUTOCOMMIT,
bool_value ?
"Turning on AutoCommit failed" :
"Turning off AutoCommit failed"
,NULL);
return TRUE; /* TRUE means we handled it - important to avoid spurious errors */
}
}
DBIc_set(imp_dbh, DBIcf_AutoCommit, bool_value);
}
else
{
      /*
       *  We support neither transactions nor "AutoCommit",
       *  but we stub it. :-)
       */
if (!bool_value)
{
do_error(dbh, JW_ERR_NOT_IMPLEMENTED,
"Transactions not supported by database" ,NULL);
croak("Transactions not supported by database");
}
}
}
else if (kl == 16 && strEQ(key,"mysql_use_result"))
imp_dbh->use_mysql_use_result = bool_value;
else if (kl == 20 && strEQ(key,"mysql_auto_reconnect"))
imp_dbh->auto_reconnect = bool_value;
else if (kl == 20 && strEQ(key, "mysql_server_prepare"))
imp_dbh->use_server_side_prepare = bool_value;
else if (kl == 37 && strEQ(key, "mysql_server_prepare_disable_fallback"))
imp_dbh->disable_fallback_for_server_prepare = bool_value;
else if (kl == 23 && strEQ(key,"mysql_no_autocommit_cmd"))
imp_dbh->no_autocommit_cmd = bool_value;
else if (kl == 24 && strEQ(key,"mysql_bind_type_guessing"))
imp_dbh->bind_type_guessing = bool_value;
  else if (kl == 31 && strEQ(key,"mysql_bind_comment_placeholders"))
    imp_dbh->bind_comment_placeholders = bool_value; /* was mistakenly assigned to bind_type_guessing */
#if defined(sv_utf8_decode) && MYSQL_VERSION_ID >=SERVER_PREPARE_VERSION
else if (kl == 17 && strEQ(key, "mysql_enable_utf8"))
imp_dbh->enable_utf8 = bool_value;
else if (kl == 20 && strEQ(key, "mysql_enable_utf8mb4"))
imp_dbh->enable_utf8mb4 = bool_value;
#endif
#if FABRIC_SUPPORT
else if (kl == 22 && strEQ(key, "mysql_fabric_opt_group"))
mysql_options(imp_dbh->pmysql, FABRIC_OPT_GROUP, (void *)SvPVbyte_nolen(valuesv));
else if (kl == 29 && strEQ(key, "mysql_fabric_opt_default_mode"))
{
if (SvOK(valuesv)) {
STRLEN len;
const char *str = SvPVbyte(valuesv, len);
if ( len == 0 || ( len == 2 && (strnEQ(str, "ro", 3) || strnEQ(str, "rw", 3)) ) )
mysql_options(imp_dbh->pmysql, FABRIC_OPT_DEFAULT_MODE, len == 0 ? NULL : str);
else
croak("Valid settings for FABRIC_OPT_DEFAULT_MODE are 'ro', 'rw', or undef/empty string");
}
else {
mysql_options(imp_dbh->pmysql, FABRIC_OPT_DEFAULT_MODE, NULL);
}
}
else if (kl == 21 && strEQ(key, "mysql_fabric_opt_mode"))
{
STRLEN len;
const char *str = SvPVbyte(valuesv, len);
if (len != 2 || (strnNE(str, "ro", 3) && strnNE(str, "rw", 3)))
croak("Valid settings for FABRIC_OPT_MODE are 'ro' or 'rw'");
mysql_options(imp_dbh->pmysql, FABRIC_OPT_MODE, str);
}
else if (kl == 34 && strEQ(key, "mysql_fabric_opt_group_credentials"))
{
croak("'fabric_opt_group_credentials' is not supported");
}
#endif
else
return FALSE; /* Unknown key */
if (cacheit) /* cache value for later DBI 'quick' fetch? */
(void)hv_store((HV*)SvRV(dbh), key, kl, cachesv, 0);
return TRUE;
}
/***************************************************************************
*
* Name: dbd_db_FETCH_attrib
*
* Purpose: Function for fetching dbh attributes
*
* Input: dbh - database handle being queried
* imp_dbh - drivers private database handle data
* keysv - the attribute name
*
* Returns: An SV*, if successful; NULL otherwise
*
* Notes: Do not forget to call sv_2mortal in the former case!
*
**************************************************************************/
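/*
  my_ulonglong2str converts a my_ulonglong to a decimal string by filling
  a buffer backwards. We cannot simply use newSViv() for these values:
  on 32-bit Perls an IV cannot represent the full 64-bit range.
*/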
static SV*
my_ulonglong2str(pTHX_ my_ulonglong val)
{
char buf[64];
char *ptr = buf + sizeof(buf) - 1;
if (val == 0)
return newSVpvn("0", 1);
*ptr = '\0';
while (val > 0)
{
*(--ptr) = ('0' + (val % 10));
val = val / 10;
}
return newSVpvn(ptr, (buf+ sizeof(buf) - 1) - ptr);
}
SV* dbd_db_FETCH_attrib(SV *dbh, imp_dbh_t *imp_dbh, SV *keysv)
{
dTHX;
STRLEN kl;
char *key = SvPV(keysv, kl);
SV* result = NULL;
  dbh= dbh; /* silence unused-parameter warning */
switch (*key) {
case 'A':
if (strEQ(key, "AutoCommit"))
{
if (imp_dbh->has_transactions)
return sv_2mortal(boolSV(DBIc_has(imp_dbh,DBIcf_AutoCommit)));
/* Default */
return &PL_sv_yes;
}
break;
}
if (strncmp(key, "mysql_", 6) == 0) {
key = key+6;
kl = kl-6;
}
/* MONTY: Check if kl should not be used or used everywhere */
switch(*key) {
case 'a':
if (kl == strlen("auto_reconnect") && strEQ(key, "auto_reconnect"))
result= sv_2mortal(newSViv(imp_dbh->auto_reconnect));
break;
case 'b':
if (kl == strlen("bind_type_guessing") &&
strEQ(key, "bind_type_guessing"))
{
result = sv_2mortal(newSViv(imp_dbh->bind_type_guessing));
}
else if (kl == strlen("bind_comment_placeholders") &&
strEQ(key, "bind_comment_placeholders"))
{
result = sv_2mortal(newSViv(imp_dbh->bind_comment_placeholders));
}
break;
case 'c':
if (kl == 10 && strEQ(key, "clientinfo"))
{
const char* clientinfo = mysql_get_client_info();
result= clientinfo ?
sv_2mortal(newSVpvn(clientinfo, strlen(clientinfo))) : &PL_sv_undef;
}
else if (kl == 13 && strEQ(key, "clientversion"))
{
result= sv_2mortal(my_ulonglong2str(aTHX_ mysql_get_client_version()));
}
break;
case 'e':
if (strEQ(key, "errno"))
result= sv_2mortal(newSViv((IV)mysql_errno(imp_dbh->pmysql)));
else if ( strEQ(key, "error") || strEQ(key, "errmsg"))
{
/* Note that errmsg is obsolete, as of 2.09! */
const char* msg = mysql_error(imp_dbh->pmysql);
result= sv_2mortal(newSVpvn(msg, strlen(msg)));
}
/* HELMUT */
#if defined(sv_utf8_decode) && MYSQL_VERSION_ID >=SERVER_PREPARE_VERSION
else if (kl == strlen("enable_utf8mb4") && strEQ(key, "enable_utf8mb4"))
result = sv_2mortal(newSViv(imp_dbh->enable_utf8mb4));
else if (kl == strlen("enable_utf8") && strEQ(key, "enable_utf8"))
result = sv_2mortal(newSViv(imp_dbh->enable_utf8));
#endif
break;
case 'd':
if (strEQ(key, "dbd_stats"))
{
HV* hv = newHV();
(void)hv_store(
hv,
"auto_reconnects_ok",
strlen("auto_reconnects_ok"),
newSViv(imp_dbh->stats.auto_reconnects_ok),
0
);
(void)hv_store(
hv,
"auto_reconnects_failed",
strlen("auto_reconnects_failed"),
newSViv(imp_dbh->stats.auto_reconnects_failed),
0
);
result= sv_2mortal((newRV_noinc((SV*)hv)));
    }
    break; /* was missing: fell through into case 'h' */
case 'h':
if (strEQ(key, "hostinfo"))
{
const char* hostinfo = mysql_get_host_info(imp_dbh->pmysql);
result= hostinfo ?
sv_2mortal(newSVpvn(hostinfo, strlen(hostinfo))) : &PL_sv_undef;
}
break;
case 'i':
if (strEQ(key, "info"))
{
const char* info = mysql_info(imp_dbh->pmysql);
result= info ? sv_2mortal(newSVpvn(info, strlen(info))) : &PL_sv_undef;
}
else if (kl == 8 && strEQ(key, "insertid"))
      /* We cannot return an IV, because insertid is a 64-bit value (my_ulonglong). */
result= sv_2mortal(my_ulonglong2str(aTHX_ mysql_insert_id(imp_dbh->pmysql)));
break;
case 'n':
if (kl == strlen("no_autocommit_cmd") &&
strEQ(key, "no_autocommit_cmd"))
result = sv_2mortal(newSViv(imp_dbh->no_autocommit_cmd));
break;
case 'p':
if (kl == 9 && strEQ(key, "protoinfo"))
result= sv_2mortal(newSViv(mysql_get_proto_info(imp_dbh->pmysql)));
break;
case 's':
if (kl == 10 && strEQ(key, "serverinfo")) {
const char* serverinfo = mysql_get_server_info(imp_dbh->pmysql);
result= serverinfo ?
sv_2mortal(newSVpvn(serverinfo, strlen(serverinfo))) : &PL_sv_undef;
}
else if (kl == 13 && strEQ(key, "serverversion"))
result= sv_2mortal(my_ulonglong2str(aTHX_ mysql_get_server_version(imp_dbh->pmysql)));
else if (strEQ(key, "sock"))
result= sv_2mortal(newSViv(PTR2IV(imp_dbh->pmysql)));
else if (strEQ(key, "sockfd"))
result= sv_2mortal(newSViv((IV) imp_dbh->pmysql->net.fd));
else if (strEQ(key, "stat"))
{
const char* stats = mysql_stat(imp_dbh->pmysql);
result= stats ?
sv_2mortal(newSVpvn(stats, strlen(stats))) : &PL_sv_undef;
}
else if (strEQ(key, "stats"))
{
/* Obsolete, as of 2.09 */
const char* stats = mysql_stat(imp_dbh->pmysql);
result= stats ?
sv_2mortal(newSVpvn(stats, strlen(stats))) : &PL_sv_undef;
}
else if (kl == 14 && strEQ(key,"server_prepare"))
result= sv_2mortal(newSViv((IV) imp_dbh->use_server_side_prepare));
else if (kl == 31 && strEQ(key, "server_prepare_disable_fallback"))
result= sv_2mortal(newSViv((IV) imp_dbh->disable_fallback_for_server_prepare));
break;
case 't':
if (kl == 9 && strEQ(key, "thread_id"))
result= sv_2mortal(newSViv(mysql_thread_id(imp_dbh->pmysql)));
break;
case 'w':
if (kl == 13 && strEQ(key, "warning_count"))
result= sv_2mortal(newSViv(mysql_warning_count(imp_dbh->pmysql)));
break;
case 'u':
if (strEQ(key, "use_result"))
{
result= sv_2mortal(newSViv((IV) imp_dbh->use_mysql_use_result));
}
break;
}
if (result== NULL)
return Nullsv;
return result;
}
/*
**************************************************************************
*
* Name: dbd_st_prepare
*
* Purpose: Called for preparing an SQL statement; our part of the
* statement handle constructor
*
* Input: sth - statement handle being initialized
* imp_sth - drivers private statement handle data
* statement - pointer to string with SQL statement
* attribs - statement attributes, currently not in use
*
* Returns: TRUE for success, FALSE otherwise; do_error will
* be called in the latter case
*
**************************************************************************/
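/*
 * Illustrative usage from the Perl side (not part of this file):
 *
 *   my $sth = $dbh->prepare("SELECT name FROM t WHERE id = ?",
 *                           { mysql_server_prepare => 1 });
 *
 * The attribute hash, if given, arrives here as 'attribs' and can
 * override the dbh-level mysql_server_prepare settings per statement.
 */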
int
dbd_st_prepare(
SV *sth,
imp_sth_t *imp_sth,
char *statement,
SV *attribs)
{
int i;
SV **svp;
dTHX;
#if MYSQL_VERSION_ID >= SERVER_PREPARE_VERSION
#if MYSQL_VERSION_ID < CALL_PLACEHOLDER_VERSION
char *str_ptr, *str_last_ptr;
#if MYSQL_VERSION_ID < LIMIT_PLACEHOLDER_VERSION
int limit_flag=0;
#endif
#endif
int prepare_retval;
MYSQL_BIND *bind, *bind_end;
imp_sth_phb_t *fbind;
#endif
D_imp_xxh(sth);
D_imp_dbh_from_sth;
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh),
"\t-> dbd_st_prepare MYSQL_VERSION_ID %d, SQL statement: %s\n",
MYSQL_VERSION_ID, statement);
#if MYSQL_VERSION_ID >= SERVER_PREPARE_VERSION
/* Set default value of 'mysql_server_prepare' attribute for sth from dbh */
imp_sth->use_server_side_prepare= imp_dbh->use_server_side_prepare;
imp_sth->disable_fallback_for_server_prepare= imp_dbh->disable_fallback_for_server_prepare;
if (attribs)
{
svp= DBD_ATTRIB_GET_SVP(attribs, "mysql_server_prepare", 20);
imp_sth->use_server_side_prepare = (svp) ?
SvTRUE(*svp) : imp_dbh->use_server_side_prepare;
svp= DBD_ATTRIB_GET_SVP(attribs, "mysql_server_prepare_disable_fallback", 37);
imp_sth->disable_fallback_for_server_prepare = (svp) ?
SvTRUE(*svp) : imp_dbh->disable_fallback_for_server_prepare;
svp = DBD_ATTRIB_GET_SVP(attribs, "async", 5);
if(svp && SvTRUE(*svp)) {
#if MYSQL_ASYNC
imp_sth->is_async = TRUE;
if (imp_sth->disable_fallback_for_server_prepare)
{
do_error(sth, ER_UNSUPPORTED_PS,
"Async option not supported with server side prepare", "HY000");
return 0;
}
imp_sth->use_server_side_prepare = FALSE;
#else
do_error(sth, 2000,
"Async support was not built into this version of DBD::mysql", "HY000");
return 0;
#endif
}
}
imp_sth->fetch_done= 0;
#endif
imp_sth->done_desc= 0;
imp_sth->result= NULL;
imp_sth->currow= 0;
/* Set default value of 'mysql_use_result' attribute for sth from dbh */
svp= DBD_ATTRIB_GET_SVP(attribs, "mysql_use_result", 16);
imp_sth->use_mysql_use_result= svp ?
SvTRUE(*svp) : imp_dbh->use_mysql_use_result;
for (i= 0; i < AV_ATTRIB_LAST; i++)
imp_sth->av_attr[i]= Nullav;
/*
Clean-up previous result set(s) for sth to prevent
'Commands out of sync' error
*/
mysql_st_free_result_sets(sth, imp_sth);
#if MYSQL_VERSION_ID >= SERVER_PREPARE_VERSION && MYSQL_VERSION_ID < CALL_PLACEHOLDER_VERSION
if (imp_sth->use_server_side_prepare)
{
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh),
"\t\tuse_server_side_prepare set, check restrictions\n");
/*
This code is here because placeholder support is not implemented for
      statements with:
1. LIMIT < 5.0.7
2. CALL < 5.5.3 (Added support for out & inout parameters)
In these cases we have to disable server side prepared statements
NOTE: These checks could cause a false positive on statements which
include columns / table names that match "call " or " limit "
*/
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh),
#if MYSQL_VERSION_ID < LIMIT_PLACEHOLDER_VERSION
"\t\tneed to test for LIMIT & CALL\n");
#else
"\t\tneed to test for restrictions\n");
#endif
str_last_ptr = statement + strlen(statement);
for (str_ptr= statement; str_ptr < str_last_ptr; str_ptr++)
{
#if MYSQL_VERSION_ID < LIMIT_PLACEHOLDER_VERSION
      /*
        Placeholders are not supported in LIMIT clauses
      */
if (limit_flag)
{
if (*str_ptr == '?')
{
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh),
"\t\tLIMIT and ? found, set to use_server_side_prepare=0\n");
if (imp_sth->disable_fallback_for_server_prepare)
{
do_error(sth, ER_UNSUPPORTED_PS,
"\"LIMIT ?\" not supported with server side prepare",
"HY000");
mysql_stmt_close(imp_sth->stmt);
imp_sth->stmt= NULL;
return FALSE;
}
/* ... then we do not want to try server side prepare (use emulation) */
imp_sth->use_server_side_prepare= 0;
break;
}
}
else if (str_ptr < str_last_ptr - 6 &&
isspace(*(str_ptr + 0)) &&
tolower(*(str_ptr + 1)) == 'l' &&
tolower(*(str_ptr + 2)) == 'i' &&
tolower(*(str_ptr + 3)) == 'm' &&
tolower(*(str_ptr + 4)) == 'i' &&
tolower(*(str_ptr + 5)) == 't' &&
isspace(*(str_ptr + 6)))
{
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh), "LIMIT set limit flag to 1\n");
limit_flag= 1;
}
#endif
      /*
        Placeholders are not supported in CALL statements
      */
if (str_ptr < str_last_ptr - 4 &&
tolower(*(str_ptr + 0)) == 'c' &&
tolower(*(str_ptr + 1)) == 'a' &&
tolower(*(str_ptr + 2)) == 'l' &&
tolower(*(str_ptr + 3)) == 'l' &&
isspace(*(str_ptr + 4)))
{
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh), "Disable PS mode for CALL()\n");
if (imp_sth->disable_fallback_for_server_prepare)
{
do_error(sth, ER_UNSUPPORTED_PS,
"\"CALL()\" not supported with server side prepare",
"HY000");
mysql_stmt_close(imp_sth->stmt);
imp_sth->stmt= NULL;
return FALSE;
}
imp_sth->use_server_side_prepare= 0;
break;
}
}
}
#endif
#if MYSQL_VERSION_ID >= SERVER_PREPARE_VERSION
if (imp_sth->use_server_side_prepare)
{
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh),
"\t\tuse_server_side_prepare set\n");
    /* do we really need this? If we do, we should return, not just continue */
    if (imp_sth->stmt)
      fprintf(stderr,
              "ERROR: Trying to prepare a new stmt while an earlier one has not been closed\n");
    imp_sth->stmt= mysql_stmt_init(imp_dbh->pmysql);
    if (! imp_sth->stmt)
    {
      if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
        PerlIO_printf(DBIc_LOGPIO(imp_xxh),
                      "\t\tERROR: mysql_stmt_init() failed: ERROR NO: %d ERROR MSG: %s\n",
                      mysql_errno(imp_dbh->pmysql),
                      mysql_error(imp_dbh->pmysql));
      /* mysql_stmt_prepare() would crash on a NULL stmt, so fail the prepare here */
      do_error(sth, mysql_errno(imp_dbh->pmysql),
               mysql_error(imp_dbh->pmysql),
               mysql_sqlstate(imp_dbh->pmysql));
      return FALSE;
    }
prepare_retval= mysql_stmt_prepare(imp_sth->stmt,
statement,
strlen(statement));
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh),
"\t\tmysql_stmt_prepare returned %d\n",
prepare_retval);
if (prepare_retval)
{
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh),
"\t\tmysql_stmt_prepare %d %s\n",
mysql_stmt_errno(imp_sth->stmt),
mysql_stmt_error(imp_sth->stmt));
/* For commands that are not supported by server side prepared statement
mechanism lets try to pass them through regular API */
if (!imp_sth->disable_fallback_for_server_prepare && mysql_stmt_errno(imp_sth->stmt) == ER_UNSUPPORTED_PS)
{
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh),
"\t\tSETTING imp_sth->use_server_side_prepare to 0\n");
imp_sth->use_server_side_prepare= 0;
}
else
{
do_error(sth, mysql_stmt_errno(imp_sth->stmt),
mysql_stmt_error(imp_sth->stmt),
mysql_sqlstate(imp_dbh->pmysql));
mysql_stmt_close(imp_sth->stmt);
imp_sth->stmt= NULL;
return FALSE;
}
}
else
{
DBIc_NUM_PARAMS(imp_sth)= mysql_stmt_param_count(imp_sth->stmt);
/* mysql_stmt_param_count */
if (DBIc_NUM_PARAMS(imp_sth) > 0)
{
/* Allocate memory for bind variables */
imp_sth->bind= alloc_bind(DBIc_NUM_PARAMS(imp_sth));
imp_sth->fbind= alloc_fbind(DBIc_NUM_PARAMS(imp_sth));
imp_sth->has_been_bound= 0;
/* Initialize ph variables with NULL values */
for (i= 0,
bind= imp_sth->bind,
fbind= imp_sth->fbind,
bind_end= bind+DBIc_NUM_PARAMS(imp_sth);
bind < bind_end ;
bind++, fbind++, i++ )
{
bind->buffer_type= MYSQL_TYPE_STRING;
bind->buffer= NULL;
bind->length= &(fbind->length);
bind->is_null= (char*) &(fbind->is_null);
fbind->is_null= 1;
fbind->length= 0;
}
}
}
}
#endif
#if MYSQL_VERSION_ID >= SERVER_PREPARE_VERSION
/* Count the number of parameters (driver, vs server-side) */
if (imp_sth->use_server_side_prepare == 0)
DBIc_NUM_PARAMS(imp_sth) = count_params((imp_xxh_t *)imp_dbh, aTHX_ statement,
imp_dbh->bind_comment_placeholders);
#else
DBIc_NUM_PARAMS(imp_sth) = count_params((imp_xxh_t *)imp_dbh, aTHX_ statement,
imp_dbh->bind_comment_placeholders);
#endif
/* Allocate memory for parameters */
imp_sth->params= alloc_param(DBIc_NUM_PARAMS(imp_sth));
DBIc_IMPSET_on(imp_sth);
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t<- dbd_st_prepare\n");
return 1;
}
/***************************************************************************
 * Name:    mysql_st_free_result_sets
*
* Purpose: Clean-up single or multiple result sets (if any)
*
* Inputs: sth - Statement handle
* imp_sth - driver's private statement handle
*
* Returns: 1 ok
* 0 error
*************************************************************************/
int mysql_st_free_result_sets (SV * sth, imp_sth_t * imp_sth)
{
dTHX;
D_imp_dbh_from_sth;
D_imp_xxh(sth);
int next_result_rc= -1;
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t>- dbd_st_free_result_sets\n");
#if MYSQL_VERSION_ID >= MULTIPLE_RESULT_SET_VERSION
do
{
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t<- dbd_st_free_result_sets RC %d\n", next_result_rc);
if (next_result_rc == 0)
{
if (!(imp_sth->result = mysql_use_result(imp_dbh->pmysql)))
{
/* Check for possible error */
if (mysql_field_count(imp_dbh->pmysql))
{
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t<- dbd_st_free_result_sets ERROR: %s\n",
mysql_error(imp_dbh->pmysql));
do_error(sth, mysql_errno(imp_dbh->pmysql), mysql_error(imp_dbh->pmysql),
mysql_sqlstate(imp_dbh->pmysql));
return 0;
}
}
}
if (imp_sth->result)
{
mysql_free_result(imp_sth->result);
imp_sth->result=NULL;
}
} while ((next_result_rc=mysql_next_result(imp_dbh->pmysql))==0);
if (next_result_rc > 0)
{
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t<- dbd_st_free_result_sets: Error while processing multi-result set: %s\n",
mysql_error(imp_dbh->pmysql));
do_error(sth, mysql_errno(imp_dbh->pmysql), mysql_error(imp_dbh->pmysql),
mysql_sqlstate(imp_dbh->pmysql));
}
#else
if (imp_sth->result)
{
mysql_free_result(imp_sth->result);
imp_sth->result=NULL;
}
#endif
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t<- dbd_st_free_result_sets\n");
return 1;
}
#if MYSQL_VERSION_ID >= MULTIPLE_RESULT_SET_VERSION
/***************************************************************************
* Name: dbd_st_more_results
*
* Purpose: Move onto the next result set (if any)
*
* Inputs: sth - Statement handle
* imp_sth - driver's private statement handle
*
 * Returns: 1 if there is another result set
 *          0 if there are no more result sets, or on error
 *            (do_error has been called in the latter case)
*************************************************************************/
int dbd_st_more_results(SV* sth, imp_sth_t* imp_sth)
{
dTHX;
D_imp_dbh_from_sth;
D_imp_xxh(sth);
int use_mysql_use_result=imp_sth->use_mysql_use_result;
int next_result_return_code, i;
MYSQL* svsock= imp_dbh->pmysql;
if (!SvROK(sth) || SvTYPE(SvRV(sth)) != SVt_PVHV)
croak("Expected hash array");
if (!mysql_more_results(svsock))
{
    /* No more pending result set(s) */
    if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
      PerlIO_printf(DBIc_LOGPIO(imp_xxh),
                    "\n      <- dbd_st_more_results no more results\n");
return 0;
}
if (imp_sth->use_server_side_prepare)
{
    do_warn(sth, JW_ERR_NOT_IMPLEMENTED,
            "Processing of multiple result sets is not possible with server side prepare");
return 0;
}
/*
* Free cached array attributes
*/
for (i= 0; i < AV_ATTRIB_LAST; i++)
{
if (imp_sth->av_attr[i])
SvREFCNT_dec(imp_sth->av_attr[i]);
imp_sth->av_attr[i]= Nullav;
}
/* Release previous MySQL result*/
if (imp_sth->result)
mysql_free_result(imp_sth->result);
if (DBIc_ACTIVE(imp_sth))
DBIc_ACTIVE_off(imp_sth);
next_result_return_code= mysql_next_result(svsock);
imp_sth->warning_count = mysql_warning_count(imp_dbh->pmysql);
/*
mysql_next_result returns
0 if there are more results
-1 if there are no more results
>0 if there was an error
*/
if (next_result_return_code > 0)
{
do_error(sth, mysql_errno(svsock), mysql_error(svsock),
mysql_sqlstate(svsock));
return 0;
}
else if(next_result_return_code == -1)
{
return 0;
}
else
{
/* Store the result from the Query */
imp_sth->result = use_mysql_use_result ?
mysql_use_result(svsock) : mysql_store_result(svsock);
if (mysql_errno(svsock))
{
do_error(sth, mysql_errno(svsock), mysql_error(svsock),
mysql_sqlstate(svsock));
return 0;
}
imp_sth->row_num= mysql_affected_rows(imp_dbh->pmysql);
if (imp_sth->result == NULL)
{
/* No "real" rowset*/
DBIc_NUM_FIELDS(imp_sth)= 0; /* for DBI <= 1.53 */
DBIS->set_attr_k(sth, sv_2mortal(newSVpvn("NUM_OF_FIELDS",13)), 0,
sv_2mortal(newSViv(0)));
return 1;
}
else
{
/* We have a new rowset */
imp_sth->currow=0;
/* delete cached handle attributes */
/* XXX should be driven by a list to ease maintenance */
(void)hv_delete((HV*)SvRV(sth), "NAME", 4, G_DISCARD);
(void)hv_delete((HV*)SvRV(sth), "NULLABLE", 8, G_DISCARD);
(void)hv_delete((HV*)SvRV(sth), "NUM_OF_FIELDS", 13, G_DISCARD);
(void)hv_delete((HV*)SvRV(sth), "PRECISION", 9, G_DISCARD);
(void)hv_delete((HV*)SvRV(sth), "SCALE", 5, G_DISCARD);
(void)hv_delete((HV*)SvRV(sth), "TYPE", 4, G_DISCARD);
(void)hv_delete((HV*)SvRV(sth), "mysql_insertid", 14, G_DISCARD);
(void)hv_delete((HV*)SvRV(sth), "mysql_is_auto_increment", 23, G_DISCARD);
(void)hv_delete((HV*)SvRV(sth), "mysql_is_blob", 13, G_DISCARD);
(void)hv_delete((HV*)SvRV(sth), "mysql_is_key", 12, G_DISCARD);
(void)hv_delete((HV*)SvRV(sth), "mysql_is_num", 12, G_DISCARD);
(void)hv_delete((HV*)SvRV(sth), "mysql_is_pri_key", 16, G_DISCARD);
(void)hv_delete((HV*)SvRV(sth), "mysql_length", 12, G_DISCARD);
(void)hv_delete((HV*)SvRV(sth), "mysql_max_length", 16, G_DISCARD);
(void)hv_delete((HV*)SvRV(sth), "mysql_table", 11, G_DISCARD);
(void)hv_delete((HV*)SvRV(sth), "mysql_type", 10, G_DISCARD);
(void)hv_delete((HV*)SvRV(sth), "mysql_type_name", 15, G_DISCARD);
(void)hv_delete((HV*)SvRV(sth), "mysql_warning_count", 20, G_DISCARD);
/* Adjust NUM_OF_FIELDS - which also adjusts the row buffer size */
DBIc_NUM_FIELDS(imp_sth)= 0; /* for DBI <= 1.53 */
DBIc_DBISTATE(imp_sth)->set_attr_k(sth, sv_2mortal(newSVpvn("NUM_OF_FIELDS",13)), 0,
sv_2mortal(newSViv(mysql_num_fields(imp_sth->result)))
);
DBIc_ACTIVE_on(imp_sth);
imp_sth->done_desc = 0;
}
imp_dbh->pmysql->net.last_errno= 0;
return 1;
}
}
#endif
/**************************************************************************
*
* Name: mysql_st_internal_execute
*
* Purpose: Internal version for executing a statement, called both from
* within the "do" and the "execute" method.
*
* Inputs: h - object handle, for storing error messages
* statement - query being executed
* attribs - statement attributes, currently ignored
* num_params - number of parameters being bound
* params - parameter array
* result - where to store results, if any
* svsock - socket connected to the database
*
**************************************************************************/
my_ulonglong mysql_st_internal_execute(
SV *h, /* could be sth or dbh */
SV *statement,
SV *attribs,
int num_params,
imp_sth_ph_t *params,
MYSQL_RES **result,
MYSQL *svsock,
int use_mysql_use_result
)
{
dTHX;
bool bind_type_guessing= FALSE;
bool bind_comment_placeholders= TRUE;
STRLEN slen;
char *sbuf = SvPV(statement, slen);
char *table;
char *salloc;
int htype;
#if MYSQL_ASYNC
bool async = FALSE;
#endif
my_ulonglong rows= 0;
/* thank you DBI.c for this info! */
D_imp_xxh(h);
  attribs= attribs; /* silence unused-parameter warning */
htype= DBIc_TYPE(imp_xxh);
/*
It is important to import imp_dbh properly according to the htype
that it is! Also, one might ask why bind_type_guessing is assigned
in each block. Well, it's because D_imp_ macros called in these
blocks make it so imp_dbh is not "visible" or defined outside of the
if/else (when compiled, it fails for imp_dbh not being defined).
*/
/* h is a dbh */
if (htype == DBIt_DB)
{
D_imp_dbh(h);
/* if imp_dbh is not available, it causes segfault (proper) on OpenBSD */
if (imp_dbh && imp_dbh->bind_type_guessing)
{
bind_type_guessing= imp_dbh->bind_type_guessing;
bind_comment_placeholders= bind_comment_placeholders;
}
#if MYSQL_ASYNC
async = (bool) (imp_dbh->async_query_in_flight != NULL);
#endif
}
/* h is a sth */
else
{
D_imp_sth(h);
D_imp_dbh_from_sth;
/* if imp_dbh is not available, it causes segfault (proper) on OpenBSD */
if (imp_dbh)
{
bind_type_guessing= imp_dbh->bind_type_guessing;
bind_comment_placeholders= imp_dbh->bind_comment_placeholders;
}
#if MYSQL_ASYNC
async = imp_sth->is_async;
if(async) {
imp_dbh->async_query_in_flight = imp_sth;
} else {
imp_dbh->async_query_in_flight = NULL;
}
#endif
}
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh), "mysql_st_internal_execute MYSQL_VERSION_ID %d\n",
MYSQL_VERSION_ID );
salloc= parse_params(imp_xxh,
aTHX_ svsock,
sbuf,
&slen,
params,
num_params,
bind_type_guessing,
bind_comment_placeholders);
if (salloc)
{
sbuf= salloc;
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh), "Binding parameters: %s\n", sbuf);
}
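  /*
    Legacy extension: a statement of the form "listfields <table>" is not
    sent to the server as SQL; it is mapped onto mysql_list_fields() and
    yields a result set describing the table's columns.
  */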
if (slen >= 11 && (!strncmp(sbuf, "listfields ", 11) ||
!strncmp(sbuf, "LISTFIELDS ", 11)))
{
    /* skip the "listfields" keyword and any whitespace before the table name */
slen-= 10;
sbuf+= 10;
while (slen && isspace(*sbuf)) { --slen; ++sbuf; }
if (!slen)
{
do_error(h, JW_ERR_QUERY, "Missing table name" ,NULL);
return -2;
}
if (!(table= malloc(slen+1)))
{
do_error(h, JW_ERR_MEM, "Out of memory" ,NULL);
return -2;
}
strncpy(table, sbuf, slen);
sbuf= table;
while (slen && !isspace(*sbuf))
{
--slen;
++sbuf;
}
*sbuf++= '\0';
*result= mysql_list_fields(svsock, table, NULL);
free(table);
if (!(*result))
{
do_error(h, mysql_errno(svsock), mysql_error(svsock)
,mysql_sqlstate(svsock));
return -2;
}
return 0;
}
#if MYSQL_ASYNC
if(async) {
if((mysql_send_query(svsock, sbuf, slen)) &&
(!mysql_db_reconnect(h) ||
(mysql_send_query(svsock, sbuf, slen))))
{
rows = -2;
} else {
rows = 0;
}
} else {
#endif
if ((mysql_real_query(svsock, sbuf, slen)) &&
(!mysql_db_reconnect(h) ||
(mysql_real_query(svsock, sbuf, slen))))
{
rows = -2;
} else {
/** Store the result from the Query */
*result= use_mysql_use_result ?
mysql_use_result(svsock) : mysql_store_result(svsock);
if (mysql_errno(svsock))
rows = -2;
else if (*result)
rows = mysql_num_rows(*result);
else {
rows = mysql_affected_rows(svsock);
/* mysql_affected_rows(): -1 indicates that the query returned an error */
if (rows == (my_ulonglong)-1)
rows = -2;
}
}
#if MYSQL_ASYNC
}
#endif
if (salloc)
Safefree(salloc);
if(rows == (my_ulonglong)-2) {
do_error(h, mysql_errno(svsock), mysql_error(svsock),
mysql_sqlstate(svsock));
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh), "IGNORING ERROR errno %d\n", mysql_errno(svsock));
}
return(rows);
}
/**************************************************************************
*
* Name: mysql_st_internal_execute41
*
* Purpose: Internal version for executing a prepared statement, called both
* from within the "do" and the "execute" method.
* MYSQL 4.1 API
*
*
 *  Inputs:  sth - statement handle, for storing error messages
 *           num_params - number of parameters being bound
 *           result - where to store the result set metadata, if any
 *           stmt - server-side prepared statement handle
 *           bind - parameter bind array
 *           has_been_bound - whether bind was already passed to
 *                            mysql_stmt_bind_param()
*
**************************************************************************/
#if MYSQL_VERSION_ID >= SERVER_PREPARE_VERSION
my_ulonglong mysql_st_internal_execute41(
SV *sth,
int num_params,
MYSQL_RES **result,
MYSQL_STMT *stmt,
MYSQL_BIND *bind,
int *has_been_bound
)
{
int i;
enum enum_field_types enum_type;
dTHX;
int execute_retval;
my_ulonglong rows=0;
D_imp_xxh(sth);
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh),
"\t-> mysql_st_internal_execute41\n");
/* free result if exists */
if (*result)
{
mysql_free_result(*result);
*result= 0;
}
  /*
    If any changes were made to the placeholder variables
    we have to rebind them
  */
if (num_params > 0 && !(*has_been_bound))
{
if (mysql_stmt_bind_param(stmt,bind))
goto error;
*has_been_bound= 1;
}
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh),
"\t\tmysql_st_internal_execute41 calling mysql_execute with %d num_params\n",
num_params);
execute_retval= mysql_stmt_execute(stmt);
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh),
"\t\tmysql_stmt_execute returned %d\n",
execute_retval);
if (execute_retval)
goto error;
/*
This statement does not return a result set (INSERT, UPDATE...)
*/
if (!(*result= mysql_stmt_result_metadata(stmt)))
{
if (mysql_stmt_errno(stmt))
goto error;
rows= mysql_stmt_affected_rows(stmt);
/* mysql_stmt_affected_rows(): -1 indicates that the query returned an error */
if (rows == (my_ulonglong)-1)
goto error;
}
/*
This statement returns a result set (SELECT...)
*/
else
{
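    /*
      Enable STMT_ATTR_UPDATE_MAX_LENGTH only if at least one column comes
      back as a string/blob type; purely numeric and BIT columns use
      fixed-size buffers and do not need max_length to be computed.
    */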
for (i = mysql_stmt_field_count(stmt) - 1; i >=0; --i) {
enum_type = mysql_to_perl_type(stmt->fields[i].type);
if (enum_type != MYSQL_TYPE_DOUBLE && enum_type != MYSQL_TYPE_LONG && enum_type != MYSQL_TYPE_LONGLONG && enum_type != MYSQL_TYPE_BIT)
{
/* mysql_stmt_store_result to update MYSQL_FIELD->max_length */
my_bool on = 1;
mysql_stmt_attr_set(stmt, STMT_ATTR_UPDATE_MAX_LENGTH, &on);
break;
}
}
/* Get the total rows affected and return */
if (mysql_stmt_store_result(stmt))
goto error;
else
rows= mysql_stmt_num_rows(stmt);
}
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh),
"\t<- mysql_internal_execute_41 returning %llu rows\n",
rows);
return(rows);
error:
if (*result)
{
mysql_free_result(*result);
*result= 0;
}
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh),
" errno %d err message %s\n",
mysql_stmt_errno(stmt),
mysql_stmt_error(stmt));
do_error(sth, mysql_stmt_errno(stmt), mysql_stmt_error(stmt),
mysql_stmt_sqlstate(stmt));
mysql_stmt_reset(stmt);
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh),
"\t<- mysql_st_internal_execute41\n");
return -2;
}
#endif
/***************************************************************************
*
* Name: dbd_st_execute
*
 *  Purpose: Called for executing a prepared SQL statement; our part
 *           of the execute method
*
* Input: sth - statement handle being initialized
* imp_sth - drivers private statement handle data
*
* Returns: TRUE for success, FALSE otherwise; do_error will
* be called in the latter case
*
**************************************************************************/
int dbd_st_execute(SV* sth, imp_sth_t* imp_sth)
{
dTHX;
char actual_row_num[64];
int i;
SV **statement;
D_imp_dbh_from_sth;
D_imp_xxh(sth);
#if defined (dTHR)
dTHR;
#endif
#if MYSQL_VERSION_ID >= SERVER_PREPARE_VERSION
int use_server_side_prepare = imp_sth->use_server_side_prepare;
int disable_fallback_for_server_prepare = imp_sth->disable_fallback_for_server_prepare;
#endif
ASYNC_CHECK_RETURN(sth, -2);
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh),
" -> dbd_st_execute for %p\n", sth);
if (!SvROK(sth) || SvTYPE(SvRV(sth)) != SVt_PVHV)
croak("Expected hash array");
/* Free cached array attributes */
for (i= 0; i < AV_ATTRIB_LAST; i++)
{
if (imp_sth->av_attr[i])
SvREFCNT_dec(imp_sth->av_attr[i]);
imp_sth->av_attr[i]= Nullav;
}
statement= hv_fetch((HV*) SvRV(sth), "Statement", 9, FALSE);
/*
Clean-up previous result set(s) for sth to prevent
'Commands out of sync' error
*/
mysql_st_free_result_sets (sth, imp_sth);
#if MYSQL_VERSION_ID >= SERVER_PREPARE_VERSION
if (use_server_side_prepare)
{
if (imp_sth->use_mysql_use_result)
{
if (disable_fallback_for_server_prepare)
{
do_error(sth, ER_UNSUPPORTED_PS,
"\"mysql_use_result\" not supported with server side prepare",
"HY000");
return 0;
}
use_server_side_prepare = 0;
}
if (use_server_side_prepare)
{
imp_sth->row_num= mysql_st_internal_execute41(
sth,
DBIc_NUM_PARAMS(imp_sth),
&imp_sth->result,
imp_sth->stmt,
imp_sth->bind,
&imp_sth->has_been_bound
);
if (imp_sth->row_num == (my_ulonglong)-2) /* -2 means error */
{
SV *err = DBIc_ERR(imp_xxh);
if (!disable_fallback_for_server_prepare && SvIV(err) == ER_UNSUPPORTED_PS)
{
use_server_side_prepare = 0;
}
}
}
}
if (!use_server_side_prepare)
#endif
{
imp_sth->row_num= mysql_st_internal_execute(
sth,
*statement,
NULL,
DBIc_NUM_PARAMS(imp_sth),
imp_sth->params,
&imp_sth->result,
imp_dbh->pmysql,
imp_sth->use_mysql_use_result
);
#if MYSQL_ASYNC
if(imp_dbh->async_query_in_flight) {
DBIc_ACTIVE_on(imp_sth);
return 0;
}
#endif
}
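  /*
    row_num is unsigned; mysql_st_internal_execute*() returns
    (my_ulonglong)-2 on error, so testing row_num+1 != (my_ulonglong)-1
    detects the error case without a signed comparison.
  */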
if (imp_sth->row_num+1 != (my_ulonglong)-1)
{
if (!imp_sth->result)
{
imp_sth->insertid= mysql_insert_id(imp_dbh->pmysql);
#if MYSQL_VERSION_ID >= MULTIPLE_RESULT_SET_VERSION
if (mysql_more_results(imp_dbh->pmysql))
DBIc_ACTIVE_on(imp_sth);
#endif
}
else
{
/** Store the result in the current statement handle */
DBIc_NUM_FIELDS(imp_sth)= mysql_num_fields(imp_sth->result);
DBIc_ACTIVE_on(imp_sth);
if (!use_server_side_prepare)
imp_sth->done_desc= 0;
imp_sth->fetch_done= 0;
}
}
imp_sth->warning_count = mysql_warning_count(imp_dbh->pmysql);
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
{
/*
PerlIO_printf doesn't always handle imp_sth->row_num %llu
consistently!!
*/
sprintf(actual_row_num, "%llu", imp_sth->row_num);
PerlIO_printf(DBIc_LOGPIO(imp_xxh),
" <- dbd_st_execute returning imp_sth->row_num %s\n",
actual_row_num);
}
return (int)imp_sth->row_num;
}
/**************************************************************************
*
* Name: dbd_describe
*
* Purpose: Called from within the fetch method to describe the result
*
* Input: sth - statement handle being initialized
* imp_sth - our part of the statement handle, there's no
* need for supplying both; Tim just doesn't remove it
*
* Returns: TRUE for success, FALSE otherwise; do_error will
* be called in the latter case
*
**************************************************************************/
int dbd_describe(SV* sth, imp_sth_t* imp_sth)
{
dTHX;
D_imp_xxh(sth);
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t--> dbd_describe\n");
#if MYSQL_VERSION_ID >= SERVER_PREPARE_VERSION
if (imp_sth->use_server_side_prepare)
{
int i;
int col_type;
int num_fields= DBIc_NUM_FIELDS(imp_sth);
imp_sth_fbh_t *fbh;
MYSQL_BIND *buffer;
MYSQL_FIELD *fields;
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t\tdbd_describe() num_fields %d\n",
num_fields);
if (imp_sth->done_desc)
return TRUE;
if (!num_fields || !imp_sth->result)
{
/* no metadata */
do_error(sth, JW_ERR_SEQUENCE,
"no metadata information while trying describe result set",
NULL);
return 0;
}
/* allocate fields buffers */
if ( !(imp_sth->fbh= alloc_fbuffer(num_fields))
|| !(imp_sth->buffer= alloc_bind(num_fields)) )
{
/* Out of memory */
do_error(sth, JW_ERR_SEQUENCE,
"Out of memory in dbd_sescribe()",NULL);
return 0;
}
fields= mysql_fetch_fields(imp_sth->result);
for (
fbh= imp_sth->fbh, buffer= (MYSQL_BIND*)imp_sth->buffer, i= 0;
i < num_fields;
i++, fbh++, buffer++
)
{
/* get the column type */
col_type = fields ? fields[i].type : MYSQL_TYPE_STRING;
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
{
PerlIO_printf(DBIc_LOGPIO(imp_xxh),"\t\ti %d col_type %d fbh->length %lu\n",
i, col_type, fbh->length);
PerlIO_printf(DBIc_LOGPIO(imp_xxh),
"\t\tfields[i].length %lu fields[i].max_length %lu fields[i].type %d fields[i].charsetnr %d\n",
fields[i].length, fields[i].max_length, fields[i].type,
fields[i].charsetnr);
}
fbh->charsetnr = fields[i].charsetnr;
#if MYSQL_VERSION_ID < FIELD_CHARSETNR_VERSION
fbh->flags = fields[i].flags;
#endif
buffer->buffer_type= mysql_to_perl_type(col_type);
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t\tmysql_to_perl_type returned %d\n",
col_type);
buffer->length= &(fbh->length);
buffer->is_null= (my_bool*) &(fbh->is_null);
buffer->error= (my_bool*) &(fbh->error);
switch (buffer->buffer_type) {
case MYSQL_TYPE_DOUBLE:
buffer->buffer_length= sizeof(fbh->ddata);
buffer->buffer= (char*) &fbh->ddata;
break;
case MYSQL_TYPE_LONG:
case MYSQL_TYPE_LONGLONG:
buffer->buffer_length= sizeof(fbh->ldata);
buffer->buffer= (char*) &fbh->ldata;
buffer->is_unsigned= (fields[i].flags & UNSIGNED_FLAG) ? 1 : 0;
break;
case MYSQL_TYPE_BIT:
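        /* BIT(1..64) columns hold at most 64 bits, i.e. 8 bytes */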
buffer->buffer_length= 8;
Newz(908, fbh->data, buffer->buffer_length, char);
buffer->buffer= (char *) fbh->data;
break;
default:
buffer->buffer_length= fields[i].max_length ? fields[i].max_length : 1;
Newz(908, fbh->data, buffer->buffer_length, char);
buffer->buffer= (char *) fbh->data;
}
}
if (mysql_stmt_bind_result(imp_sth->stmt, imp_sth->buffer))
{
do_error(sth, mysql_stmt_errno(imp_sth->stmt),
mysql_stmt_error(imp_sth->stmt),
mysql_stmt_sqlstate(imp_sth->stmt));
return 0;
}
}
#endif
imp_sth->done_desc= 1;
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t<- dbd_describe\n");
return TRUE;
}
/**************************************************************************
*
* Name: dbd_st_fetch
*
* Purpose: Called for fetching a result row
*
* Input: sth - statement handle being initialized
* imp_sth - drivers private statement handle data
*
* Returns: array of columns; the array is allocated by DBI via
* DBIc_DBISTATE(imp_sth)->get_fbav(imp_sth), even the values
* of the array are prepared, we just need to modify them
* appropriately
*
**************************************************************************/
AV*
dbd_st_fetch(SV *sth, imp_sth_t* imp_sth)
{
dTHX;
int num_fields, ChopBlanks, i, rc;
unsigned long *lengths;
AV *av;
int av_length, av_readonly;
MYSQL_ROW cols;
D_imp_dbh_from_sth;
MYSQL* svsock= imp_dbh->pmysql;
imp_sth_fbh_t *fbh;
D_imp_xxh(sth);
#if MYSQL_VERSION_ID >=SERVER_PREPARE_VERSION
MYSQL_BIND *buffer;
#endif
MYSQL_FIELD *fields;
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t-> dbd_st_fetch\n");
#if MYSQL_ASYNC
if(imp_dbh->async_query_in_flight) {
if(mysql_db_async_result(sth, &imp_sth->result) <= 0) {
return Nullav;
}
}
#endif
#if MYSQL_VERSION_ID >=SERVER_PREPARE_VERSION
if (imp_sth->use_server_side_prepare)
{
if (!DBIc_ACTIVE(imp_sth) )
{
do_error(sth, JW_ERR_SEQUENCE, "no statement executing\n",NULL);
return Nullav;
}
if (imp_sth->fetch_done)
{
do_error(sth, JW_ERR_SEQUENCE, "fetch() but fetch already done",NULL);
return Nullav;
}
if (!imp_sth->done_desc)
{
if (!dbd_describe(sth, imp_sth))
{
do_error(sth, JW_ERR_SEQUENCE, "Error while describe result set.",
NULL);
return Nullav;
}
}
}
#endif
ChopBlanks = DBIc_is(imp_sth, DBIcf_ChopBlanks);
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh),
"\t\tdbd_st_fetch for %p, chopblanks %d\n",
sth, ChopBlanks);
if (!imp_sth->result)
{
do_error(sth, JW_ERR_SEQUENCE, "fetch() without execute()" ,NULL);
return Nullav;
}
/* fix from 2.9008 */
imp_dbh->pmysql->net.last_errno = 0;
#if MYSQL_VERSION_ID >=SERVER_PREPARE_VERSION
if (imp_sth->use_server_side_prepare)
{
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t\tdbd_st_fetch calling mysql_fetch\n");
if ((rc= mysql_stmt_fetch(imp_sth->stmt)))
{
if (rc == 1)
do_error(sth, mysql_stmt_errno(imp_sth->stmt),
mysql_stmt_error(imp_sth->stmt),
mysql_stmt_sqlstate(imp_sth->stmt));
#if MYSQL_VERSION_ID >= MYSQL_VERSION_5_0
if (rc == MYSQL_DATA_TRUNCATED) {
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t\tdbd_st_fetch data truncated\n");
goto process;
}
#endif
if (rc == MYSQL_NO_DATA)
{
/* Update row_num to affected_rows value */
imp_sth->row_num= mysql_stmt_affected_rows(imp_sth->stmt);
imp_sth->fetch_done=1;
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t\tdbd_st_fetch no data\n");
}
dbd_st_finish(sth, imp_sth);
return Nullav;
}
process:
imp_sth->currow++;
av= DBIc_DBISTATE(imp_sth)->get_fbav(imp_sth);
num_fields=mysql_stmt_field_count(imp_sth->stmt);
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh),
"\t\tdbd_st_fetch called mysql_fetch, rc %d num_fields %d\n",
rc, num_fields);
for (
buffer= imp_sth->buffer,
fbh= imp_sth->fbh,
i= 0;
i < num_fields;
i++,
fbh++,
buffer++
)
{
SV *sv= AvARRAY(av)[i]; /* Note: we (re)use the SV in the AV */
STRLEN len;
/* This is wrong, null is not being set correctly
* This is not the way to determine length (this would break blobs!)
*/
if (fbh->is_null)
(void) SvOK_off(sv); /* Field is NULL, return undef */
else
{
        /* In dbd_describe() we allocated for BLOB/TEXT columns only as many
           bytes as the server reported via max_length (at least 1). Here we
           know the real size of the field, so we must grow the buffer and
           refetch the column value.
        */
if (fbh->length > buffer->buffer_length || fbh->error)
{
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh),
"\t\tRefetch BLOB/TEXT column: %d, length: %lu, error: %d\n",
i, fbh->length, fbh->error);
Renew(fbh->data, fbh->length, char);
buffer->buffer_length= fbh->length;
buffer->buffer= (char *) fbh->data;
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) {
int j;
int m = MIN(*buffer->length, buffer->buffer_length);
char *ptr = (char*)buffer->buffer;
PerlIO_printf(DBIc_LOGPIO(imp_xxh),"\t\tbefore buffer->buffer: ");
for (j = 0; j < m; j++) {
PerlIO_printf(DBIc_LOGPIO(imp_xxh), "%c", *ptr++);
}
PerlIO_printf(DBIc_LOGPIO(imp_xxh),"\n");
}
/*TODO: Use offset instead of 0 to fetch only remain part of data*/
if (mysql_stmt_fetch_column(imp_sth->stmt, buffer , i, 0))
do_error(sth, mysql_stmt_errno(imp_sth->stmt),
mysql_stmt_error(imp_sth->stmt),
mysql_stmt_sqlstate(imp_sth->stmt));
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) {
int j;
int m = MIN(*buffer->length, buffer->buffer_length);
char *ptr = (char*)buffer->buffer;
PerlIO_printf(DBIc_LOGPIO(imp_xxh),"\t\tafter buffer->buffer: ");
for (j = 0; j < m; j++) {
PerlIO_printf(DBIc_LOGPIO(imp_xxh), "%c", *ptr++);
}
PerlIO_printf(DBIc_LOGPIO(imp_xxh),"\n");
}
}
/* This does look a lot like Georg's PHP driver doesn't it? --Brian */
/* Credit due to Georg - mysqli_api.c ;) --PMG */
switch (buffer->buffer_type) {
case MYSQL_TYPE_DOUBLE:
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t\tst_fetch double data %f\n", fbh->ddata);
sv_setnv(sv, fbh->ddata);
break;
case MYSQL_TYPE_LONG:
case MYSQL_TYPE_LONGLONG:
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t\tst_fetch int data %"IVdf", unsigned? %d\n",
fbh->ldata, buffer->is_unsigned);
if (buffer->is_unsigned)
sv_setuv(sv, fbh->ldata);
else
sv_setiv(sv, fbh->ldata);
break;
case MYSQL_TYPE_BIT:
sv_setpvn(sv, fbh->data, fbh->length);
break;
default:
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t\tERROR IN st_fetch_string");
len= fbh->length;
/* ChopBlanks server-side prepared statement */
if (ChopBlanks)
{
/*
see bottom of:
http://www.mysql.org/doc/refman/5.0/en/c-api-datatypes.html
*/
if (fbh->charsetnr != 63)
while (len && fbh->data[len-1] == ' ') { --len; }
}
/* END OF ChopBlanks */
sv_setpvn(sv, fbh->data, len);
/* UTF8 */
/*HELMUT*/
#if defined(sv_utf8_decode) && MYSQL_VERSION_ID >=SERVER_PREPARE_VERSION
#if MYSQL_VERSION_ID >= FIELD_CHARSETNR_VERSION
/* SHOW COLLATION WHERE Id = 63; -- 63 == charset binary, collation binary */
if ((imp_dbh->enable_utf8 || imp_dbh->enable_utf8mb4) && fbh->charsetnr != 63)
#else
if ((imp_dbh->enable_utf8 || imp_dbh->enable_utf8mb4) && !(fbh->flags & BINARY_FLAG))
#endif
sv_utf8_decode(sv);
#endif
/* END OF UTF8 */
break;
}
}
}
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t<- dbd_st_fetch, %d cols\n", num_fields);
return av;
}
else
{
#endif
imp_sth->currow++;
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
{
PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\tdbd_st_fetch result set details\n");
PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\timp_sth->result=%p\n", imp_sth->result);
PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\tmysql_num_fields=%u\n",
mysql_num_fields(imp_sth->result));
PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\tmysql_num_rows=%llu\n",
mysql_num_rows(imp_sth->result));
PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\tmysql_affected_rows=%llu\n",
mysql_affected_rows(imp_dbh->pmysql));
PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\tdbd_st_fetch for %p, currow= %d\n",
sth,imp_sth->currow);
}
if (!(cols= mysql_fetch_row(imp_sth->result)))
{
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
{
PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\tdbd_st_fetch, no more rows to fetch");
}
if (mysql_errno(imp_dbh->pmysql))
do_error(sth, mysql_errno(imp_dbh->pmysql),
mysql_error(imp_dbh->pmysql),
mysql_sqlstate(imp_dbh->pmysql));
#if MYSQL_VERSION_ID >= MULTIPLE_RESULT_SET_VERSION
if (!mysql_more_results(svsock))
#endif
dbd_st_finish(sth, imp_sth);
return Nullav;
}
num_fields= mysql_num_fields(imp_sth->result);
fields= mysql_fetch_fields(imp_sth->result);
lengths= mysql_fetch_lengths(imp_sth->result);
if ((av= DBIc_FIELDS_AV(imp_sth)) != Nullav)
{
av_length= av_len(av)+1;
if (av_length != num_fields) /* Resize array if necessary */
{
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t<- dbd_st_fetch, size of results array(%d) != num_fields(%d)\n",
av_length, num_fields);
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t<- dbd_st_fetch, result fields(%d)\n",
DBIc_NUM_FIELDS(imp_sth));
av_readonly = SvREADONLY(av);
if (av_readonly)
SvREADONLY_off( av ); /* DBI sets this readonly */
while (av_length < num_fields)
{
av_store(av, av_length++, newSV(0));
}
while (av_length > num_fields)
{
SvREFCNT_dec(av_pop(av));
av_length--;
}
if (av_readonly)
SvREADONLY_on(av);
}
}
av= DBIc_DBISTATE(imp_sth)->get_fbav(imp_sth);
for (i= 0; i < num_fields; ++i)
{
char *col= cols[i];
SV *sv= AvARRAY(av)[i]; /* Note: we (re)use the SV in the AV */
if (col)
{
STRLEN len= lengths[i];
if (ChopBlanks)
{
while (len && col[len-1] == ' ')
{ --len; }
}
/* Set string value returned from mysql server */
sv_setpvn(sv, col, len);
switch (mysql_to_perl_type(fields[i].type)) {
case MYSQL_TYPE_DOUBLE:
/* Coerce to double and set scalar as NV */
(void) SvNV(sv);
SvNOK_only(sv);
break;
case MYSQL_TYPE_LONG:
case MYSQL_TYPE_LONGLONG:
/* Coerce to integer and set scalar as UV or IV, respectively */
if (fields[i].flags & UNSIGNED_FLAG)
{
(void) SvUV(sv);
SvIOK_only_UV(sv);
}
else
{
(void) SvIV(sv);
SvIOK_only(sv);
}
break;
#if MYSQL_VERSION_ID > NEW_DATATYPE_VERSION
case MYSQL_TYPE_BIT:
/* Leave it as a binary string */
break;
#endif
default:
/* UTF8 */
/*HELMUT*/
#if defined(sv_utf8_decode) && MYSQL_VERSION_ID >=SERVER_PREPARE_VERSION
/* see bottom of: http://www.mysql.org/doc/refman/5.0/en/c-api-datatypes.html */
if ((imp_dbh->enable_utf8 || imp_dbh->enable_utf8mb4) && fields[i].charsetnr != 63)
sv_utf8_decode(sv);
#endif
/* END OF UTF8 */
break;
}
}
else
(void) SvOK_off(sv); /* Field is NULL, return undef */
}
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t<- dbd_st_fetch, %d cols\n", num_fields);
return av;
#if MYSQL_VERSION_ID >= SERVER_PREPARE_VERSION
}
#endif
}
#if MYSQL_VERSION_ID >= SERVER_PREPARE_VERSION
/*
We have to fetch all data from the stmt.
This may be useful in two cases:
1. st_finish when we have an undefined statement
2. calling st_execute again when there is unfetched data left in the stmt
*/
int mysql_st_clean_cursor(SV* sth, imp_sth_t* imp_sth) {
if (DBIc_ACTIVE(imp_sth) && dbd_describe(sth, imp_sth) &&
!imp_sth->fetch_done)
mysql_stmt_free_result(imp_sth->stmt);
return 1;
}
#endif
/***************************************************************************
*
* Name: dbd_st_finish
*
* Purpose: Called for freeing a mysql result
*
* Input: sth - statement handle being finished
* imp_sth - drivers private statement handle data
*
* Returns: TRUE for success, FALSE otherwise; do_error() will
* be called in the latter case
*
**************************************************************************/
int dbd_st_finish(SV* sth, imp_sth_t* imp_sth) {
dTHX;
D_imp_xxh(sth);
#if defined (dTHR)
dTHR;
#endif
#if MYSQL_ASYNC
D_imp_dbh_from_sth;
if(imp_dbh->async_query_in_flight) {
mysql_db_async_result(sth, &imp_sth->result);
}
#endif
#if MYSQL_VERSION_ID >= SERVER_PREPARE_VERSION
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
{
PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\n--> dbd_st_finish\n");
}
if (imp_sth->use_server_side_prepare)
{
if (imp_sth && imp_sth->stmt)
{
if (!mysql_st_clean_cursor(sth, imp_sth))
{
do_error(sth, JW_ERR_SEQUENCE,
"Error happened while tried to clean up stmt",NULL);
return 0;
}
}
}
#endif
/*
Cancel further fetches from this cursor.
We don't close the cursor till DESTROY.
The application may re execute it.
*/
if (imp_sth && DBIc_ACTIVE(imp_sth))
{
/*
Clean-up previous result set(s) for sth to prevent
'Commands out of sync' error
*/
mysql_st_free_result_sets(sth, imp_sth);
}
DBIc_ACTIVE_off(imp_sth);
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
{
PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\n<-- dbd_st_finish\n");
}
return 1;
}
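/*
Illustrative sketch (not part of the driver): the calling order this
function assumes, written as a hypothetical C caller. The handle names
are placeholders; only the dbd_st_* entry points are taken from this file.
if (dbd_st_execute(sth, imp_sth) >= 0)
{
AV *row;
while ((row = dbd_st_fetch(sth, imp_sth)) != Nullav)
; // consume row
dbd_st_finish(sth, imp_sth); // cancel further fetches, stmt stays reusable
}
*/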
/**************************************************************************
*
* Name: dbd_st_destroy
*
* Purpose: Our part of the statement handle's destructor
*
* Input: sth - statement handle being destroyed
* imp_sth - drivers private statement handle data
*
* Returns: Nothing
*
**************************************************************************/
void dbd_st_destroy(SV *sth, imp_sth_t *imp_sth) {
dTHX;
D_imp_xxh(sth);
#if defined (dTHR)
dTHR;
#endif
int i;
#if MYSQL_VERSION_ID >= SERVER_PREPARE_VERSION
imp_sth_fbh_t *fbh;
int n;
n= DBIc_NUM_PARAMS(imp_sth);
if (n)
{
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\tFreeing %d parameters, bind %p fbind %p\n",
n, imp_sth->bind, imp_sth->fbind);
free_bind(imp_sth->bind);
free_fbind(imp_sth->fbind);
}
fbh= imp_sth->fbh;
if (fbh)
{
n = DBIc_NUM_FIELDS(imp_sth);
i = 0;
while (i < n)
{
if (fbh[i].data) Safefree(fbh[i].data);
++i;
}
free_fbuffer(fbh);
if (imp_sth->buffer)
free_bind(imp_sth->buffer);
}
if (imp_sth->stmt)
{
if (mysql_stmt_close(imp_sth->stmt))
{
do_error(DBIc_PARENT_H(imp_sth), mysql_stmt_errno(imp_sth->stmt),
mysql_stmt_error(imp_sth->stmt),
mysql_stmt_sqlstate(imp_sth->stmt));
}
}
#endif
/* dbd_st_finish has already been called by .xs code if needed. */
/* Free values allocated by dbd_bind_ph */
if (imp_sth->params)
{
free_param(aTHX_ imp_sth->params, DBIc_NUM_PARAMS(imp_sth));
imp_sth->params= NULL;
}
/* Free cached array attributes */
for (i= 0; i < AV_ATTRIB_LAST; i++)
{
if (imp_sth->av_attr[i])
SvREFCNT_dec(imp_sth->av_attr[i]);
imp_sth->av_attr[i]= Nullav;
}
/* let DBI know we've done it */
DBIc_IMPSET_off(imp_sth);
}
/*
**************************************************************************
*
* Name: dbd_st_STORE_attrib
*
* Purpose: Modifies a statement handle's attributes; currently
* only the mysql_use_result attribute is supported
*
* Input: sth - statement handle being modified
* imp_sth - drivers private statement handle data
* keysv - attribute name
* valuesv - attribute value
*
* Returns: TRUE for success, FALSE otherwise; do_error will
* be called in the latter case
*
**************************************************************************/
int
dbd_st_STORE_attrib(
SV *sth,
imp_sth_t *imp_sth,
SV *keysv,
SV *valuesv
)
{
dTHX;
STRLEN(kl);
char *key= SvPV(keysv, kl);
int retval= FALSE;
D_imp_xxh(sth);
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh),
"\t\t-> dbd_st_STORE_attrib for %p, key %s\n",
sth, key);
if (strEQ(key, "mysql_use_result"))
{
imp_sth->use_mysql_use_result= SvTRUE(valuesv);
}
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh),
"\t\t<- dbd_st_STORE_attrib for %p, result %d\n",
sth, retval);
return retval;
}
/*
**************************************************************************
*
* Name: dbd_st_FETCH_internal
*
* Purpose: Retrieves a statement handle's array attributes; we use
* a separate function, because creating the array
* attributes shares much code and it aids in supporting
* enhanced features like caching.
*
* Input: sth - statement handle; may even be a database handle,
* in which case this will be used for storing error
* messages only. This is only valid if cacheit (the
* last argument) is set to TRUE.
* what - internal attribute number
* res - pointer to a DBMS result
* cacheit - TRUE, if results may be cached in the sth.
*
* Returns: RV pointing to result array in case of success, NULL
* otherwise; do_error has already been called in the latter
* case.
*
**************************************************************************/
#ifndef IS_KEY
#define IS_KEY(A) (((A) & (PRI_KEY_FLAG | UNIQUE_KEY_FLAG | MULTIPLE_KEY_FLAG)) != 0)
#endif
#if !defined(IS_AUTO_INCREMENT) && defined(AUTO_INCREMENT_FLAG)
#define IS_AUTO_INCREMENT(A) (((A) & AUTO_INCREMENT_FLAG) != 0)
#endif
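/*
Flag-test example (MySQL field flags, values from mysql.h): a column in
a UNIQUE index has UNIQUE_KEY_FLAG set, so IS_KEY(curField->flags) is
true while IS_PRI_KEY(curField->flags) is false.
*/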
SV*
dbd_st_FETCH_internal(
SV *sth,
int what,
MYSQL_RES *res,
int cacheit
)
{
dTHX;
D_imp_sth(sth);
AV *av= Nullav;
MYSQL_FIELD *curField;
/* Are we asking for a legal value? */
if (what < 0 || what >= AV_ATTRIB_LAST)
do_error(sth, JW_ERR_NOT_IMPLEMENTED, "Not implemented", NULL);
/* Return cached value, if possible */
else if (cacheit && imp_sth->av_attr[what])
av= imp_sth->av_attr[what];
/* Does this sth really have a result? */
else if (!res)
do_error(sth, JW_ERR_NOT_ACTIVE,
"statement contains no result" ,NULL);
/* Do the real work. */
else
{
av= newAV();
mysql_field_seek(res, 0);
while ((curField= mysql_fetch_field(res)))
{
SV *sv;
switch(what) {
case AV_ATTRIB_NAME:
sv= newSVpvn(curField->name, strlen(curField->name));
break;
case AV_ATTRIB_TABLE:
sv= newSVpvn(curField->table, strlen(curField->table));
break;
case AV_ATTRIB_TYPE:
sv= newSViv((int) curField->type);
break;
case AV_ATTRIB_SQL_TYPE:
sv= newSViv((int) native2sql(curField->type)->data_type);
break;
case AV_ATTRIB_IS_PRI_KEY:
sv= boolSV(IS_PRI_KEY(curField->flags));
break;
case AV_ATTRIB_IS_NOT_NULL:
sv= boolSV(IS_NOT_NULL(curField->flags));
break;
case AV_ATTRIB_NULLABLE:
sv= boolSV(!IS_NOT_NULL(curField->flags));
break;
case AV_ATTRIB_LENGTH:
sv= newSViv((int) curField->length);
break;
case AV_ATTRIB_IS_NUM:
sv= newSViv((int) native2sql(curField->type)->is_num);
break;
case AV_ATTRIB_TYPE_NAME:
sv= newSVpv((char*) native2sql(curField->type)->type_name, 0);
break;
case AV_ATTRIB_MAX_LENGTH:
sv= newSViv((int) curField->max_length);
break;
case AV_ATTRIB_IS_AUTO_INCREMENT:
#if defined(AUTO_INCREMENT_FLAG)
sv= boolSV(IS_AUTO_INCREMENT(curField->flags));
break;
#else
croak("AUTO_INCREMENT_FLAG is not supported on this machine");
#endif
case AV_ATTRIB_IS_KEY:
sv= boolSV(IS_KEY(curField->flags));
break;
case AV_ATTRIB_IS_BLOB:
sv= boolSV(IS_BLOB(curField->flags));
break;
case AV_ATTRIB_SCALE:
sv= newSViv((int) curField->decimals);
break;
case AV_ATTRIB_PRECISION:
sv= newSViv((int) (curField->length > curField->max_length) ?
curField->length : curField->max_length);
break;
default:
sv= &PL_sv_undef;
break;
}
av_push(av, sv);
}
/* Ensure that this value is kept, decremented in
* dbd_st_destroy and dbd_st_execute. */
if (!cacheit)
return sv_2mortal(newRV_noinc((SV*)av));
imp_sth->av_attr[what]= av;
}
if (av == Nullav)
return &PL_sv_undef;
return sv_2mortal(newRV_inc((SV*)av));
}
/*
**************************************************************************
*
* Name: dbd_st_FETCH_attrib
*
* Purpose: Retrieves a statement handle's attributes
*
* Input: sth - statement handle being queried
* imp_sth - drivers private statement handle data
* keysv - attribute name
*
* Returns: NULL for an unknown attribute, "undef" for error,
* attribute value otherwise.
*
**************************************************************************/
#define ST_FETCH_AV(what) \
dbd_st_FETCH_internal(sth, (what), imp_sth->result, TRUE)
SV* dbd_st_FETCH_attrib(
SV *sth,
imp_sth_t *imp_sth,
SV *keysv
)
{
dTHX;
STRLEN(kl);
char *key= SvPV(keysv, kl);
SV *retsv= Nullsv;
D_imp_xxh(sth);
if (kl < 2)
return Nullsv;
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh),
" -> dbd_st_FETCH_attrib for %p, key %s\n",
sth, key);
switch (*key) {
case 'N':
if (strEQ(key, "NAME"))
retsv= ST_FETCH_AV(AV_ATTRIB_NAME);
else if (strEQ(key, "NULLABLE"))
retsv= ST_FETCH_AV(AV_ATTRIB_NULLABLE);
break;
case 'P':
if (strEQ(key, "PRECISION"))
retsv= ST_FETCH_AV(AV_ATTRIB_PRECISION);
if (strEQ(key, "ParamValues"))
{
HV *pvhv= newHV();
if (DBIc_NUM_PARAMS(imp_sth))
{
int n;
char key[100];
I32 keylen;
for (n= 0; n < DBIc_NUM_PARAMS(imp_sth); n++)
{
keylen= sprintf(key, "%d", n);
(void)hv_store(pvhv, key,
keylen, newSVsv(imp_sth->params[n].value), 0);
}
}
retsv= sv_2mortal(newRV_noinc((SV*)pvhv));
}
break;
case 'S':
if (strEQ(key, "SCALE"))
retsv= ST_FETCH_AV(AV_ATTRIB_SCALE);
break;
case 'T':
if (strEQ(key, "TYPE"))
retsv= ST_FETCH_AV(AV_ATTRIB_SQL_TYPE);
break;
case 'm':
switch (kl) {
case 10:
if (strEQ(key, "mysql_type"))
retsv= ST_FETCH_AV(AV_ATTRIB_TYPE);
break;
case 11:
if (strEQ(key, "mysql_table"))
retsv= ST_FETCH_AV(AV_ATTRIB_TABLE);
break;
case 12:
if ( strEQ(key, "mysql_is_key"))
retsv= ST_FETCH_AV(AV_ATTRIB_IS_KEY);
else if (strEQ(key, "mysql_is_num"))
retsv= ST_FETCH_AV(AV_ATTRIB_IS_NUM);
else if (strEQ(key, "mysql_length"))
retsv= ST_FETCH_AV(AV_ATTRIB_LENGTH);
else if (strEQ(key, "mysql_result"))
retsv= sv_2mortal(newSViv(PTR2IV(imp_sth->result)));
break;
case 13:
if (strEQ(key, "mysql_is_blob"))
retsv= ST_FETCH_AV(AV_ATTRIB_IS_BLOB);
break;
case 14:
if (strEQ(key, "mysql_insertid"))
{
/* We cannot return an IV, because insertid is a 64-bit my_ulonglong that may not fit into an IV. */
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh), "INSERT ID %llu\n", imp_sth->insertid);
return sv_2mortal(my_ulonglong2str(aTHX_ imp_sth->insertid));
}
break;
case 15:
if (strEQ(key, "mysql_type_name"))
retsv = ST_FETCH_AV(AV_ATTRIB_TYPE_NAME);
break;
case 16:
if ( strEQ(key, "mysql_is_pri_key"))
retsv= ST_FETCH_AV(AV_ATTRIB_IS_PRI_KEY);
else if (strEQ(key, "mysql_max_length"))
retsv= ST_FETCH_AV(AV_ATTRIB_MAX_LENGTH);
else if (strEQ(key, "mysql_use_result"))
retsv= boolSV(imp_sth->use_mysql_use_result);
break;
case 19:
if (strEQ(key, "mysql_warning_count"))
retsv= sv_2mortal(newSViv((IV) imp_sth->warning_count));
break;
case 20:
if (strEQ(key, "mysql_server_prepare"))
#if MYSQL_VERSION_ID >= SERVER_PREPARE_VERSION
retsv= sv_2mortal(newSViv((IV) imp_sth->use_server_side_prepare));
#else
retsv= boolSV(0);
#endif
break;
case 23:
if (strEQ(key, "mysql_is_auto_increment"))
retsv = ST_FETCH_AV(AV_ATTRIB_IS_AUTO_INCREMENT);
break;
case 37:
if (strEQ(key, "mysql_server_prepare_disable_fallback"))
#if MYSQL_VERSION_ID >= SERVER_PREPARE_VERSION
retsv= sv_2mortal(newSViv((IV) imp_sth->disable_fallback_for_server_prepare));
#else
retsv= boolSV(0);
#endif
break;
}
break;
}
return retsv;
}
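/*
Note on the dispatch above: the outer switch keys on the first character
of the attribute name and the inner one on its length, so for example
strlen("mysql_insertid") == 14 lands in case 'm' / case 14 and
strlen("mysql_warning_count") == 19 lands in case 'm' / case 19.
*/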
/***************************************************************************
*
* Name: dbd_st_blob_read
*
* Purpose: Used for blob reads if the statement handle's "LongTruncOk"
* attribute is set (currently not supported by DBD::mysql)
*
* Input: SV* - statement handle from which a blob will be fetched
* imp_sth - drivers private statement handle data
* field - field number of the blob (note, that a row may
* contain more than one blob)
* offset - the offset of the field, where to start reading
* len - maximum number of bytes to read
* destrv - RV* that tells us where to store
* destoffset - destination offset
*
* Returns: TRUE for success, FALSE otherwise; do_error will
* be called in the latter case
*
**************************************************************************/
int dbd_st_blob_read (
SV *sth,
imp_sth_t *imp_sth,
int field,
long offset,
long len,
SV *destrv,
long destoffset)
{
/* quell warnings */
sth= sth;
imp_sth=imp_sth;
field= field;
offset= offset;
len= len;
destrv= destrv;
destoffset= destoffset;
return FALSE;
}
/***************************************************************************
*
* Name: dbd_bind_ph
*
* Purpose: Binds a statement value to a parameter
*
* Input: sth - statement handle
* imp_sth - drivers private statement handle data
* param - parameter number, counting starts with 1
* value - value being inserted for parameter "param"
* sql_type - SQL type of the value
* attribs - bind parameter attributes, currently this must be
* one of the values SQL_CHAR, ...
* inout - TRUE, if parameter is an output variable (currently
* this is not supported)
* maxlen - ???
*
* Returns: TRUE for success, FALSE otherwise
*
**************************************************************************/
int dbd_bind_ph(SV *sth, imp_sth_t *imp_sth, SV *param, SV *value,
IV sql_type, SV *attribs, int is_inout, IV maxlen) {
dTHX;
int rc;
int param_num= SvIV(param);
int idx= param_num - 1;
char *err_msg;
D_imp_xxh(sth);
#if MYSQL_VERSION_ID >= SERVER_PREPARE_VERSION
STRLEN slen;
char *buffer= NULL;
int buffer_is_null= 0;
int buffer_is_unsigned= 0;
int buffer_length= 0;
unsigned int buffer_type= 0;
#endif
D_imp_dbh_from_sth;
ASYNC_CHECK_RETURN(sth, FALSE);
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh),
" Called: dbd_bind_ph\n");
attribs= attribs;
maxlen= maxlen;
if (param_num <= 0 || param_num > DBIc_NUM_PARAMS(imp_sth))
{
do_error(sth, JW_ERR_ILLEGAL_PARAM_NUM, "Illegal parameter number", NULL);
return FALSE;
}
/*
This fixes the bug whereby no warning was issued upon binding a
defined non-numeric as numeric
*/
if (SvOK(value) &&
(sql_type == SQL_NUMERIC ||
sql_type == SQL_DECIMAL ||
sql_type == SQL_INTEGER ||
sql_type == SQL_SMALLINT ||
sql_type == SQL_FLOAT ||
sql_type == SQL_REAL ||
sql_type == SQL_DOUBLE) )
{
if (! looks_like_number(value))
{
err_msg = SvPVX(sv_2mortal(newSVpvf(
"Binding non-numeric field %d, value %s as a numeric!",
param_num, neatsvpv(value,0))));
do_error(sth, JW_ERR_ILLEGAL_PARAM_NUM, err_msg, NULL);
}
}
if (is_inout)
{
do_error(sth, JW_ERR_NOT_IMPLEMENTED, "Output parameters not implemented", NULL);
return FALSE;
}
rc = bind_param(&imp_sth->params[idx], value, sql_type);
#if MYSQL_VERSION_ID >= SERVER_PREPARE_VERSION
if (imp_sth->use_server_side_prepare)
{
switch(sql_type) {
case SQL_NUMERIC:
case SQL_INTEGER:
case SQL_SMALLINT:
case SQL_TINYINT:
#if IVSIZE >= 8
case SQL_BIGINT:
buffer_type= MYSQL_TYPE_LONGLONG;
#else
buffer_type= MYSQL_TYPE_LONG;
#endif
break;
case SQL_DOUBLE:
case SQL_DECIMAL:
case SQL_FLOAT:
case SQL_REAL:
buffer_type= MYSQL_TYPE_DOUBLE;
break;
case SQL_CHAR:
case SQL_VARCHAR:
case SQL_DATE:
case SQL_TIME:
case SQL_TIMESTAMP:
case SQL_LONGVARCHAR:
case SQL_BINARY:
case SQL_VARBINARY:
case SQL_LONGVARBINARY:
buffer_type= MYSQL_TYPE_BLOB;
break;
default:
buffer_type= MYSQL_TYPE_STRING;
}
buffer_is_null = !(SvOK(imp_sth->params[idx].value) && imp_sth->params[idx].value);
if (! buffer_is_null) {
switch(buffer_type) {
case MYSQL_TYPE_LONG:
case MYSQL_TYPE_LONGLONG:
/* INT */
if (!SvIOK(imp_sth->params[idx].value) && DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t\tTRY TO BIND AN INT NUMBER\n");
buffer_length = sizeof imp_sth->fbind[idx].numeric_val.lval;
imp_sth->fbind[idx].numeric_val.lval= SvIV(imp_sth->params[idx].value);
buffer=(void*)&(imp_sth->fbind[idx].numeric_val.lval);
if (!SvIOK(imp_sth->params[idx].value))
{
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh),
" Conversion to INT NUMBER was not successful -> '%s' --> (unsigned) '%"UVuf"' / (signed) '%"IVdf"' <- fallback to STRING\n",
SvPV_nolen(imp_sth->params[idx].value), imp_sth->fbind[idx].numeric_val.lval, imp_sth->fbind[idx].numeric_val.lval);
buffer_type = MYSQL_TYPE_STRING;
break;
}
if (SvIsUV(imp_sth->params[idx].value))
buffer_is_unsigned= 1;
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh),
" SCALAR type %"IVdf" ->%"IVdf"<- IS A INT NUMBER\n",
sql_type, *(IV *)buffer);
break;
case MYSQL_TYPE_DOUBLE:
if (!SvNOK(imp_sth->params[idx].value) && DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t\tTRY TO BIND A FLOAT NUMBER\n");
buffer_length = sizeof imp_sth->fbind[idx].numeric_val.dval;
imp_sth->fbind[idx].numeric_val.dval= SvNV(imp_sth->params[idx].value);
buffer=(char*)&(imp_sth->fbind[idx].numeric_val.dval);
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh),
" SCALAR type %"IVdf" ->%f<- IS A FLOAT NUMBER\n",
sql_type, imp_sth->fbind[idx].numeric_val.dval);
break;
case MYSQL_TYPE_BLOB:
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh),
" SCALAR type BLOB\n");
break;
case MYSQL_TYPE_STRING:
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh),
" SCALAR type STRING %"IVdf", buffertype=%d\n", sql_type, buffer_type);
break;
default:
croak("Bug in DBD::Mysql file dbdimp.c#dbd_bind_ph: do not know how to handle unknown buffer type.");
}
if (buffer_type == MYSQL_TYPE_STRING || buffer_type == MYSQL_TYPE_BLOB)
{
buffer= SvPV(imp_sth->params[idx].value, slen);
buffer_length= slen;
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh),
" SCALAR type %"IVdf" ->length %d<- IS A STRING or BLOB\n",
sql_type, buffer_length);
}
}
else
{
/*case: buffer_is_null != 0*/
buffer= NULL;
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh),
" SCALAR NULL VALUE: buffer type is: %d\n", buffer_type);
}
/* The column type changed; force a rebind */
if (imp_sth->bind[idx].buffer_type != buffer_type || imp_sth->bind[idx].is_unsigned != buffer_is_unsigned) {
if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
PerlIO_printf(DBIc_LOGPIO(imp_xxh),
" FORCE REBIND: buffer type changed from %d to %d, sql-type=%"IVdf"\n",
(int) imp_sth->bind[idx].buffer_type, buffer_type, sql_type);
imp_sth->has_been_bound = 0;
}
/* prepare has been called */
if (imp_sth->has_been_bound)
{
imp_sth->stmt->params[idx].buffer= buffer;
imp_sth->stmt->params[idx].buffer_length= buffer_length;
}
imp_sth->bind[idx].buffer_type= buffer_type;
imp_sth->bind[idx].buffer= buffer;
imp_sth->bind[idx].buffer_length= buffer_length;
imp_sth->bind[idx].is_unsigned= buffer_is_unsigned;
imp_sth->fbind[idx].length= buffer_length;
imp_sth->fbind[idx].is_null= buffer_is_null;
}
#endif
return rc;
}
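/*
Summary sketch of the buffer_type mapping above (server-side prepare
path only, assuming a build with IVSIZE >= 8; the SQL_* constants come
from DBI):
SQL_NUMERIC/SQL_INTEGER/SQL_SMALLINT/SQL_TINYINT/SQL_BIGINT -> MYSQL_TYPE_LONGLONG
SQL_DOUBLE/SQL_DECIMAL/SQL_FLOAT/SQL_REAL -> MYSQL_TYPE_DOUBLE
SQL_CHAR/SQL_VARCHAR/dates/blobs -> MYSQL_TYPE_BLOB
anything else -> MYSQL_TYPE_STRING
So a hypothetical call
dbd_bind_ph(sth, imp_sth, param, value, SQL_INTEGER, NULL, 0, 0)
ends up binding &imp_sth->fbind[idx].numeric_val.lval as MYSQL_TYPE_LONGLONG.
*/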
/***************************************************************************
*
* Name: mysql_db_reconnect
*
* Purpose: If the server has disconnected, try to reconnect.
*
* Input: h - database or statement handle
*
* Returns: TRUE for success, FALSE otherwise
*
**************************************************************************/
int mysql_db_reconnect(SV* h)
{
dTHX;
D_imp_xxh(h);
imp_dbh_t* imp_dbh;
MYSQL save_socket;
if (DBIc_TYPE(imp_xxh) == DBIt_ST)
{
imp_dbh = (imp_dbh_t*) DBIc_PARENT_COM(imp_xxh);
h = DBIc_PARENT_H(imp_xxh);
}
else
imp_dbh= (imp_dbh_t*) imp_xxh;
if (mysql_errno(imp_dbh->pmysql) != CR_SERVER_GONE_ERROR &&
mysql_errno(imp_dbh->pmysql) != CR_SERVER_LOST)
/* Other error */
return FALSE;
if (!DBIc_has(imp_dbh, DBIcf_AutoCommit) || !imp_dbh->auto_reconnect)
{
/* We never reconnect if AutoCommit is turned off.
* Otherwise we might get an inconsistent transaction
* state.
*/
return FALSE;
}
/* my_login will blow away imp_dbh->mysql so we save a copy of
* imp_dbh->mysql and put it back where it belongs if the reconnect
* fails. Think server is down & reconnect fails but the application eval{}s
* the execute, so next time $dbh->quote() gets called, instant SIGSEGV!
*/
memcpy (&save_socket, imp_dbh->pmysql, sizeof(save_socket));
memset (imp_dbh->pmysql,0,sizeof(*(imp_dbh->pmysql)));
/* we should disconnect the db handle before reconnecting, this will
* prevent my_login from thinking it's adopting an active child which
* would prevent the handle from actually reconnecting
*/
if (!dbd_db_disconnect(h, imp_dbh) || !my_login(aTHX_ h, imp_dbh))
{
do_error(h, mysql_errno(imp_dbh->pmysql), mysql_error(imp_dbh->pmysql),
mysql_sqlstate(imp_dbh->pmysql));
memcpy (imp_dbh->pmysql, &save_socket, sizeof(save_socket));
++imp_dbh->stats.auto_reconnects_failed;
return FALSE;
}
/*
* Tell DBI, that dbh->disconnect should be called for this handle
*/
DBIc_ACTIVE_on(imp_dbh);
++imp_dbh->stats.auto_reconnects_ok;
return TRUE;
}
/**************************************************************************
*
* Name: dbd_db_type_info_all
*
* Purpose: Implements $dbh->type_info_all
*
* Input: dbh - database handle
* imp_sth - drivers private database handle data
*
* Returns: RV to AV of types
*
**************************************************************************/
#define PV_PUSH(c) \
if (c) { \
sv= newSVpv((char*) (c), 0); \
SvREADONLY_on(sv); \
} else { \
sv= &PL_sv_undef; \
} \
av_push(row, sv);
#define IV_PUSH(i) sv= newSViv((i)); SvREADONLY_on(sv); av_push(row, sv);
AV *dbd_db_type_info_all(SV *dbh, imp_dbh_t *imp_dbh)
{
dTHX;
AV *av= newAV();
AV *row;
HV *hv;
SV *sv;
int i;
const char *cols[] = {
"TYPE_NAME",
"DATA_TYPE",
"COLUMN_SIZE",
"LITERAL_PREFIX",
"LITERAL_SUFFIX",
"CREATE_PARAMS",
"NULLABLE",
"CASE_SENSITIVE",
"SEARCHABLE",
"UNSIGNED_ATTRIBUTE",
"FIXED_PREC_SCALE",
"AUTO_UNIQUE_VALUE",
"LOCAL_TYPE_NAME",
"MINIMUM_SCALE",
"MAXIMUM_SCALE",
"NUM_PREC_RADIX",
"SQL_DATATYPE",
"SQL_DATETIME_SUB",
"INTERVAL_PRECISION",
"mysql_native_type",
"mysql_is_num"
};
dbh= dbh;
imp_dbh= imp_dbh;
hv= newHV();
av_push(av, newRV_noinc((SV*) hv));
for (i= 0; i < (int)(sizeof(cols) / sizeof(const char*)); i++)
{
if (!hv_store(hv, (char*) cols[i], strlen(cols[i]), newSViv(i), 0))
{
SvREFCNT_dec((SV*) av);
return Nullav;
}
}
for (i= 0; i < (int)SQL_GET_TYPE_INFO_num; i++)
{
const sql_type_info_t *t= &SQL_GET_TYPE_INFO_values[i];
row= newAV();
av_push(av, newRV_noinc((SV*) row));
PV_PUSH(t->type_name);
IV_PUSH(t->data_type);
IV_PUSH(t->column_size);
PV_PUSH(t->literal_prefix);
PV_PUSH(t->literal_suffix);
PV_PUSH(t->create_params);
IV_PUSH(t->nullable);
IV_PUSH(t->case_sensitive);
IV_PUSH(t->searchable);
IV_PUSH(t->unsigned_attribute);
IV_PUSH(t->fixed_prec_scale);
IV_PUSH(t->auto_unique_value);
PV_PUSH(t->local_type_name);
IV_PUSH(t->minimum_scale);
IV_PUSH(t->maximum_scale);
if (t->num_prec_radix)
{
IV_PUSH(t->num_prec_radix);
}
else
av_push(row, &PL_sv_undef);
IV_PUSH(t->sql_datatype); /* SQL_DATATYPE*/
IV_PUSH(t->sql_datetime_sub); /* SQL_DATETIME_SUB*/
IV_PUSH(t->interval_precision); /* INTERVAL_PRECISION */
IV_PUSH(t->native_type);
IV_PUSH(t->is_num);
}
return av;
}
/*
dbd_db_quote
Properly quotes a value
*/
SV* dbd_db_quote(SV *dbh, SV *str, SV *type)
{
dTHX;
SV *result;
if (SvGMAGICAL(str))
mg_get(str);
if (!SvOK(str))
result= newSVpvn("NULL", 4);
else
{
char *ptr, *sptr;
STRLEN len;
D_imp_dbh(dbh);
if (type && SvMAGICAL(type))
mg_get(type);
if (type && SvOK(type))
{
int i;
int tp= SvIV(type);
for (i= 0; i < (int)SQL_GET_TYPE_INFO_num; i++)
{
const sql_type_info_t *t= &SQL_GET_TYPE_INFO_values[i];
if (t->data_type == tp)
{
if (!t->literal_prefix)
return Nullsv;
break;
}
}
}
ptr= SvPV(str, len);
result= newSV(len*2+3);
#ifdef SvUTF8
if (SvUTF8(str)) SvUTF8_on(result);
#endif
sptr= SvPVX(result);
*sptr++ = '\'';
sptr+= mysql_real_escape_string(imp_dbh->pmysql, sptr,
ptr, len);
*sptr++= '\'';
SvPOK_on(result);
SvCUR_set(result, sptr - SvPVX(result));
/* Never hurts to NUL-terminate a Perl string */
*sptr++= '\0';
}
return result;
}
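/*
Hedged example of the quoting above (exact escaping depends on the
connection character set via mysql_real_escape_string):
undef -> NULL
ab'c -> 'ab\'c'
The result SV is sized len*2+3: worst case every input byte escaped,
plus two quotes and a trailing NUL.
*/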
#ifdef DBD_MYSQL_INSERT_ID_IS_GOOD
SV *mysql_db_last_insert_id(SV *dbh, imp_dbh_t *imp_dbh,
SV *catalog, SV *schema, SV *table, SV *field, SV *attr)
{
dTHX;
/* all these no-op settings are to stifle OS X compile warnings */
imp_dbh= imp_dbh;
dbh= dbh;
catalog= catalog;
schema= schema;
table= table;
field= field;
attr= attr;
ASYNC_CHECK_RETURN(dbh, &PL_sv_undef);
return sv_2mortal(my_ulonglong2str(aTHX_ mysql_insert_id(imp_dbh->pmysql)));
}
#endif
#if MYSQL_ASYNC
int mysql_db_async_result(SV* h, MYSQL_RES** resp)
{
dTHX;
D_imp_xxh(h);
imp_dbh_t* dbh;
MYSQL* svsock = NULL;
MYSQL_RES* _res;
int retval = 0;
int htype;
if(! resp) {
resp = &_res;
}
htype = DBIc_TYPE(imp_xxh);
if(htype == DBIt_DB) {
D_imp_dbh(h);
dbh = imp_dbh;
} else {
D_imp_sth(h);
D_imp_dbh_from_sth;
dbh = imp_dbh;
}
if(! dbh->async_query_in_flight) {
do_error(h, 2000, "Gathering asynchronous results for a synchronous handle", "HY000");
return -1;
}
if(dbh->async_query_in_flight != imp_xxh) {
do_error(h, 2000, "Gathering async_query_in_flight results for the wrong handle", "HY000");
return -1;
}
dbh->async_query_in_flight = NULL;
svsock= dbh->pmysql;
retval= mysql_read_query_result(svsock);
if(! retval) {
*resp= mysql_store_result(svsock);
if (mysql_errno(svsock))
do_error(h, mysql_errno(svsock), mysql_error(svsock), mysql_sqlstate(svsock));
if (!*resp)
retval= mysql_affected_rows(svsock);
else {
retval= mysql_num_rows(*resp);
if(resp == &_res) {
mysql_free_result(*resp);
}
}
if(htype == DBIt_ST) {
D_imp_sth(h);
D_imp_dbh_from_sth;
if((my_ulonglong)retval+1 != (my_ulonglong)-1) {
if(! *resp) {
imp_sth->insertid= mysql_insert_id(svsock);
#if MYSQL_VERSION_ID >= MULTIPLE_RESULT_SET_VERSION
if(! mysql_more_results(svsock))
DBIc_ACTIVE_off(imp_sth);
#endif
} else {
DBIc_NUM_FIELDS(imp_sth)= mysql_num_fields(imp_sth->result);
imp_sth->done_desc= 0;
imp_sth->fetch_done= 0;
}
}
imp_sth->warning_count = mysql_warning_count(imp_dbh->pmysql);
}
} else {
do_error(h, mysql_errno(svsock), mysql_error(svsock),
mysql_sqlstate(svsock));
return -1;
}
return retval;
}
int mysql_db_async_ready(SV* h)
{
dTHX;
D_imp_xxh(h);
imp_dbh_t* dbh;
int htype;
htype = DBIc_TYPE(imp_xxh);
if(htype == DBIt_DB) {
D_imp_dbh(h);
dbh = imp_dbh;
} else {
D_imp_sth(h);
D_imp_dbh_from_sth;
dbh = imp_dbh;
}
if(dbh->async_query_in_flight) {
if(dbh->async_query_in_flight == imp_xxh) {
struct pollfd fds;
int retval;
fds.fd = dbh->pmysql->net.fd;
fds.events = POLLIN;
retval = poll(&fds, 1, 0);
if(retval < 0) {
do_error(h, errno, strerror(errno), "HY000");
}
return retval;
} else {
do_error(h, 2000, "Calling mysql_async_ready on the wrong handle", "HY000");
return -1;
}
} else {
do_error(h, 2000, "Handle is not in asynchronous mode", "HY000");
return -1;
}
}
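/*
Return-value sketch, derived from the poll() call above: > 0 means the
socket is readable and the async result can be gathered, 0 means not
ready yet, < 0 means an error (do_error has already been called).
A hypothetical caller:
int rows, ready = mysql_db_async_ready(h);
if (ready > 0)
rows = mysql_db_async_result(h, NULL);
*/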
#endif
static int parse_number(char *string, STRLEN len, char **end)
{
int seen_neg;
int seen_dec;
int seen_e;
int seen_plus;
int seen_digit;
char *cp;
seen_neg= seen_dec= seen_e= seen_plus= seen_digit= 0;
if (len <= 0) {
len= strlen(string);
}
cp= string;
/* Skip leading whitespace */
while (*cp && isspace(*cp))
cp++;
for ( ; *cp; cp++)
{
if ('-' == *cp)
{
if (seen_neg >= 2)
{
/*
a third '-': a number can contain two '-' characters,
because -1e-10 is a valid number */
break;
}
seen_neg += 1;
}
else if ('.' == *cp)
{
if (seen_dec)
{
/* second '.' */
break;
}
seen_dec= 1;
}
else if ('e' == *cp)
{
if (seen_e)
{
/* second 'e' */
break;
}
seen_e= 1;
}
else if ('+' == *cp)
{
if (seen_plus)
{
/* second '+' */
break;
}
seen_plus= 1;
}
else if (!isdigit(*cp))
{
/* Not sure why this was changed */
/* seen_digit= 1; */
break;
}
}
*end= cp;
/* length 0 -> not a number */
/* Need to revisit this */
/*if (len == 0 || cp - string < (int) len || seen_digit == 0) {*/
if (len == 0 || cp - string < (int) len) {
return -1;
}
return 0;
}
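/*
Worked examples for parse_number, derived from the logic above (not
from a test suite):
char *end;
parse_number("42", 2, &end); // 0: all chars consumed
parse_number("-1e-10", 6, &end); // 0: two '-' are allowed for exponents
parse_number("12ab", 4, &end); // -1: stops at 'a', *end points there
*/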
| ./CrossVul/dataset_final_sorted/CWE-416/c/bad_4902_0 |
crossvul-cpp_data_good_903_1 | /*
servers-reconnect.c : irssi
Copyright (C) 1999-2000 Timo Sirainen
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include "module.h"
#include <irssi/src/core/commands.h>
#include <irssi/src/core/network.h>
#include <irssi/src/core/signals.h>
#include <irssi/src/irc/core/modes.h>
#include <irssi/src/irc/core/irc-servers.h>
#include <irssi/src/core/settings.h>
static void sig_server_connect_copy(SERVER_CONNECT_REC **dest,
IRC_SERVER_CONNECT_REC *src)
{
IRC_SERVER_CONNECT_REC *rec;
g_return_if_fail(dest != NULL);
if (!IS_IRC_SERVER_CONNECT(src))
return;
rec = g_new0(IRC_SERVER_CONNECT_REC, 1);
rec->chat_type = IRC_PROTOCOL;
rec->max_cmds_at_once = src->max_cmds_at_once;
rec->cmd_queue_speed = src->cmd_queue_speed;
rec->max_query_chans = src->max_query_chans;
rec->max_kicks = src->max_kicks;
rec->max_modes = src->max_modes;
rec->max_msgs = src->max_msgs;
rec->max_whois = src->max_whois;
rec->usermode = g_strdup(src->usermode);
rec->alternate_nick = g_strdup(src->alternate_nick);
rec->sasl_mechanism = src->sasl_mechanism;
rec->sasl_username = g_strdup(src->sasl_username);
rec->sasl_password = g_strdup(src->sasl_password);
*dest = (SERVER_CONNECT_REC *) rec;
}
static void sig_server_reconnect_save_status(IRC_SERVER_CONNECT_REC *conn,
IRC_SERVER_REC *server)
{
if (!IS_IRC_SERVER_CONNECT(conn) || !IS_IRC_SERVER(server) ||
!server->connected)
return;
g_free_not_null(conn->channels);
conn->channels = irc_server_get_channels(server);
g_free_not_null(conn->usermode);
conn->usermode = g_strdup(server->wanted_usermode);
}
static void sig_connected(IRC_SERVER_REC *server)
{
if (!IS_IRC_SERVER(server) || !server->connrec->reconnection)
return;
if (server->connrec->away_reason != NULL)
irc_server_send_away(server, server->connrec->away_reason);
}
static void event_nick_collision(IRC_SERVER_REC *server, const char *data)
{
time_t new_connect;
if (!IS_IRC_SERVER(server))
return;
/* after server kills us because of nick collision, we want to
connect back immediately. but no matter how hard they kill us,
don't connect to the server more than once in every 10 seconds. */
new_connect = server->connect_time+10 -
settings_get_time("server_reconnect_time")/1000;
if (server->connect_time > new_connect)
server->connect_time = new_connect;
server->nick_collision = TRUE;
}
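/*
worked example of the throttle above, assuming the default
server_reconnect_time of 300 seconds: new_connect becomes
connect_time + 10 - 300, i.e. connect_time - 290, so the
reconnect machinery considers the connection old enough to
retry about 10 seconds after the kill instead of waiting
the full 300.
*/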
static void event_kill(IRC_SERVER_REC *server, const char *data,
const char *nick, const char *addr)
{
if (addr != NULL && !server->nick_collision) {
/* don't reconnect if we were killed by an oper (not server) */
server->no_reconnect = TRUE;
}
}
void irc_servers_reconnect_init(void)
{
signal_add("server connect copy", (SIGNAL_FUNC) sig_server_connect_copy);
signal_add("server reconnect save status", (SIGNAL_FUNC) sig_server_reconnect_save_status);
signal_add("event connected", (SIGNAL_FUNC) sig_connected);
signal_add("event 436", (SIGNAL_FUNC) event_nick_collision);
signal_add("event kill", (SIGNAL_FUNC) event_kill);
}
void irc_servers_reconnect_deinit(void)
{
signal_remove("server connect copy", (SIGNAL_FUNC) sig_server_connect_copy);
signal_remove("server reconnect save status", (SIGNAL_FUNC) sig_server_reconnect_save_status);
signal_remove("event connected", (SIGNAL_FUNC) sig_connected);
signal_remove("event 436", (SIGNAL_FUNC) event_nick_collision);
signal_remove("event kill", (SIGNAL_FUNC) event_kill);
}
| ./CrossVul/dataset_final_sorted/CWE-416/c/good_903_1 |
crossvul-cpp_data_bad_454_0 | /*
* (Tentative) USB Audio Driver for ALSA
*
* Copyright (c) 2002 by Takashi Iwai <tiwai@suse.de>
*
* Many codes borrowed from audio.c by
* Alan Cox (alan@lxorguk.ukuu.org.uk)
* Thomas Sailer (sailer@ife.ee.ethz.ch)
*
* Audio Class 3.0 support by Ruslan Bilovol <ruslan.bilovol@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*
* NOTES:
*
* - the linked URBs would be preferred but not used so far because of
* the instability of unlinking.
* - type II is not supported properly. there is no device which supports
* this type *correctly*. SB extigy looks as if it supports, but it's
* indeed an AC3 stream packed in SPDIF frames (i.e. no real AC3 stream).
*/
#include <linux/bitops.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/usb.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/usb/audio.h>
#include <linux/usb/audio-v2.h>
#include <linux/usb/audio-v3.h>
#include <linux/module.h>
#include <sound/control.h>
#include <sound/core.h>
#include <sound/info.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/initval.h>
#include "usbaudio.h"
#include "card.h"
#include "midi.h"
#include "mixer.h"
#include "proc.h"
#include "quirks.h"
#include "endpoint.h"
#include "helper.h"
#include "debug.h"
#include "pcm.h"
#include "format.h"
#include "power.h"
#include "stream.h"
MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>");
MODULE_DESCRIPTION("USB Audio");
MODULE_LICENSE("GPL");
MODULE_SUPPORTED_DEVICE("{{Generic,USB Audio}}");
static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */
static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP;/* Enable this card */
/* Vendor/product IDs for this card */
static int vid[SNDRV_CARDS] = { [0 ... (SNDRV_CARDS-1)] = -1 };
static int pid[SNDRV_CARDS] = { [0 ... (SNDRV_CARDS-1)] = -1 };
static int device_setup[SNDRV_CARDS]; /* device parameter for this card */
static bool ignore_ctl_error;
static bool autoclock = true;
static char *quirk_alias[SNDRV_CARDS];
bool snd_usb_use_vmalloc = true;
module_param_array(index, int, NULL, 0444);
MODULE_PARM_DESC(index, "Index value for the USB audio adapter.");
module_param_array(id, charp, NULL, 0444);
MODULE_PARM_DESC(id, "ID string for the USB audio adapter.");
module_param_array(enable, bool, NULL, 0444);
MODULE_PARM_DESC(enable, "Enable USB audio adapter.");
module_param_array(vid, int, NULL, 0444);
MODULE_PARM_DESC(vid, "Vendor ID for the USB audio device.");
module_param_array(pid, int, NULL, 0444);
MODULE_PARM_DESC(pid, "Product ID for the USB audio device.");
module_param_array(device_setup, int, NULL, 0444);
MODULE_PARM_DESC(device_setup, "Specific device setup (if needed).");
module_param(ignore_ctl_error, bool, 0444);
MODULE_PARM_DESC(ignore_ctl_error,
"Ignore errors from USB controller for mixer interfaces.");
module_param(autoclock, bool, 0444);
MODULE_PARM_DESC(autoclock, "Enable auto-clock selection for UAC2 devices (default: yes).");
module_param_array(quirk_alias, charp, NULL, 0444);
MODULE_PARM_DESC(quirk_alias, "Quirk aliases, e.g. 0123abcd:5678beef.");
module_param_named(use_vmalloc, snd_usb_use_vmalloc, bool, 0444);
MODULE_PARM_DESC(use_vmalloc, "Use vmalloc for PCM intermediate buffers (default: yes).");
/*
* we keep the snd_usb_audio_t instances by ourselves for merging
* all interfaces on the same card into one sound device.
*/
static DEFINE_MUTEX(register_mutex);
static struct snd_usb_audio *usb_chip[SNDRV_CARDS];
static struct usb_driver usb_audio_driver;
/*
* disconnect streams
* called from usb_audio_disconnect()
*/
static void snd_usb_stream_disconnect(struct snd_usb_stream *as)
{
int idx;
struct snd_usb_substream *subs;
for (idx = 0; idx < 2; idx++) {
subs = &as->substream[idx];
if (!subs->num_formats)
continue;
subs->interface = -1;
subs->data_endpoint = NULL;
subs->sync_endpoint = NULL;
}
}
static int snd_usb_create_stream(struct snd_usb_audio *chip, int ctrlif, int interface)
{
struct usb_device *dev = chip->dev;
struct usb_host_interface *alts;
struct usb_interface_descriptor *altsd;
struct usb_interface *iface = usb_ifnum_to_if(dev, interface);
if (!iface) {
dev_err(&dev->dev, "%u:%d : does not exist\n",
ctrlif, interface);
return -EINVAL;
}
alts = &iface->altsetting[0];
altsd = get_iface_desc(alts);
/*
* Android with both accessory and audio interfaces enabled gets the
* interface numbers wrong.
*/
if ((chip->usb_id == USB_ID(0x18d1, 0x2d04) ||
chip->usb_id == USB_ID(0x18d1, 0x2d05)) &&
interface == 0 &&
altsd->bInterfaceClass == USB_CLASS_VENDOR_SPEC &&
altsd->bInterfaceSubClass == USB_SUBCLASS_VENDOR_SPEC) {
interface = 2;
iface = usb_ifnum_to_if(dev, interface);
if (!iface)
return -EINVAL;
alts = &iface->altsetting[0];
altsd = get_iface_desc(alts);
}
if (usb_interface_claimed(iface)) {
dev_dbg(&dev->dev, "%d:%d: skipping, already claimed\n",
ctrlif, interface);
return -EINVAL;
}
if ((altsd->bInterfaceClass == USB_CLASS_AUDIO ||
altsd->bInterfaceClass == USB_CLASS_VENDOR_SPEC) &&
altsd->bInterfaceSubClass == USB_SUBCLASS_MIDISTREAMING) {
int err = __snd_usbmidi_create(chip->card, iface,
&chip->midi_list, NULL,
chip->usb_id);
if (err < 0) {
dev_err(&dev->dev,
"%u:%d: cannot create sequencer device\n",
ctrlif, interface);
return -EINVAL;
}
usb_driver_claim_interface(&usb_audio_driver, iface, (void *)-1L);
return 0;
}
if ((altsd->bInterfaceClass != USB_CLASS_AUDIO &&
altsd->bInterfaceClass != USB_CLASS_VENDOR_SPEC) ||
altsd->bInterfaceSubClass != USB_SUBCLASS_AUDIOSTREAMING) {
dev_dbg(&dev->dev,
"%u:%d: skipping non-supported interface %d\n",
ctrlif, interface, altsd->bInterfaceClass);
/* skip non-supported classes */
return -EINVAL;
}
if (snd_usb_get_speed(dev) == USB_SPEED_LOW) {
dev_err(&dev->dev, "low speed audio streaming not supported\n");
return -EINVAL;
}
if (! snd_usb_parse_audio_interface(chip, interface)) {
usb_set_interface(dev, interface, 0); /* reset the current interface */
usb_driver_claim_interface(&usb_audio_driver, iface, (void *)-1L);
}
return 0;
}
/*
* parse audio control descriptor and create pcm/midi streams
*/
static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif)
{
struct usb_device *dev = chip->dev;
struct usb_host_interface *host_iface;
struct usb_interface_descriptor *altsd;
int i, protocol;
/* find audiocontrol interface */
host_iface = &usb_ifnum_to_if(dev, ctrlif)->altsetting[0];
altsd = get_iface_desc(host_iface);
protocol = altsd->bInterfaceProtocol;
switch (protocol) {
default:
dev_warn(&dev->dev,
"unknown interface protocol %#02x, assuming v1\n",
protocol);
/* fall through */
case UAC_VERSION_1: {
struct uac1_ac_header_descriptor *h1;
int rest_bytes;
h1 = snd_usb_find_csint_desc(host_iface->extra,
host_iface->extralen,
NULL, UAC_HEADER);
if (!h1) {
dev_err(&dev->dev, "cannot find UAC_HEADER\n");
return -EINVAL;
}
rest_bytes = (void *)(host_iface->extra +
host_iface->extralen) - (void *)h1;
/* just to be sure -- this shouldn't hit at all */
if (rest_bytes <= 0) {
dev_err(&dev->dev, "invalid control header\n");
return -EINVAL;
}
if (rest_bytes < sizeof(*h1)) {
dev_err(&dev->dev, "too short v1 buffer descriptor\n");
return -EINVAL;
}
if (!h1->bInCollection) {
dev_info(&dev->dev, "skipping empty audio interface (v1)\n");
return -EINVAL;
}
if (rest_bytes < h1->bLength) {
dev_err(&dev->dev, "invalid buffer length (v1)\n");
return -EINVAL;
}
if (h1->bLength < sizeof(*h1) + h1->bInCollection) {
dev_err(&dev->dev, "invalid UAC_HEADER (v1)\n");
return -EINVAL;
}
for (i = 0; i < h1->bInCollection; i++)
snd_usb_create_stream(chip, ctrlif, h1->baInterfaceNr[i]);
break;
}
case UAC_VERSION_2:
case UAC_VERSION_3: {
struct usb_interface_assoc_descriptor *assoc =
usb_ifnum_to_if(dev, ctrlif)->intf_assoc;
if (!assoc) {
/*
* Firmware writers cannot count to three. So to find
* the IAD on the NuForce UDH-100, also check the next
* interface.
*/
struct usb_interface *iface =
usb_ifnum_to_if(dev, ctrlif + 1);
if (iface &&
iface->intf_assoc &&
iface->intf_assoc->bFunctionClass == USB_CLASS_AUDIO &&
iface->intf_assoc->bFunctionProtocol == UAC_VERSION_2)
assoc = iface->intf_assoc;
}
if (!assoc) {
dev_err(&dev->dev, "Audio class v2/v3 interfaces need an interface association\n");
return -EINVAL;
}
if (protocol == UAC_VERSION_3) {
int badd = assoc->bFunctionSubClass;
if (badd != UAC3_FUNCTION_SUBCLASS_FULL_ADC_3_0 &&
(badd < UAC3_FUNCTION_SUBCLASS_GENERIC_IO ||
badd > UAC3_FUNCTION_SUBCLASS_SPEAKERPHONE)) {
dev_err(&dev->dev,
"Unsupported UAC3 BADD profile\n");
return -EINVAL;
}
chip->badd_profile = badd;
}
for (i = 0; i < assoc->bInterfaceCount; i++) {
int intf = assoc->bFirstInterface + i;
if (intf != ctrlif)
snd_usb_create_stream(chip, ctrlif, intf);
}
break;
}
}
return 0;
}
/*
* free the chip instance
*
* here we don't have to do much, since pcm and controls are already freed
*
*/
static void snd_usb_audio_free(struct snd_card *card)
{
struct snd_usb_audio *chip = card->private_data;
struct snd_usb_endpoint *ep, *n;
list_for_each_entry_safe(ep, n, &chip->ep_list, list)
snd_usb_endpoint_free(ep);
mutex_destroy(&chip->mutex);
if (!atomic_read(&chip->shutdown))
dev_set_drvdata(&chip->dev->dev, NULL);
}
static void usb_audio_make_shortname(struct usb_device *dev,
struct snd_usb_audio *chip,
const struct snd_usb_audio_quirk *quirk)
{
struct snd_card *card = chip->card;
if (quirk && quirk->product_name && *quirk->product_name) {
strlcpy(card->shortname, quirk->product_name,
sizeof(card->shortname));
return;
}
/* retrieve the device string as shortname */
if (!dev->descriptor.iProduct ||
usb_string(dev, dev->descriptor.iProduct,
card->shortname, sizeof(card->shortname)) <= 0) {
/* no name available from anywhere, so use ID */
sprintf(card->shortname, "USB Device %#04x:%#04x",
USB_ID_VENDOR(chip->usb_id),
USB_ID_PRODUCT(chip->usb_id));
}
strim(card->shortname);
}
static void usb_audio_make_longname(struct usb_device *dev,
struct snd_usb_audio *chip,
const struct snd_usb_audio_quirk *quirk)
{
struct snd_card *card = chip->card;
int len;
/* shortcut - if any pre-defined string is given, use it */
if (quirk && quirk->profile_name && *quirk->profile_name) {
strlcpy(card->longname, quirk->profile_name,
sizeof(card->longname));
return;
}
if (quirk && quirk->vendor_name && *quirk->vendor_name) {
len = strlcpy(card->longname, quirk->vendor_name, sizeof(card->longname));
} else {
/* retrieve the vendor and device strings as longname */
if (dev->descriptor.iManufacturer)
len = usb_string(dev, dev->descriptor.iManufacturer,
card->longname, sizeof(card->longname));
else
len = 0;
/* we don't really care if there isn't any vendor string */
}
if (len > 0) {
strim(card->longname);
if (*card->longname)
strlcat(card->longname, " ", sizeof(card->longname));
}
strlcat(card->longname, card->shortname, sizeof(card->longname));
len = strlcat(card->longname, " at ", sizeof(card->longname));
if (len < sizeof(card->longname))
usb_make_path(dev, card->longname + len, sizeof(card->longname) - len);
switch (snd_usb_get_speed(dev)) {
case USB_SPEED_LOW:
strlcat(card->longname, ", low speed", sizeof(card->longname));
break;
case USB_SPEED_FULL:
strlcat(card->longname, ", full speed", sizeof(card->longname));
break;
case USB_SPEED_HIGH:
strlcat(card->longname, ", high speed", sizeof(card->longname));
break;
case USB_SPEED_SUPER:
strlcat(card->longname, ", super speed", sizeof(card->longname));
break;
case USB_SPEED_SUPER_PLUS:
strlcat(card->longname, ", super speed plus", sizeof(card->longname));
break;
default:
break;
}
}
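/*
* Example of a resulting longname (the device strings and path here are
* made up; the path format is whatever usb_make_path() yields on the
* platform):
*
*   "ExampleVendor ExampleProduct at usb-0000:00:14.0-2, high speed"
*/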
/*
* create a chip instance and set its names.
*/
static int snd_usb_audio_create(struct usb_interface *intf,
struct usb_device *dev, int idx,
const struct snd_usb_audio_quirk *quirk,
unsigned int usb_id,
struct snd_usb_audio **rchip)
{
struct snd_card *card;
struct snd_usb_audio *chip;
int err;
char component[14];
*rchip = NULL;
switch (snd_usb_get_speed(dev)) {
case USB_SPEED_LOW:
case USB_SPEED_FULL:
case USB_SPEED_HIGH:
case USB_SPEED_WIRELESS:
case USB_SPEED_SUPER:
case USB_SPEED_SUPER_PLUS:
break;
default:
dev_err(&dev->dev, "unknown device speed %d\n", snd_usb_get_speed(dev));
return -ENXIO;
}
err = snd_card_new(&intf->dev, index[idx], id[idx], THIS_MODULE,
sizeof(*chip), &card);
if (err < 0) {
dev_err(&dev->dev, "cannot create card instance %d\n", idx);
return err;
}
chip = card->private_data;
mutex_init(&chip->mutex);
init_waitqueue_head(&chip->shutdown_wait);
chip->index = idx;
chip->dev = dev;
chip->card = card;
chip->setup = device_setup[idx];
chip->autoclock = autoclock;
atomic_set(&chip->active, 1); /* avoid autopm during probing */
atomic_set(&chip->usage_count, 0);
atomic_set(&chip->shutdown, 0);
chip->usb_id = usb_id;
INIT_LIST_HEAD(&chip->pcm_list);
INIT_LIST_HEAD(&chip->ep_list);
INIT_LIST_HEAD(&chip->midi_list);
INIT_LIST_HEAD(&chip->mixer_list);
card->private_free = snd_usb_audio_free;
strcpy(card->driver, "USB-Audio");
sprintf(component, "USB%04x:%04x",
USB_ID_VENDOR(chip->usb_id), USB_ID_PRODUCT(chip->usb_id));
snd_component_add(card, component);
usb_audio_make_shortname(dev, chip, quirk);
usb_audio_make_longname(dev, chip, quirk);
snd_usb_audio_create_proc(chip);
*rchip = chip;
return 0;
}
/* look for a matching quirk alias id */
static bool get_alias_id(struct usb_device *dev, unsigned int *id)
{
int i;
unsigned int src, dst;
for (i = 0; i < ARRAY_SIZE(quirk_alias); i++) {
if (!quirk_alias[i] ||
sscanf(quirk_alias[i], "%x:%x", &src, &dst) != 2 ||
src != *id)
continue;
dev_info(&dev->dev,
"device (%04x:%04x): applying quirk alias %04x:%04x\n",
USB_ID_VENDOR(*id), USB_ID_PRODUCT(*id),
USB_ID_VENDOR(dst), USB_ID_PRODUCT(dst));
*id = dst;
return true;
}
return false;
}
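/*
* Usage sketch for the quirk_alias module parameter (format as in the
* MODULE_PARM_DESC above; the IDs are made up):
*
*   modprobe snd-usb-audio quirk_alias=0123abcd:5678beef
*
* makes a device with VID:PID 0123:abcd use the quirk table entry of
* 5678:beef.
*/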
static const struct usb_device_id usb_audio_ids[]; /* defined below */
/* look for the corresponding quirk */
static const struct snd_usb_audio_quirk *
get_alias_quirk(struct usb_device *dev, unsigned int id)
{
const struct usb_device_id *p;
for (p = usb_audio_ids; p->match_flags; p++) {
/* FIXME: this checks only vendor:product pair in the list */
if ((p->match_flags & USB_DEVICE_ID_MATCH_DEVICE) ==
USB_DEVICE_ID_MATCH_DEVICE &&
p->idVendor == USB_ID_VENDOR(id) &&
p->idProduct == USB_ID_PRODUCT(id))
return (const struct snd_usb_audio_quirk *)p->driver_info;
}
return NULL;
}
/*
* probe the active usb device
*
* note that this can be called multiple times per device, when it
* includes multiple audio control interfaces.
*
* thus we check the usb device pointer and create the card instance
* only the first time. successive calls of this function will
* append the pcm interface to the corresponding card.
*/
static int usb_audio_probe(struct usb_interface *intf,
const struct usb_device_id *usb_id)
{
struct usb_device *dev = interface_to_usbdev(intf);
const struct snd_usb_audio_quirk *quirk =
(const struct snd_usb_audio_quirk *)usb_id->driver_info;
struct snd_usb_audio *chip;
int i, err;
struct usb_host_interface *alts;
int ifnum;
u32 id;
alts = &intf->altsetting[0];
ifnum = get_iface_desc(alts)->bInterfaceNumber;
id = USB_ID(le16_to_cpu(dev->descriptor.idVendor),
le16_to_cpu(dev->descriptor.idProduct));
if (get_alias_id(dev, &id))
quirk = get_alias_quirk(dev, id);
if (quirk && quirk->ifnum >= 0 && ifnum != quirk->ifnum)
return -ENXIO;
err = snd_usb_apply_boot_quirk(dev, intf, quirk, id);
if (err < 0)
return err;
/*
* found a config. now register to ALSA
*/
/* check whether it's already registered */
chip = NULL;
mutex_lock(®ister_mutex);
for (i = 0; i < SNDRV_CARDS; i++) {
if (usb_chip[i] && usb_chip[i]->dev == dev) {
if (atomic_read(&usb_chip[i]->shutdown)) {
dev_err(&dev->dev, "USB device is in the shutdown state, cannot create a card instance\n");
err = -EIO;
goto __error;
}
chip = usb_chip[i];
atomic_inc(&chip->active); /* avoid autopm */
break;
}
}
if (! chip) {
/* it's a fresh one.
* now look for an empty slot and create a new card instance
*/
for (i = 0; i < SNDRV_CARDS; i++)
if (!usb_chip[i] &&
(vid[i] == -1 || vid[i] == USB_ID_VENDOR(id)) &&
(pid[i] == -1 || pid[i] == USB_ID_PRODUCT(id))) {
if (enable[i]) {
err = snd_usb_audio_create(intf, dev, i, quirk,
id, &chip);
if (err < 0)
goto __error;
chip->pm_intf = intf;
break;
} else if (vid[i] != -1 || pid[i] != -1) {
dev_info(&dev->dev,
"device (%04x:%04x) is disabled\n",
USB_ID_VENDOR(id),
USB_ID_PRODUCT(id));
err = -ENOENT;
goto __error;
}
}
if (!chip) {
dev_err(&dev->dev, "no available usb audio device\n");
err = -ENODEV;
goto __error;
}
}
dev_set_drvdata(&dev->dev, chip);
/*
* For devices with more than one control interface, we assume the
* first contains the audio controls. We might need a more specific
* check here in the future.
*/
if (!chip->ctrl_intf)
chip->ctrl_intf = alts;
chip->txfr_quirk = 0;
err = 1; /* continue */
if (quirk && quirk->ifnum != QUIRK_NO_INTERFACE) {
/* need some special handlings */
err = snd_usb_create_quirk(chip, intf, &usb_audio_driver, quirk);
if (err < 0)
goto __error;
}
if (err > 0) {
/* create normal USB audio interfaces */
err = snd_usb_create_streams(chip, ifnum);
if (err < 0)
goto __error;
err = snd_usb_create_mixer(chip, ifnum, ignore_ctl_error);
if (err < 0)
goto __error;
}
/* we are allowed to call snd_card_register() many times */
err = snd_card_register(chip->card);
if (err < 0)
goto __error;
usb_chip[chip->index] = chip;
chip->num_interfaces++;
usb_set_intfdata(intf, chip);
atomic_dec(&chip->active);
mutex_unlock(®ister_mutex);
return 0;
__error:
if (chip) {
if (!chip->num_interfaces)
snd_card_free(chip->card);
atomic_dec(&chip->active);
}
mutex_unlock(®ister_mutex);
return err;
}
/*
* we need to take care of the counter, since disconnect can be called
* multiple times, just like usb_audio_probe().
*/
static void usb_audio_disconnect(struct usb_interface *intf)
{
struct snd_usb_audio *chip = usb_get_intfdata(intf);
struct snd_card *card;
struct list_head *p;
if (chip == (void *)-1L)
return;
card = chip->card;
mutex_lock(®ister_mutex);
if (atomic_inc_return(&chip->shutdown) == 1) {
struct snd_usb_stream *as;
struct snd_usb_endpoint *ep;
struct usb_mixer_interface *mixer;
/* wait until all pending tasks done;
* they are protected by snd_usb_lock_shutdown()
*/
wait_event(chip->shutdown_wait,
!atomic_read(&chip->usage_count));
snd_card_disconnect(card);
/* release the pcm resources */
list_for_each_entry(as, &chip->pcm_list, list) {
snd_usb_stream_disconnect(as);
}
/* release the endpoint resources */
list_for_each_entry(ep, &chip->ep_list, list) {
snd_usb_endpoint_release(ep);
}
/* release the midi resources */
list_for_each(p, &chip->midi_list) {
snd_usbmidi_disconnect(p);
}
/* release mixer resources */
list_for_each_entry(mixer, &chip->mixer_list, list) {
snd_usb_mixer_disconnect(mixer);
}
}
chip->num_interfaces--;
if (chip->num_interfaces <= 0) {
usb_chip[chip->index] = NULL;
mutex_unlock(®ister_mutex);
snd_card_free_when_closed(card);
} else {
mutex_unlock(®ister_mutex);
}
}
/* lock the shutdown (disconnect) task and autoresume */
int snd_usb_lock_shutdown(struct snd_usb_audio *chip)
{
int err;
atomic_inc(&chip->usage_count);
if (atomic_read(&chip->shutdown)) {
err = -EIO;
goto error;
}
err = snd_usb_autoresume(chip);
if (err < 0)
goto error;
return 0;
error:
if (atomic_dec_and_test(&chip->usage_count))
wake_up(&chip->shutdown_wait);
return err;
}
/* autosuspend and unlock the shutdown */
void snd_usb_unlock_shutdown(struct snd_usb_audio *chip)
{
snd_usb_autosuspend(chip);
if (atomic_dec_and_test(&chip->usage_count))
wake_up(&chip->shutdown_wait);
}
#ifdef CONFIG_PM
int snd_usb_autoresume(struct snd_usb_audio *chip)
{
if (atomic_read(&chip->shutdown))
return -EIO;
if (atomic_inc_return(&chip->active) == 1)
return usb_autopm_get_interface(chip->pm_intf);
return 0;
}
void snd_usb_autosuspend(struct snd_usb_audio *chip)
{
if (atomic_read(&chip->shutdown))
return;
if (atomic_dec_and_test(&chip->active))
usb_autopm_put_interface(chip->pm_intf);
}
static int usb_audio_suspend(struct usb_interface *intf, pm_message_t message)
{
struct snd_usb_audio *chip = usb_get_intfdata(intf);
struct snd_usb_stream *as;
struct usb_mixer_interface *mixer;
struct list_head *p;
if (chip == (void *)-1L)
return 0;
chip->autosuspended = !!PMSG_IS_AUTO(message);
if (!chip->autosuspended)
snd_power_change_state(chip->card, SNDRV_CTL_POWER_D3hot);
if (!chip->num_suspended_intf++) {
list_for_each_entry(as, &chip->pcm_list, list) {
snd_pcm_suspend_all(as->pcm);
snd_usb_pcm_suspend(as);
as->substream[0].need_setup_ep =
as->substream[1].need_setup_ep = true;
}
list_for_each(p, &chip->midi_list)
snd_usbmidi_suspend(p);
list_for_each_entry(mixer, &chip->mixer_list, list)
snd_usb_mixer_suspend(mixer);
}
return 0;
}
static int __usb_audio_resume(struct usb_interface *intf, bool reset_resume)
{
struct snd_usb_audio *chip = usb_get_intfdata(intf);
struct snd_usb_stream *as;
struct usb_mixer_interface *mixer;
struct list_head *p;
int err = 0;
if (chip == (void *)-1L)
return 0;
if (--chip->num_suspended_intf)
return 0;
atomic_inc(&chip->active); /* avoid autopm */
list_for_each_entry(as, &chip->pcm_list, list) {
err = snd_usb_pcm_resume(as);
if (err < 0)
goto err_out;
}
/*
* ALSA leaves material resumption to user space;
* we just notify and restart the mixers
*/
list_for_each_entry(mixer, &chip->mixer_list, list) {
err = snd_usb_mixer_resume(mixer, reset_resume);
if (err < 0)
goto err_out;
}
list_for_each(p, &chip->midi_list) {
snd_usbmidi_resume(p);
}
if (!chip->autosuspended)
snd_power_change_state(chip->card, SNDRV_CTL_POWER_D0);
chip->autosuspended = 0;
err_out:
atomic_dec(&chip->active); /* allow autopm after this point */
return err;
}
static int usb_audio_resume(struct usb_interface *intf)
{
return __usb_audio_resume(intf, false);
}
static int usb_audio_reset_resume(struct usb_interface *intf)
{
return __usb_audio_resume(intf, true);
}
#else
#define usb_audio_suspend NULL
#define usb_audio_resume NULL
#define usb_audio_reset_resume NULL
#endif /* CONFIG_PM */
static const struct usb_device_id usb_audio_ids [] = {
#include "quirks-table.h"
{ .match_flags = (USB_DEVICE_ID_MATCH_INT_CLASS | USB_DEVICE_ID_MATCH_INT_SUBCLASS),
.bInterfaceClass = USB_CLASS_AUDIO,
.bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, usb_audio_ids);
/*
* entry point for linux usb interface
*/
static struct usb_driver usb_audio_driver = {
.name = "snd-usb-audio",
.probe = usb_audio_probe,
.disconnect = usb_audio_disconnect,
.suspend = usb_audio_suspend,
.resume = usb_audio_resume,
.reset_resume = usb_audio_reset_resume,
.id_table = usb_audio_ids,
.supports_autosuspend = 1,
};
module_usb_driver(usb_audio_driver);
| ./CrossVul/dataset_final_sorted/CWE-416/c/bad_454_0 |
crossvul-cpp_data_good_4020_0 | /* exif-mnote-data-canon.c
*
* Copyright (c) 2002, 2003 Lutz Mueller <lutz@users.sourceforge.net>
* Copyright (c) 2003 Matthieu Castet <mat-c@users.sourceforge.net>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
* Boston, MA 02110-1301 USA.
*/
#include <config.h>
#include "exif-mnote-data-canon.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <libexif/exif-byte-order.h>
#include <libexif/exif-utils.h>
#include <libexif/exif-data.h>
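/* Overflow-safe bounds check: thanks to short-circuit evaluation, the
 * subtraction "datasize - structsize" is only computed once both operands are
 * known not to exceed datasize, so it cannot wrap around. */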
#define CHECKOVERFLOW(offset,datasize,structsize) (( offset >= datasize) || (structsize > datasize) || (offset > datasize - structsize ))
static void
exif_mnote_data_canon_clear (ExifMnoteDataCanon *n)
{
ExifMnoteData *d = (ExifMnoteData *) n;
unsigned int i;
if (!n) return;
if (n->entries) {
for (i = 0; i < n->count; i++)
if (n->entries[i].data) {
exif_mem_free (d->mem, n->entries[i].data);
n->entries[i].data = NULL;
}
exif_mem_free (d->mem, n->entries);
n->entries = NULL;
n->count = 0;
}
}
static void
exif_mnote_data_canon_free (ExifMnoteData *n)
{
if (!n) return;
exif_mnote_data_canon_clear ((ExifMnoteDataCanon *) n);
}
static void
exif_mnote_data_canon_get_tags (ExifMnoteDataCanon *dc, unsigned int n,
unsigned int *m, unsigned int *s)
{
unsigned int from = 0, to;
if (!dc || !m) return;
for (*m = 0; *m < dc->count; (*m)++) {
to = from + mnote_canon_entry_count_values (&dc->entries[*m]);
if (to > n) {
if (s) *s = n - from;
break;
}
from = to;
}
}
static char *
exif_mnote_data_canon_get_value (ExifMnoteData *note, unsigned int n, char *val, unsigned int maxlen)
{
ExifMnoteDataCanon *dc = (ExifMnoteDataCanon *) note;
unsigned int m, s;
if (!dc) return NULL;
exif_mnote_data_canon_get_tags (dc, n, &m, &s);
if (m >= dc->count) return NULL;
return mnote_canon_entry_get_value (&dc->entries[m], s, val, maxlen);
}
static void
exif_mnote_data_canon_set_byte_order (ExifMnoteData *d, ExifByteOrder o)
{
ExifByteOrder o_orig;
ExifMnoteDataCanon *n = (ExifMnoteDataCanon *) d;
unsigned int i;
if (!n) return;
o_orig = n->order;
n->order = o;
for (i = 0; i < n->count; i++) {
if (n->entries[i].components && (n->entries[i].size/n->entries[i].components < exif_format_get_size (n->entries[i].format)))
continue;
n->entries[i].order = o;
exif_array_set_byte_order (n->entries[i].format, n->entries[i].data,
n->entries[i].components, o_orig, o);
}
}
static void
exif_mnote_data_canon_set_offset (ExifMnoteData *n, unsigned int o)
{
if (n) ((ExifMnoteDataCanon *) n)->offset = o;
}
static void
exif_mnote_data_canon_save (ExifMnoteData *ne,
unsigned char **buf, unsigned int *buf_size)
{
ExifMnoteDataCanon *n = (ExifMnoteDataCanon *) ne;
size_t i, o, s, doff;
unsigned char *t;
size_t ts;
if (!n || !buf || !buf_size) return;
/*
* Allocate enough memory for all entries and the number
* of entries.
*/
*buf_size = 2 + n->count * 12 + 4;
*buf = exif_mem_alloc (ne->mem, sizeof (char) * *buf_size);
if (!*buf) {
EXIF_LOG_NO_MEMORY(ne->log, "ExifMnoteCanon", *buf_size);
return;
}
/* Save the number of entries */
exif_set_short (*buf, n->order, (ExifShort) n->count);
/* Save each entry */
for (i = 0; i < n->count; i++) {
o = 2 + i * 12;
exif_set_short (*buf + o + 0, n->order, (ExifShort) n->entries[i].tag);
exif_set_short (*buf + o + 2, n->order, (ExifShort) n->entries[i].format);
exif_set_long (*buf + o + 4, n->order,
n->entries[i].components);
o += 8;
s = exif_format_get_size (n->entries[i].format) *
n->entries[i].components;
if (s > 65536) {
/* Corrupt data: EXIF data size is limited to the
* maximum size of a JPEG segment (64 kb).
*/
continue;
}
if (s > 4) {
ts = *buf_size + s;
/* Ensure even offsets. Set padding bytes to 0. */
if (s & 1) ts += 1;
t = exif_mem_realloc (ne->mem, *buf,
sizeof (char) * ts);
if (!t) {
EXIF_LOG_NO_MEMORY(ne->log, "ExifMnoteCanon", ts);
return;
}
*buf = t;
*buf_size = ts;
doff = *buf_size - s;
if (s & 1) { doff--; *(*buf + *buf_size - 1) = '\0'; }
exif_set_long (*buf + o, n->order, n->offset + doff);
} else
doff = o;
/*
* Write the data. Fill unneeded bytes with 0. Do not
* crash if data is NULL.
*/
if (!n->entries[i].data) memset (*buf + doff, 0, s);
else memcpy (*buf + doff, n->entries[i].data, s);
if (s < 4) memset (*buf + doff + s, 0, (4 - s));
}
}
/* XXX
* FIXME: exif_mnote_data_canon_load() may fail and there is no
* semantics to express that.
* See bug #1054323 for details, especially the comment by liblit
* after it has supposedly been fixed:
*
* https://sourceforge.net/tracker/?func=detail&aid=1054323&group_id=12272&atid=112272
* Unfortunately, the "return" statements aren't commented at
* all, so it isn't trivial to find out what is a normal
* return, and what is a reaction to an error condition.
*/
static void
exif_mnote_data_canon_load (ExifMnoteData *ne,
const unsigned char *buf, unsigned int buf_size)
{
ExifMnoteDataCanon *n = (ExifMnoteDataCanon *) ne;
ExifShort c;
size_t i, tcount, o, datao;
if (!n || !buf || !buf_size) {
exif_log (ne->log, EXIF_LOG_CODE_CORRUPT_DATA,
"ExifMnoteCanon", "Short MakerNote");
return;
}
datao = 6 + n->offset;
if (CHECKOVERFLOW(datao, buf_size, 2)) {
exif_log (ne->log, EXIF_LOG_CODE_CORRUPT_DATA,
"ExifMnoteCanon", "Short MakerNote");
return;
}
/* Read the number of tags */
c = exif_get_short (buf + datao, n->order);
datao += 2;
/* Remove any old entries */
exif_mnote_data_canon_clear (n);
/* Reserve enough space for all the possible MakerNote tags */
n->entries = exif_mem_alloc (ne->mem, sizeof (MnoteCanonEntry) * c);
if (!n->entries) {
EXIF_LOG_NO_MEMORY(ne->log, "ExifMnoteCanon", sizeof (MnoteCanonEntry) * c);
return;
}
/* Parse the entries */
tcount = 0;
for (i = c, o = datao; i; --i, o += 12) {
size_t s;
memset(&n->entries[tcount], 0, sizeof(MnoteCanonEntry));
if (CHECKOVERFLOW(o,buf_size,12)) {
exif_log (ne->log, EXIF_LOG_CODE_CORRUPT_DATA,
"ExifMnoteCanon", "Short MakerNote");
break;
}
n->entries[tcount].tag = exif_get_short (buf + o, n->order);
n->entries[tcount].format = exif_get_short (buf + o + 2, n->order);
n->entries[tcount].components = exif_get_long (buf + o + 4, n->order);
n->entries[tcount].order = n->order;
exif_log (ne->log, EXIF_LOG_CODE_DEBUG, "ExifMnoteCanon",
"Loading entry 0x%x ('%s')...", n->entries[tcount].tag,
mnote_canon_tag_get_name (n->entries[tcount].tag));
/* Check whether the multiplication would overflow. Use buf_size as the maximum for integer overflow detection;
* the buffer sizes are checked more closely later. */
if ( exif_format_get_size (n->entries[tcount].format) &&
buf_size / exif_format_get_size (n->entries[tcount].format) < n->entries[tcount].components
) {
exif_log (ne->log, EXIF_LOG_CODE_CORRUPT_DATA,
"ExifMnoteCanon", "Tag size overflow detected (%u * %lu)", exif_format_get_size (n->entries[tcount].format), n->entries[tcount].components);
continue;
}
/*
* Size? If bigger than 4 bytes, the actual data is not
* in the entry but somewhere else (offset).
*/
s = exif_format_get_size (n->entries[tcount].format) *
n->entries[tcount].components;
n->entries[tcount].size = s;
if (!s) {
exif_log (ne->log, EXIF_LOG_CODE_CORRUPT_DATA,
"ExifMnoteCanon",
"Invalid zero-length tag size");
continue;
} else {
size_t dataofs = o + 8;
if (s > 4) dataofs = exif_get_long (buf + dataofs, n->order) + 6;
if (CHECKOVERFLOW(dataofs, buf_size, s)) {
exif_log (ne->log, EXIF_LOG_CODE_DEBUG,
"ExifMnoteCanon",
"Tag data past end of buffer (%u > %u)",
(unsigned)(dataofs + s), buf_size);
continue;
}
n->entries[tcount].data = exif_mem_alloc (ne->mem, s);
if (!n->entries[tcount].data) {
EXIF_LOG_NO_MEMORY(ne->log, "ExifMnoteCanon", s);
continue;
}
memcpy (n->entries[tcount].data, buf + dataofs, s);
}
/* Tag was successfully parsed */
++tcount;
}
/* Store the count of successfully parsed tags */
n->count = tcount;
}
static unsigned int
exif_mnote_data_canon_count (ExifMnoteData *n)
{
ExifMnoteDataCanon *dc = (ExifMnoteDataCanon *) n;
unsigned int i, c;
for (i = c = 0; dc && (i < dc->count); i++)
c += mnote_canon_entry_count_values (&dc->entries[i]);
return c;
}
static unsigned int
exif_mnote_data_canon_get_id (ExifMnoteData *d, unsigned int i)
{
ExifMnoteDataCanon *dc = (ExifMnoteDataCanon *) d;
unsigned int m;
if (!dc) return 0;
exif_mnote_data_canon_get_tags (dc, i, &m, NULL);
if (m >= dc->count) return 0;
return dc->entries[m].tag;
}
static const char *
exif_mnote_data_canon_get_name (ExifMnoteData *note, unsigned int i)
{
ExifMnoteDataCanon *dc = (ExifMnoteDataCanon *) note;
unsigned int m, s;
if (!dc) return NULL;
exif_mnote_data_canon_get_tags (dc, i, &m, &s);
if (m >= dc->count) return NULL;
return mnote_canon_tag_get_name_sub (dc->entries[m].tag, s, dc->options);
}
static const char *
exif_mnote_data_canon_get_title (ExifMnoteData *note, unsigned int i)
{
ExifMnoteDataCanon *dc = (ExifMnoteDataCanon *) note;
unsigned int m, s;
if (!dc) return NULL;
exif_mnote_data_canon_get_tags (dc, i, &m, &s);
if (m >= dc->count) return NULL;
return mnote_canon_tag_get_title_sub (dc->entries[m].tag, s, dc->options);
}
static const char *
exif_mnote_data_canon_get_description (ExifMnoteData *note, unsigned int i)
{
ExifMnoteDataCanon *dc = (ExifMnoteDataCanon *) note;
unsigned int m;
if (!dc) return NULL;
exif_mnote_data_canon_get_tags (dc, i, &m, NULL);
if (m >= dc->count) return NULL;
return mnote_canon_tag_get_description (dc->entries[m].tag);
}
int
exif_mnote_data_canon_identify (const ExifData *ed, const ExifEntry *e)
{
char value[8];
(void) e; /* unused */
ExifEntry *em = exif_data_get_entry (ed, EXIF_TAG_MAKE);
if (!em)
return 0;
return !strcmp (exif_entry_get_value (em, value, sizeof (value)), "Canon");
}
ExifMnoteData *
exif_mnote_data_canon_new (ExifMem *mem, ExifDataOption o)
{
ExifMnoteData *d;
ExifMnoteDataCanon *dc;
if (!mem) return NULL;
d = exif_mem_alloc (mem, sizeof (ExifMnoteDataCanon));
if (!d)
return NULL;
exif_mnote_data_construct (d, mem);
/* Set up function pointers */
d->methods.free = exif_mnote_data_canon_free;
d->methods.set_byte_order = exif_mnote_data_canon_set_byte_order;
d->methods.set_offset = exif_mnote_data_canon_set_offset;
d->methods.load = exif_mnote_data_canon_load;
d->methods.save = exif_mnote_data_canon_save;
d->methods.count = exif_mnote_data_canon_count;
d->methods.get_id = exif_mnote_data_canon_get_id;
d->methods.get_name = exif_mnote_data_canon_get_name;
d->methods.get_title = exif_mnote_data_canon_get_title;
d->methods.get_description = exif_mnote_data_canon_get_description;
d->methods.get_value = exif_mnote_data_canon_get_value;
dc = (ExifMnoteDataCanon*)d;
dc->options = o;
return d;
}
| ./CrossVul/dataset_final_sorted/CWE-416/c/good_4020_0 |
crossvul-cpp_data_bad_3400_0 | /*
** gc.c - garbage collector for mruby
**
** See Copyright Notice in mruby.h
*/
#include <string.h>
#include <stdlib.h>
#include <mruby.h>
#include <mruby/array.h>
#include <mruby/class.h>
#include <mruby/data.h>
#include <mruby/hash.h>
#include <mruby/proc.h>
#include <mruby/range.h>
#include <mruby/string.h>
#include <mruby/variable.h>
#include <mruby/gc.h>
#include <mruby/error.h>
/*
= Tri-color Incremental Garbage Collection
mruby's GC is Tri-color Incremental GC with Mark & Sweep.
Algorithm details are omitted.
Instead, the implementation part is described below.
== Object's Color
Each object can be painted in three colors:
* White - Unmarked.
* Gray - Marked, But the child objects are unmarked.
* Black - Marked, the child objects are also marked.
== Two White Types
There're two white color types in a flip-flop fashion: White-A and White-B,
which respectively represent the Current White color (the newly allocated
objects in the current GC cycle) and the Sweep Target White color (the
dead objects to be swept).
A and B will be switched just at the beginning of the next GC cycle. At
that time, all the dead objects have been swept, while the newly created
objects in the current GC cycle which finally remain White are now
regarded as dead objects. Instead of traversing all the White-A objects and
painting them as White-B, just switch the meaning of White-A and White-B as
this will be much cheaper.
As a result, the objects we sweep in the current GC cycle are always
left from the previous GC cycle. This allows us to sweep objects
incrementally, without the disturbance of the newly created objects.
== Execution Timing
GC execution time and each step interval are decided by the live object count.
List of Adjustment API:
* gc_interval_ratio_set
* gc_step_ratio_set
For details, see the comments for each function.
== Write Barrier
mruby implementers and C extension library writers must insert a write
barrier when updating a reference held in a field of an object.
When updating a reference from a field of object A to object B,
two different types of write barrier are available:
* mrb_field_write_barrier - targets object B for marking.
* mrb_write_barrier - targets object A for marking.
== Generational Mode
mruby's GC offers a Generational Mode while re-using the tri-color GC
infrastructure. It will treat the Black objects as Old objects after each
sweep phase, instead of painting them White. The key ideas are still the same
as traditional generational GC:
* Minor GC - just traverse the Young objects (Gray objects) in the mark
phase, then only sweep the newly created objects, and leave
the Old objects live.
* Major GC - same as a full regular GC cycle.
The difference from "traditional" generational GC is that the major GC
in mruby is triggered incrementally in a tri-color manner.
For details, see the comments for each function.
*/
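/*
 * Barrier usage sketch (illustrative; `holder` and `v` are hypothetical
 * objects, not mruby API). When C code stores a reference into a field:
 *
 *   holder->field = v;
 *   mrb_field_write_barrier(mrb, (struct RBasic*)holder, (struct RBasic*)v);
 *
 * or, for a frequently written container, mark the holder itself once:
 *
 *   mrb_write_barrier(mrb, (struct RBasic*)holder);
 *
 * Both functions are defined later in this file.
 */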
struct free_obj {
MRB_OBJECT_HEADER;
struct RBasic *next;
};
typedef struct {
union {
struct free_obj free;
struct RBasic basic;
struct RObject object;
struct RClass klass;
struct RString string;
struct RArray array;
struct RHash hash;
struct RRange range;
struct RData data;
struct RProc proc;
struct REnv env;
struct RException exc;
#ifdef MRB_WORD_BOXING
struct RFloat floatv;
struct RCptr cptr;
#endif
} as;
} RVALUE;
#ifdef GC_PROFILE
#include <stdio.h>
#include <sys/time.h>
static double program_invoke_time = 0;
static double gc_time = 0;
static double gc_total_time = 0;
static double
gettimeofday_time(void)
{
struct timeval tv;
gettimeofday(&tv, NULL);
return tv.tv_sec + tv.tv_usec * 1e-6;
}
#define GC_INVOKE_TIME_REPORT(with) do {\
fprintf(stderr, "%s\n", with);\
fprintf(stderr, "gc_invoke: %19.3f\n", gettimeofday_time() - program_invoke_time);\
fprintf(stderr, "is_generational: %d\n", is_generational(gc));\
fprintf(stderr, "is_major_gc: %d\n", is_major_gc(gc));\
} while(0)
#define GC_TIME_START do {\
gc_time = gettimeofday_time();\
} while(0)
#define GC_TIME_STOP_AND_REPORT do {\
gc_time = gettimeofday_time() - gc_time;\
gc_total_time += gc_time;\
fprintf(stderr, "gc_state: %d\n", gc->state);\
fprintf(stderr, "live: %zu\n", gc->live);\
fprintf(stderr, "majorgc_old_threshold: %zu\n", gc->majorgc_old_threshold);\
fprintf(stderr, "gc_threshold: %zu\n", gc->threshold);\
fprintf(stderr, "gc_time: %30.20f\n", gc_time);\
fprintf(stderr, "gc_total_time: %30.20f\n\n", gc_total_time);\
} while(0)
#else
#define GC_INVOKE_TIME_REPORT(s)
#define GC_TIME_START
#define GC_TIME_STOP_AND_REPORT
#endif
#ifdef GC_DEBUG
#define DEBUG(x) (x)
#else
#define DEBUG(x)
#endif
#ifndef MRB_HEAP_PAGE_SIZE
#define MRB_HEAP_PAGE_SIZE 1024
#endif
#define GC_STEP_SIZE 1024
/* white: 011, black: 100, gray: 000 */
#define GC_GRAY 0
#define GC_WHITE_A 1
#define GC_WHITE_B (1 << 1)
#define GC_BLACK (1 << 2)
#define GC_WHITES (GC_WHITE_A | GC_WHITE_B)
#define GC_COLOR_MASK 7
#define paint_gray(o) ((o)->color = GC_GRAY)
#define paint_black(o) ((o)->color = GC_BLACK)
#define paint_white(o) ((o)->color = GC_WHITES)
#define paint_partial_white(s, o) ((o)->color = (s)->current_white_part)
#define is_gray(o) ((o)->color == GC_GRAY)
#define is_white(o) ((o)->color & GC_WHITES)
#define is_black(o) ((o)->color & GC_BLACK)
#define flip_white_part(s) ((s)->current_white_part = other_white_part(s))
#define other_white_part(s) ((s)->current_white_part ^ GC_WHITES)
#define is_dead(s, o) (((o)->color & other_white_part(s) & GC_WHITES) || (o)->tt == MRB_TT_FREE)
#define objects(p) ((RVALUE *)p->objects)
MRB_API void*
mrb_realloc_simple(mrb_state *mrb, void *p, size_t len)
{
void *p2;
p2 = (mrb->allocf)(mrb, p, len, mrb->allocf_ud);
if (!p2 && len > 0 && mrb->gc.heaps) {
mrb_full_gc(mrb);
p2 = (mrb->allocf)(mrb, p, len, mrb->allocf_ud);
}
return p2;
}
MRB_API void*
mrb_realloc(mrb_state *mrb, void *p, size_t len)
{
void *p2;
p2 = mrb_realloc_simple(mrb, p, len);
if (!p2 && len) {
if (mrb->gc.out_of_memory) {
mrb_exc_raise(mrb, mrb_obj_value(mrb->nomem_err));
/* mrb_panic(mrb); */
}
else {
mrb->gc.out_of_memory = TRUE;
mrb_exc_raise(mrb, mrb_obj_value(mrb->nomem_err));
}
}
else {
mrb->gc.out_of_memory = FALSE;
}
return p2;
}
MRB_API void*
mrb_malloc(mrb_state *mrb, size_t len)
{
return mrb_realloc(mrb, 0, len);
}
MRB_API void*
mrb_malloc_simple(mrb_state *mrb, size_t len)
{
return mrb_realloc_simple(mrb, 0, len);
}
MRB_API void*
mrb_calloc(mrb_state *mrb, size_t nelem, size_t len)
{
void *p;
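/* reject any nelem * len product that would overflow size_t before multiplying */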
if (nelem > 0 && len > 0 &&
nelem <= SIZE_MAX / len) {
size_t size;
size = nelem * len;
p = mrb_malloc(mrb, size);
memset(p, 0, size);
}
else {
p = NULL;
}
return p;
}
MRB_API void
mrb_free(mrb_state *mrb, void *p)
{
(mrb->allocf)(mrb, p, 0, mrb->allocf_ud);
}
MRB_API mrb_bool
mrb_object_dead_p(mrb_state *mrb, struct RBasic *object) {
return is_dead(&mrb->gc, object);
}
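/* Heap pages sit on two doubly linked lists: the list of all pages
 * (heaps / next / prev) and the list of pages that still have free slots
 * (free_heaps / free_next / free_prev). The helpers below maintain both. */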
static void
link_heap_page(mrb_gc *gc, mrb_heap_page *page)
{
page->next = gc->heaps;
if (gc->heaps)
gc->heaps->prev = page;
gc->heaps = page;
}
static void
unlink_heap_page(mrb_gc *gc, mrb_heap_page *page)
{
if (page->prev)
page->prev->next = page->next;
if (page->next)
page->next->prev = page->prev;
if (gc->heaps == page)
gc->heaps = page->next;
page->prev = NULL;
page->next = NULL;
}
static void
link_free_heap_page(mrb_gc *gc, mrb_heap_page *page)
{
page->free_next = gc->free_heaps;
if (gc->free_heaps) {
gc->free_heaps->free_prev = page;
}
gc->free_heaps = page;
}
static void
unlink_free_heap_page(mrb_gc *gc, mrb_heap_page *page)
{
if (page->free_prev)
page->free_prev->free_next = page->free_next;
if (page->free_next)
page->free_next->free_prev = page->free_prev;
if (gc->free_heaps == page)
gc->free_heaps = page->free_next;
page->free_prev = NULL;
page->free_next = NULL;
}
static void
add_heap(mrb_state *mrb, mrb_gc *gc)
{
mrb_heap_page *page = (mrb_heap_page *)mrb_calloc(mrb, 1, sizeof(mrb_heap_page) + MRB_HEAP_PAGE_SIZE * sizeof(RVALUE));
RVALUE *p, *e;
struct RBasic *prev = NULL;
for (p = objects(page), e=p+MRB_HEAP_PAGE_SIZE; p<e; p++) {
p->as.free.tt = MRB_TT_FREE;
p->as.free.next = prev;
prev = &p->as.basic;
}
page->freelist = prev;
link_heap_page(gc, page);
link_free_heap_page(gc, page);
}
#define DEFAULT_GC_INTERVAL_RATIO 200
#define DEFAULT_GC_STEP_RATIO 200
#define DEFAULT_MAJOR_GC_INC_RATIO 200
#define is_generational(gc) ((gc)->generational)
#define is_major_gc(gc) (is_generational(gc) && (gc)->full)
#define is_minor_gc(gc) (is_generational(gc) && !(gc)->full)
void
mrb_gc_init(mrb_state *mrb, mrb_gc *gc)
{
#ifndef MRB_GC_FIXED_ARENA
gc->arena = (struct RBasic**)mrb_malloc(mrb, sizeof(struct RBasic*)*MRB_GC_ARENA_SIZE);
gc->arena_capa = MRB_GC_ARENA_SIZE;
#endif
gc->current_white_part = GC_WHITE_A;
gc->heaps = NULL;
gc->free_heaps = NULL;
add_heap(mrb, gc);
gc->interval_ratio = DEFAULT_GC_INTERVAL_RATIO;
gc->step_ratio = DEFAULT_GC_STEP_RATIO;
#ifndef MRB_GC_TURN_OFF_GENERATIONAL
gc->generational = TRUE;
gc->full = TRUE;
#endif
#ifdef GC_PROFILE
program_invoke_time = gettimeofday_time();
#endif
}
static void obj_free(mrb_state *mrb, struct RBasic *obj, int end);
void
free_heap(mrb_state *mrb, mrb_gc *gc)
{
mrb_heap_page *page = gc->heaps;
mrb_heap_page *tmp;
RVALUE *p, *e;
while (page) {
tmp = page;
page = page->next;
for (p = objects(tmp), e=p+MRB_HEAP_PAGE_SIZE; p<e; p++) {
if (p->as.free.tt != MRB_TT_FREE)
obj_free(mrb, &p->as.basic, TRUE);
}
mrb_free(mrb, tmp);
}
}
void
mrb_gc_destroy(mrb_state *mrb, mrb_gc *gc)
{
free_heap(mrb, gc);
#ifndef MRB_GC_FIXED_ARENA
mrb_free(mrb, gc->arena);
#endif
}
static void
gc_protect(mrb_state *mrb, mrb_gc *gc, struct RBasic *p)
{
#ifdef MRB_GC_FIXED_ARENA
if (gc->arena_idx >= MRB_GC_ARENA_SIZE) {
/* arena overflow error */
gc->arena_idx = MRB_GC_ARENA_SIZE - 4; /* force room in arena */
mrb_exc_raise(mrb, mrb_obj_value(mrb->arena_err));
}
#else
if (gc->arena_idx >= gc->arena_capa) {
/* extend arena */
gc->arena_capa = (int)(gc->arena_capa * 1.5);
gc->arena = (struct RBasic**)mrb_realloc(mrb, gc->arena, sizeof(struct RBasic*)*gc->arena_capa);
}
#endif
gc->arena[gc->arena_idx++] = p;
}
/* mrb_gc_protect() leaves the object in the arena */
MRB_API void
mrb_gc_protect(mrb_state *mrb, mrb_value obj)
{
if (mrb_immediate_p(obj)) return;
gc_protect(mrb, &mrb->gc, mrb_basic_ptr(obj));
}
#define GC_ROOT_NAME "_gc_root_"
/* mrb_gc_register() keeps the object from GC.
Register your object when it's exported to the C world
without a reference from the Ruby world, e.g. as callback
arguments. Don't forget to remove the object using
mrb_gc_unregister, otherwise your object will leak.
*/
MRB_API void
mrb_gc_register(mrb_state *mrb, mrb_value obj)
{
mrb_sym root = mrb_intern_lit(mrb, GC_ROOT_NAME);
mrb_value table = mrb_gv_get(mrb, root);
if (mrb_nil_p(table) || mrb_type(table) != MRB_TT_ARRAY) {
table = mrb_ary_new(mrb);
mrb_gv_set(mrb, root, table);
}
mrb_ary_push(mrb, table, obj);
}
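/*
 * Usage sketch (illustrative; `blk` and the callback plumbing around it are
 * hypothetical, not part of mruby's API):
 *
 *   mrb_gc_register(mrb, blk);    // pin while C holds the only reference
 *   ... invoke blk from C callbacks ...
 *   mrb_gc_unregister(mrb, blk);  // unpin, or the object leaks
 */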
/* mrb_gc_unregister() removes the object from GC root. */
MRB_API void
mrb_gc_unregister(mrb_state *mrb, mrb_value obj)
{
mrb_sym root = mrb_intern_lit(mrb, GC_ROOT_NAME);
mrb_value table = mrb_gv_get(mrb, root);
struct RArray *a;
mrb_int i;
if (mrb_nil_p(table)) return;
if (mrb_type(table) != MRB_TT_ARRAY) {
mrb_gv_set(mrb, root, mrb_nil_value());
return;
}
a = mrb_ary_ptr(table);
mrb_ary_modify(mrb, a);
for (i = 0; i < a->len; i++) {
if (mrb_obj_eq(mrb, a->ptr[i], obj)) {
a->len--;
memmove(&a->ptr[i], &a->ptr[i + 1], (a->len - i) * sizeof(a->ptr[i]));
break;
}
}
}
MRB_API struct RBasic*
mrb_obj_alloc(mrb_state *mrb, enum mrb_vtype ttype, struct RClass *cls)
{
struct RBasic *p;
static const RVALUE RVALUE_zero = { { { MRB_TT_FALSE } } };
mrb_gc *gc = &mrb->gc;
if (cls) {
enum mrb_vtype tt;
switch (cls->tt) {
case MRB_TT_CLASS:
case MRB_TT_SCLASS:
case MRB_TT_MODULE:
case MRB_TT_ENV:
break;
default:
mrb_raise(mrb, E_TYPE_ERROR, "allocation failure");
}
tt = MRB_INSTANCE_TT(cls);
if (tt != MRB_TT_FALSE &&
ttype != MRB_TT_SCLASS &&
ttype != MRB_TT_ICLASS &&
ttype != MRB_TT_ENV &&
ttype != tt) {
mrb_raisef(mrb, E_TYPE_ERROR, "allocation failure of %S", mrb_obj_value(cls));
}
}
#ifdef MRB_GC_STRESS
mrb_full_gc(mrb);
#endif
if (gc->threshold < gc->live) {
mrb_incremental_gc(mrb);
}
if (gc->free_heaps == NULL) {
add_heap(mrb, gc);
}
p = gc->free_heaps->freelist;
gc->free_heaps->freelist = ((struct free_obj*)p)->next;
if (gc->free_heaps->freelist == NULL) {
unlink_free_heap_page(gc, gc->free_heaps);
}
gc->live++;
gc_protect(mrb, gc, p);
*(RVALUE *)p = RVALUE_zero;
p->tt = ttype;
p->c = cls;
paint_partial_white(gc, p);
return p;
}
static inline void
add_gray_list(mrb_state *mrb, mrb_gc *gc, struct RBasic *obj)
{
#ifdef MRB_GC_STRESS
if (obj->tt > MRB_TT_MAXDEFINE) {
abort();
}
#endif
paint_gray(obj);
obj->gcnext = gc->gray_list;
gc->gray_list = obj;
}
static void
mark_context_stack(mrb_state *mrb, struct mrb_context *c)
{
size_t i;
size_t e;
if (c->stack == NULL) return;
e = c->stack - c->stbase;
if (c->ci) e += c->ci->nregs;
if (c->stbase + e > c->stend) e = c->stend - c->stbase;
for (i=0; i<e; i++) {
mrb_value v = c->stbase[i];
if (!mrb_immediate_p(v)) {
if (mrb_basic_ptr(v)->tt == MRB_TT_FREE) {
c->stbase[i] = mrb_nil_value();
}
else {
mrb_gc_mark(mrb, mrb_basic_ptr(v));
}
}
}
}
static void
mark_context(mrb_state *mrb, struct mrb_context *c)
{
int i;
mrb_callinfo *ci;
/* mark stack */
mark_context_stack(mrb, c);
/* mark VM stack */
if (c->cibase) {
for (ci = c->cibase; ci <= c->ci; ci++) {
mrb_gc_mark(mrb, (struct RBasic*)ci->env);
mrb_gc_mark(mrb, (struct RBasic*)ci->proc);
mrb_gc_mark(mrb, (struct RBasic*)ci->target_class);
}
}
/* mark ensure stack */
for (i=0; i<c->esize; i++) {
if (c->ensure[i] == NULL) break;
mrb_gc_mark(mrb, (struct RBasic*)c->ensure[i]);
}
/* mark fibers */
if (c->prev && c->prev->fib) {
mrb_gc_mark(mrb, (struct RBasic*)c->prev->fib);
}
}
static void
gc_mark_children(mrb_state *mrb, mrb_gc *gc, struct RBasic *obj)
{
mrb_assert(is_gray(obj));
paint_black(obj);
gc->gray_list = obj->gcnext;
mrb_gc_mark(mrb, (struct RBasic*)obj->c);
switch (obj->tt) {
case MRB_TT_ICLASS:
{
struct RClass *c = (struct RClass*)obj;
if (MRB_FLAG_TEST(c, MRB_FLAG_IS_ORIGIN))
mrb_gc_mark_mt(mrb, c);
mrb_gc_mark(mrb, (struct RBasic*)((struct RClass*)obj)->super);
}
break;
case MRB_TT_CLASS:
case MRB_TT_MODULE:
case MRB_TT_SCLASS:
{
struct RClass *c = (struct RClass*)obj;
mrb_gc_mark_mt(mrb, c);
mrb_gc_mark(mrb, (struct RBasic*)c->super);
}
/* fall through */
case MRB_TT_OBJECT:
case MRB_TT_DATA:
case MRB_TT_EXCEPTION:
mrb_gc_mark_iv(mrb, (struct RObject*)obj);
break;
case MRB_TT_PROC:
{
struct RProc *p = (struct RProc*)obj;
mrb_gc_mark(mrb, (struct RBasic*)p->env);
mrb_gc_mark(mrb, (struct RBasic*)p->target_class);
}
break;
case MRB_TT_ENV:
{
struct REnv *e = (struct REnv*)obj;
mrb_int i, len;
if (MRB_ENV_STACK_SHARED_P(e)) break;
len = MRB_ENV_STACK_LEN(e);
for (i=0; i<len; i++) {
mrb_gc_mark_value(mrb, e->stack[i]);
}
}
break;
case MRB_TT_FIBER:
{
struct mrb_context *c = ((struct RFiber*)obj)->cxt;
if (c) mark_context(mrb, c);
}
break;
case MRB_TT_ARRAY:
{
struct RArray *a = (struct RArray*)obj;
size_t i, e;
for (i=0,e=a->len; i<e; i++) {
mrb_gc_mark_value(mrb, a->ptr[i]);
}
}
break;
case MRB_TT_HASH:
mrb_gc_mark_iv(mrb, (struct RObject*)obj);
mrb_gc_mark_hash(mrb, (struct RHash*)obj);
break;
case MRB_TT_STRING:
break;
case MRB_TT_RANGE:
{
struct RRange *r = (struct RRange*)obj;
if (r->edges) {
mrb_gc_mark_value(mrb, r->edges->beg);
mrb_gc_mark_value(mrb, r->edges->end);
}
}
break;
default:
break;
}
}
MRB_API void
mrb_gc_mark(mrb_state *mrb, struct RBasic *obj)
{
if (obj == 0) return;
if (!is_white(obj)) return;
mrb_assert((obj)->tt != MRB_TT_FREE);
add_gray_list(mrb, &mrb->gc, obj);
}
static void
obj_free(mrb_state *mrb, struct RBasic *obj, int end)
{
DEBUG(fprintf(stderr, "obj_free(%p,tt=%d)\n",obj,obj->tt));
switch (obj->tt) {
/* immediate - no mark */
case MRB_TT_TRUE:
case MRB_TT_FIXNUM:
case MRB_TT_SYMBOL:
/* cannot happen */
return;
case MRB_TT_FLOAT:
#ifdef MRB_WORD_BOXING
break;
#else
return;
#endif
case MRB_TT_OBJECT:
mrb_gc_free_iv(mrb, (struct RObject*)obj);
break;
case MRB_TT_EXCEPTION:
mrb_gc_free_iv(mrb, (struct RObject*)obj);
if ((struct RObject*)obj == mrb->backtrace.exc)
mrb->backtrace.exc = 0;
break;
case MRB_TT_CLASS:
case MRB_TT_MODULE:
case MRB_TT_SCLASS:
mrb_gc_free_mt(mrb, (struct RClass*)obj);
mrb_gc_free_iv(mrb, (struct RObject*)obj);
break;
case MRB_TT_ICLASS:
if (MRB_FLAG_TEST(obj, MRB_FLAG_IS_ORIGIN))
mrb_gc_free_mt(mrb, (struct RClass*)obj);
break;
case MRB_TT_ENV:
{
struct REnv *e = (struct REnv*)obj;
if (MRB_ENV_STACK_SHARED_P(e)) {
/* cannot be freed */
return;
}
mrb_free(mrb, e->stack);
e->stack = NULL;
}
break;
case MRB_TT_FIBER:
{
struct mrb_context *c = ((struct RFiber*)obj)->cxt;
if (!end && c && c != mrb->root_c) {
mrb_callinfo *ci = c->ci;
mrb_callinfo *ce = c->cibase;
while (ce <= ci) {
struct REnv *e = ci->env;
if (e && !is_dead(&mrb->gc, e) &&
e->tt == MRB_TT_ENV && MRB_ENV_STACK_SHARED_P(e)) {
mrb_env_unshare(mrb, e);
}
ci--;
}
mrb_free_context(mrb, c);
}
}
break;
case MRB_TT_ARRAY:
if (ARY_SHARED_P(obj))
mrb_ary_decref(mrb, ((struct RArray*)obj)->aux.shared);
else
mrb_free(mrb, ((struct RArray*)obj)->ptr);
break;
case MRB_TT_HASH:
mrb_gc_free_iv(mrb, (struct RObject*)obj);
mrb_gc_free_hash(mrb, (struct RHash*)obj);
break;
case MRB_TT_STRING:
mrb_gc_free_str(mrb, (struct RString*)obj);
break;
case MRB_TT_PROC:
{
struct RProc *p = (struct RProc*)obj;
if (!MRB_PROC_CFUNC_P(p) && p->body.irep) {
mrb_irep_decref(mrb, p->body.irep);
}
}
break;
case MRB_TT_RANGE:
mrb_free(mrb, ((struct RRange*)obj)->edges);
break;
case MRB_TT_DATA:
{
struct RData *d = (struct RData*)obj;
if (d->type && d->type->dfree) {
d->type->dfree(mrb, d->data);
}
mrb_gc_free_iv(mrb, (struct RObject*)obj);
}
break;
default:
break;
}
obj->tt = MRB_TT_FREE;
}
static void
root_scan_phase(mrb_state *mrb, mrb_gc *gc)
{
size_t i, e;
if (!is_minor_gc(gc)) {
gc->gray_list = NULL;
gc->atomic_gray_list = NULL;
}
mrb_gc_mark_gv(mrb);
/* mark arena */
for (i=0,e=gc->arena_idx; i<e; i++) {
mrb_gc_mark(mrb, gc->arena[i]);
}
/* mark class hierarchy */
mrb_gc_mark(mrb, (struct RBasic*)mrb->object_class);
/* mark built-in classes */
mrb_gc_mark(mrb, (struct RBasic*)mrb->class_class);
mrb_gc_mark(mrb, (struct RBasic*)mrb->module_class);
mrb_gc_mark(mrb, (struct RBasic*)mrb->proc_class);
mrb_gc_mark(mrb, (struct RBasic*)mrb->string_class);
mrb_gc_mark(mrb, (struct RBasic*)mrb->array_class);
mrb_gc_mark(mrb, (struct RBasic*)mrb->hash_class);
mrb_gc_mark(mrb, (struct RBasic*)mrb->float_class);
mrb_gc_mark(mrb, (struct RBasic*)mrb->fixnum_class);
mrb_gc_mark(mrb, (struct RBasic*)mrb->true_class);
mrb_gc_mark(mrb, (struct RBasic*)mrb->false_class);
mrb_gc_mark(mrb, (struct RBasic*)mrb->nil_class);
mrb_gc_mark(mrb, (struct RBasic*)mrb->symbol_class);
mrb_gc_mark(mrb, (struct RBasic*)mrb->kernel_module);
mrb_gc_mark(mrb, (struct RBasic*)mrb->eException_class);
mrb_gc_mark(mrb, (struct RBasic*)mrb->eStandardError_class);
/* mark top_self */
mrb_gc_mark(mrb, (struct RBasic*)mrb->top_self);
/* mark exception */
mrb_gc_mark(mrb, (struct RBasic*)mrb->exc);
/* mark backtrace */
mrb_gc_mark(mrb, (struct RBasic*)mrb->backtrace.exc);
e = (size_t)mrb->backtrace.n;
for (i=0; i<e; i++) {
mrb_gc_mark(mrb, (struct RBasic*)mrb->backtrace.entries[i].klass);
}
/* mark pre-allocated exception */
mrb_gc_mark(mrb, (struct RBasic*)mrb->nomem_err);
mrb_gc_mark(mrb, (struct RBasic*)mrb->stack_err);
#ifdef MRB_GC_FIXED_ARENA
mrb_gc_mark(mrb, (struct RBasic*)mrb->arena_err);
#endif
mark_context(mrb, mrb->root_c);
if (mrb->root_c->fib) {
mrb_gc_mark(mrb, (struct RBasic*)mrb->root_c->fib);
}
if (mrb->root_c != mrb->c) {
mark_context(mrb, mrb->c);
}
}
static size_t
gc_gray_mark(mrb_state *mrb, mrb_gc *gc, struct RBasic *obj)
{
size_t children = 0;
gc_mark_children(mrb, gc, obj);
switch (obj->tt) {
case MRB_TT_ICLASS:
children++;
break;
case MRB_TT_CLASS:
case MRB_TT_SCLASS:
case MRB_TT_MODULE:
{
struct RClass *c = (struct RClass*)obj;
children += mrb_gc_mark_iv_size(mrb, (struct RObject*)obj);
children += mrb_gc_mark_mt_size(mrb, c);
children++;
}
break;
case MRB_TT_OBJECT:
case MRB_TT_DATA:
case MRB_TT_EXCEPTION:
children += mrb_gc_mark_iv_size(mrb, (struct RObject*)obj);
break;
case MRB_TT_ENV:
children += (int)obj->flags;
break;
case MRB_TT_FIBER:
{
struct mrb_context *c = ((struct RFiber*)obj)->cxt;
size_t i;
mrb_callinfo *ci;
if (!c) break;
/* mark stack */
i = c->stack - c->stbase;
if (c->ci) i += c->ci->nregs;
if (c->stbase + i > c->stend) i = c->stend - c->stbase;
children += i;
/* mark ensure stack */
children += (c->ci) ? c->ci->eidx : 0;
/* mark closure */
if (c->cibase) {
for (i=0, ci = c->cibase; ci <= c->ci; i++, ci++)
;
}
children += i;
}
break;
case MRB_TT_ARRAY:
{
struct RArray *a = (struct RArray*)obj;
children += a->len;
}
break;
case MRB_TT_HASH:
children += mrb_gc_mark_iv_size(mrb, (struct RObject*)obj);
children += mrb_gc_mark_hash_size(mrb, (struct RHash*)obj);
break;
case MRB_TT_PROC:
case MRB_TT_RANGE:
children+=2;
break;
default:
break;
}
return children;
}
static void
gc_mark_gray_list(mrb_state *mrb, mrb_gc *gc) {
while (gc->gray_list) {
if (is_gray(gc->gray_list))
gc_mark_children(mrb, gc, gc->gray_list);
else
gc->gray_list = gc->gray_list->gcnext;
}
}
static size_t
incremental_marking_phase(mrb_state *mrb, mrb_gc *gc, size_t limit)
{
size_t tried_marks = 0;
while (gc->gray_list && tried_marks < limit) {
tried_marks += gc_gray_mark(mrb, gc, gc->gray_list);
}
return tried_marks;
}
static void
final_marking_phase(mrb_state *mrb, mrb_gc *gc)
{
mark_context_stack(mrb, mrb->root_c);
gc_mark_gray_list(mrb, gc);
mrb_assert(gc->gray_list == NULL);
gc->gray_list = gc->atomic_gray_list;
gc->atomic_gray_list = NULL;
gc_mark_gray_list(mrb, gc);
mrb_assert(gc->gray_list == NULL);
}
static void
prepare_incremental_sweep(mrb_state *mrb, mrb_gc *gc)
{
gc->state = MRB_GC_STATE_SWEEP;
gc->sweeps = gc->heaps;
gc->live_after_mark = gc->live;
}
static size_t
incremental_sweep_phase(mrb_state *mrb, mrb_gc *gc, size_t limit)
{
mrb_heap_page *page = gc->sweeps;
size_t tried_sweep = 0;
while (page && (tried_sweep < limit)) {
RVALUE *p = objects(page);
RVALUE *e = p + MRB_HEAP_PAGE_SIZE;
size_t freed = 0;
mrb_bool dead_slot = TRUE;
mrb_bool full = (page->freelist == NULL);
if (is_minor_gc(gc) && page->old) {
/* skip a slot which doesn't contain any young object */
p = e;
dead_slot = FALSE;
}
while (p<e) {
if (is_dead(gc, &p->as.basic)) {
if (p->as.basic.tt != MRB_TT_FREE) {
obj_free(mrb, &p->as.basic, FALSE);
if (p->as.basic.tt == MRB_TT_FREE) {
p->as.free.next = page->freelist;
page->freelist = (struct RBasic*)p;
freed++;
}
else {
dead_slot = FALSE;
}
}
}
else {
if (!is_generational(gc))
paint_partial_white(gc, &p->as.basic); /* next gc target */
dead_slot = FALSE;
}
p++;
}
/* free dead slot */
if (dead_slot && freed < MRB_HEAP_PAGE_SIZE) {
mrb_heap_page *next = page->next;
unlink_heap_page(gc, page);
unlink_free_heap_page(gc, page);
mrb_free(mrb, page);
page = next;
}
else {
if (full && freed > 0) {
link_free_heap_page(gc, page);
}
if (page->freelist == NULL && is_minor_gc(gc))
page->old = TRUE;
else
page->old = FALSE;
page = page->next;
}
tried_sweep += MRB_HEAP_PAGE_SIZE;
gc->live -= freed;
gc->live_after_mark -= freed;
}
gc->sweeps = page;
return tried_sweep;
}
static size_t
incremental_gc(mrb_state *mrb, mrb_gc *gc, size_t limit)
{
switch (gc->state) {
case MRB_GC_STATE_ROOT:
root_scan_phase(mrb, gc);
gc->state = MRB_GC_STATE_MARK;
flip_white_part(gc);
return 0;
case MRB_GC_STATE_MARK:
if (gc->gray_list) {
return incremental_marking_phase(mrb, gc, limit);
}
else {
final_marking_phase(mrb, gc);
prepare_incremental_sweep(mrb, gc);
return 0;
}
case MRB_GC_STATE_SWEEP: {
size_t tried_sweep = 0;
tried_sweep = incremental_sweep_phase(mrb, gc, limit);
if (tried_sweep == 0)
gc->state = MRB_GC_STATE_ROOT;
return tried_sweep;
}
default:
/* unknown state */
mrb_assert(0);
return 0;
}
}
static void
incremental_gc_until(mrb_state *mrb, mrb_gc *gc, mrb_gc_state to_state)
{
do {
incremental_gc(mrb, gc, SIZE_MAX);
} while (gc->state != to_state);
}
static void
incremental_gc_step(mrb_state *mrb, mrb_gc *gc)
{
size_t limit = 0, result = 0;
limit = (GC_STEP_SIZE/100) * gc->step_ratio;
while (result < limit) {
result += incremental_gc(mrb, gc, limit);
if (gc->state == MRB_GC_STATE_ROOT)
break;
}
gc->threshold = gc->live + GC_STEP_SIZE;
}
static void
clear_all_old(mrb_state *mrb, mrb_gc *gc)
{
mrb_bool origin_mode = gc->generational;
mrb_assert(is_generational(gc));
if (is_major_gc(gc)) {
/* finish the half baked GC */
incremental_gc_until(mrb, gc, MRB_GC_STATE_ROOT);
}
/* Sweep the dead objects, then reset all the live objects
* (including all the old objects, of course) to white. */
gc->generational = FALSE;
prepare_incremental_sweep(mrb, gc);
incremental_gc_until(mrb, gc, MRB_GC_STATE_ROOT);
gc->generational = origin_mode;
/* The gray objects have already been painted as white */
gc->atomic_gray_list = gc->gray_list = NULL;
}
MRB_API void
mrb_incremental_gc(mrb_state *mrb)
{
mrb_gc *gc = &mrb->gc;
if (gc->disabled) return;
GC_INVOKE_TIME_REPORT("mrb_incremental_gc()");
GC_TIME_START;
if (is_minor_gc(gc)) {
incremental_gc_until(mrb, gc, MRB_GC_STATE_ROOT);
}
else {
incremental_gc_step(mrb, gc);
}
if (gc->state == MRB_GC_STATE_ROOT) {
mrb_assert(gc->live >= gc->live_after_mark);
gc->threshold = (gc->live_after_mark/100) * gc->interval_ratio;
if (gc->threshold < GC_STEP_SIZE) {
gc->threshold = GC_STEP_SIZE;
}
if (is_major_gc(gc)) {
gc->majorgc_old_threshold = gc->live_after_mark/100 * DEFAULT_MAJOR_GC_INC_RATIO;
gc->full = FALSE;
}
else if (is_minor_gc(gc)) {
if (gc->live > gc->majorgc_old_threshold) {
clear_all_old(mrb, gc);
gc->full = TRUE;
}
}
}
GC_TIME_STOP_AND_REPORT;
}
/* Perform a full gc cycle */
MRB_API void
mrb_full_gc(mrb_state *mrb)
{
mrb_gc *gc = &mrb->gc;
if (gc->disabled) return;
GC_INVOKE_TIME_REPORT("mrb_full_gc()");
GC_TIME_START;
if (is_generational(gc)) {
/* clear all the old objects back to young */
clear_all_old(mrb, gc);
gc->full = TRUE;
}
else if (gc->state != MRB_GC_STATE_ROOT) {
/* finish half baked GC cycle */
incremental_gc_until(mrb, gc, MRB_GC_STATE_ROOT);
}
incremental_gc_until(mrb, gc, MRB_GC_STATE_ROOT);
gc->threshold = (gc->live_after_mark/100) * gc->interval_ratio;
if (is_generational(gc)) {
gc->majorgc_old_threshold = gc->live_after_mark/100 * DEFAULT_MAJOR_GC_INC_RATIO;
gc->full = FALSE;
}
GC_TIME_STOP_AND_REPORT;
}
MRB_API void
mrb_garbage_collect(mrb_state *mrb)
{
mrb_full_gc(mrb);
}
MRB_API int
mrb_gc_arena_save(mrb_state *mrb)
{
return mrb->gc.arena_idx;
}
MRB_API void
mrb_gc_arena_restore(mrb_state *mrb, int idx)
{
mrb_gc *gc = &mrb->gc;
#ifndef MRB_GC_FIXED_ARENA
int capa = gc->arena_capa;
if (idx < capa / 2) {
capa = (int)(capa * 0.66);
if (capa < MRB_GC_ARENA_SIZE) {
capa = MRB_GC_ARENA_SIZE;
}
if (capa != gc->arena_capa) {
gc->arena = (struct RBasic**)mrb_realloc(mrb, gc->arena, sizeof(struct RBasic*)*capa);
gc->arena_capa = capa;
}
}
#endif
gc->arena_idx = idx;
}
/*
* Field write barrier
* Paint obj(Black) -> value(White) to obj(Black) -> value(Gray).
*/
MRB_API void
mrb_field_write_barrier(mrb_state *mrb, struct RBasic *obj, struct RBasic *value)
{
mrb_gc *gc = &mrb->gc;
if (!is_black(obj)) return;
if (!is_white(value)) return;
mrb_assert(gc->state == MRB_GC_STATE_MARK || (!is_dead(gc, value) && !is_dead(gc, obj)));
mrb_assert(is_generational(gc) || gc->state != MRB_GC_STATE_ROOT);
if (is_generational(gc) || gc->state == MRB_GC_STATE_MARK) {
add_gray_list(mrb, gc, value);
}
else {
mrb_assert(gc->state == MRB_GC_STATE_SWEEP);
paint_partial_white(gc, obj); /* for never write barriers */
}
}
/*
* Write barrier
* Paint obj(Black) to obj(Gray).
*
* The object that is painted gray will be traversed atomically in the final
* mark phase, so use this write barrier for frequently written spots,
* e.g. setting an element of an Array.
*/
MRB_API void
mrb_write_barrier(mrb_state *mrb, struct RBasic *obj)
{
mrb_gc *gc = &mrb->gc;
if (!is_black(obj)) return;
mrb_assert(!is_dead(gc, obj));
mrb_assert(is_generational(gc) || gc->state != MRB_GC_STATE_ROOT);
paint_gray(obj);
obj->gcnext = gc->atomic_gray_list;
gc->atomic_gray_list = obj;
}
/*
* call-seq:
* GC.start -> nil
*
* Initiates full garbage collection.
*
*/
static mrb_value
gc_start(mrb_state *mrb, mrb_value obj)
{
mrb_full_gc(mrb);
return mrb_nil_value();
}
/*
* call-seq:
* GC.enable -> true or false
*
* Enables garbage collection, returning <code>true</code> if garbage
* collection was previously disabled.
*
* GC.disable #=> false
* GC.enable #=> true
* GC.enable #=> false
*
*/
static mrb_value
gc_enable(mrb_state *mrb, mrb_value obj)
{
mrb_bool old = mrb->gc.disabled;
mrb->gc.disabled = FALSE;
return mrb_bool_value(old);
}
/*
* call-seq:
* GC.disable -> true or false
*
* Disables garbage collection, returning <code>true</code> if garbage
* collection was already disabled.
*
* GC.disable #=> false
* GC.disable #=> true
*
*/
static mrb_value
gc_disable(mrb_state *mrb, mrb_value obj)
{
mrb_bool old = mrb->gc.disabled;
mrb->gc.disabled = TRUE;
return mrb_bool_value(old);
}
/*
* call-seq:
* GC.interval_ratio -> fixnum
*
* Returns ratio of GC interval. Default value is 200(%).
*
*/
static mrb_value
gc_interval_ratio_get(mrb_state *mrb, mrb_value obj)
{
return mrb_fixnum_value(mrb->gc.interval_ratio);
}
/*
* call-seq:
* GC.interval_ratio = fixnum -> nil
*
* Updates ratio of GC interval. Default value is 200(%).
* If you set 100(%), the next GC cycle starts as soon as the current one ends.
*
*/
static mrb_value
gc_interval_ratio_set(mrb_state *mrb, mrb_value obj)
{
mrb_int ratio;
mrb_get_args(mrb, "i", &ratio);
mrb->gc.interval_ratio = ratio;
return mrb_nil_value();
}
/*
* call-seq:
* GC.step_ratio -> fixnum
*
* Returns step span ratio of Incremental GC. Default value is 200(%).
*
*/
static mrb_value
gc_step_ratio_get(mrb_state *mrb, mrb_value obj)
{
return mrb_fixnum_value(mrb->gc.step_ratio);
}
/*
* call-seq:
* GC.step_ratio = fixnum -> nil
*
* Updates step span ratio of Incremental GC. Default value is 200(%).
* One step of incremental GC becomes longer if the ratio is larger.
*
*/
static mrb_value
gc_step_ratio_set(mrb_state *mrb, mrb_value obj)
{
mrb_int ratio;
mrb_get_args(mrb, "i", &ratio);
mrb->gc.step_ratio = ratio;
return mrb_nil_value();
}
static void
change_gen_gc_mode(mrb_state *mrb, mrb_gc *gc, mrb_bool enable)
{
if (is_generational(gc) && !enable) {
clear_all_old(mrb, gc);
mrb_assert(gc->state == MRB_GC_STATE_ROOT);
gc->full = FALSE;
}
else if (!is_generational(gc) && enable) {
incremental_gc_until(mrb, gc, MRB_GC_STATE_ROOT);
gc->majorgc_old_threshold = gc->live_after_mark/100 * DEFAULT_MAJOR_GC_INC_RATIO;
gc->full = FALSE;
}
gc->generational = enable;
}
/*
* call-seq:
* GC.generational_mode -> true or false
*
* Returns generational or normal gc mode.
*
*/
static mrb_value
gc_generational_mode_get(mrb_state *mrb, mrb_value self)
{
return mrb_bool_value(mrb->gc.generational);
}
/*
* call-seq:
* GC.generational_mode = true or false -> true or false
*
* Changes to generational or normal gc mode.
*
*/
static mrb_value
gc_generational_mode_set(mrb_state *mrb, mrb_value self)
{
mrb_bool enable;
mrb_get_args(mrb, "b", &enable);
if (mrb->gc.generational != enable)
change_gen_gc_mode(mrb, &mrb->gc, enable);
return mrb_bool_value(enable);
}
static void
gc_each_objects(mrb_state *mrb, mrb_gc *gc, mrb_each_object_callback *callback, void *data)
{
mrb_heap_page* page = gc->heaps;
while (page != NULL) {
RVALUE *p, *pend;
p = objects(page);
pend = p + MRB_HEAP_PAGE_SIZE;
for (;p < pend; p++) {
(*callback)(mrb, &p->as.basic, data);
}
page = page->next;
}
}
void
mrb_objspace_each_objects(mrb_state *mrb, mrb_each_object_callback *callback, void *data)
{
gc_each_objects(mrb, &mrb->gc, callback, data);
}
#ifdef GC_TEST
#ifdef GC_DEBUG
static mrb_value gc_test(mrb_state *, mrb_value);
#endif
#endif
void
mrb_init_gc(mrb_state *mrb)
{
struct RClass *gc;
gc = mrb_define_module(mrb, "GC");
mrb_define_class_method(mrb, gc, "start", gc_start, MRB_ARGS_NONE());
mrb_define_class_method(mrb, gc, "enable", gc_enable, MRB_ARGS_NONE());
mrb_define_class_method(mrb, gc, "disable", gc_disable, MRB_ARGS_NONE());
mrb_define_class_method(mrb, gc, "interval_ratio", gc_interval_ratio_get, MRB_ARGS_NONE());
mrb_define_class_method(mrb, gc, "interval_ratio=", gc_interval_ratio_set, MRB_ARGS_REQ(1));
mrb_define_class_method(mrb, gc, "step_ratio", gc_step_ratio_get, MRB_ARGS_NONE());
mrb_define_class_method(mrb, gc, "step_ratio=", gc_step_ratio_set, MRB_ARGS_REQ(1));
mrb_define_class_method(mrb, gc, "generational_mode=", gc_generational_mode_set, MRB_ARGS_REQ(1));
mrb_define_class_method(mrb, gc, "generational_mode", gc_generational_mode_get, MRB_ARGS_NONE());
#ifdef GC_TEST
#ifdef GC_DEBUG
mrb_define_class_method(mrb, gc, "test", gc_test, MRB_ARGS_NONE());
#endif
#endif
}
#ifdef GC_TEST
#ifdef GC_DEBUG
void
test_mrb_field_write_barrier(void)
{
mrb_state *mrb = mrb_open();
struct RBasic *obj, *value;
mrb_gc *gc = &mrb->gc;
puts("test_mrb_field_write_barrier");
gc->generational = FALSE;
obj = mrb_basic_ptr(mrb_ary_new(mrb));
value = mrb_basic_ptr(mrb_str_new_lit(mrb, "value"));
paint_black(obj);
paint_partial_white(gc, value);
puts(" in MRB_GC_STATE_MARK");
gc->state = MRB_GC_STATE_MARK;
mrb_field_write_barrier(mrb, obj, value);
mrb_assert(is_gray(value));
puts(" in MRB_GC_STATE_SWEEP");
paint_partial_white(gc, value);
gc->state = MRB_GC_STATE_SWEEP;
mrb_field_write_barrier(mrb, obj, value);
mrb_assert(obj->color & gc->current_white_part);
mrb_assert(value->color & gc->current_white_part);
puts(" fail with black");
gc->state = MRB_GC_STATE_MARK;
paint_white(obj);
paint_partial_white(gc, value);
mrb_field_write_barrier(mrb, obj, value);
mrb_assert(obj->color & gc->current_white_part);
puts(" fail with gray");
gc->state = MRB_GC_STATE_MARK;
paint_black(obj);
paint_gray(value);
mrb_field_write_barrier(mrb, obj, value);
mrb_assert(is_gray(value));
{
puts("test_mrb_field_write_barrier_value");
obj = mrb_basic_ptr(mrb_ary_new(mrb));
mrb_value value = mrb_str_new_lit(mrb, "value");
paint_black(obj);
paint_partial_white(gc, mrb_basic_ptr(value));
gc->state = MRB_GC_STATE_MARK;
mrb_field_write_barrier_value(mrb, obj, value);
mrb_assert(is_gray(mrb_basic_ptr(value)));
}
mrb_close(mrb);
}
void
test_mrb_write_barrier(void)
{
mrb_state *mrb = mrb_open();
struct RBasic *obj;
mrb_gc *gc = &mrb->gc;
puts("test_mrb_write_barrier");
obj = mrb_basic_ptr(mrb_ary_new(mrb));
paint_black(obj);
puts(" in MRB_GC_STATE_MARK");
gc->state = MRB_GC_STATE_MARK;
mrb_write_barrier(mrb, obj);
mrb_assert(is_gray(obj));
mrb_assert(gc->atomic_gray_list == obj);
puts(" fail with gray");
paint_gray(obj);
mrb_write_barrier(mrb, obj);
mrb_assert(is_gray(obj));
mrb_close(mrb);
}
void
test_add_gray_list(void)
{
mrb_state *mrb = mrb_open();
struct RBasic *obj1, *obj2;
mrb_gc *gc = &mrb->gc;
puts("test_add_gray_list");
change_gen_gc_mode(mrb, gc, FALSE);
mrb_assert(gc->gray_list == NULL);
obj1 = mrb_basic_ptr(mrb_str_new_lit(mrb, "test"));
add_gray_list(mrb, gc, obj1);
mrb_assert(gc->gray_list == obj1);
mrb_assert(is_gray(obj1));
obj2 = mrb_basic_ptr(mrb_str_new_lit(mrb, "test"));
add_gray_list(mrb, gc, obj2);
mrb_assert(gc->gray_list == obj2);
mrb_assert(gc->gray_list->gcnext == obj1);
mrb_assert(is_gray(obj2));
mrb_close(mrb);
}
void
test_gc_gray_mark(void)
{
mrb_state *mrb = mrb_open();
mrb_value obj_v, value_v;
struct RBasic *obj;
size_t gray_num = 0;
mrb_gc *gc = &mrb->gc;
puts("test_gc_gray_mark");
puts(" in MRB_TT_CLASS");
obj = (struct RBasic*)mrb->object_class;
paint_gray(obj);
gray_num = gc_gray_mark(mrb, gc, obj);
mrb_assert(is_black(obj));
mrb_assert(gray_num > 1);
puts(" in MRB_TT_ARRAY");
obj_v = mrb_ary_new(mrb);
value_v = mrb_str_new_lit(mrb, "test");
paint_gray(mrb_basic_ptr(obj_v));
paint_partial_white(gc, mrb_basic_ptr(value_v));
mrb_ary_push(mrb, obj_v, value_v);
gray_num = gc_gray_mark(mrb, gc, mrb_basic_ptr(obj_v));
mrb_assert(is_black(mrb_basic_ptr(obj_v)));
mrb_assert(is_gray(mrb_basic_ptr(value_v)));
mrb_assert(gray_num == 1);
mrb_close(mrb);
}
void
test_incremental_gc(void)
{
mrb_state *mrb = mrb_open();
size_t max = ~0, live = 0, total = 0, freed = 0;
RVALUE *free;
mrb_heap_page *page;
mrb_gc *gc = &mrb->gc;
puts("test_incremental_gc");
change_gen_gc_mode(mrb, gc, FALSE);
puts(" in mrb_full_gc");
mrb_full_gc(mrb);
mrb_assert(gc->state == MRB_GC_STATE_ROOT);
puts(" in MRB_GC_STATE_ROOT");
incremental_gc(mrb, gc, max);
mrb_assert(gc->state == MRB_GC_STATE_MARK);
puts(" in MRB_GC_STATE_MARK");
incremental_gc_until(mrb, gc, MRB_GC_STATE_SWEEP);
mrb_assert(gc->state == MRB_GC_STATE_SWEEP);
puts(" in MRB_GC_STATE_SWEEP");
page = gc->heaps;
while (page) {
RVALUE *p = objects(page);
RVALUE *e = p + MRB_HEAP_PAGE_SIZE;
while (p<e) {
if (is_black(&p->as.basic)) {
live++;
}
if (is_gray(&p->as.basic) && !is_dead(gc, &p->as.basic)) {
printf("%p\n", &p->as.basic);
}
p++;
}
page = page->next;
total += MRB_HEAP_PAGE_SIZE;
}
mrb_assert(gc->gray_list == NULL);
incremental_gc(mrb, gc, max);
mrb_assert(gc->state == MRB_GC_STATE_SWEEP);
incremental_gc(mrb, gc, max);
mrb_assert(gc->state == MRB_GC_STATE_ROOT);
free = (RVALUE*)gc->heaps->freelist;
while (free) {
freed++;
free = (RVALUE*)free->as.free.next;
}
mrb_assert(gc->live == live);
mrb_assert(gc->live == total-freed);
puts("test_incremental_gc(gen)");
incremental_gc_until(mrb, gc, MRB_GC_STATE_SWEEP);
change_gen_gc_mode(mrb, gc, TRUE);
mrb_assert(gc->full == FALSE);
mrb_assert(gc->state == MRB_GC_STATE_ROOT);
puts(" in minor");
mrb_assert(is_minor_gc(gc));
mrb_assert(gc->majorgc_old_threshold > 0);
gc->majorgc_old_threshold = 0;
mrb_incremental_gc(mrb);
mrb_assert(gc->full == TRUE);
mrb_assert(gc->state == MRB_GC_STATE_ROOT);
puts(" in major");
mrb_assert(is_major_gc(gc));
do {
mrb_incremental_gc(mrb);
} while (gc->state != MRB_GC_STATE_ROOT);
mrb_assert(gc->full == FALSE);
mrb_close(mrb);
}
void
test_incremental_sweep_phase(void)
{
mrb_state *mrb = mrb_open();
mrb_gc *gc = &mrb->gc;
puts("test_incremental_sweep_phase");
add_heap(mrb, gc);
gc->sweeps = gc->heaps;
mrb_assert(gc->heaps->next->next == NULL);
mrb_assert(gc->free_heaps->next->next == NULL);
incremental_sweep_phase(mrb, gc, MRB_HEAP_PAGE_SIZE * 3);
mrb_assert(gc->heaps->next == NULL);
mrb_assert(gc->heaps == gc->free_heaps);
mrb_close(mrb);
}
static mrb_value
gc_test(mrb_state *mrb, mrb_value self)
{
test_mrb_field_write_barrier();
test_mrb_write_barrier();
test_add_gray_list();
test_gc_gray_mark();
test_incremental_gc();
test_incremental_sweep_phase();
return mrb_nil_value();
}
#endif /* GC_DEBUG */
#endif /* GC_TEST */
| ./CrossVul/dataset_final_sorted/CWE-416/c/bad_3400_0 |
crossvul-cpp_data_bad_2905_0 | /*
* USB Serial Console driver
*
* Copyright (C) 2001 - 2002 Greg Kroah-Hartman (greg@kroah.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* Thanks to Randy Dunlap for the original version of this code.
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/console.h>
#include <linux/serial.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
struct usbcons_info {
int magic;
int break_flag;
struct usb_serial_port *port;
};
static struct usbcons_info usbcons_info;
static struct console usbcons;
/*
* ------------------------------------------------------------
* USB Serial console driver
*
* Much of the code here is copied from drivers/char/serial.c
* and implements a phony serial console in the same way that
* serial.c does so that in case some software queries it,
* it will get the same results.
*
* One thing that differs from the way the serial port code
* does things is that we call the lower level usb-serial
* driver code to initialize the device, and we set the initial
* console speeds based on the command line arguments.
* ------------------------------------------------------------
*/
static const struct tty_operations usb_console_fake_tty_ops = {
};
/*
* The parsing of the command line works exactly like the
* serial.c code, except that the specifier is "ttyUSB" instead
* of "ttyS".
*/
static int usb_console_setup(struct console *co, char *options)
{
struct usbcons_info *info = &usbcons_info;
int baud = 9600;
int bits = 8;
int parity = 'n';
int doflow = 0;
int cflag = CREAD | HUPCL | CLOCAL;
char *s;
struct usb_serial *serial;
struct usb_serial_port *port;
int retval;
struct tty_struct *tty = NULL;
struct ktermios dummy;
if (options) {
baud = simple_strtoul(options, NULL, 10);
s = options;
while (*s >= '0' && *s <= '9')
s++;
if (*s)
parity = *s++;
if (*s)
bits = *s++ - '0';
if (*s)
doflow = (*s++ == 'r');
}
/* Sane default */
if (baud == 0)
baud = 9600;
switch (bits) {
case 7:
cflag |= CS7;
break;
default:
case 8:
cflag |= CS8;
break;
}
switch (parity) {
case 'o': case 'O':
cflag |= PARODD;
break;
case 'e': case 'E':
cflag |= PARENB;
break;
}
co->cflag = cflag;
/*
* no need to check the index here: if the index is wrong, console
* code won't call us
*/
port = usb_serial_port_get_by_minor(co->index);
if (port == NULL) {
/* no device is connected yet, sorry :( */
pr_err("No USB device connected to ttyUSB%i\n", co->index);
return -ENODEV;
}
serial = port->serial;
retval = usb_autopm_get_interface(serial->interface);
if (retval)
goto error_get_interface;
tty_port_tty_set(&port->port, NULL);
info->port = port;
++port->port.count;
if (!tty_port_initialized(&port->port)) {
if (serial->type->set_termios) {
/*
* allocate a fake tty so the driver can initialize
* the termios structure, then later call set_termios to
* configure according to command line arguments
*/
tty = kzalloc(sizeof(*tty), GFP_KERNEL);
if (!tty) {
retval = -ENOMEM;
goto reset_open_count;
}
kref_init(&tty->kref);
tty->driver = usb_serial_tty_driver;
tty->index = co->index;
init_ldsem(&tty->ldisc_sem);
spin_lock_init(&tty->files_lock);
INIT_LIST_HEAD(&tty->tty_files);
kref_get(&tty->driver->kref);
__module_get(tty->driver->owner);
tty->ops = &usb_console_fake_tty_ops;
tty_init_termios(tty);
tty_port_tty_set(&port->port, tty);
}
/* only call the device specific open if this
* is the first time the port is opened */
retval = serial->type->open(NULL, port);
if (retval) {
dev_err(&port->dev, "could not open USB console port\n");
goto fail;
}
if (serial->type->set_termios) {
tty->termios.c_cflag = cflag;
tty_termios_encode_baud_rate(&tty->termios, baud, baud);
memset(&dummy, 0, sizeof(struct ktermios));
serial->type->set_termios(tty, port, &dummy);
tty_port_tty_set(&port->port, NULL);
tty_kref_put(tty);
}
tty_port_set_initialized(&port->port, 1);
}
/* Now that any required fake tty operations are completed, restore
* the tty port count */
--port->port.count;
/* The console is special in terms of closing the device, so
* indicate this port is now acting as a system console. */
port->port.console = 1;
mutex_unlock(&serial->disc_mutex);
return retval;
fail:
tty_port_tty_set(&port->port, NULL);
tty_kref_put(tty);
reset_open_count:
port->port.count = 0;
usb_autopm_put_interface(serial->interface);
error_get_interface:
usb_serial_put(serial);
mutex_unlock(&serial->disc_mutex);
return retval;
}
static void usb_console_write(struct console *co,
const char *buf, unsigned count)
{
static struct usbcons_info *info = &usbcons_info;
struct usb_serial_port *port = info->port;
struct usb_serial *serial;
int retval = -ENODEV;
if (!port || port->serial->dev->state == USB_STATE_NOTATTACHED)
return;
serial = port->serial;
if (count == 0)
return;
dev_dbg(&port->dev, "%s - %d byte(s)\n", __func__, count);
if (!port->port.console) {
dev_dbg(&port->dev, "%s - port not opened\n", __func__);
return;
}
while (count) {
unsigned int i;
unsigned int lf;
/* search for LF so we can insert CR if necessary */
for (i = 0, lf = 0 ; i < count ; i++) {
if (*(buf + i) == 10) {
lf = 1;
i++;
break;
}
}
/* pass on to the driver specific version of this function if
it is available */
retval = serial->type->write(NULL, port, buf, i);
dev_dbg(&port->dev, "%s - write: %d\n", __func__, retval);
if (lf) {
/* append CR after LF */
unsigned char cr = 13;
retval = serial->type->write(NULL, port, &cr, 1);
dev_dbg(&port->dev, "%s - write cr: %d\n",
__func__, retval);
}
buf += i;
count -= i;
}
}
static struct tty_driver *usb_console_device(struct console *co, int *index)
{
struct tty_driver **p = (struct tty_driver **)co->data;
if (!*p)
return NULL;
*index = co->index;
return *p;
}
static struct console usbcons = {
.name = "ttyUSB",
.write = usb_console_write,
.device = usb_console_device,
.setup = usb_console_setup,
.flags = CON_PRINTBUFFER,
.index = -1,
.data = &usb_serial_tty_driver,
};
void usb_serial_console_disconnect(struct usb_serial *serial)
{
if (serial->port[0] && serial->port[0] == usbcons_info.port) {
usb_serial_console_exit();
usb_serial_put(serial);
}
}
void usb_serial_console_init(int minor)
{
if (minor == 0) {
/*
* Call register_console() if this is the first device plugged
* in. If we call it earlier, then the callback to
* console_setup() will fail, as there is not a device seen by
* the USB subsystem yet.
*/
/*
* Register console.
* NOTES:
* console_setup() is called (back) immediately (from
* register_console). console_write() is called immediately
* from register_console iff CON_PRINTBUFFER is set in flags.
*/
pr_debug("registering the USB serial console.\n");
register_console(&usbcons);
}
}
void usb_serial_console_exit(void)
{
if (usbcons_info.port) {
unregister_console(&usbcons);
usbcons_info.port->port.console = 0;
usbcons_info.port = NULL;
}
}
| ./CrossVul/dataset_final_sorted/CWE-416/c/bad_2905_0 |
crossvul-cpp_data_bad_2638_0 | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% M M AAA TTTTT L AAA BBBB %
% MM MM A A T L A A B B %
% M M M AAAAA T L AAAAA BBBB %
% M M A A T L A A B B %
% M M A A T LLLLL A A BBBB %
% %
% %
% Read MATLAB Image Format %
% %
% Software Design %
% Jaroslav Fojtik %
% 2001-2008 %
% %
% %
% Permission is hereby granted, free of charge, to any person obtaining a %
% copy of this software and associated documentation files ("ImageMagick"), %
% to deal in ImageMagick without restriction, including without limitation %
% the rights to use, copy, modify, merge, publish, distribute, sublicense, %
% and/or sell copies of ImageMagick, and to permit persons to whom the %
% ImageMagick is furnished to do so, subject to the following conditions: %
% %
% The above copyright notice and this permission notice shall be included in %
% all copies or substantial portions of ImageMagick. %
% %
% The software is provided "as is", without warranty of any kind, express or %
% implied, including but not limited to the warranties of merchantability, %
% fitness for a particular purpose and noninfringement. In no event shall %
% ImageMagick Studio be liable for any claim, damages or other liability, %
% whether in an action of contract, tort or otherwise, arising from, out of %
% or in connection with ImageMagick or the use or other dealings in %
% ImageMagick. %
% %
% Except as contained in this notice, the name of the ImageMagick Studio %
% shall not be used in advertising or otherwise to promote the sale, use or %
% other dealings in ImageMagick without prior written authorization from the %
% ImageMagick Studio. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/distort.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/resource_.h"
#include "MagickCore/static.h"
#include "MagickCore/string_.h"
#include "MagickCore/module.h"
#include "MagickCore/transform.h"
#include "MagickCore/utility-private.h"
#if defined(MAGICKCORE_ZLIB_DELEGATE)
#include "zlib.h"
#endif
/*
Forward declaration.
*/
static MagickBooleanType
WriteMATImage(const ImageInfo *,Image *,ExceptionInfo *);
/* Auto-coloring method; note that it introduces some artifacts into the
data. The complex plane is mapped to colors as follows (see the worked
example below):
MinReal+j*MaxComplex = red    MaxReal+j*MaxComplex = black
MinReal+j*0 = white           MaxReal+j*0 = black
MinReal+j*MinComplex = blue   MaxReal+j*MinComplex = black
*/
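/*
 * Worked example of the blend implemented below (illustrative numbers,
 * Q=8 so QuantumRange=255): for a mid-gray pixel (128,128,128) and a
 * sample *p equal to MaxVal, f = (255-128) = 127, so red saturates at
 * 128+127 = 255 while blue drops to 128-63 = 65 and green is copied from
 * blue -- the pixel slides toward the red corner of the legend; negative
 * samples make the mirror-image slide toward blue.
 */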
typedef struct
{
char identific[124];
unsigned short Version;
char EndianIndicator[2];
unsigned long DataType;
unsigned int ObjectSize;
unsigned long unknown1;
unsigned long unknown2;
unsigned short unknown5;
unsigned char StructureFlag;
unsigned char StructureClass;
unsigned long unknown3;
unsigned long unknown4;
unsigned long DimFlag;
unsigned long SizeX;
unsigned long SizeY;
unsigned short Flag1;
unsigned short NameFlag;
}
MATHeader;
static const char *MonthsTab[12]={"Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"};
static const char *DayOfWTab[7]={"Sun","Mon","Tue","Wed","Thu","Fri","Sat"};
static const char *OsDesc=
#if defined(MAGICKCORE_WINDOWS_SUPPORT)
"PCWIN";
#else
#ifdef __APPLE__
"MAC";
#else
"LNX86";
#endif
#endif
typedef enum
{
miINT8 = 1, /* 8 bit signed */
miUINT8, /* 8 bit unsigned */
miINT16, /* 16 bit signed */
miUINT16, /* 16 bit unsigned */
miINT32, /* 32 bit signed */
miUINT32, /* 32 bit unsigned */
miSINGLE, /* IEEE 754 single precision float */
miRESERVE1,
miDOUBLE, /* IEEE 754 double precision float */
miRESERVE2,
miRESERVE3,
miINT64, /* 64 bit signed */
miUINT64, /* 64 bit unsigned */
miMATRIX, /* MATLAB array */
miCOMPRESSED, /* Compressed Data */
miUTF8, /* Unicode UTF-8 Encoded Character Data */
miUTF16, /* Unicode UTF-16 Encoded Character Data */
miUTF32 /* Unicode UTF-32 Encoded Character Data */
} mat5_data_type;
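/*
 * For orientation (hedged from the published MAT-file level 5 description,
 * not from this coder): every data element in a v5 file is preceded by an
 * 8-byte tag holding one of the mat5_data_type values above and the payload
 * byte count, with the payload padded to an 8-byte boundary, e.g.
 *
 *   [u32 type = miUINT8][u32 size = 3][3 payload bytes][5 pad bytes]
 *
 * Elements of four bytes or fewer may instead use the packed "small data
 * element" form, where type and size share the first 32-bit word.
 */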
typedef enum
{
mxCELL_CLASS=1, /* cell array */
mxSTRUCT_CLASS, /* structure */
mxOBJECT_CLASS, /* object */
mxCHAR_CLASS, /* character array */
mxSPARSE_CLASS, /* sparse array */
mxDOUBLE_CLASS, /* double precision array */
mxSINGLE_CLASS, /* single precision floating point */
mxINT8_CLASS, /* 8 bit signed integer */
mxUINT8_CLASS, /* 8 bit unsigned integer */
mxINT16_CLASS, /* 16 bit signed integer */
mxUINT16_CLASS, /* 16 bit unsigned integer */
mxINT32_CLASS, /* 32 bit signed integer */
mxUINT32_CLASS, /* 32 bit unsigned integer */
mxINT64_CLASS, /* 64 bit signed integer */
mxUINT64_CLASS, /* 64 bit unsigned integer */
mxFUNCTION_CLASS /* Function handle */
} arrayclasstype;
#define FLAG_COMPLEX 0x8
#define FLAG_GLOBAL 0x4
#define FLAG_LOGICAL 0x2
static const QuantumType z2qtype[4] = {GrayQuantum, BlueQuantum, GreenQuantum, RedQuantum};
static void InsertComplexDoubleRow(Image *image,double *p,int y,double MinVal,
double MaxVal,ExceptionInfo *exception)
{
double f;
int x;
register Quantum *q;
if (MinVal == 0)
MinVal = -1;
if (MaxVal == 0)
MaxVal = 1;
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
return;
for (x = 0; x < (ssize_t) image->columns; x++)
{
if (*p > 0)
{
f = (*p / MaxVal) * (QuantumRange-GetPixelRed(image,q));
if (f + GetPixelRed(image,q) > QuantumRange)
SetPixelRed(image,QuantumRange,q);
else
SetPixelRed(image,GetPixelRed(image,q)+(int) f,q);
if ((int) f / 2.0 > GetPixelGreen(image,q))
{
SetPixelGreen(image,0,q);
SetPixelBlue(image,0,q);
}
else
{
SetPixelBlue(image,GetPixelBlue(image,q)-(int) (f/2.0),q);
SetPixelGreen(image,GetPixelBlue(image,q),q);
}
}
if (*p < 0)
{
f = (*p / MaxVal) * (QuantumRange-GetPixelBlue(image,q));
if (f+GetPixelBlue(image,q) > QuantumRange)
SetPixelBlue(image,QuantumRange,q);
else
SetPixelBlue(image,GetPixelBlue(image,q)+(int) f,q);
if ((int) f / 2.0 > GetPixelGreen(image,q))
{
SetPixelRed(image,0,q);
SetPixelGreen(image,0,q);
}
else
{
SetPixelRed(image,GetPixelRed(image,q)-(int) (f/2.0),q);
SetPixelGreen(image,GetPixelRed(image,q),q);
}
}
p++;
q+=GetPixelChannels(image);
}
if (!SyncAuthenticPixels(image,exception))
return;
return;
}
static void InsertComplexFloatRow(Image *image,float *p,int y,double MinVal,
double MaxVal,ExceptionInfo *exception)
{
double f;
int x;
register Quantum *q;
if (MinVal == 0)
MinVal = -1;
if (MaxVal == 0)
MaxVal = 1;
q = QueueAuthenticPixels(image, 0, y, image->columns, 1,exception);
if (q == (Quantum *) NULL)
return;
for (x = 0; x < (ssize_t) image->columns; x++)
{
if (*p > 0)
{
f = (*p / MaxVal) * (QuantumRange-GetPixelRed(image,q));
if (f+GetPixelRed(image,q) > QuantumRange)
SetPixelRed(image,QuantumRange,q);
else
SetPixelRed(image,GetPixelRed(image,q)+(int) f,q);
if ((int) f / 2.0 > GetPixelGreen(image,q))
{
SetPixelGreen(image,0,q);
SetPixelBlue(image,0,q);
}
else
{
SetPixelBlue(image,GetPixelBlue(image,q)-(int) (f/2.0),q);
SetPixelGreen(image,GetPixelBlue(image,q),q);
}
}
if (*p < 0)
{
f = (*p / MaxVal) * (QuantumRange - GetPixelBlue(image,q));
if (f + GetPixelBlue(image,q) > QuantumRange)
SetPixelBlue(image,QuantumRange,q);
else
SetPixelBlue(image,GetPixelBlue(image,q)+
(int) f,q);
if ((int) f / 2.0 > GetPixelGreen(image,q))
{
SetPixelGreen(image,0,q);
SetPixelRed(image,0,q);
}
else
{
SetPixelRed(image,GetPixelRed(image,q)-(int) (f/2.0),q);
SetPixelGreen(image,GetPixelRed(image,q),q);
}
}
p++;
q+=GetPixelChannels(image);
}
if (!SyncAuthenticPixels(image,exception))
return;
return;
}
/************** READERS ******************/
/* This function reads one block of floats. */
static void ReadBlobFloatsLSB(Image * image, size_t len, float *data)
{
while (len >= 4)
{
*data++ = ReadBlobFloat(image);
len -= sizeof(float);
}
if (len > 0)
(void) SeekBlob(image, len, SEEK_CUR);
}
static void ReadBlobFloatsMSB(Image * image, size_t len, float *data)
{
while (len >= 4)
{
*data++ = ReadBlobFloat(image);
len -= sizeof(float);
}
if (len > 0)
(void) SeekBlob(image, len, SEEK_CUR);
}
/* This function reads one block of doubles. */
static void ReadBlobDoublesLSB(Image * image, size_t len, double *data)
{
while (len >= 8)
{
*data++ = ReadBlobDouble(image);
len -= sizeof(double);
}
if (len > 0)
(void) SeekBlob(image, len, SEEK_CUR);
}
static void ReadBlobDoublesMSB(Image * image, size_t len, double *data)
{
while (len >= 8)
{
*data++ = ReadBlobDouble(image);
len -= sizeof(double);
}
if (len > 0)
(void) SeekBlob(image, len, SEEK_CUR);
}
/* Calculate minimum and maximum from a given block of data */
static void CalcMinMax(Image *image, int endian_indicator, int SizeX, int SizeY, size_t CellType, unsigned ldblk, void *BImgBuff, double *Min, double *Max)
{
MagickOffsetType filepos;
int i, x;
void (*ReadBlobDoublesXXX)(Image * image, size_t len, double *data);
void (*ReadBlobFloatsXXX)(Image * image, size_t len, float *data);
double *dblrow;
float *fltrow;
if (endian_indicator == LSBEndian)
{
ReadBlobDoublesXXX = ReadBlobDoublesLSB;
ReadBlobFloatsXXX = ReadBlobFloatsLSB;
}
else /* MI */
{
ReadBlobDoublesXXX = ReadBlobDoublesMSB;
ReadBlobFloatsXXX = ReadBlobFloatsMSB;
}
filepos = TellBlob(image); /* remember the position so we can rewind after the min/max scan */
for (i = 0; i < SizeY; i++)
{
if (CellType==miDOUBLE)
{
ReadBlobDoublesXXX(image, ldblk, (double *)BImgBuff);
dblrow = (double *)BImgBuff;
if (i == 0)
{
*Min = *Max = *dblrow;
}
for (x = 0; x < SizeX; x++)
{
if (*Min > *dblrow)
*Min = *dblrow;
if (*Max < *dblrow)
*Max = *dblrow;
dblrow++;
}
}
if (CellType==miSINGLE)
{
ReadBlobFloatsXXX(image, ldblk, (float *)BImgBuff);
fltrow = (float *)BImgBuff;
if (i == 0)
{
*Min = *Max = *fltrow;
}
for (x = 0; x < (ssize_t) SizeX; x++)
{
if (*Min > *fltrow)
*Min = *fltrow;
if (*Max < *fltrow)
*Max = *fltrow;
fltrow++;
}
}
}
(void) SeekBlob(image, filepos, SEEK_SET);
}
static void FixSignedValues(const Image *image,Quantum *q, int y)
{
while(y-->0)
{
/* Please note that negative values will overflow
Q=8; QuantumRange=255: <0;127> + 127+1 = <128; 255>
<-1;-128> + 127+1 = <0; 127> */
SetPixelRed(image,GetPixelRed(image,q)+QuantumRange/2+1,q);
SetPixelGreen(image,GetPixelGreen(image,q)+QuantumRange/2+1,q);
SetPixelBlue(image,GetPixelBlue(image,q)+QuantumRange/2+1,q);
q++;
}
}
/** Pack a whole row of logical/binary data into bits, one bit per input byte. */
static void FixLogical(unsigned char *Buff,int ldblk)
{
unsigned char mask=128;
unsigned char *BuffL = Buff;
unsigned char val = 0;
while(ldblk-->0)
{
if(*Buff++ != 0)
val |= mask;
mask >>= 1;
if(mask==0)
{
*BuffL++ = val;
val = 0;
mask = 128;
}
}
*BuffL = val;
}
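/*
 * Packing example for FixLogical() above (assumed input, illustrative):
 * ldblk = 8 input bytes {1,0,1,0,0,0,0,1} collapse into the single output
 * byte 0xA1 (binary 10100001); the trailing "*BuffL = val" flushes any
 * partial group, with the unused low bits left at zero.
 */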
#if defined(MAGICKCORE_ZLIB_DELEGATE)
static voidpf AcquireZIPMemory(voidpf context,unsigned int items,
unsigned int size)
{
(void) context;
return((voidpf) AcquireQuantumMemory(items,size));
}
static void RelinquishZIPMemory(voidpf context,voidpf memory)
{
(void) context;
memory=RelinquishMagickMemory(memory);
}
#endif
#if defined(MAGICKCORE_ZLIB_DELEGATE)
/** This procedure decompresses an image block for the new (v5) MATLAB format. */
static Image *decompress_block(Image *orig, unsigned int *Size, ImageInfo *clone_info, ExceptionInfo *exception)
{
Image *image2;
void *cache_block, *decompress_block;
z_stream zip_info;
FILE *mat_file;
size_t magick_size;
size_t extent;
int file;
int status;
int zip_status;
ssize_t TotalSize = 0;
if(clone_info==NULL) return NULL;
if(clone_info->file) /* Close file opened from previous transaction. */
{
fclose(clone_info->file);
clone_info->file = NULL;
(void) remove_utf8(clone_info->filename);
}
cache_block = AcquireQuantumMemory((size_t)(*Size < 16384) ? *Size: 16384,sizeof(unsigned char));
if(cache_block==NULL) return NULL;
decompress_block = AcquireQuantumMemory((size_t)(4096),sizeof(unsigned char));
if(decompress_block==NULL)
{
RelinquishMagickMemory(cache_block);
return NULL;
}
mat_file=0;
file = AcquireUniqueFileResource(clone_info->filename);
if (file != -1)
mat_file = fdopen(file,"w");
if(!mat_file)
{
RelinquishMagickMemory(cache_block);
RelinquishMagickMemory(decompress_block);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),"Cannot create file stream for decompressed image");
return NULL;
}
zip_info.zalloc=AcquireZIPMemory;
zip_info.zfree=RelinquishZIPMemory;
zip_info.opaque = (voidpf) NULL;
zip_status = inflateInit(&zip_info);
if (zip_status != Z_OK)
{
RelinquishMagickMemory(cache_block);
RelinquishMagickMemory(decompress_block);
(void) ThrowMagickException(exception,GetMagickModule(),CorruptImageError,
"UnableToUncompressImage","`%s'",clone_info->filename);
(void) fclose(mat_file);
RelinquishUniqueFileResource(clone_info->filename);
return NULL;
}
/* zip_info.next_out = 8*4;*/
zip_info.avail_in = 0;
zip_info.total_out = 0;
while(*Size>0 && !EOFBlob(orig))
{
magick_size = ReadBlob(orig, (*Size < 16384) ? *Size : 16384, (unsigned char *) cache_block);
zip_info.next_in = (Bytef *) cache_block;
zip_info.avail_in = (uInt) magick_size;
while(zip_info.avail_in>0)
{
zip_info.avail_out = 4096;
zip_info.next_out = (Bytef *) decompress_block;
zip_status = inflate(&zip_info,Z_NO_FLUSH);
if ((zip_status != Z_OK) && (zip_status != Z_STREAM_END))
break;
extent=fwrite(decompress_block, 4096-zip_info.avail_out, 1, mat_file);
(void) extent;
TotalSize += 4096-zip_info.avail_out;
if(zip_status == Z_STREAM_END) goto DblBreak;
}
if ((zip_status != Z_OK) && (zip_status != Z_STREAM_END))
break;
*Size -= magick_size;
}
DblBreak:
inflateEnd(&zip_info);
(void)fclose(mat_file);
RelinquishMagickMemory(cache_block);
RelinquishMagickMemory(decompress_block);
*Size = TotalSize;
if((clone_info->file=fopen(clone_info->filename,"rb"))==NULL) goto UnlinkFile;
if( (image2 = AcquireImage(clone_info,exception))==NULL ) goto EraseFile;
status = OpenBlob(clone_info,image2,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
DeleteImageFromList(&image2);
EraseFile:
fclose(clone_info->file);
clone_info->file = NULL;
UnlinkFile:
RelinquishUniqueFileResource(clone_info->filename);
return NULL;
}
return image2;
}
#endif
static Image *ReadMATImageV4(const ImageInfo *image_info,Image *image,
ExceptionInfo *exception)
{
typedef struct {
unsigned char Type[4];
unsigned int nRows;
unsigned int nCols;
unsigned int imagf;
unsigned int nameLen;
} MAT4_HDR;
long
ldblk;
EndianType
endian;
Image
*rotate_image;
MagickBooleanType
status;
MAT4_HDR
HDR;
QuantumInfo
*quantum_info;
QuantumFormatType
format_type;
register ssize_t
i;
ssize_t
count,
y;
unsigned char
*pixels;
unsigned int
depth;
quantum_info=(QuantumInfo *) NULL;
(void) SeekBlob(image,0,SEEK_SET);
while (EOFBlob(image) == MagickFalse)
{
/*
Object parser loop.
*/
ldblk=ReadBlobLSBLong(image);
if ((ldblk > 9999) || (ldblk < 0))
break;
HDR.Type[3]=ldblk % 10; ldblk /= 10; /* T digit */
HDR.Type[2]=ldblk % 10; ldblk /= 10; /* P digit */
HDR.Type[1]=ldblk % 10; ldblk /= 10; /* O digit */
HDR.Type[0]=ldblk; /* M digit */
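/*
 * Worked example of the digit split above (arithmetic only): a stored
 * header word of 1000 decodes to Type = {1,0,0,0}, which passes both
 * sanity checks below, selects the big-endian (MSB) branch via Type[0],
 * and lands in "case 0" of the precision switch (64-bit double).
 */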
if (HDR.Type[3] != 0)
break; /* Data format */
if (HDR.Type[2] != 0)
break; /* Always 0 */
if (HDR.Type[0] == 0)
{
HDR.nRows=ReadBlobLSBLong(image);
HDR.nCols=ReadBlobLSBLong(image);
HDR.imagf=ReadBlobLSBLong(image);
HDR.nameLen=ReadBlobLSBLong(image);
endian=LSBEndian;
}
else
{
HDR.nRows=ReadBlobMSBLong(image);
HDR.nCols=ReadBlobMSBLong(image);
HDR.imagf=ReadBlobMSBLong(image);
HDR.nameLen=ReadBlobMSBLong(image);
endian=MSBEndian;
}
if ((HDR.imagf != 0) && (HDR.imagf != 1))
break;
if (HDR.nameLen > 0xFFFF)
return((Image *) NULL);
for (i=0; i < (ssize_t) HDR.nameLen; i++)
{
int
byte;
/*
Skip matrix name.
*/
byte=ReadBlobByte(image);
if (byte == EOF)
{
ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
image->filename);
break;
}
}
image->columns=(size_t) HDR.nRows;
image->rows=(size_t) HDR.nCols;
SetImageColorspace(image,GRAYColorspace,exception);
if (image_info->ping != MagickFalse)
{
Swap(image->columns,image->rows);
return(image);
}
status=SetImageExtent(image,image->columns,image->rows,exception);
if (status == MagickFalse)
return((Image *) NULL);
quantum_info=AcquireQuantumInfo(image_info,image);
if (quantum_info == (QuantumInfo *) NULL)
return((Image *) NULL);
switch(HDR.Type[1])
{
case 0:
format_type=FloatingPointQuantumFormat;
depth=64;
break;
case 1:
format_type=FloatingPointQuantumFormat;
depth=32;
break;
case 2:
format_type=UnsignedQuantumFormat;
depth=16;
break;
case 3:
format_type=SignedQuantumFormat;
depth=16;
break;
case 4:
format_type=UnsignedQuantumFormat;
depth=8;
break;
default:
format_type=UnsignedQuantumFormat;
depth=8;
break;
}
image->depth=depth;
if (HDR.Type[0] != 0)
SetQuantumEndian(image,quantum_info,MSBEndian);
status=SetQuantumFormat(image,quantum_info,format_type);
status=SetQuantumDepth(image,quantum_info,depth);
status=SetQuantumEndian(image,quantum_info,endian);
SetQuantumScale(quantum_info,1.0);
pixels=(unsigned char *) GetQuantumPixels(quantum_info);
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
count=ReadBlob(image,depth/8*image->columns,(char *) pixels);
if (count == -1)
break;
q=QueueAuthenticPixels(image,0,image->rows-y-1,image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
(void) ImportQuantumPixels(image,(CacheView *) NULL,quantum_info,
GrayQuantum,pixels,exception);
if ((HDR.Type[1] == 2) || (HDR.Type[1] == 3))
FixSignedValues(image,q,(int) image->columns);
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
if (HDR.imagf == 1)
for (y=0; y < (ssize_t) image->rows; y++)
{
/*
Read complex pixels.
*/
count=ReadBlob(image,depth/8*image->columns,(char *) pixels);
if (count == -1)
break;
if (HDR.Type[1] == 0)
InsertComplexDoubleRow(image,(double *) pixels,y,0,0,exception);
else
InsertComplexFloatRow(image,(float *) pixels,y,0,0,exception);
}
if (quantum_info != (QuantumInfo *) NULL)
quantum_info=DestroyQuantumInfo(quantum_info);
rotate_image=RotateImage(image,90.0,exception);
if (rotate_image != (Image *) NULL)
{
image=DestroyImage(image);
image=rotate_image;
}
if (EOFBlob(image) != MagickFalse)
{
ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
image->filename);
break;
}
/*
Proceed to next image.
*/
if (image_info->number_scenes != 0)
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
break;
/*
Allocate next image structure.
*/
AcquireNextImage(image_info,image,exception);
if (GetNextImageInList(image) == (Image *) NULL)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
image=SyncNextImageInList(image);
status=SetImageProgress(image,LoadImagesTag,TellBlob(image),
GetBlobSize(image));
if (status == MagickFalse)
break;
}
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d M A T L A B i m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadMATImage() reads a MATLAB (MAT) image file and returns it. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the ReadMATImage method is:
%
% Image *ReadMATImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: Method ReadMATImage returns a pointer to the image after
% reading. A null image is returned if there is a memory shortage or if
% the image cannot be read.
%
% o image_info: Specifies a pointer to a ImageInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Image *ReadMATImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
Image *image, *image2=NULL,
*rotated_image;
register Quantum *q;
unsigned int status;
MATHeader MATLAB_HDR;
size_t size;
size_t CellType;
QuantumInfo *quantum_info;
ImageInfo *clone_info;
int i;
ssize_t ldblk;
unsigned char *BImgBuff = NULL;
double MinVal, MaxVal;
unsigned z, z2;
unsigned Frames;
int logging;
int sample_size;
MagickOffsetType filepos=0x80;
BlobInfo *blob;
size_t one;
unsigned int (*ReadBlobXXXLong)(Image *image);
unsigned short (*ReadBlobXXXShort)(Image *image);
void (*ReadBlobDoublesXXX)(Image * image, size_t len, double *data);
void (*ReadBlobFloatsXXX)(Image * image, size_t len, float *data);
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
logging = LogMagickEvent(CoderEvent,GetMagickModule(),"enter");
/*
Open image file.
*/
image = AcquireImage(image_info,exception);
status = OpenBlob(image_info, image, ReadBinaryBlobMode, exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Read MATLAB image.
*/
quantum_info=(QuantumInfo *) NULL;
clone_info=(ImageInfo *) NULL;
if (ReadBlob(image,124,(unsigned char *) &MATLAB_HDR.identific) != 124)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if (strncmp(MATLAB_HDR.identific,"MATLAB",6) != 0)
{
image2=ReadMATImageV4(image_info,image,exception);
if (image2 == NULL)
goto MATLAB_KO;
image=image2;
goto END_OF_READING;
}
MATLAB_HDR.Version = ReadBlobLSBShort(image);
if(ReadBlob(image,2,(unsigned char *) &MATLAB_HDR.EndianIndicator) != 2)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if (logging)
(void) LogMagickEvent(CoderEvent,GetMagickModule()," Endian %c%c",
MATLAB_HDR.EndianIndicator[0],MATLAB_HDR.EndianIndicator[1]);
if (!strncmp(MATLAB_HDR.EndianIndicator, "IM", 2))
{
ReadBlobXXXLong = ReadBlobLSBLong;
ReadBlobXXXShort = ReadBlobLSBShort;
ReadBlobDoublesXXX = ReadBlobDoublesLSB;
ReadBlobFloatsXXX = ReadBlobFloatsLSB;
image->endian = LSBEndian;
}
else if (!strncmp(MATLAB_HDR.EndianIndicator, "MI", 2))
{
ReadBlobXXXLong = ReadBlobMSBLong;
ReadBlobXXXShort = ReadBlobMSBShort;
ReadBlobDoublesXXX = ReadBlobDoublesMSB;
ReadBlobFloatsXXX = ReadBlobFloatsMSB;
image->endian = MSBEndian;
}
else
goto MATLAB_KO; /* unsupported endian */
if (strncmp(MATLAB_HDR.identific, "MATLAB", 6))
{
MATLAB_KO:
if ((image != image2) && (image2 != (Image *) NULL))
image2=DestroyImage(image2);
if (clone_info != (ImageInfo *) NULL)
clone_info=DestroyImageInfo(clone_info);
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
}
filepos = TellBlob(image);
while(!EOFBlob(image)) /* object parser loop */
{
Frames = 1;
(void) SeekBlob(image,filepos,SEEK_SET);
/* printf("pos=%X\n",TellBlob(image)); */
MATLAB_HDR.DataType = ReadBlobXXXLong(image);
if(EOFBlob(image)) break;
MATLAB_HDR.ObjectSize = ReadBlobXXXLong(image);
if(EOFBlob(image)) break;
if((MagickSizeType) (MATLAB_HDR.ObjectSize+filepos) > GetBlobSize(image))
goto MATLAB_KO;
filepos += MATLAB_HDR.ObjectSize + 4 + 4;
clone_info=CloneImageInfo(image_info);
image2 = image;
#if defined(MAGICKCORE_ZLIB_DELEGATE)
if(MATLAB_HDR.DataType == miCOMPRESSED)
{
image2 = decompress_block(image,&MATLAB_HDR.ObjectSize,clone_info,exception);
if(image2==NULL) continue;
MATLAB_HDR.DataType = ReadBlobXXXLong(image2); /* replace compressed object type. */
}
#endif
if (MATLAB_HDR.DataType!=miMATRIX)
{
clone_info=DestroyImageInfo(clone_info);
continue; /* skip other objects. */
}
MATLAB_HDR.unknown1 = ReadBlobXXXLong(image2);
MATLAB_HDR.unknown2 = ReadBlobXXXLong(image2);
MATLAB_HDR.unknown5 = ReadBlobXXXLong(image2);
MATLAB_HDR.StructureClass = MATLAB_HDR.unknown5 & 0xFF;
MATLAB_HDR.StructureFlag = (MATLAB_HDR.unknown5>>8) & 0xFF;
MATLAB_HDR.unknown3 = ReadBlobXXXLong(image2);
if(image!=image2)
MATLAB_HDR.unknown4 = ReadBlobXXXLong(image2); /* ??? don't understand why ?? */
MATLAB_HDR.unknown4 = ReadBlobXXXLong(image2);
MATLAB_HDR.DimFlag = ReadBlobXXXLong(image2);
MATLAB_HDR.SizeX = ReadBlobXXXLong(image2);
MATLAB_HDR.SizeY = ReadBlobXXXLong(image2);
switch(MATLAB_HDR.DimFlag)
{
case 8: z2=z=1; break; /* 2D matrix*/
case 12: z2=z = ReadBlobXXXLong(image2); /* 3D matrix RGB*/
(void) ReadBlobXXXLong(image2);
if(z!=3) ThrowReaderException(CoderError, "MultidimensionalMatricesAreNotSupported");
break;
case 16: z2=z = ReadBlobXXXLong(image2); /* 4D matrix animation */
if(z!=3 && z!=1)
ThrowReaderException(CoderError, "MultidimensionalMatricesAreNotSupported");
Frames = ReadBlobXXXLong(image2);
if (Frames == 0)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
break;
default:
if (clone_info != (ImageInfo *) NULL)
clone_info=DestroyImageInfo(clone_info);
if ((image != image2) && (image2 != (Image *) NULL))
image2=DestroyImage(image2);
ThrowReaderException(CoderError, "MultidimensionalMatricesAreNotSupported");
}
MATLAB_HDR.Flag1 = ReadBlobXXXShort(image2);
MATLAB_HDR.NameFlag = ReadBlobXXXShort(image2);
if (logging) (void)LogMagickEvent(CoderEvent,GetMagickModule(),
"MATLAB_HDR.StructureClass %d",MATLAB_HDR.StructureClass);
if (MATLAB_HDR.StructureClass != mxCHAR_CLASS &&
MATLAB_HDR.StructureClass != mxSINGLE_CLASS && /* float + complex float */
MATLAB_HDR.StructureClass != mxDOUBLE_CLASS && /* double + complex double */
MATLAB_HDR.StructureClass != mxINT8_CLASS &&
MATLAB_HDR.StructureClass != mxUINT8_CLASS && /* uint8 + uint8 3D */
MATLAB_HDR.StructureClass != mxINT16_CLASS &&
MATLAB_HDR.StructureClass != mxUINT16_CLASS && /* uint16 + uint16 3D */
MATLAB_HDR.StructureClass != mxINT32_CLASS &&
MATLAB_HDR.StructureClass != mxUINT32_CLASS && /* uint32 + uint32 3D */
MATLAB_HDR.StructureClass != mxINT64_CLASS &&
MATLAB_HDR.StructureClass != mxUINT64_CLASS) /* uint64 + uint64 3D */
ThrowReaderException(CoderError,"UnsupportedCellTypeInTheMatrix");
switch (MATLAB_HDR.NameFlag)
{
case 0:
size = ReadBlobXXXLong(image2); /* Object name string size */
size = 4 * (ssize_t) ((size + 3 + 1) / 4);
(void) SeekBlob(image2, size, SEEK_CUR);
break;
case 1:
case 2:
case 3:
case 4:
(void) ReadBlob(image2, 4, (unsigned char *) &size); /* Object name string */
break;
default:
goto MATLAB_KO;
}
CellType = ReadBlobXXXLong(image2); /* Additional object type */
if (logging)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"MATLAB_HDR.CellType: %.20g",(double) CellType);
(void) ReadBlob(image2, 4, (unsigned char *) &size); /* data size */
NEXT_FRAME:
switch (CellType)
{
case miINT8:
case miUINT8:
sample_size = 8;
if(MATLAB_HDR.StructureFlag & FLAG_LOGICAL)
image->depth = 1;
else
image->depth = 8; /* Byte type cell */
ldblk = (ssize_t) MATLAB_HDR.SizeX;
break;
case miINT16:
case miUINT16:
sample_size = 16;
image->depth = 16; /* Word type cell */
ldblk = (ssize_t) (2 * MATLAB_HDR.SizeX);
break;
case miINT32:
case miUINT32:
sample_size = 32;
image->depth = 32; /* Dword type cell */
ldblk = (ssize_t) (4 * MATLAB_HDR.SizeX);
break;
case miINT64:
case miUINT64:
sample_size = 64;
image->depth = 64; /* Qword type cell */
ldblk = (ssize_t) (8 * MATLAB_HDR.SizeX);
break;
case miSINGLE:
sample_size = 32;
image->depth = 32; /* double type cell */
(void) SetImageOption(clone_info,"quantum:format","floating-point");
if (MATLAB_HDR.StructureFlag & FLAG_COMPLEX)
{ /* complex float type cell */
}
ldblk = (ssize_t) (4 * MATLAB_HDR.SizeX);
break;
case miDOUBLE:
sample_size = 64;
image->depth = 64; /* double type cell */
(void) SetImageOption(clone_info,"quantum:format","floating-point");
DisableMSCWarning(4127)
if (sizeof(double) != 8)
RestoreMSCWarning
ThrowReaderException(CoderError, "IncompatibleSizeOfDouble");
if (MATLAB_HDR.StructureFlag & FLAG_COMPLEX)
{ /* complex double type cell */
}
ldblk = (ssize_t) (8 * MATLAB_HDR.SizeX);
break;
default:
if ((image != image2) && (image2 != (Image *) NULL))
image2=DestroyImage(image2);
if (clone_info)
clone_info=DestroyImageInfo(clone_info);
ThrowReaderException(CoderError, "UnsupportedCellTypeInTheMatrix");
}
(void) sample_size;
image->columns = MATLAB_HDR.SizeX;
image->rows = MATLAB_HDR.SizeY;
one=1;
image->colors = one << image->depth;
if (image->columns == 0 || image->rows == 0)
goto MATLAB_KO;
if((unsigned long)ldblk*MATLAB_HDR.SizeY > MATLAB_HDR.ObjectSize)
goto MATLAB_KO;
/* Image is gray when no complex flag is set and 2D Matrix */
if ((MATLAB_HDR.DimFlag == 8) &&
((MATLAB_HDR.StructureFlag & FLAG_COMPLEX) == 0))
{
image->type=GrayscaleType;
SetImageColorspace(image,GRAYColorspace,exception);
}
/*
If ping is true, then only set image size and colors without
reading any image data.
*/
if (image_info->ping)
{
size_t temp = image->columns;
image->columns = image->rows;
image->rows = temp;
goto done_reading; /* FIXME: the ping path skips extent validation and the raster load below */
}
status=SetImageExtent(image,image->columns,image->rows,exception);
if (status == MagickFalse)
{
if ((image != image2) && (image2 != (Image *) NULL))
image2=DestroyImage(image2);
return(DestroyImageList(image));
}
quantum_info=AcquireQuantumInfo(clone_info,image);
if (quantum_info == (QuantumInfo *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
/* ----- Load raster data ----- */
BImgBuff = (unsigned char *) AcquireQuantumMemory((size_t) (ldblk),sizeof(double)); /* Ldblk was set in the check phase */
if (BImgBuff == NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
(void) ResetMagickMemory(BImgBuff,0,ldblk*sizeof(double));
MinVal = 0;
MaxVal = 0;
if (CellType==miDOUBLE || CellType==miSINGLE) /* Find Min and Max Values for floats */
{
CalcMinMax(image2, image_info->endian, MATLAB_HDR.SizeX, MATLAB_HDR.SizeY, CellType, ldblk, BImgBuff, &quantum_info->minimum, &quantum_info->maximum);
}
/* Main loop for reading all scanlines */
if(z==1) z=0; /* read grey scanlines */
/* else read color scanlines */
do
{
for (i = 0; i < (ssize_t) MATLAB_HDR.SizeY; i++)
{
q=GetAuthenticPixels(image,0,MATLAB_HDR.SizeY-i-1,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
if (logging) (void)LogMagickEvent(CoderEvent,GetMagickModule(),
" MAT set image pixels returns unexpected NULL on a row %u.", (unsigned)(MATLAB_HDR.SizeY-i-1));
goto done_reading; /* Skip image rotation, when cannot set image pixels */
}
if(ReadBlob(image2,ldblk,(unsigned char *)BImgBuff) != (ssize_t) ldblk)
{
if (logging) (void)LogMagickEvent(CoderEvent,GetMagickModule(),
" MAT cannot read scanrow %u from a file.", (unsigned)(MATLAB_HDR.SizeY-i-1));
goto ExitLoop;
}
if((CellType==miINT8 || CellType==miUINT8) && (MATLAB_HDR.StructureFlag & FLAG_LOGICAL))
{
FixLogical((unsigned char *)BImgBuff,ldblk);
if(ImportQuantumPixels(image,(CacheView *) NULL,quantum_info,z2qtype[z],BImgBuff,exception) <= 0)
{
ImportQuantumPixelsFailed:
if (logging) (void)LogMagickEvent(CoderEvent,GetMagickModule(),
" MAT failed to ImportQuantumPixels for a row %u", (unsigned)(MATLAB_HDR.SizeY-i-1));
break;
}
}
else
{
if(ImportQuantumPixels(image,(CacheView *) NULL,quantum_info,z2qtype[z],BImgBuff,exception) <= 0)
goto ImportQuantumPixelsFailed;
if (z<=1 && /* fix only during a last pass z==0 || z==1 */
(CellType==miINT8 || CellType==miINT16 || CellType==miINT32 || CellType==miINT64))
FixSignedValues(image,q,MATLAB_HDR.SizeX);
}
if (!SyncAuthenticPixels(image,exception))
{
if (logging) (void)LogMagickEvent(CoderEvent,GetMagickModule(),
" MAT failed to sync image pixels for a row %u", (unsigned)(MATLAB_HDR.SizeY-i-1));
goto ExitLoop;
}
}
} while(z-- >= 2);
ExitLoop:
/* Read complex part of numbers here */
if (MATLAB_HDR.StructureFlag & FLAG_COMPLEX)
{ /* Find Min and Max Values for complex parts of floats */
CellType = ReadBlobXXXLong(image2); /* Additional object type */
i = ReadBlobXXXLong(image2); /* size of a complex part - toss away*/
if (CellType==miDOUBLE || CellType==miSINGLE)
{
CalcMinMax(image2, image_info->endian, MATLAB_HDR.SizeX, MATLAB_HDR.SizeY, CellType, ldblk, BImgBuff, &MinVal, &MaxVal);
}
if (CellType==miDOUBLE)
for (i = 0; i < (ssize_t) MATLAB_HDR.SizeY; i++)
{
ReadBlobDoublesXXX(image2, ldblk, (double *)BImgBuff);
InsertComplexDoubleRow(image, (double *)BImgBuff, i, MinVal, MaxVal,
exception);
}
if (CellType==miSINGLE)
for (i = 0; i < (ssize_t) MATLAB_HDR.SizeY; i++)
{
ReadBlobFloatsXXX(image2, ldblk, (float *)BImgBuff);
InsertComplexFloatRow(image,(float *)BImgBuff,i,MinVal,MaxVal,
exception);
}
}
/* Image is gray when no complex flag is set and 2D Matrix AGAIN!!! */
if ((MATLAB_HDR.DimFlag == 8) &&
((MATLAB_HDR.StructureFlag & FLAG_COMPLEX) == 0))
image->type=GrayscaleType;
if (image->depth == 1)
image->type=BilevelType;
if(image2==image)
image2 = NULL; /* Remove shadow copy to an image before rotation. */
/* Rotate image. */
rotated_image = RotateImage(image, 90.0, exception);
if (rotated_image != (Image *) NULL)
{
/* Remove page offsets added by RotateImage */
rotated_image->page.x=0;
rotated_image->page.y=0;
blob = rotated_image->blob;
rotated_image->blob = image->blob;
rotated_image->colors = image->colors;
image->blob = blob;
AppendImageToList(&image,rotated_image);
DeleteImageFromList(&image);
}
done_reading:
if(image2!=NULL)
if(image2!=image)
{
DeleteImageFromList(&image2);
if(clone_info)
{
if(clone_info->file)
{
fclose(clone_info->file);
clone_info->file = NULL;
(void) remove_utf8(clone_info->filename);
}
}
}
/* Allocate next image structure. */
AcquireNextImage(image_info,image,exception);
if (image->next == (Image *) NULL) break;
image=SyncNextImageInList(image);
image->columns=image->rows=0;
image->colors=0;
/* row scan buffer is no longer needed */
RelinquishMagickMemory(BImgBuff);
BImgBuff = NULL;
if(--Frames>0)
{
z = z2;
if(image2==NULL) image2 = image;
goto NEXT_FRAME;
}
if ((image2!=NULL) && (image2!=image)) /* Does shadow temporary decompressed image exist? */
{
/* CloseBlob(image2); */
DeleteImageFromList(&image2);
if(clone_info)
{
if(clone_info->file)
{
fclose(clone_info->file);
clone_info->file = NULL;
(void) remove_utf8(clone_info->filename);
}
}
}
if (quantum_info != (QuantumInfo *) NULL)
quantum_info=DestroyQuantumInfo(quantum_info);
if (clone_info)
clone_info=DestroyImageInfo(clone_info);
}
RelinquishMagickMemory(BImgBuff);
if (quantum_info != (QuantumInfo *) NULL)
quantum_info=DestroyQuantumInfo(quantum_info);
END_OF_READING:
if (clone_info)
clone_info=DestroyImageInfo(clone_info);
CloseBlob(image);
{
Image *p;
ssize_t scene=0;
/*
Rewind list, removing any empty images while rewinding.
*/
p=image;
image=NULL;
while (p != (Image *) NULL)
{
Image *tmp=p;
if ((p->rows == 0) || (p->columns == 0)) {
p=p->previous;
DeleteImageFromList(&tmp);
} else {
image=p;
p=p->previous;
}
}
/*
Fix scene numbers
*/
for (p=image; p != (Image *) NULL; p=p->next)
p->scene=scene++;
}
if(clone_info != NULL) /* cleanup garbage file from compression */
{
if(clone_info->file)
{
fclose(clone_info->file);
clone_info->file = NULL;
(void) remove_utf8(clone_info->filename);
}
DestroyImageInfo(clone_info);
clone_info = NULL;
}
if (logging) (void)LogMagickEvent(CoderEvent,GetMagickModule(),"return");
if (image==NULL)
ThrowReaderException(CorruptImageError,"ImproperImageHeader")
else
if ((image != image2) && (image2 != (Image *) NULL))
image2=DestroyImage(image2);
return (image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e g i s t e r M A T I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Method RegisterMATImage adds attributes for the MAT image format to
% the list of supported formats. The attributes include the image format
% tag, a method to read and/or write the format, whether the format
% supports the saving of more than one frame to the same file or blob,
% whether the format supports native in-memory I/O, and a brief
% description of the format.
%
% The format of the RegisterMATImage method is:
%
% size_t RegisterMATImage(void)
%
*/
ModuleExport size_t RegisterMATImage(void)
{
MagickInfo
*entry;
entry=AcquireMagickInfo("MAT","MAT","MATLAB level 5 image format");
entry->decoder=(DecodeImageHandler *) ReadMATImage;
entry->encoder=(EncodeImageHandler *) WriteMATImage;
entry->flags^=CoderBlobSupportFlag;
entry->flags|=CoderDecoderSeekableStreamFlag;
(void) RegisterMagickInfo(entry);
return(MagickImageCoderSignature);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n r e g i s t e r M A T I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Method UnregisterMATImage removes format registrations made by the
% MAT module from the list of supported formats.
%
% The format of the UnregisterMATImage method is:
%
% UnregisterMATImage(void)
%
*/
ModuleExport void UnregisterMATImage(void)
{
(void) UnregisterMagickInfo("MAT");
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e M A T L A B I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Function WriteMATImage writes a MATLAB matrix to a file.
%
% The format of the WriteMATImage method is:
%
% MagickBooleanType WriteMATImage(const ImageInfo *image_info,
% Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image_info: Specifies a pointer to a ImageInfo structure.
%
% o image: A pointer to an Image structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType WriteMATImage(const ImageInfo *image_info,Image *image,
ExceptionInfo *exception)
{
char
MATLAB_HDR[0x80];
MagickBooleanType
status;
MagickOffsetType
scene;
struct tm
local_time;
time_t
current_time;
/*
Open output image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),"enter MAT");
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
if (status == MagickFalse)
return(MagickFalse);
image->depth=8;
current_time=time((time_t *) NULL);
#if defined(MAGICKCORE_HAVE_LOCALTIME_R)
(void) localtime_r(&current_time,&local_time);
#else
(void) memcpy(&local_time,localtime(&current_time),sizeof(local_time));
#endif
(void) memset(MATLAB_HDR,' ',MagickMin(sizeof(MATLAB_HDR),124));
FormatLocaleString(MATLAB_HDR,sizeof(MATLAB_HDR),
"MATLAB 5.0 MAT-file, Platform: %s, Created on: %s %s %2d %2d:%2d:%2d %d",
OsDesc,DayOfWTab[local_time.tm_wday],MonthsTab[local_time.tm_mon],
local_time.tm_mday,local_time.tm_hour,local_time.tm_min,
local_time.tm_sec,local_time.tm_year+1900);
MATLAB_HDR[0x7C]=0;
MATLAB_HDR[0x7D]=1;
MATLAB_HDR[0x7E]='I';
MATLAB_HDR[0x7F]='M';
(void) WriteBlob(image,sizeof(MATLAB_HDR),(unsigned char *) MATLAB_HDR);
scene=0;
do
{
char
padding;
MagickBooleanType
is_gray;
QuantumInfo
*quantum_info;
size_t
data_size;
unsigned char
*pixels;
unsigned int
z;
(void) TransformImageColorspace(image,sRGBColorspace,exception);
is_gray=SetImageGray(image,exception);
z=(is_gray != MagickFalse) ? 0 : 3;
/*
Store MAT header.
*/
data_size = image->rows * image->columns;
if (is_gray == MagickFalse)
data_size*=3;
padding=((unsigned char)(data_size-1) & 0x7) ^ 0x7;
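/*
 * Worked example of the padding formula (illustrative): data_size = 5
 * gives ((5-1) & 0x7) ^ 0x7 = 4 ^ 7 = 3, so five payload bytes plus three
 * pad bytes reach the next 8-byte boundary, while an exact multiple of
 * eight (e.g. data_size = 16) yields padding = 0.
 */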
(void) WriteBlobLSBLong(image,miMATRIX);
(void) WriteBlobLSBLong(image,(unsigned int) data_size+padding+
((is_gray != MagickFalse) ? 48 : 56));
(void) WriteBlobLSBLong(image,0x6); /* 0x88 */
(void) WriteBlobLSBLong(image,0x8); /* 0x8C */
(void) WriteBlobLSBLong(image,0x6); /* 0x90 */
(void) WriteBlobLSBLong(image,0);
(void) WriteBlobLSBLong(image,0x5); /* 0x98 */
(void) WriteBlobLSBLong(image,(is_gray != MagickFalse) ? 0x8 : 0xC); /* 0x9C - DimFlag */
(void) WriteBlobLSBLong(image,(unsigned int) image->rows); /* x: 0xA0 */
(void) WriteBlobLSBLong(image,(unsigned int) image->columns); /* y: 0xA4 */
if (is_gray == MagickFalse)
{
(void) WriteBlobLSBLong(image,3); /* z: 0xA8 */
(void) WriteBlobLSBLong(image,0);
}
(void) WriteBlobLSBShort(image,1); /* 0xB0 */
(void) WriteBlobLSBShort(image,1); /* 0xB2 */
(void) WriteBlobLSBLong(image,'M'); /* 0xB4 */
(void) WriteBlobLSBLong(image,0x2); /* 0xB8 */
(void) WriteBlobLSBLong(image,(unsigned int) data_size); /* 0xBC */
/*
Store image data.
*/
quantum_info=AcquireQuantumInfo(image_info,image);
if (quantum_info == (QuantumInfo *) NULL)
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
pixels=(unsigned char *) GetQuantumPixels(quantum_info);
do
{
const Quantum
*p;
ssize_t
y;
for (y=0; y < (ssize_t)image->columns; y++)
{
p=GetVirtualPixels(image,y,0,1,image->rows,exception);
if (p == (const Quantum *) NULL)
break;
(void) ExportQuantumPixels(image,(CacheView *) NULL,quantum_info,
z2qtype[z],pixels,exception);
(void) WriteBlob(image,image->rows,pixels);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
} while (z-- >= 2);
while (padding-- > 0)
(void) WriteBlobByte(image,0);
quantum_info=DestroyQuantumInfo(quantum_info);
if (GetNextImageInList(image) == (Image *) NULL)
break;
image=SyncNextImageInList(image);
status=SetImageProgress(image,SaveImagesTag,scene++,
GetImageListLength(image));
if (status == MagickFalse)
break;
} while (image_info->adjoin != MagickFalse);
(void) CloseBlob(image);
return(status);
}
| ./CrossVul/dataset_final_sorted/CWE-416/c/bad_2638_0 |
crossvul-cpp_data_good_3018_0 | /*
* Copyright (c) 2014-2015 Hisilicon Limited.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/clk.h>
#include <linux/cpumask.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/module.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include "hnae.h"
#include "hns_enet.h"
#include "hns_dsaf_mac.h"
#define NIC_MAX_Q_PER_VF 16
#define HNS_NIC_TX_TIMEOUT (5 * HZ)
#define SERVICE_TIMER_HZ (1 * HZ)
#define NIC_TX_CLEAN_MAX_NUM 256
#define NIC_RX_CLEAN_MAX_NUM 64
#define RCB_IRQ_NOT_INITED 0
#define RCB_IRQ_INITED 1
#define HNS_BUFFER_SIZE_2048 2048
#define BD_MAX_SEND_SIZE 8191
#define SKB_TMP_LEN(SKB) \
(((SKB)->transport_header - (SKB)->mac_header) + tcp_hdrlen(SKB))
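/*
 * SKB_TMP_LEN() measures the L2+L3+L4 header span: the offset from the MAC
 * header to the transport header plus the TCP header length. For a plain
 * untagged IPv4/TCP frame with no options that is 14 + 20 + 20 = 54 bytes,
 * so the TSO paylen computed in fill_v2_desc() (skb->len - SKB_TMP_LEN(skb))
 * is exactly the payload the hardware will segment.
 */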
static void fill_v2_desc(struct hnae_ring *ring, void *priv,
int size, dma_addr_t dma, int frag_end,
int buf_num, enum hns_desc_type type, int mtu)
{
struct hnae_desc *desc = &ring->desc[ring->next_to_use];
struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
struct iphdr *iphdr;
struct ipv6hdr *ipv6hdr;
struct sk_buff *skb;
__be16 protocol;
u8 bn_pid = 0;
u8 rrcfv = 0;
u8 ip_offset = 0;
u8 tvsvsn = 0;
u16 mss = 0;
u8 l4_len = 0;
u16 paylen = 0;
desc_cb->priv = priv;
desc_cb->length = size;
desc_cb->dma = dma;
desc_cb->type = type;
desc->addr = cpu_to_le64(dma);
desc->tx.send_size = cpu_to_le16((u16)size);
/* config bd buffer end */
hnae_set_bit(rrcfv, HNSV2_TXD_VLD_B, 1);
hnae_set_field(bn_pid, HNSV2_TXD_BUFNUM_M, 0, buf_num - 1);
/* fill port_id in the tx bd for sending management pkts */
hnae_set_field(bn_pid, HNSV2_TXD_PORTID_M,
HNSV2_TXD_PORTID_S, ring->q->handle->dport_id);
if (type == DESC_TYPE_SKB) {
skb = (struct sk_buff *)priv;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb_reset_mac_len(skb);
protocol = skb->protocol;
ip_offset = ETH_HLEN;
if (protocol == htons(ETH_P_8021Q)) {
ip_offset += VLAN_HLEN;
protocol = vlan_get_protocol(skb);
skb->protocol = protocol;
}
if (skb->protocol == htons(ETH_P_IP)) {
iphdr = ip_hdr(skb);
hnae_set_bit(rrcfv, HNSV2_TXD_L3CS_B, 1);
hnae_set_bit(rrcfv, HNSV2_TXD_L4CS_B, 1);
/* check for tcp/udp header */
if (iphdr->protocol == IPPROTO_TCP &&
skb_is_gso(skb)) {
hnae_set_bit(tvsvsn,
HNSV2_TXD_TSE_B, 1);
l4_len = tcp_hdrlen(skb);
mss = skb_shinfo(skb)->gso_size;
paylen = skb->len - SKB_TMP_LEN(skb);
}
} else if (skb->protocol == htons(ETH_P_IPV6)) {
hnae_set_bit(tvsvsn, HNSV2_TXD_IPV6_B, 1);
ipv6hdr = ipv6_hdr(skb);
hnae_set_bit(rrcfv, HNSV2_TXD_L4CS_B, 1);
/* check for tcp/udp header */
if (ipv6hdr->nexthdr == IPPROTO_TCP &&
skb_is_gso(skb) && skb_is_gso_v6(skb)) {
hnae_set_bit(tvsvsn,
HNSV2_TXD_TSE_B, 1);
l4_len = tcp_hdrlen(skb);
mss = skb_shinfo(skb)->gso_size;
paylen = skb->len - SKB_TMP_LEN(skb);
}
}
desc->tx.ip_offset = ip_offset;
desc->tx.tse_vlan_snap_v6_sctp_nth = tvsvsn;
desc->tx.mss = cpu_to_le16(mss);
desc->tx.l4_len = l4_len;
desc->tx.paylen = cpu_to_le16(paylen);
}
}
hnae_set_bit(rrcfv, HNSV2_TXD_FE_B, frag_end);
desc->tx.bn_pid = bn_pid;
desc->tx.ra_ri_cs_fe_vld = rrcfv;
ring_ptr_move_fw(ring, next_to_use);
}
static const struct acpi_device_id hns_enet_acpi_match[] = {
{ "HISI00C1", 0 },
{ "HISI00C2", 0 },
{ },
};
MODULE_DEVICE_TABLE(acpi, hns_enet_acpi_match);
static void fill_desc(struct hnae_ring *ring, void *priv,
int size, dma_addr_t dma, int frag_end,
int buf_num, enum hns_desc_type type, int mtu)
{
struct hnae_desc *desc = &ring->desc[ring->next_to_use];
struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
struct sk_buff *skb;
__be16 protocol;
u32 ip_offset;
u32 asid_bufnum_pid = 0;
u32 flag_ipoffset = 0;
desc_cb->priv = priv;
desc_cb->length = size;
desc_cb->dma = dma;
desc_cb->type = type;
desc->addr = cpu_to_le64(dma);
desc->tx.send_size = cpu_to_le16((u16)size);
/*config bd buffer end */
flag_ipoffset |= 1 << HNS_TXD_VLD_B;
asid_bufnum_pid |= buf_num << HNS_TXD_BUFNUM_S;
if (type == DESC_TYPE_SKB) {
skb = (struct sk_buff *)priv;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
protocol = skb->protocol;
ip_offset = ETH_HLEN;
/*if it is a SW VLAN check the next protocol*/
if (protocol == htons(ETH_P_8021Q)) {
ip_offset += VLAN_HLEN;
protocol = vlan_get_protocol(skb);
skb->protocol = protocol;
}
if (skb->protocol == htons(ETH_P_IP)) {
flag_ipoffset |= 1 << HNS_TXD_L3CS_B;
/* check for tcp/udp header */
flag_ipoffset |= 1 << HNS_TXD_L4CS_B;
} else if (skb->protocol == htons(ETH_P_IPV6)) {
/* ipv6 has not l3 cs, check for L4 header */
flag_ipoffset |= 1 << HNS_TXD_L4CS_B;
}
flag_ipoffset |= ip_offset << HNS_TXD_IPOFFSET_S;
}
}
flag_ipoffset |= frag_end << HNS_TXD_FE_B;
desc->tx.asid_bufnum_pid = cpu_to_le16(asid_bufnum_pid);
desc->tx.flag_ipoffset = cpu_to_le32(flag_ipoffset);
ring_ptr_move_fw(ring, next_to_use);
}
static void unfill_desc(struct hnae_ring *ring)
{
ring_ptr_move_bw(ring, next_to_use);
}
static int hns_nic_maybe_stop_tx(
struct sk_buff **out_skb, int *bnum, struct hnae_ring *ring)
{
struct sk_buff *skb = *out_skb;
struct sk_buff *new_skb = NULL;
int buf_num;
/* no. of segments (plus a header) */
buf_num = skb_shinfo(skb)->nr_frags + 1;
if (unlikely(buf_num > ring->max_desc_num_per_pkt)) {
if (ring_space(ring) < 1)
return -EBUSY;
new_skb = skb_copy(skb, GFP_ATOMIC);
if (!new_skb)
return -ENOMEM;
dev_kfree_skb_any(skb);
*out_skb = new_skb;
buf_num = 1;
} else if (buf_num > ring_space(ring)) {
return -EBUSY;
}
*bnum = buf_num;
return 0;
}
static int hns_nic_maybe_stop_tso(
struct sk_buff **out_skb, int *bnum, struct hnae_ring *ring)
{
int i;
int size;
int buf_num;
int frag_num;
struct sk_buff *skb = *out_skb;
struct sk_buff *new_skb = NULL;
struct skb_frag_struct *frag;
size = skb_headlen(skb);
buf_num = (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
frag_num = skb_shinfo(skb)->nr_frags;
for (i = 0; i < frag_num; i++) {
frag = &skb_shinfo(skb)->frags[i];
size = skb_frag_size(frag);
buf_num += (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
}
if (unlikely(buf_num > ring->max_desc_num_per_pkt)) {
buf_num = (skb->len + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
if (ring_space(ring) < buf_num)
return -EBUSY;
/* manually split the send packet */
new_skb = skb_copy(skb, GFP_ATOMIC);
if (!new_skb)
return -ENOMEM;
dev_kfree_skb_any(skb);
*out_skb = new_skb;
} else if (ring_space(ring) < buf_num) {
return -EBUSY;
}
*bnum = buf_num;
return 0;
}
static void fill_tso_desc(struct hnae_ring *ring, void *priv,
int size, dma_addr_t dma, int frag_end,
int buf_num, enum hns_desc_type type, int mtu)
{
int frag_buf_num;
int sizeoflast;
int k;
frag_buf_num = (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
sizeoflast = size % BD_MAX_SEND_SIZE;
sizeoflast = sizeoflast ? sizeoflast : BD_MAX_SEND_SIZE;
/* when the frag size is bigger than hardware, split this frag */
for (k = 0; k < frag_buf_num; k++)
fill_v2_desc(ring, priv,
(k == frag_buf_num - 1) ?
sizeoflast : BD_MAX_SEND_SIZE,
dma + BD_MAX_SEND_SIZE * k,
frag_end && (k == frag_buf_num - 1) ? 1 : 0,
buf_num,
(type == DESC_TYPE_SKB && !k) ?
DESC_TYPE_SKB : DESC_TYPE_PAGE,
mtu);
}
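/*
 * Worked example of the split above (illustrative): a 20000-byte fragment
 * with BD_MAX_SEND_SIZE = 8191 gives frag_buf_num = 3 and
 * sizeoflast = 20000 % 8191 = 3618, so three descriptors of 8191, 8191 and
 * 3618 bytes are filled, with frag_end set only on the last one and only
 * the first carrying DESC_TYPE_SKB.
 */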
netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
struct sk_buff *skb,
struct hns_nic_ring_data *ring_data)
{
struct hns_nic_priv *priv = netdev_priv(ndev);
struct hnae_ring *ring = ring_data->ring;
struct device *dev = ring_to_dev(ring);
struct netdev_queue *dev_queue;
struct skb_frag_struct *frag;
int buf_num;
int seg_num;
dma_addr_t dma;
int size, next_to_use;
int i;
switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) {
case -EBUSY:
ring->stats.tx_busy++;
goto out_net_tx_busy;
case -ENOMEM:
ring->stats.sw_err_cnt++;
netdev_err(ndev, "no memory to xmit!\n");
goto out_err_tx_ok;
default:
break;
}
/* no. of segments (plus a header) */
seg_num = skb_shinfo(skb)->nr_frags + 1;
next_to_use = ring->next_to_use;
/* fill the first part */
size = skb_headlen(skb);
dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
if (dma_mapping_error(dev, dma)) {
netdev_err(ndev, "TX head DMA map failed\n");
ring->stats.sw_err_cnt++;
goto out_err_tx_ok;
}
priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0,
buf_num, DESC_TYPE_SKB, ndev->mtu);
/* fill the fragments */
for (i = 1; i < seg_num; i++) {
frag = &skb_shinfo(skb)->frags[i - 1];
size = skb_frag_size(frag);
dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
if (dma_mapping_error(dev, dma)) {
netdev_err(ndev, "TX frag(%d) DMA map failed\n", i);
ring->stats.sw_err_cnt++;
goto out_map_frag_fail;
}
priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma,
seg_num - 1 == i ? 1 : 0, buf_num,
DESC_TYPE_PAGE, ndev->mtu);
}
/* all fragments are translated into descriptors; account and submit the packet */
dev_queue = netdev_get_tx_queue(ndev, skb->queue_mapping);
netdev_tx_sent_queue(dev_queue, skb->len);
netif_trans_update(ndev);
ndev->stats.tx_bytes += skb->len;
ndev->stats.tx_packets++;
wmb(); /* commit all data before submit */
assert(skb->queue_mapping < priv->ae_handle->q_num);
hnae_queue_xmit(priv->ae_handle->qs[skb->queue_mapping], buf_num);
ring->stats.tx_pkts++;
ring->stats.tx_bytes += skb->len;
return NETDEV_TX_OK;
out_map_frag_fail:
while (ring->next_to_use != next_to_use) {
unfill_desc(ring);
if (ring->next_to_use != next_to_use)
dma_unmap_page(dev,
ring->desc_cb[ring->next_to_use].dma,
ring->desc_cb[ring->next_to_use].length,
DMA_TO_DEVICE);
else
dma_unmap_single(dev,
ring->desc_cb[next_to_use].dma,
ring->desc_cb[next_to_use].length,
DMA_TO_DEVICE);
}
out_err_tx_ok:
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
out_net_tx_busy:
netif_stop_subqueue(ndev, skb->queue_mapping);
/* Herbert's original patch had:
* smp_mb__after_netif_stop_queue();
* but since that doesn't exist yet, just open code it.
*/
smp_mb();
return NETDEV_TX_BUSY;
}
/**
* hns_nic_get_headlen - determine size of header for RSC/LRO/GRO/FCOE
* @data: pointer to the start of the headers
* @flag: RX descriptor flags describing the recognized L3/L4 protocols
* @max_size: total length of section to find headers in
*
* This function is meant to determine the length of headers that will
* be recognized by hardware for LRO, GRO, and RSC offloads. The main
* motivation of doing this is to only perform one pull for IPv4 TCP
* packets so that we can do basic things like calculating the gso_size
* based on the average data per packet.
**/
static unsigned int hns_nic_get_headlen(unsigned char *data, u32 flag,
unsigned int max_size)
{
unsigned char *network;
u8 hlen;
/* this should never happen, but better safe than sorry */
if (max_size < ETH_HLEN)
return max_size;
/* initialize network frame pointer */
network = data;
/* set first protocol and move network header forward */
network += ETH_HLEN;
/* handle any vlan tag if present */
if (hnae_get_field(flag, HNS_RXD_VLAN_M, HNS_RXD_VLAN_S)
== HNS_RX_FLAG_VLAN_PRESENT) {
if ((typeof(max_size))(network - data) > (max_size - VLAN_HLEN))
return max_size;
network += VLAN_HLEN;
}
/* handle L3 protocols */
if (hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S)
== HNS_RX_FLAG_L3ID_IPV4) {
if ((typeof(max_size))(network - data) >
(max_size - sizeof(struct iphdr)))
return max_size;
/* access ihl as a u8 to avoid unaligned access on ia64 */
hlen = (network[0] & 0x0F) << 2;
/* verify hlen meets minimum size requirements */
if (hlen < sizeof(struct iphdr))
return network - data;
/* record next protocol if header is present */
} else if (hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S)
== HNS_RX_FLAG_L3ID_IPV6) {
if ((typeof(max_size))(network - data) >
(max_size - sizeof(struct ipv6hdr)))
return max_size;
/* record next protocol */
hlen = sizeof(struct ipv6hdr);
} else {
return network - data;
}
/* relocate pointer to start of L4 header */
network += hlen;
/* finally sort out TCP/UDP */
if (hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S)
== HNS_RX_FLAG_L4ID_TCP) {
if ((typeof(max_size))(network - data) >
(max_size - sizeof(struct tcphdr)))
return max_size;
/* access doff as a u8 to avoid unaligned access on ia64 */
hlen = (network[12] & 0xF0) >> 2;
/* verify hlen meets minimum size requirements */
if (hlen < sizeof(struct tcphdr))
return network - data;
network += hlen;
} else if (hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S)
== HNS_RX_FLAG_L4ID_UDP) {
if ((typeof(max_size))(network - data) >
(max_size - sizeof(struct udphdr)))
return max_size;
network += sizeof(struct udphdr);
}
/* If everything has gone correctly network should be the
* data section of the packet and will be the end of the header.
* If not then it probably represents the end of the last recognized
* header.
*/
if ((typeof(max_size))(network - data) < max_size)
return network - data;
else
return max_size;
}
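/*
 * Example outcome (illustrative): for an untagged IPv4/TCP frame whose
 * descriptor flags report L3 = IPv4 and L4 = TCP, with no IP or TCP
 * options, the walk above returns 14 + 20 + 20 = 54; the RX path uses this
 * as the pull length when copying the header out of the first buffer.
 */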
static void hns_nic_reuse_page(struct sk_buff *skb, int i,
struct hnae_ring *ring, int pull_len,
struct hnae_desc_cb *desc_cb)
{
struct hnae_desc *desc;
int truesize, size;
int last_offset;
bool twobufs;
twobufs = ((PAGE_SIZE < 8192) &&
hnae_buf_size(ring) == HNS_BUFFER_SIZE_2048);
desc = &ring->desc[ring->next_to_clean];
size = le16_to_cpu(desc->rx.size);
if (twobufs) {
truesize = hnae_buf_size(ring);
} else {
truesize = ALIGN(size, L1_CACHE_BYTES);
last_offset = hnae_page_size(ring) - hnae_buf_size(ring);
}
skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
size - pull_len, truesize - pull_len);
/* avoid re-using pages from a remote NUMA node; reuse_flag defaults to no reuse */
if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))
return;
if (twobufs) {
/* if we are only owner of page we can reuse it */
if (likely(page_count(desc_cb->priv) == 1)) {
/* flip page offset to other buffer */
desc_cb->page_offset ^= truesize;
desc_cb->reuse_flag = 1;
/* bump ref count on page before it is given*/
get_page(desc_cb->priv);
}
return;
}
/* move offset up to the next cache line */
desc_cb->page_offset += truesize;
if (desc_cb->page_offset <= last_offset) {
desc_cb->reuse_flag = 1;
/* bump ref count on page before it is given*/
get_page(desc_cb->priv);
}
}
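/*
 * Sketch of the two-buffer recycling above (illustrative): with a 4K page
 * split into two 2048-byte buffers, "page_offset ^= truesize" flips the
 * offset between 0 and 2048, so the half just handed to the stack rests
 * while the other half goes back on the ring; the extra get_page() keeps
 * the page alive across that handoff.
 */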
static void get_v2rx_desc_bnum(u32 bnum_flag, int *out_bnum)
{
*out_bnum = hnae_get_field(bnum_flag,
HNS_RXD_BUFNUM_M, HNS_RXD_BUFNUM_S) + 1;
}
static void get_rx_desc_bnum(u32 bnum_flag, int *out_bnum)
{
*out_bnum = hnae_get_field(bnum_flag,
HNS_RXD_BUFNUM_M, HNS_RXD_BUFNUM_S);
}
static void hns_nic_rx_checksum(struct hns_nic_ring_data *ring_data,
struct sk_buff *skb, u32 flag)
{
struct net_device *netdev = ring_data->napi.dev;
u32 l3id;
u32 l4id;
/* check if RX checksum offload is enabled */
if (unlikely(!(netdev->features & NETIF_F_RXCSUM)))
return;
/* In hardware, we only support checksum for the following protocols:
* 1) IPv4,
* 2) TCP(over IPv4 or IPv6),
* 3) UDP(over IPv4 or IPv6),
* 4) SCTP(over IPv4 or IPv6)
* but we support many L3(IPv4, IPv6, MPLS, PPPoE etc) and L4(TCP,
* UDP, GRE, SCTP, IGMP, ICMP etc.) protocols.
*
* Hardware limitation:
* Our present hardware RX Descriptor lacks L3/L4 checksum "Status &
* Error" bit (which usually can be used to indicate whether checksum
* was calculated by the hardware and if there was any error encountered
* during checksum calculation).
*
* Software workaround:
* We do get info within the RX descriptor about the kind of L3/L4
* protocol coming in the packet and the error status. These errors
* might not just be checksum errors but could be related to version,
* length of IPv4, UDP, TCP etc.
* Because there is no-way of knowing if it is a L3/L4 error due to bad
* checksum or any other L3/L4 error, we will not (cannot) convey
* checksum status for such cases to upper stack and will not maintain
* the RX L3/L4 checksum counters as well.
*/
l3id = hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S);
l4id = hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S);
/* check L3 protocol for which checksum is supported */
if ((l3id != HNS_RX_FLAG_L3ID_IPV4) && (l3id != HNS_RX_FLAG_L3ID_IPV6))
return;
/* check for any (not just checksum) flagged L3 protocol errors */
if (unlikely(hnae_get_bit(flag, HNS_RXD_L3E_B)))
return;
/* we do not support checksum of fragmented packets */
if (unlikely(hnae_get_bit(flag, HNS_RXD_FRAG_B)))
return;
/* check L4 protocol for which checksum is supported */
if ((l4id != HNS_RX_FLAG_L4ID_TCP) &&
(l4id != HNS_RX_FLAG_L4ID_UDP) &&
(l4id != HNS_RX_FLAG_L4ID_SCTP))
return;
/* check for any (not just checksum) flagged L4 protocol errors */
if (unlikely(hnae_get_bit(flag, HNS_RXD_L4E_B)))
return;
/* now, this has to be a packet with valid RX checksum */
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data,
struct sk_buff **out_skb, int *out_bnum)
{
struct hnae_ring *ring = ring_data->ring;
struct net_device *ndev = ring_data->napi.dev;
struct hns_nic_priv *priv = netdev_priv(ndev);
struct sk_buff *skb;
struct hnae_desc *desc;
struct hnae_desc_cb *desc_cb;
unsigned char *va;
int bnum, length, i;
int pull_len;
u32 bnum_flag;
desc = &ring->desc[ring->next_to_clean];
desc_cb = &ring->desc_cb[ring->next_to_clean];
prefetch(desc);
va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
/* prefetch first cache line of first page */
prefetch(va);
#if L1_CACHE_BYTES < 128
prefetch(va + L1_CACHE_BYTES);
#endif
skb = *out_skb = napi_alloc_skb(&ring_data->napi,
HNS_RX_HEAD_SIZE);
if (unlikely(!skb)) {
netdev_err(ndev, "alloc rx skb fail\n");
ring->stats.sw_err_cnt++;
return -ENOMEM;
}
prefetchw(skb->data);
length = le16_to_cpu(desc->rx.pkt_len);
bnum_flag = le32_to_cpu(desc->rx.ipoff_bnum_pid_flag);
priv->ops.get_rxd_bnum(bnum_flag, &bnum);
*out_bnum = bnum;
if (length <= HNS_RX_HEAD_SIZE) {
memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
/* we can reuse buffer as-is, just make sure it is local */
if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
desc_cb->reuse_flag = 1;
else /* this page cannot be reused so discard it */
put_page(desc_cb->priv);
ring_ptr_move_fw(ring, next_to_clean);
if (unlikely(bnum != 1)) { /* check for error */
*out_bnum = 1;
goto out_bnum_err;
}
} else {
ring->stats.seg_pkt_cnt++;
pull_len = hns_nic_get_headlen(va, bnum_flag, HNS_RX_HEAD_SIZE);
memcpy(__skb_put(skb, pull_len), va,
ALIGN(pull_len, sizeof(long)));
hns_nic_reuse_page(skb, 0, ring, pull_len, desc_cb);
ring_ptr_move_fw(ring, next_to_clean);
if (unlikely(bnum >= (int)MAX_SKB_FRAGS)) { /* check for error */
*out_bnum = 1;
goto out_bnum_err;
}
for (i = 1; i < bnum; i++) {
desc = &ring->desc[ring->next_to_clean];
desc_cb = &ring->desc_cb[ring->next_to_clean];
hns_nic_reuse_page(skb, i, ring, 0, desc_cb);
ring_ptr_move_fw(ring, next_to_clean);
}
}
/* exception handling: free the skb and skip over the bad descriptors */
if (unlikely((!bnum) || (bnum > ring->max_desc_num_per_pkt))) {
out_bnum_err:
*out_bnum = *out_bnum ? *out_bnum : 1; /* ntc has moved, cannot be 0 */
netdev_err(ndev, "invalid bnum(%d,%d,%d,%d),%016llx,%016llx\n",
bnum, ring->max_desc_num_per_pkt,
length, (int)MAX_SKB_FRAGS,
((u64 *)desc)[0], ((u64 *)desc)[1]);
ring->stats.err_bd_num++;
dev_kfree_skb_any(skb);
return -EDOM;
}
bnum_flag = le32_to_cpu(desc->rx.ipoff_bnum_pid_flag);
if (unlikely(!hnae_get_bit(bnum_flag, HNS_RXD_VLD_B))) {
netdev_err(ndev, "no valid bd,%016llx,%016llx\n",
((u64 *)desc)[0], ((u64 *)desc)[1]);
ring->stats.non_vld_descs++;
dev_kfree_skb_any(skb);
return -EINVAL;
}
if (unlikely((!desc->rx.pkt_len) ||
hnae_get_bit(bnum_flag, HNS_RXD_DROP_B))) {
ring->stats.err_pkt_len++;
dev_kfree_skb_any(skb);
return -EFAULT;
}
if (unlikely(hnae_get_bit(bnum_flag, HNS_RXD_L2E_B))) {
ring->stats.l2_err++;
dev_kfree_skb_any(skb);
return -EFAULT;
}
ring->stats.rx_pkts++;
ring->stats.rx_bytes += skb->len;
/* indicate to upper stack if our hardware has already calculated
* the RX checksum
*/
hns_nic_rx_checksum(ring_data, skb, bnum_flag);
return 0;
}
static void
hns_nic_alloc_rx_buffers(struct hns_nic_ring_data *ring_data, int cleaned_count)
{
int i, ret;
struct hnae_desc_cb res_cbs;
struct hnae_desc_cb *desc_cb;
struct hnae_ring *ring = ring_data->ring;
struct net_device *ndev = ring_data->napi.dev;
for (i = 0; i < cleaned_count; i++) {
desc_cb = &ring->desc_cb[ring->next_to_use];
if (desc_cb->reuse_flag) {
ring->stats.reuse_pg_cnt++;
hnae_reuse_buffer(ring, ring->next_to_use);
} else {
ret = hnae_reserve_buffer_map(ring, &res_cbs);
if (ret) {
ring->stats.sw_err_cnt++;
netdev_err(ndev, "hnae reserve buffer map failed.\n");
break;
}
hnae_replace_buffer(ring, ring->next_to_use, &res_cbs);
}
ring_ptr_move_fw(ring, next_to_use);
}
wmb(); /* make sure all data is written before ringing the doorbell */
writel_relaxed(i, ring->io_base + RCB_REG_HEAD);
}
/* pass a received skb up to the network stack via GRO */
static void hns_nic_rx_up_pro(struct hns_nic_ring_data *ring_data,
struct sk_buff *skb)
{
struct net_device *ndev = ring_data->napi.dev;
skb->protocol = eth_type_trans(skb, ndev);
(void)napi_gro_receive(&ring_data->napi, skb);
}
static int hns_desc_unused(struct hnae_ring *ring)
{
int ntc = ring->next_to_clean;
int ntu = ring->next_to_use;
return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
}
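/* Worked example for hns_desc_unused (illustrative only): on a 64-entry
* ring,
*     ntc = 10, ntu = 50: ((10 >= 50) ? 0 : 64) + 10 - 50 == 24
*     ntc = 50, ntu = 10: ((50 >= 10) ? 0 : 64) + 50 - 10 == 40
* so the expression counts free descriptors correctly whether or not
* next_to_use has wrapped around past next_to_clean.
*/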
static int hns_nic_rx_poll_one(struct hns_nic_ring_data *ring_data,
int budget, void *v)
{
struct hnae_ring *ring = ring_data->ring;
struct sk_buff *skb;
int num, bnum;
#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
int recv_pkts, recv_bds, clean_count, err;
int unused_count = hns_desc_unused(ring);
num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
rmb(); /* make sure num is read before any other data is touched */
recv_pkts = 0, recv_bds = 0, clean_count = 0;
num -= unused_count;
while (recv_pkts < budget && recv_bds < num) {
/* reuse or realloc buffers */
if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
hns_nic_alloc_rx_buffers(ring_data,
clean_count + unused_count);
clean_count = 0;
unused_count = hns_desc_unused(ring);
}
/* poll one pkt */
err = hns_nic_poll_rx_skb(ring_data, &skb, &bnum);
if (unlikely(!skb)) /* this fault cannot be repaired */
goto out;
recv_bds += bnum;
clean_count += bnum;
if (unlikely(err)) { /* skip over the erroneous packet */
recv_pkts++;
continue;
}
/* hand the packet up to the IP stack */
((void (*)(struct hns_nic_ring_data *, struct sk_buff *))v)(
ring_data, skb);
recv_pkts++;
}
out:
/* return any cleaned buffers to the hardware before leaving */
if (clean_count + unused_count > 0)
hns_nic_alloc_rx_buffers(ring_data,
clean_count + unused_count);
return recv_pkts;
}
static bool hns_nic_rx_fini_pro(struct hns_nic_ring_data *ring_data)
{
struct hnae_ring *ring = ring_data->ring;
int num = 0;
ring_data->ring->q->handle->dev->ops->toggle_ring_irq(ring, 0);
/* workaround for a hardware bug */
num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
if (num > 0) {
ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
ring_data->ring, 1);
return false;
} else {
return true;
}
}
static bool hns_nic_rx_fini_pro_v2(struct hns_nic_ring_data *ring_data)
{
struct hnae_ring *ring = ring_data->ring;
int num;
num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
if (!num)
return true;
else
return false;
}
static inline void hns_nic_reclaim_one_desc(struct hnae_ring *ring,
int *bytes, int *pkts)
{
struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
(*pkts) += (desc_cb->type == DESC_TYPE_SKB);
(*bytes) += desc_cb->length;
/* desc_cb will be cleaned after hnae_free_buffer_detach */
hnae_free_buffer_detach(ring, ring->next_to_clean);
ring_ptr_move_fw(ring, next_to_clean);
}
static int is_valid_clean_head(struct hnae_ring *ring, int h)
{
int u = ring->next_to_use;
int c = ring->next_to_clean;
if (unlikely(h > ring->desc_num))
return 0;
assert(u > 0 && u < ring->desc_num);
assert(c > 0 && c < ring->desc_num);
assert(u != c && h != c); /* must be checked before call this func */
return u > c ? (h > c && h <= u) : (h > c || h <= u);
}
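/* Worked example for is_valid_clean_head (illustrative only): with
* desc_num == 64, u == 50 and c == 10 the unwrapped branch accepts
* 10 < h <= 50; with u == 10 and c == 50 the wrapped branch accepts
* h > 50 || h <= 10, i.e. exactly the descriptors the hardware may
* legitimately have consumed in either layout.
*/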
/* netif_tx_lock will turn down the performance, set only when necessary */
#ifdef CONFIG_NET_POLL_CONTROLLER
#define NETIF_TX_LOCK(ring) spin_lock(&(ring)->lock)
#define NETIF_TX_UNLOCK(ring) spin_unlock(&(ring)->lock)
#else
#define NETIF_TX_LOCK(ring)
#define NETIF_TX_UNLOCK(ring)
#endif
/* reclaim all desc in one budget
* return error or number of desc left
*/
static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
int budget, void *v)
{
struct hnae_ring *ring = ring_data->ring;
struct net_device *ndev = ring_data->napi.dev;
struct netdev_queue *dev_queue;
struct hns_nic_priv *priv = netdev_priv(ndev);
int head;
int bytes, pkts;
NETIF_TX_LOCK(ring);
head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
rmb(); /* make sure head is ready before touch any data */
if (is_ring_empty(ring) || head == ring->next_to_clean) {
NETIF_TX_UNLOCK(ring);
return 0; /* no data to poll */
}
if (!is_valid_clean_head(ring, head)) {
netdev_err(ndev, "wrong head (%d, %d-%d)\n", head,
ring->next_to_use, ring->next_to_clean);
ring->stats.io_err_cnt++;
NETIF_TX_UNLOCK(ring);
return -EIO;
}
bytes = 0;
pkts = 0;
while (head != ring->next_to_clean) {
hns_nic_reclaim_one_desc(ring, &bytes, &pkts);
/* issue prefetch for next Tx descriptor */
prefetch(&ring->desc_cb[ring->next_to_clean]);
}
NETIF_TX_UNLOCK(ring);
dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
netdev_tx_completed_queue(dev_queue, pkts, bytes);
if (unlikely(priv->link && !netif_carrier_ok(ndev)))
netif_carrier_on(ndev);
if (unlikely(pkts && netif_carrier_ok(ndev) &&
(ring_space(ring) >= ring->max_desc_num_per_pkt * 2))) {
/* Make sure that anybody stopping the queue after this
* sees the new next_to_clean.
*/
smp_mb();
if (netif_tx_queue_stopped(dev_queue) &&
!test_bit(NIC_STATE_DOWN, &priv->state)) {
netif_tx_wake_queue(dev_queue);
ring->stats.restart_queue++;
}
}
return 0;
}
static bool hns_nic_tx_fini_pro(struct hns_nic_ring_data *ring_data)
{
struct hnae_ring *ring = ring_data->ring;
int head;
ring_data->ring->q->handle->dev->ops->toggle_ring_irq(ring, 0);
head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
if (head != ring->next_to_clean) {
ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
ring_data->ring, 1);
return false;
} else {
return true;
}
}
static bool hns_nic_tx_fini_pro_v2(struct hns_nic_ring_data *ring_data)
{
struct hnae_ring *ring = ring_data->ring;
int head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
if (head == ring->next_to_clean)
return true;
else
return false;
}
static void hns_nic_tx_clr_all_bufs(struct hns_nic_ring_data *ring_data)
{
struct hnae_ring *ring = ring_data->ring;
struct net_device *ndev = ring_data->napi.dev;
struct netdev_queue *dev_queue;
int head;
int bytes, pkts;
NETIF_TX_LOCK(ring);
head = ring->next_to_use; /* ntu: ring position set by software */
bytes = 0;
pkts = 0;
while (head != ring->next_to_clean)
hns_nic_reclaim_one_desc(ring, &bytes, &pkts);
NETIF_TX_UNLOCK(ring);
dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
netdev_tx_reset_queue(dev_queue);
}
static int hns_nic_common_poll(struct napi_struct *napi, int budget)
{
int clean_complete = 0;
struct hns_nic_ring_data *ring_data =
container_of(napi, struct hns_nic_ring_data, napi);
struct hnae_ring *ring = ring_data->ring;
try_again:
clean_complete += ring_data->poll_one(
ring_data, budget - clean_complete,
ring_data->ex_process);
if (clean_complete < budget) {
if (ring_data->fini_process(ring_data)) {
napi_complete(napi);
ring->q->handle->dev->ops->toggle_ring_irq(ring, 0);
} else {
goto try_again;
}
}
return clean_complete;
}
static irqreturn_t hns_irq_handle(int irq, void *dev)
{
struct hns_nic_ring_data *ring_data = (struct hns_nic_ring_data *)dev;
ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
ring_data->ring, 1);
napi_schedule(&ring_data->napi);
return IRQ_HANDLED;
}
/**
*hns_nic_adjust_link - adjust the link mode according to the PHY status or new parameters
*@ndev: net device
*/
static void hns_nic_adjust_link(struct net_device *ndev)
{
struct hns_nic_priv *priv = netdev_priv(ndev);
struct hnae_handle *h = priv->ae_handle;
int state = 1;
if (ndev->phydev) {
h->dev->ops->adjust_link(h, ndev->phydev->speed,
ndev->phydev->duplex);
state = ndev->phydev->link;
}
state = state && h->dev->ops->get_status(h);
if (state != priv->link) {
if (state) {
netif_carrier_on(ndev);
netif_tx_wake_all_queues(ndev);
netdev_info(ndev, "link up\n");
} else {
netif_carrier_off(ndev);
netdev_info(ndev, "link down\n");
}
priv->link = state;
}
}
/**
*hns_nic_init_phy - init phy
*@ndev: net device
*@h: ae handle
* Return 0 on success, negative on failure
*/
int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h)
{
struct phy_device *phy_dev = h->phy_dev;
int ret;
if (!h->phy_dev)
return 0;
if (h->phy_if != PHY_INTERFACE_MODE_XGMII) {
phy_dev->dev_flags = 0;
ret = phy_connect_direct(ndev, phy_dev, hns_nic_adjust_link,
h->phy_if);
} else {
ret = phy_attach_direct(ndev, phy_dev, 0, h->phy_if);
}
if (unlikely(ret))
return -ENODEV;
phy_dev->supported &= h->if_support;
phy_dev->advertising = phy_dev->supported;
if (h->phy_if == PHY_INTERFACE_MODE_XGMII)
phy_dev->autoneg = false;
return 0;
}
static int hns_nic_ring_open(struct net_device *netdev, int idx)
{
struct hns_nic_priv *priv = netdev_priv(netdev);
struct hnae_handle *h = priv->ae_handle;
napi_enable(&priv->ring_data[idx].napi);
enable_irq(priv->ring_data[idx].ring->irq);
h->dev->ops->toggle_ring_irq(priv->ring_data[idx].ring, 0);
return 0;
}
static int hns_nic_net_set_mac_address(struct net_device *ndev, void *p)
{
struct hns_nic_priv *priv = netdev_priv(ndev);
struct hnae_handle *h = priv->ae_handle;
struct sockaddr *mac_addr = p;
int ret;
if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
return -EADDRNOTAVAIL;
ret = h->dev->ops->set_mac_addr(h, mac_addr->sa_data);
if (ret) {
netdev_err(ndev, "set_mac_address fail, ret=%d!\n", ret);
return ret;
}
memcpy(ndev->dev_addr, mac_addr->sa_data, ndev->addr_len);
return 0;
}
void hns_nic_update_stats(struct net_device *netdev)
{
struct hns_nic_priv *priv = netdev_priv(netdev);
struct hnae_handle *h = priv->ae_handle;
h->dev->ops->update_stats(h, &netdev->stats);
}
/* set the mac addr if it is configured, otherwise leave it to the AE driver */
static void hns_init_mac_addr(struct net_device *ndev)
{
struct hns_nic_priv *priv = netdev_priv(ndev);
if (!device_get_mac_address(priv->dev, ndev->dev_addr, ETH_ALEN)) {
eth_hw_addr_random(ndev);
dev_warn(priv->dev, "No valid mac, use random mac %pM",
ndev->dev_addr);
}
}
static void hns_nic_ring_close(struct net_device *netdev, int idx)
{
struct hns_nic_priv *priv = netdev_priv(netdev);
struct hnae_handle *h = priv->ae_handle;
h->dev->ops->toggle_ring_irq(priv->ring_data[idx].ring, 1);
disable_irq(priv->ring_data[idx].ring->irq);
napi_disable(&priv->ring_data[idx].napi);
}
static int hns_nic_init_affinity_mask(int q_num, int ring_idx,
struct hnae_ring *ring, cpumask_t *mask)
{
int cpu;
/* Different irq balancing between 16-core and 32-core systems.
* The cpu mask is set by ring index according to the ring flag,
* which indicates whether the ring is tx or rx.
*/
if (q_num == num_possible_cpus()) {
if (is_tx_ring(ring))
cpu = ring_idx;
else
cpu = ring_idx - q_num;
} else {
if (is_tx_ring(ring))
cpu = ring_idx * 2;
else
cpu = (ring_idx - q_num) * 2 + 1;
}
cpumask_clear(mask);
cpumask_set_cpu(cpu, mask);
return cpu;
}
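/* Worked example for the mapping above (illustrative only): with
* q_num == 16 on a 16-core system, tx ring 3 maps to cpu 3 and rx ring
* 19 (ring_idx - q_num == 3) maps to cpu 3 as well, pairing a queue's
* tx and rx on one core. On a 32-core system the same rings map to
* cpus 6 and 7, spreading a queue's tx and rx over adjacent cores.
*/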
static int hns_nic_init_irq(struct hns_nic_priv *priv)
{
struct hnae_handle *h = priv->ae_handle;
struct hns_nic_ring_data *rd;
int i;
int ret;
int cpu;
for (i = 0; i < h->q_num * 2; i++) {
rd = &priv->ring_data[i];
if (rd->ring->irq_init_flag == RCB_IRQ_INITED)
break;
snprintf(rd->ring->ring_name, RCB_RING_NAME_LEN,
"%s-%s%d", priv->netdev->name,
(is_tx_ring(rd->ring) ? "tx" : "rx"), rd->queue_index);
rd->ring->ring_name[RCB_RING_NAME_LEN - 1] = '\0';
ret = request_irq(rd->ring->irq,
hns_irq_handle, 0, rd->ring->ring_name, rd);
if (ret) {
netdev_err(priv->netdev, "request irq(%d) fail\n",
rd->ring->irq);
return ret;
}
disable_irq(rd->ring->irq);
cpu = hns_nic_init_affinity_mask(h->q_num, i,
rd->ring, &rd->mask);
if (cpu_online(cpu))
irq_set_affinity_hint(rd->ring->irq,
&rd->mask);
rd->ring->irq_init_flag = RCB_IRQ_INITED;
}
return 0;
}
static int hns_nic_net_up(struct net_device *ndev)
{
struct hns_nic_priv *priv = netdev_priv(ndev);
struct hnae_handle *h = priv->ae_handle;
int i, j;
int ret;
ret = hns_nic_init_irq(priv);
if (ret != 0) {
netdev_err(ndev, "hns init irq failed! ret=%d\n", ret);
return ret;
}
for (i = 0; i < h->q_num * 2; i++) {
ret = hns_nic_ring_open(ndev, i);
if (ret)
goto out_has_some_queues;
}
ret = h->dev->ops->set_mac_addr(h, ndev->dev_addr);
if (ret)
goto out_set_mac_addr_err;
ret = h->dev->ops->start ? h->dev->ops->start(h) : 0;
if (ret)
goto out_start_err;
if (ndev->phydev)
phy_start(ndev->phydev);
clear_bit(NIC_STATE_DOWN, &priv->state);
(void)mod_timer(&priv->service_timer, jiffies + SERVICE_TIMER_HZ);
return 0;
out_start_err:
netif_stop_queue(ndev);
out_set_mac_addr_err:
out_has_some_queues:
for (j = i - 1; j >= 0; j--)
hns_nic_ring_close(ndev, j);
set_bit(NIC_STATE_DOWN, &priv->state);
return ret;
}
static void hns_nic_net_down(struct net_device *ndev)
{
int i;
struct hnae_ae_ops *ops;
struct hns_nic_priv *priv = netdev_priv(ndev);
if (test_and_set_bit(NIC_STATE_DOWN, &priv->state))
return;
(void)del_timer_sync(&priv->service_timer);
netif_tx_stop_all_queues(ndev);
netif_carrier_off(ndev);
netif_tx_disable(ndev);
priv->link = 0;
if (ndev->phydev)
phy_stop(ndev->phydev);
ops = priv->ae_handle->dev->ops;
if (ops->stop)
ops->stop(priv->ae_handle);
netif_tx_stop_all_queues(ndev);
for (i = priv->ae_handle->q_num - 1; i >= 0; i--) {
hns_nic_ring_close(ndev, i);
hns_nic_ring_close(ndev, i + priv->ae_handle->q_num);
/* clean tx buffers */
hns_nic_tx_clr_all_bufs(priv->ring_data + i);
}
}
void hns_nic_net_reset(struct net_device *ndev)
{
struct hns_nic_priv *priv = netdev_priv(ndev);
struct hnae_handle *handle = priv->ae_handle;
while (test_and_set_bit(NIC_STATE_RESETTING, &priv->state))
usleep_range(1000, 2000);
(void)hnae_reinit_handle(handle);
clear_bit(NIC_STATE_RESETTING, &priv->state);
}
void hns_nic_net_reinit(struct net_device *netdev)
{
struct hns_nic_priv *priv = netdev_priv(netdev);
netif_trans_update(priv->netdev);
while (test_and_set_bit(NIC_STATE_REINITING, &priv->state))
usleep_range(1000, 2000);
hns_nic_net_down(netdev);
hns_nic_net_reset(netdev);
(void)hns_nic_net_up(netdev);
clear_bit(NIC_STATE_REINITING, &priv->state);
}
static int hns_nic_net_open(struct net_device *ndev)
{
struct hns_nic_priv *priv = netdev_priv(ndev);
struct hnae_handle *h = priv->ae_handle;
int ret;
if (test_bit(NIC_STATE_TESTING, &priv->state))
return -EBUSY;
priv->link = 0;
netif_carrier_off(ndev);
ret = netif_set_real_num_tx_queues(ndev, h->q_num);
if (ret < 0) {
netdev_err(ndev, "netif_set_real_num_tx_queues fail, ret=%d!\n",
ret);
return ret;
}
ret = netif_set_real_num_rx_queues(ndev, h->q_num);
if (ret < 0) {
netdev_err(ndev,
"netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
return ret;
}
ret = hns_nic_net_up(ndev);
if (ret) {
netdev_err(ndev,
"hns net up fail, ret=%d!\n", ret);
return ret;
}
return 0;
}
static int hns_nic_net_stop(struct net_device *ndev)
{
hns_nic_net_down(ndev);
return 0;
}
static void hns_tx_timeout_reset(struct hns_nic_priv *priv);
static void hns_nic_net_timeout(struct net_device *ndev)
{
struct hns_nic_priv *priv = netdev_priv(ndev);
hns_tx_timeout_reset(priv);
}
static int hns_nic_do_ioctl(struct net_device *netdev, struct ifreq *ifr,
int cmd)
{
struct phy_device *phy_dev = netdev->phydev;
if (!netif_running(netdev))
return -EINVAL;
if (!phy_dev)
return -ENOTSUPP;
return phy_mii_ioctl(phy_dev, ifr, cmd);
}
/* used only by netconsole to poll the device without interrupts */
#ifdef CONFIG_NET_POLL_CONTROLLER
void hns_nic_poll_controller(struct net_device *ndev)
{
struct hns_nic_priv *priv = netdev_priv(ndev);
unsigned long flags;
int i;
local_irq_save(flags);
for (i = 0; i < priv->ae_handle->q_num * 2; i++)
napi_schedule(&priv->ring_data[i].napi);
local_irq_restore(flags);
}
#endif
static netdev_tx_t hns_nic_net_xmit(struct sk_buff *skb,
struct net_device *ndev)
{
struct hns_nic_priv *priv = netdev_priv(ndev);
assert(skb->queue_mapping < ndev->ae_handle->q_num);
return hns_nic_net_xmit_hw(ndev, skb,
&tx_ring_data(priv, skb->queue_mapping));
}
static void hns_nic_drop_rx_fetch(struct hns_nic_ring_data *ring_data,
struct sk_buff *skb)
{
dev_kfree_skb_any(skb);
}
#define HNS_LB_TX_RING 0
static struct sk_buff *hns_assemble_skb(struct net_device *ndev)
{
struct sk_buff *skb;
struct ethhdr *ethhdr;
int frame_len;
/* allocate test skb */
skb = alloc_skb(64, GFP_KERNEL);
if (!skb)
return NULL;
skb_put(skb, 64);
skb->dev = ndev;
memset(skb->data, 0xFF, skb->len);
/* must look like a tcp/ip packet */
ethhdr = (struct ethhdr *)skb->data;
ethhdr->h_proto = htons(ETH_P_IP);
frame_len = skb->len & (~1ul);
memset(&skb->data[frame_len / 2], 0xAA,
frame_len / 2 - 1);
skb->queue_mapping = HNS_LB_TX_RING;
return skb;
}
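/* Layout of the test frame built above (illustrative only): 64 bytes
* are first filled with 0xFF, so the destination mac is the broadcast
* address ff:ff:ff:ff:ff:ff; h_proto is then overwritten with
* htons(ETH_P_IP), and
*     frame_len = 64 & ~1ul = 64;
*     memset(&skb->data[32], 0xAA, 31);
* repaints bytes 32..62 with 0xAA as a recognizable payload pattern.
*/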
static int hns_enable_serdes_lb(struct net_device *ndev)
{
struct hns_nic_priv *priv = netdev_priv(ndev);
struct hnae_handle *h = priv->ae_handle;
struct hnae_ae_ops *ops = h->dev->ops;
int speed, duplex;
int ret;
ret = ops->set_loopback(h, MAC_INTERNALLOOP_SERDES, 1);
if (ret)
return ret;
ret = ops->start ? ops->start(h) : 0;
if (ret)
return ret;
/* adjust link speed and duplex */
if (h->phy_if != PHY_INTERFACE_MODE_XGMII)
speed = 1000;
else
speed = 10000;
duplex = 1;
ops->adjust_link(h, speed, duplex);
/* wait h/w ready */
mdelay(300);
return 0;
}
static void hns_disable_serdes_lb(struct net_device *ndev)
{
struct hns_nic_priv *priv = netdev_priv(ndev);
struct hnae_handle *h = priv->ae_handle;
struct hnae_ae_ops *ops = h->dev->ops;
ops->stop(h);
ops->set_loopback(h, MAC_INTERNALLOOP_SERDES, 0);
}
/**
*hns_nic_clear_all_rx_fetch - clear the descriptors the chip has fetched.
*The function works as follows:
* 1. if one rx ring finds a page_offset that is not 0 between head and
* tail, the chip has fetched the wrong descriptors for a ring whose
* buffer size is 4096.
* 2. we enable the chip serdes loopback and point the rss indirection
* at that ring.
* 3. we construct 64-byte ip broadcast packets and wait until the
* associated rx ring has received them all, which makes the chip fetch
* new descriptors.
* 4. recover the original state.
*
*@ndev: net device
*/
static int hns_nic_clear_all_rx_fetch(struct net_device *ndev)
{
struct hns_nic_priv *priv = netdev_priv(ndev);
struct hnae_handle *h = priv->ae_handle;
struct hnae_ae_ops *ops = h->dev->ops;
struct hns_nic_ring_data *rd;
struct hnae_ring *ring;
struct sk_buff *skb;
u32 *org_indir;
u32 *cur_indir;
int indir_size;
int head, tail;
int fetch_num;
int i, j;
bool found;
int retry_times;
int ret = 0;
/* alloc indir memory */
indir_size = ops->get_rss_indir_size(h) * sizeof(*org_indir);
org_indir = kzalloc(indir_size, GFP_KERNEL);
if (!org_indir)
return -ENOMEM;
/* store the original indirection table */
ops->get_rss(h, org_indir, NULL, NULL);
cur_indir = kzalloc(indir_size, GFP_KERNEL);
if (!cur_indir) {
ret = -ENOMEM;
goto cur_indir_alloc_err;
}
/* set loopback */
if (hns_enable_serdes_lb(ndev)) {
ret = -EINVAL;
goto enable_serdes_lb_err;
}
/* walk every rx ring to clear fetched descriptors */
for (i = 0; i < h->q_num; i++) {
ring = &h->qs[i]->rx_ring;
head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
tail = readl_relaxed(ring->io_base + RCB_REG_TAIL);
found = false;
fetch_num = ring_dist(ring, head, tail);
while (head != tail) {
if (ring->desc_cb[head].page_offset != 0) {
found = true;
break;
}
head++;
if (head == ring->desc_num)
head = 0;
}
if (found) {
for (j = 0; j < indir_size / sizeof(*org_indir); j++)
cur_indir[j] = i;
ops->set_rss(h, cur_indir, NULL, 0);
for (j = 0; j < fetch_num; j++) {
/* alloc one skb and init */
skb = hns_assemble_skb(ndev);
if (!skb)
goto out;
rd = &tx_ring_data(priv, skb->queue_mapping);
hns_nic_net_xmit_hw(ndev, skb, rd);
retry_times = 0;
while (retry_times++ < 10) {
mdelay(10);
/* clean rx */
rd = &rx_ring_data(priv, i);
if (rd->poll_one(rd, fetch_num,
hns_nic_drop_rx_fetch))
break;
}
retry_times = 0;
while (retry_times++ < 10) {
mdelay(10);
/* clean the packets sent on tx ring 0 */
rd = &tx_ring_data(priv,
HNS_LB_TX_RING);
if (rd->poll_one(rd, fetch_num, NULL))
break;
}
}
}
}
out:
/* restore everything */
ops->set_rss(h, org_indir, NULL, 0);
hns_disable_serdes_lb(ndev);
enable_serdes_lb_err:
kfree(cur_indir);
cur_indir_alloc_err:
kfree(org_indir);
return ret;
}
static int hns_nic_change_mtu(struct net_device *ndev, int new_mtu)
{
struct hns_nic_priv *priv = netdev_priv(ndev);
struct hnae_handle *h = priv->ae_handle;
bool if_running = netif_running(ndev);
int ret;
/* MTU < 68 is an error and causes problems on some kernels */
if (new_mtu < 68)
return -EINVAL;
/* MTU no change */
if (new_mtu == ndev->mtu)
return 0;
if (!h->dev->ops->set_mtu)
return -ENOTSUPP;
if (if_running) {
(void)hns_nic_net_stop(ndev);
msleep(100);
}
if (priv->enet_ver != AE_VERSION_1 &&
ndev->mtu <= BD_SIZE_2048_MAX_MTU &&
new_mtu > BD_SIZE_2048_MAX_MTU) {
/* update desc */
hnae_reinit_all_ring_desc(h);
/* clear the packets which the chip has fetched */
ret = hns_nic_clear_all_rx_fetch(ndev);
/* the page offset must be consistent with the descriptors */
hnae_reinit_all_ring_page_off(h);
if (ret) {
netdev_err(ndev, "clear the fetched desc fail\n");
goto out;
}
}
ret = h->dev->ops->set_mtu(h, new_mtu);
if (ret) {
netdev_err(ndev, "set mtu fail, return value %d\n",
ret);
goto out;
}
/* finally, set new mtu to netdevice */
ndev->mtu = new_mtu;
out:
if (if_running) {
if (hns_nic_net_open(ndev)) {
netdev_err(ndev, "hns net open fail\n");
ret = -EINVAL;
}
}
return ret;
}
static int hns_nic_set_features(struct net_device *netdev,
netdev_features_t features)
{
struct hns_nic_priv *priv = netdev_priv(netdev);
switch (priv->enet_ver) {
case AE_VERSION_1:
if (features & (NETIF_F_TSO | NETIF_F_TSO6))
netdev_info(netdev, "enet v1 do not support tso!\n");
break;
default:
if (features & (NETIF_F_TSO | NETIF_F_TSO6)) {
priv->ops.fill_desc = fill_tso_desc;
priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso;
/* The chip only supports 7*4096 */
netif_set_gso_max_size(netdev, 7 * 4096);
} else {
priv->ops.fill_desc = fill_v2_desc;
priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
}
break;
}
netdev->features = features;
return 0;
}
static netdev_features_t hns_nic_fix_features(
struct net_device *netdev, netdev_features_t features)
{
struct hns_nic_priv *priv = netdev_priv(netdev);
switch (priv->enet_ver) {
case AE_VERSION_1:
features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
NETIF_F_HW_VLAN_CTAG_FILTER);
break;
default:
break;
}
return features;
}
static int hns_nic_uc_sync(struct net_device *netdev, const unsigned char *addr)
{
struct hns_nic_priv *priv = netdev_priv(netdev);
struct hnae_handle *h = priv->ae_handle;
if (h->dev->ops->add_uc_addr)
return h->dev->ops->add_uc_addr(h, addr);
return 0;
}
static int hns_nic_uc_unsync(struct net_device *netdev,
const unsigned char *addr)
{
struct hns_nic_priv *priv = netdev_priv(netdev);
struct hnae_handle *h = priv->ae_handle;
if (h->dev->ops->rm_uc_addr)
return h->dev->ops->rm_uc_addr(h, addr);
return 0;
}
/**
* hns_set_multicast_list - set the multicast mac addresses
* @ndev: net device
*
* return void
*/
void hns_set_multicast_list(struct net_device *ndev)
{
struct hns_nic_priv *priv = netdev_priv(ndev);
struct hnae_handle *h = priv->ae_handle;
struct netdev_hw_addr *ha = NULL;
if (!h) {
netdev_err(ndev, "hnae handle is null\n");
return;
}
if (h->dev->ops->clr_mc_addr)
if (h->dev->ops->clr_mc_addr(h))
netdev_err(ndev, "clear multicast address fail\n");
if (h->dev->ops->set_mc_addr) {
netdev_for_each_mc_addr(ha, ndev)
if (h->dev->ops->set_mc_addr(h, ha->addr))
netdev_err(ndev, "set multicast fail\n");
}
}
void hns_nic_set_rx_mode(struct net_device *ndev)
{
struct hns_nic_priv *priv = netdev_priv(ndev);
struct hnae_handle *h = priv->ae_handle;
if (h->dev->ops->set_promisc_mode) {
if (ndev->flags & IFF_PROMISC)
h->dev->ops->set_promisc_mode(h, 1);
else
h->dev->ops->set_promisc_mode(h, 0);
}
hns_set_multicast_list(ndev);
if (__dev_uc_sync(ndev, hns_nic_uc_sync, hns_nic_uc_unsync))
netdev_err(ndev, "sync uc address fail\n");
}
static void hns_nic_get_stats64(struct net_device *ndev,
struct rtnl_link_stats64 *stats)
{
int idx = 0;
u64 tx_bytes = 0;
u64 rx_bytes = 0;
u64 tx_pkts = 0;
u64 rx_pkts = 0;
struct hns_nic_priv *priv = netdev_priv(ndev);
struct hnae_handle *h = priv->ae_handle;
for (idx = 0; idx < h->q_num; idx++) {
tx_bytes += h->qs[idx]->tx_ring.stats.tx_bytes;
tx_pkts += h->qs[idx]->tx_ring.stats.tx_pkts;
rx_bytes += h->qs[idx]->rx_ring.stats.rx_bytes;
rx_pkts += h->qs[idx]->rx_ring.stats.rx_pkts;
}
stats->tx_bytes = tx_bytes;
stats->tx_packets = tx_pkts;
stats->rx_bytes = rx_bytes;
stats->rx_packets = rx_pkts;
stats->rx_errors = ndev->stats.rx_errors;
stats->multicast = ndev->stats.multicast;
stats->rx_length_errors = ndev->stats.rx_length_errors;
stats->rx_crc_errors = ndev->stats.rx_crc_errors;
stats->rx_missed_errors = ndev->stats.rx_missed_errors;
stats->tx_errors = ndev->stats.tx_errors;
stats->rx_dropped = ndev->stats.rx_dropped;
stats->tx_dropped = ndev->stats.tx_dropped;
stats->collisions = ndev->stats.collisions;
stats->rx_over_errors = ndev->stats.rx_over_errors;
stats->rx_frame_errors = ndev->stats.rx_frame_errors;
stats->rx_fifo_errors = ndev->stats.rx_fifo_errors;
stats->tx_aborted_errors = ndev->stats.tx_aborted_errors;
stats->tx_carrier_errors = ndev->stats.tx_carrier_errors;
stats->tx_fifo_errors = ndev->stats.tx_fifo_errors;
stats->tx_heartbeat_errors = ndev->stats.tx_heartbeat_errors;
stats->tx_window_errors = ndev->stats.tx_window_errors;
stats->rx_compressed = ndev->stats.rx_compressed;
stats->tx_compressed = ndev->stats.tx_compressed;
}
static u16
hns_nic_select_queue(struct net_device *ndev, struct sk_buff *skb,
void *accel_priv, select_queue_fallback_t fallback)
{
struct ethhdr *eth_hdr = (struct ethhdr *)skb->data;
struct hns_nic_priv *priv = netdev_priv(ndev);
/* work around hardware queue loopback of broadcast/multicast packets */
if (!AE_IS_VER1(priv->enet_ver) &&
is_multicast_ether_addr(eth_hdr->h_dest))
return 0;
else
return fallback(ndev, skb);
}
static const struct net_device_ops hns_nic_netdev_ops = {
.ndo_open = hns_nic_net_open,
.ndo_stop = hns_nic_net_stop,
.ndo_start_xmit = hns_nic_net_xmit,
.ndo_tx_timeout = hns_nic_net_timeout,
.ndo_set_mac_address = hns_nic_net_set_mac_address,
.ndo_change_mtu = hns_nic_change_mtu,
.ndo_do_ioctl = hns_nic_do_ioctl,
.ndo_set_features = hns_nic_set_features,
.ndo_fix_features = hns_nic_fix_features,
.ndo_get_stats64 = hns_nic_get_stats64,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = hns_nic_poll_controller,
#endif
.ndo_set_rx_mode = hns_nic_set_rx_mode,
.ndo_select_queue = hns_nic_select_queue,
};
static void hns_nic_update_link_status(struct net_device *netdev)
{
struct hns_nic_priv *priv = netdev_priv(netdev);
struct hnae_handle *h = priv->ae_handle;
if (h->phy_dev) {
if (h->phy_if != PHY_INTERFACE_MODE_XGMII)
return;
(void)genphy_read_status(h->phy_dev);
}
hns_nic_adjust_link(netdev);
}
/* for dumping key regs */
static void hns_nic_dump(struct hns_nic_priv *priv)
{
struct hnae_handle *h = priv->ae_handle;
struct hnae_ae_ops *ops = h->dev->ops;
u32 *data, reg_num, i;
if (ops->get_regs_len && ops->get_regs) {
reg_num = ops->get_regs_len(priv->ae_handle);
reg_num = (reg_num + 3ul) & ~3ul;
data = kcalloc(reg_num, sizeof(u32), GFP_KERNEL);
if (data) {
ops->get_regs(priv->ae_handle, data);
for (i = 0; i < reg_num; i += 4)
pr_info("0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
i, data[i], data[i + 1],
data[i + 2], data[i + 3]);
kfree(data);
}
}
for (i = 0; i < h->q_num; i++) {
pr_info("tx_queue%d_next_to_clean:%d\n",
i, h->qs[i]->tx_ring.next_to_clean);
pr_info("tx_queue%d_next_to_use:%d\n",
i, h->qs[i]->tx_ring.next_to_use);
pr_info("rx_queue%d_next_to_clean:%d\n",
i, h->qs[i]->rx_ring.next_to_clean);
pr_info("rx_queue%d_next_to_use:%d\n",
i, h->qs[i]->rx_ring.next_to_use);
}
}
/* for resetting subtask */
static void hns_nic_reset_subtask(struct hns_nic_priv *priv)
{
enum hnae_port_type type = priv->ae_handle->port_type;
if (!test_bit(NIC_STATE2_RESET_REQUESTED, &priv->state))
return;
clear_bit(NIC_STATE2_RESET_REQUESTED, &priv->state);
/* If we're already down, removing or resetting, just bail */
if (test_bit(NIC_STATE_DOWN, &priv->state) ||
test_bit(NIC_STATE_REMOVING, &priv->state) ||
test_bit(NIC_STATE_RESETTING, &priv->state))
return;
hns_nic_dump(priv);
netdev_info(priv->netdev, "try to reset %s port!\n",
(type == HNAE_PORT_DEBUG ? "debug" : "service"));
rtnl_lock();
/* put off any impending NetWatchDogTimeout */
netif_trans_update(priv->netdev);
if (type == HNAE_PORT_DEBUG) {
hns_nic_net_reinit(priv->netdev);
} else {
netif_carrier_off(priv->netdev);
netif_tx_disable(priv->netdev);
}
rtnl_unlock();
}
/* mark the service task as complete */
static void hns_nic_service_event_complete(struct hns_nic_priv *priv)
{
WARN_ON(!test_bit(NIC_STATE_SERVICE_SCHED, &priv->state));
/* make sure to commit the things */
smp_mb__before_atomic();
clear_bit(NIC_STATE_SERVICE_SCHED, &priv->state);
}
static void hns_nic_service_task(struct work_struct *work)
{
struct hns_nic_priv *priv
= container_of(work, struct hns_nic_priv, service_task);
struct hnae_handle *h = priv->ae_handle;
hns_nic_update_link_status(priv->netdev);
h->dev->ops->update_led_status(h);
hns_nic_update_stats(priv->netdev);
hns_nic_reset_subtask(priv);
hns_nic_service_event_complete(priv);
}
static void hns_nic_task_schedule(struct hns_nic_priv *priv)
{
if (!test_bit(NIC_STATE_DOWN, &priv->state) &&
!test_bit(NIC_STATE_REMOVING, &priv->state) &&
!test_and_set_bit(NIC_STATE_SERVICE_SCHED, &priv->state))
(void)schedule_work(&priv->service_task);
}
static void hns_nic_service_timer(unsigned long data)
{
struct hns_nic_priv *priv = (struct hns_nic_priv *)data;
(void)mod_timer(&priv->service_timer, jiffies + SERVICE_TIMER_HZ);
hns_nic_task_schedule(priv);
}
/**
* hns_tx_timeout_reset - initiate reset due to Tx timeout
* @priv: driver private struct
**/
static void hns_tx_timeout_reset(struct hns_nic_priv *priv)
{
/* Do the reset outside of interrupt context */
if (!test_bit(NIC_STATE_DOWN, &priv->state)) {
set_bit(NIC_STATE2_RESET_REQUESTED, &priv->state);
netdev_warn(priv->netdev,
"initiating reset due to tx timeout(%llu,0x%lx)\n",
priv->tx_timeout_count, priv->state);
priv->tx_timeout_count++;
hns_nic_task_schedule(priv);
}
}
static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
{
struct hnae_handle *h = priv->ae_handle;
struct hns_nic_ring_data *rd;
bool is_ver1 = AE_IS_VER1(priv->enet_ver);
int i;
if (h->q_num > NIC_MAX_Q_PER_VF) {
netdev_err(priv->netdev, "too much queue (%d)\n", h->q_num);
return -EINVAL;
}
priv->ring_data = kzalloc(h->q_num * sizeof(*priv->ring_data) * 2,
GFP_KERNEL);
if (!priv->ring_data)
return -ENOMEM;
for (i = 0; i < h->q_num; i++) {
rd = &priv->ring_data[i];
rd->queue_index = i;
rd->ring = &h->qs[i]->tx_ring;
rd->poll_one = hns_nic_tx_poll_one;
rd->fini_process = is_ver1 ? hns_nic_tx_fini_pro :
hns_nic_tx_fini_pro_v2;
netif_napi_add(priv->netdev, &rd->napi,
hns_nic_common_poll, NIC_TX_CLEAN_MAX_NUM);
rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
}
for (i = h->q_num; i < h->q_num * 2; i++) {
rd = &priv->ring_data[i];
rd->queue_index = i - h->q_num;
rd->ring = &h->qs[i - h->q_num]->rx_ring;
rd->poll_one = hns_nic_rx_poll_one;
rd->ex_process = hns_nic_rx_up_pro;
rd->fini_process = is_ver1 ? hns_nic_rx_fini_pro :
hns_nic_rx_fini_pro_v2;
netif_napi_add(priv->netdev, &rd->napi,
hns_nic_common_poll, NIC_RX_CLEAN_MAX_NUM);
rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
}
return 0;
}
static void hns_nic_uninit_ring_data(struct hns_nic_priv *priv)
{
struct hnae_handle *h = priv->ae_handle;
int i;
for (i = 0; i < h->q_num * 2; i++) {
netif_napi_del(&priv->ring_data[i].napi);
if (priv->ring_data[i].ring->irq_init_flag == RCB_IRQ_INITED) {
(void)irq_set_affinity_hint(
priv->ring_data[i].ring->irq,
NULL);
free_irq(priv->ring_data[i].ring->irq,
&priv->ring_data[i]);
}
priv->ring_data[i].ring->irq_init_flag = RCB_IRQ_NOT_INITED;
}
kfree(priv->ring_data);
}
static void hns_nic_set_priv_ops(struct net_device *netdev)
{
struct hns_nic_priv *priv = netdev_priv(netdev);
struct hnae_handle *h = priv->ae_handle;
if (AE_IS_VER1(priv->enet_ver)) {
priv->ops.fill_desc = fill_desc;
priv->ops.get_rxd_bnum = get_rx_desc_bnum;
priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
} else {
priv->ops.get_rxd_bnum = get_v2rx_desc_bnum;
if ((netdev->features & NETIF_F_TSO) ||
(netdev->features & NETIF_F_TSO6)) {
priv->ops.fill_desc = fill_tso_desc;
priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso;
/* This chip only supports 7*4096 */
netif_set_gso_max_size(netdev, 7 * 4096);
} else {
priv->ops.fill_desc = fill_v2_desc;
priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
}
/* enable tso when init
* control tso on/off through TSE bit in bd
*/
h->dev->ops->set_tso_stats(h, 1);
}
}
static int hns_nic_try_get_ae(struct net_device *ndev)
{
struct hns_nic_priv *priv = netdev_priv(ndev);
struct hnae_handle *h;
int ret;
h = hnae_get_handle(&priv->netdev->dev,
priv->fwnode, priv->port_id, NULL);
if (IS_ERR_OR_NULL(h)) {
ret = -ENODEV;
dev_dbg(priv->dev, "has not handle, register notifier!\n");
goto out;
}
priv->ae_handle = h;
ret = hns_nic_init_phy(ndev, h);
if (ret) {
dev_err(priv->dev, "probe phy device fail!\n");
goto out_init_phy;
}
ret = hns_nic_init_ring_data(priv);
if (ret) {
ret = -ENOMEM;
goto out_init_ring_data;
}
hns_nic_set_priv_ops(ndev);
ret = register_netdev(ndev);
if (ret) {
dev_err(priv->dev, "probe register netdev fail!\n");
goto out_reg_ndev_fail;
}
return 0;
out_reg_ndev_fail:
hns_nic_uninit_ring_data(priv);
priv->ring_data = NULL;
out_init_phy:
out_init_ring_data:
hnae_put_handle(priv->ae_handle);
priv->ae_handle = NULL;
out:
return ret;
}
static int hns_nic_notifier_action(struct notifier_block *nb,
unsigned long action, void *data)
{
struct hns_nic_priv *priv =
container_of(nb, struct hns_nic_priv, notifier_block);
assert(action == HNAE_AE_REGISTER);
if (!hns_nic_try_get_ae(priv->netdev)) {
hnae_unregister_notifier(&priv->notifier_block);
priv->notifier_block.notifier_call = NULL;
}
return 0;
}
static int hns_nic_dev_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct net_device *ndev;
struct hns_nic_priv *priv;
u32 port_id;
int ret;
ndev = alloc_etherdev_mq(sizeof(struct hns_nic_priv), NIC_MAX_Q_PER_VF);
if (!ndev)
return -ENOMEM;
platform_set_drvdata(pdev, ndev);
priv = netdev_priv(ndev);
priv->dev = dev;
priv->netdev = ndev;
if (dev_of_node(dev)) {
struct device_node *ae_node;
if (of_device_is_compatible(dev->of_node,
"hisilicon,hns-nic-v1"))
priv->enet_ver = AE_VERSION_1;
else
priv->enet_ver = AE_VERSION_2;
ae_node = of_parse_phandle(dev->of_node, "ae-handle", 0);
if (IS_ERR_OR_NULL(ae_node)) {
ret = PTR_ERR(ae_node);
dev_err(dev, "not find ae-handle\n");
goto out_read_prop_fail;
}
priv->fwnode = &ae_node->fwnode;
} else if (is_acpi_node(dev->fwnode)) {
struct acpi_reference_args args;
if (acpi_dev_found(hns_enet_acpi_match[0].id))
priv->enet_ver = AE_VERSION_1;
else if (acpi_dev_found(hns_enet_acpi_match[1].id))
priv->enet_ver = AE_VERSION_2;
else
return -ENXIO;
/* try to find port-idx-in-ae first */
ret = acpi_node_get_property_reference(dev->fwnode,
"ae-handle", 0, &args);
if (ret) {
dev_err(dev, "not find ae-handle\n");
goto out_read_prop_fail;
}
priv->fwnode = acpi_fwnode_handle(args.adev);
} else {
dev_err(dev, "cannot read cfg data from OF or acpi\n");
return -ENXIO;
}
ret = device_property_read_u32(dev, "port-idx-in-ae", &port_id);
if (ret) {
/* only for old code compatible */
ret = device_property_read_u32(dev, "port-id", &port_id);
if (ret)
goto out_read_prop_fail;
/* for old dts, we need to calculate the port offset */
port_id = port_id < HNS_SRV_OFFSET ? port_id + HNS_DEBUG_OFFSET
: port_id - HNS_SRV_OFFSET;
}
priv->port_id = port_id;
hns_init_mac_addr(ndev);
ndev->watchdog_timeo = HNS_NIC_TX_TIMEOUT;
ndev->priv_flags |= IFF_UNICAST_FLT;
ndev->netdev_ops = &hns_nic_netdev_ops;
hns_ethtool_set_ops(ndev);
ndev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
NETIF_F_GRO;
ndev->vlan_features |=
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;
ndev->vlan_features |= NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;
/* MTU range: 68 - 9578 (v1) or 9706 (v2) */
ndev->min_mtu = MAC_MIN_MTU;
switch (priv->enet_ver) {
case AE_VERSION_2:
ndev->features |= NETIF_F_TSO | NETIF_F_TSO6;
ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6;
ndev->max_mtu = MAC_MAX_MTU_V2 -
(ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
break;
default:
ndev->max_mtu = MAC_MAX_MTU -
(ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
break;
}
SET_NETDEV_DEV(ndev, dev);
if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
dev_dbg(dev, "set mask to 64bit\n");
else
dev_err(dev, "set mask to 64bit fail!\n");
/* carrier off reporting is important to ethtool even BEFORE open */
netif_carrier_off(ndev);
setup_timer(&priv->service_timer, hns_nic_service_timer,
(unsigned long)priv);
INIT_WORK(&priv->service_task, hns_nic_service_task);
set_bit(NIC_STATE_SERVICE_INITED, &priv->state);
clear_bit(NIC_STATE_SERVICE_SCHED, &priv->state);
set_bit(NIC_STATE_DOWN, &priv->state);
if (hns_nic_try_get_ae(priv->netdev)) {
priv->notifier_block.notifier_call = hns_nic_notifier_action;
ret = hnae_register_notifier(&priv->notifier_block);
if (ret) {
dev_err(dev, "register notifier fail!\n");
goto out_notify_fail;
}
dev_dbg(dev, "has not handle, register notifier!\n");
}
return 0;
out_notify_fail:
(void)cancel_work_sync(&priv->service_task);
out_read_prop_fail:
free_netdev(ndev);
return ret;
}
static int hns_nic_dev_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct hns_nic_priv *priv = netdev_priv(ndev);
if (ndev->reg_state != NETREG_UNINITIALIZED)
unregister_netdev(ndev);
if (priv->ring_data)
hns_nic_uninit_ring_data(priv);
priv->ring_data = NULL;
if (ndev->phydev)
phy_disconnect(ndev->phydev);
if (!IS_ERR_OR_NULL(priv->ae_handle))
hnae_put_handle(priv->ae_handle);
priv->ae_handle = NULL;
if (priv->notifier_block.notifier_call)
hnae_unregister_notifier(&priv->notifier_block);
priv->notifier_block.notifier_call = NULL;
set_bit(NIC_STATE_REMOVING, &priv->state);
(void)cancel_work_sync(&priv->service_task);
free_netdev(ndev);
return 0;
}
static const struct of_device_id hns_enet_of_match[] = {
{.compatible = "hisilicon,hns-nic-v1",},
{.compatible = "hisilicon,hns-nic-v2",},
{},
};
MODULE_DEVICE_TABLE(of, hns_enet_of_match);
static struct platform_driver hns_nic_dev_driver = {
.driver = {
.name = "hns-nic",
.of_match_table = hns_enet_of_match,
.acpi_match_table = ACPI_PTR(hns_enet_acpi_match),
},
.probe = hns_nic_dev_probe,
.remove = hns_nic_dev_remove,
};
module_platform_driver(hns_nic_dev_driver);
MODULE_DESCRIPTION("HISILICON HNS Ethernet driver");
MODULE_AUTHOR("Hisilicon, Inc.");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:hns-nic");
| ./CrossVul/dataset_final_sorted/CWE-416/c/good_3018_0 |
crossvul-cpp_data_bad_4231_0 | /*
** $Id: ldo.c $
** Stack and Call structure of Lua
** See Copyright Notice in lua.h
*/
#define ldo_c
#define LUA_CORE
#include "lprefix.h"
#include <setjmp.h>
#include <stdlib.h>
#include <string.h>
#include "lua.h"
#include "lapi.h"
#include "ldebug.h"
#include "ldo.h"
#include "lfunc.h"
#include "lgc.h"
#include "lmem.h"
#include "lobject.h"
#include "lopcodes.h"
#include "lparser.h"
#include "lstate.h"
#include "lstring.h"
#include "ltable.h"
#include "ltm.h"
#include "lundump.h"
#include "lvm.h"
#include "lzio.h"
#define errorstatus(s) ((s) > LUA_YIELD)
/*
** {======================================================
** Error-recovery functions
** =======================================================
*/
/*
** LUAI_THROW/LUAI_TRY define how Lua does exception handling. By
** default, Lua handles errors with exceptions when compiling as
** C++ code, with _longjmp/_setjmp when asked to use them, and with
** longjmp/setjmp otherwise.
*/
#if !defined(LUAI_THROW) /* { */
#if defined(__cplusplus) && !defined(LUA_USE_LONGJMP) /* { */
/* C++ exceptions */
#define LUAI_THROW(L,c) throw(c)
#define LUAI_TRY(L,c,a) \
try { a } catch(...) { if ((c)->status == 0) (c)->status = -1; }
#define luai_jmpbuf int /* dummy variable */
#elif defined(LUA_USE_POSIX) /* }{ */
/* in POSIX, try _longjmp/_setjmp (more efficient) */
#define LUAI_THROW(L,c) _longjmp((c)->b, 1)
#define LUAI_TRY(L,c,a) if (_setjmp((c)->b) == 0) { a }
#define luai_jmpbuf jmp_buf
#else /* }{ */
/* ISO C handling with long jumps */
#define LUAI_THROW(L,c) longjmp((c)->b, 1)
#define LUAI_TRY(L,c,a) if (setjmp((c)->b) == 0) { a }
#define luai_jmpbuf jmp_buf
#endif /* } */
#endif /* } */
/* chain list of long jump buffers */
struct lua_longjmp {
struct lua_longjmp *previous;
luai_jmpbuf b;
volatile int status; /* error code */
};
void luaD_seterrorobj (lua_State *L, int errcode, StkId oldtop) {
switch (errcode) {
case LUA_ERRMEM: { /* memory error? */
setsvalue2s(L, oldtop, G(L)->memerrmsg); /* reuse preregistered msg. */
break;
}
case LUA_ERRERR: {
setsvalue2s(L, oldtop, luaS_newliteral(L, "error in error handling"));
break;
}
case CLOSEPROTECT: {
setnilvalue(s2v(oldtop)); /* no error message */
break;
}
default: {
setobjs2s(L, oldtop, L->top - 1); /* error message on current top */
break;
}
}
L->top = oldtop + 1;
}
l_noret luaD_throw (lua_State *L, int errcode) {
if (L->errorJmp) { /* thread has an error handler? */
L->errorJmp->status = errcode; /* set status */
LUAI_THROW(L, L->errorJmp); /* jump to it */
}
else { /* thread has no error handler */
global_State *g = G(L);
errcode = luaF_close(L, L->stack, errcode); /* close all upvalues */
L->status = cast_byte(errcode); /* mark it as dead */
if (g->mainthread->errorJmp) { /* main thread has a handler? */
setobjs2s(L, g->mainthread->top++, L->top - 1); /* copy error obj. */
luaD_throw(g->mainthread, errcode); /* re-throw in main thread */
}
else { /* no handler at all; abort */
if (g->panic) { /* panic function? */
luaD_seterrorobj(L, errcode, L->top); /* assume EXTRA_STACK */
if (L->ci->top < L->top)
L->ci->top = L->top; /* pushing msg. can break this invariant */
lua_unlock(L);
g->panic(L); /* call panic function (last chance to jump out) */
}
abort();
}
}
}
int luaD_rawrunprotected (lua_State *L, Pfunc f, void *ud) {
global_State *g = G(L);
l_uint32 oldnCcalls = g->Cstacklimit - (L->nCcalls + L->nci);
struct lua_longjmp lj;
lj.status = LUA_OK;
lj.previous = L->errorJmp; /* chain new error handler */
L->errorJmp = &lj;
LUAI_TRY(L, &lj,
(*f)(L, ud);
);
L->errorJmp = lj.previous; /* restore old error handler */
L->nCcalls = g->Cstacklimit - oldnCcalls - L->nci;
return lj.status;
}
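/*
** Minimal usage sketch for 'luaD_rawrunprotected' (illustrative only;
** the names 'dostuff' and 'mydata' are hypothetical):
**
**   static void dostuff (lua_State *L, void *ud) {
**     struct mydata *d = cast(struct mydata *, ud);
**     ... code that may raise errors via 'luaD_throw' ...
**   }
**
**   status = luaD_rawrunprotected(L, dostuff, &data);
**   if (status != LUA_OK)
**     ... handle the error object left on the stack ...
**
** Any 'luaD_throw' raised inside the callback long-jumps back to the
** LUAI_TRY above and is reported through the returned status.
*/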
/* }====================================================== */
/*
** {==================================================================
** Stack reallocation
** ===================================================================
*/
static void correctstack (lua_State *L, StkId oldstack, StkId newstack) {
CallInfo *ci;
UpVal *up;
if (oldstack == newstack)
return; /* stack address did not change */
L->top = (L->top - oldstack) + newstack;
for (up = L->openupval; up != NULL; up = up->u.open.next)
up->v = s2v((uplevel(up) - oldstack) + newstack);
for (ci = L->ci; ci != NULL; ci = ci->previous) {
ci->top = (ci->top - oldstack) + newstack;
ci->func = (ci->func - oldstack) + newstack;
if (isLua(ci))
ci->u.l.trap = 1; /* signal to update 'trap' in 'luaV_execute' */
}
}
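/*
** Note on the relocation above (illustrative only): every saved pointer
** is rebased with the same arithmetic, (p - oldstack) + newstack, so a
** pointer that was, say, 64 slots above the old stack base ends up
** exactly 64 slots above the new base; offsets are preserved, only the
** base address changes.
*/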
/* some space for error handling */
#define ERRORSTACKSIZE (LUAI_MAXSTACK + 200)
int luaD_reallocstack (lua_State *L, int newsize, int raiseerror) {
int lim = L->stacksize;
StkId newstack = luaM_reallocvector(L, L->stack, lim, newsize, StackValue);
lua_assert(newsize <= LUAI_MAXSTACK || newsize == ERRORSTACKSIZE);
lua_assert(L->stack_last - L->stack == L->stacksize - EXTRA_STACK);
if (unlikely(newstack == NULL)) { /* reallocation failed? */
if (raiseerror)
luaM_error(L);
else return 0; /* do not raise an error */
}
for (; lim < newsize; lim++)
setnilvalue(s2v(newstack + lim)); /* erase new segment */
correctstack(L, L->stack, newstack);
L->stack = newstack;
L->stacksize = newsize;
L->stack_last = L->stack + newsize - EXTRA_STACK;
return 1;
}
/*
** Try to grow the stack by at least 'n' elements. When 'raiseerror'
** is true, any error is raised; otherwise, 0 is returned in case of errors.
*/
int luaD_growstack (lua_State *L, int n, int raiseerror) {
int size = L->stacksize;
int newsize = 2 * size; /* tentative new size */
if (unlikely(size > LUAI_MAXSTACK)) { /* need more space after extra size? */
if (raiseerror)
luaD_throw(L, LUA_ERRERR); /* error inside message handler */
else return 0;
}
else {
int needed = cast_int(L->top - L->stack) + n + EXTRA_STACK;
if (newsize > LUAI_MAXSTACK) /* cannot cross the limit */
newsize = LUAI_MAXSTACK;
if (newsize < needed) /* but must respect what was asked for */
newsize = needed;
if (unlikely(newsize > LUAI_MAXSTACK)) { /* stack overflow? */
/* add extra size to be able to handle the error message */
luaD_reallocstack(L, ERRORSTACKSIZE, raiseerror);
if (raiseerror)
luaG_runerror(L, "stack overflow");
else return 0;
}
} /* else no errors */
return luaD_reallocstack(L, newsize, raiseerror);
}
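/*
** Sizing example for the growth policy above (illustrative only): with
** stacksize == 100, 80 slots in use, and a request for n == 250 slots,
** the tentative newsize 2 * 100 == 200 is smaller than
** needed == 80 + 250 + EXTRA_STACK, so newsize is bumped to 'needed'.
** Only when even that crosses LUAI_MAXSTACK does the function fall back
** to ERRORSTACKSIZE and raise "stack overflow".
*/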
static int stackinuse (lua_State *L) {
CallInfo *ci;
StkId lim = L->top;
for (ci = L->ci; ci != NULL; ci = ci->previous) {
if (lim < ci->top) lim = ci->top;
}
lua_assert(lim <= L->stack_last);
return cast_int(lim - L->stack) + 1; /* part of stack in use */
}
void luaD_shrinkstack (lua_State *L) {
int inuse = stackinuse(L);
int goodsize = inuse + BASIC_STACK_SIZE;
if (goodsize > LUAI_MAXSTACK)
goodsize = LUAI_MAXSTACK; /* respect stack limit */
/* if thread is currently not handling a stack overflow and its
good size is smaller than current size, shrink its stack */
if (inuse <= (LUAI_MAXSTACK - EXTRA_STACK) && goodsize < L->stacksize)
luaD_reallocstack(L, goodsize, 0); /* ok if that fails */
else /* don't change stack */
condmovestack(L,{},{}); /* (change only for debugging) */
luaE_shrinkCI(L); /* shrink CI list */
}
void luaD_inctop (lua_State *L) {
luaD_checkstack(L, 1);
L->top++;
}
/* }================================================================== */
/*
** Call a hook for the given event. Make sure there is a hook to be
** called. (Both 'L->hook' and 'L->hookmask', which trigger this
** function, can be changed asynchronously by signals.)
*/
void luaD_hook (lua_State *L, int event, int line,
int ftransfer, int ntransfer) {
lua_Hook hook = L->hook;
if (hook && L->allowhook) { /* make sure there is a hook */
int mask = CIST_HOOKED;
CallInfo *ci = L->ci;
ptrdiff_t top = savestack(L, L->top);
ptrdiff_t ci_top = savestack(L, ci->top);
lua_Debug ar;
ar.event = event;
ar.currentline = line;
ar.i_ci = ci;
if (ntransfer != 0) {
mask |= CIST_TRAN; /* 'ci' has transfer information */
ci->u2.transferinfo.ftransfer = ftransfer;
ci->u2.transferinfo.ntransfer = ntransfer;
}
luaD_checkstack(L, LUA_MINSTACK); /* ensure minimum stack size */
if (L->top + LUA_MINSTACK > ci->top)
ci->top = L->top + LUA_MINSTACK;
L->allowhook = 0; /* cannot call hooks inside a hook */
ci->callstatus |= mask;
lua_unlock(L);
(*hook)(L, &ar);
lua_lock(L);
lua_assert(!L->allowhook);
L->allowhook = 1;
ci->top = restorestack(L, ci_top);
L->top = restorestack(L, top);
ci->callstatus &= ~mask;
}
}
/*
** Executes a call hook for Lua functions. This function is called
** whenever 'hookmask' is not zero, so it checks whether call hooks are
** active.
*/
void luaD_hookcall (lua_State *L, CallInfo *ci) {
int hook = (ci->callstatus & CIST_TAIL) ? LUA_HOOKTAILCALL : LUA_HOOKCALL;
Proto *p;
if (!(L->hookmask & LUA_MASKCALL)) /* some other hook? */
return; /* don't call hook */
p = clLvalue(s2v(ci->func))->p;
L->top = ci->top; /* prepare top */
ci->u.l.savedpc++; /* hooks assume 'pc' is already incremented */
luaD_hook(L, hook, -1, 1, p->numparams);
ci->u.l.savedpc--; /* correct 'pc' */
}
static StkId rethook (lua_State *L, CallInfo *ci, StkId firstres, int nres) {
ptrdiff_t oldtop = savestack(L, L->top); /* hook may change top */
int delta = 0;
if (isLuacode(ci)) {
Proto *p = clLvalue(s2v(ci->func))->p;
if (p->is_vararg)
delta = ci->u.l.nextraargs + p->numparams + 1;
if (L->top < ci->top)
L->top = ci->top; /* correct top to run hook */
}
if (L->hookmask & LUA_MASKRET) { /* is return hook on? */
int ftransfer;
ci->func += delta; /* if vararg, back to virtual 'func' */
ftransfer = cast(unsigned short, firstres - ci->func);
luaD_hook(L, LUA_HOOKRET, -1, ftransfer, nres); /* call it */
ci->func -= delta;
}
if (isLua(ci->previous))
L->oldpc = ci->previous->u.l.savedpc; /* update 'oldpc' */
return restorestack(L, oldtop);
}
/*
** Check whether 'func' has a '__call' metafield. If so, put it in the
** stack, below original 'func', so that 'luaD_call' can call it. Raise
** an error if there is no '__call' metafield.
*/
void luaD_tryfuncTM (lua_State *L, StkId func) {
const TValue *tm = luaT_gettmbyobj(L, s2v(func), TM_CALL);
StkId p;
if (unlikely(ttisnil(tm)))
luaG_typeerror(L, s2v(func), "call"); /* nothing to call */
for (p = L->top; p > func; p--) /* open space for metamethod */
setobjs2s(L, p, p-1);
L->top++; /* stack space pre-allocated by the caller */
setobj2s(L, func, tm); /* metamethod is the new function to be called */
}
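/*
** Stack picture for the shift above (illustrative only): a call to a
** table 't' with arguments a and b starts as
**     [ t a b ]      with 'func' pointing at 't'
** and, after the metamethod 'mm' is inserted below 'func', becomes
**     [ mm t a b ]
** so 'luaD_call' can proceed as if 'mm' had been called with 't' as
** its first argument.
*/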
/*
** Given 'nres' results at 'firstResult', move 'wanted' of them to 'res'.
** Handle most typical cases (zero results for commands, one result for
** expressions, multiple results for tail calls/single parameters)
** separately.
*/
static void moveresults (lua_State *L, StkId res, int nres, int wanted) {
StkId firstresult;
int i;
switch (wanted) { /* handle typical cases separately */
case 0: /* no values needed */
L->top = res;
return;
case 1: /* one value needed */
if (nres == 0) /* no results? */
setnilvalue(s2v(res)); /* adjust with nil */
else
setobjs2s(L, res, L->top - nres); /* move it to proper place */
L->top = res + 1;
return;
case LUA_MULTRET:
wanted = nres; /* we want all results */
break;
default: /* multiple results (or to-be-closed variables) */
if (hastocloseCfunc(wanted)) { /* to-be-closed variables? */
ptrdiff_t savedres = savestack(L, res);
luaF_close(L, res, LUA_OK); /* may change the stack */
res = restorestack(L, savedres);
wanted = codeNresults(wanted); /* correct value */
if (wanted == LUA_MULTRET)
wanted = nres;
}
break;
}
firstresult = L->top - nres; /* index of first result */
/* move all results to correct place */
for (i = 0; i < nres && i < wanted; i++)
setobjs2s(L, res + i, firstresult + i);
for (; i < wanted; i++) /* complete wanted number of results */
setnilvalue(s2v(res + i));
L->top = res + wanted; /* top points after the last result */
}
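/*
** Behavior examples for the cases above (illustrative only): with three
** results r1 r2 r3 on top of the stack,
**     wanted == 0           -> stack truncated at 'res', nothing moved;
**     wanted == 1           -> r1 moved to 'res', top = res + 1;
**     wanted == LUA_MULTRET -> all three moved, top = res + 3;
**     wanted == 5           -> r1 r2 r3 moved, then two nils appended.
*/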
/*
** Finishes a function call: calls hook if necessary, removes CallInfo,
** moves current number of results to proper place.
*/
void luaD_poscall (lua_State *L, CallInfo *ci, int nres) {
if (L->hookmask)
L->top = rethook(L, ci, L->top - nres, nres);
L->ci = ci->previous; /* back to caller */
/* move results to proper place */
moveresults(L, ci->func, nres, ci->nresults);
}
#define next_ci(L) (L->ci->next ? L->ci->next : luaE_extendCI(L))
/*
** Prepare a function for a tail call, building its call info on top
** of the current call info. 'narg1' is the number of arguments plus 1
** (so that it includes the function itself).
*/
void luaD_pretailcall (lua_State *L, CallInfo *ci, StkId func, int narg1) {
Proto *p = clLvalue(s2v(func))->p;
int fsize = p->maxstacksize; /* frame size */
int nfixparams = p->numparams;
int i;
for (i = 0; i < narg1; i++) /* move down function and arguments */
setobjs2s(L, ci->func + i, func + i);
checkstackGC(L, fsize);
func = ci->func; /* moved-down function */
for (; narg1 <= nfixparams; narg1++)
setnilvalue(s2v(func + narg1)); /* complete missing arguments */
ci->top = func + 1 + fsize; /* top for new function */
lua_assert(ci->top <= L->stack_last);
ci->u.l.savedpc = p->code; /* starting point */
ci->callstatus |= CIST_TAIL;
L->top = func + narg1; /* set top */
}
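/*
** Frame picture for the tail call above (illustrative only): a tail
** call 'return g(x)' inside 'f' starts with
**     [ f ... g x ]      'func' pointing at 'g', narg1 == 2
** and after the move-down loop the frame of 'f' is reused in place:
**     [ g x ... ]        'g' now at ci->func
** which is why tail calls run in constant stack space.
*/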
/*
** Call a function (C or Lua). The function to be called is at *func.
** The arguments are on the stack, right after the function.
** When returns, all the results are on the stack, starting at the original
** function position.
*/
void luaD_call (lua_State *L, StkId func, int nresults) {
lua_CFunction f;
retry:
switch (ttypetag(s2v(func))) {
case LUA_VCCL: /* C closure */
f = clCvalue(s2v(func))->f;
goto Cfunc;
case LUA_VLCF: /* light C function */
f = fvalue(s2v(func));
Cfunc: {
int n; /* number of returns */
CallInfo *ci = next_ci(L);
checkstackp(L, LUA_MINSTACK, func); /* ensure minimum stack size */
ci->nresults = nresults;
ci->callstatus = CIST_C;
ci->top = L->top + LUA_MINSTACK;
ci->func = func;
L->ci = ci;
lua_assert(ci->top <= L->stack_last);
if (L->hookmask & LUA_MASKCALL) {
int narg = cast_int(L->top - func) - 1;
luaD_hook(L, LUA_HOOKCALL, -1, 1, narg);
}
lua_unlock(L);
n = (*f)(L); /* do the actual call */
lua_lock(L);
api_checknelems(L, n);
luaD_poscall(L, ci, n);
break;
}
case LUA_VLCL: { /* Lua function */
CallInfo *ci = next_ci(L);
Proto *p = clLvalue(s2v(func))->p;
int narg = cast_int(L->top - func) - 1; /* number of real arguments */
int nfixparams = p->numparams;
int fsize = p->maxstacksize; /* frame size */
checkstackp(L, fsize, func);
ci->nresults = nresults;
ci->u.l.savedpc = p->code; /* starting point */
ci->callstatus = 0;
ci->top = func + 1 + fsize;
ci->func = func;
L->ci = ci;
for (; narg < nfixparams; narg++)
setnilvalue(s2v(L->top++)); /* complete missing arguments */
lua_assert(ci->top <= L->stack_last);
luaV_execute(L, ci); /* run the function */
break;
}
default: { /* not a function */
checkstackp(L, 1, func); /* space for metamethod */
luaD_tryfuncTM(L, func); /* try to get '__call' metamethod */
goto retry; /* try again with metamethod */
}
}
}
/*
** Similar to 'luaD_call', but does not allow yields during the call.
** If there is a stack overflow, freeing all CI structures will
** force the subsequent call to invoke 'luaE_extendCI', which then
** will raise any errors.
*/
void luaD_callnoyield (lua_State *L, StkId func, int nResults) {
incXCcalls(L);
if (getCcalls(L) <= CSTACKERR) /* possible stack overflow? */
luaE_freeCI(L);
luaD_call(L, func, nResults);
decXCcalls(L);
}
/*
** Completes the execution of an interrupted C function, calling its
** continuation function.
*/
static void finishCcall (lua_State *L, int status) {
CallInfo *ci = L->ci;
int n;
/* must have a continuation and must be able to call it */
lua_assert(ci->u.c.k != NULL && yieldable(L));
/* error status can only happen in a protected call */
lua_assert((ci->callstatus & CIST_YPCALL) || status == LUA_YIELD);
if (ci->callstatus & CIST_YPCALL) { /* was inside a pcall? */
ci->callstatus &= ~CIST_YPCALL; /* continuation is also inside it */
L->errfunc = ci->u.c.old_errfunc; /* with the same error function */
}
/* finish 'lua_callk'/'lua_pcall'; CIST_YPCALL and 'errfunc' already
handled */
adjustresults(L, ci->nresults);
lua_unlock(L);
n = (*ci->u.c.k)(L, status, ci->u.c.ctx); /* call continuation function */
lua_lock(L);
api_checknelems(L, n);
luaD_poscall(L, ci, n); /* finish 'luaD_call' */
}
/*
** Executes "full continuation" (everything in the stack) of a
** previously interrupted coroutine until the stack is empty (or another
** interruption long-jumps out of the loop). If the coroutine is
** recovering from an error, 'ud' points to the error status, which must
** be passed to the first continuation function (otherwise the default
** status is LUA_YIELD).
*/
static void unroll (lua_State *L, void *ud) {
CallInfo *ci;
if (ud != NULL) /* error status? */
finishCcall(L, *(int *)ud); /* finish 'lua_pcallk' callee */
while ((ci = L->ci) != &L->base_ci) { /* something in the stack */
if (!isLua(ci)) /* C function? */
finishCcall(L, LUA_YIELD); /* complete its execution */
else { /* Lua function */
luaV_finishOp(L); /* finish interrupted instruction */
luaV_execute(L, ci); /* execute down to higher C 'boundary' */
}
}
}
/*
** Try to find a suspended protected call (a "recover point") for the
** given thread.
*/
static CallInfo *findpcall (lua_State *L) {
CallInfo *ci;
for (ci = L->ci; ci != NULL; ci = ci->previous) { /* search for a pcall */
if (ci->callstatus & CIST_YPCALL)
return ci;
}
return NULL; /* no pending pcall */
}
/*
** Recovers from an error in a coroutine. Finds a recover point (if
** there is one) and completes the execution of the interrupted
** 'luaD_pcall'. If there is no recover point, returns zero.
*/
static int recover (lua_State *L, int status) {
StkId oldtop;
CallInfo *ci = findpcall(L);
if (ci == NULL) return 0; /* no recovery point */
/* "finish" luaD_pcall */
oldtop = restorestack(L, ci->u2.funcidx);
luaF_close(L, oldtop, status); /* may change the stack */
oldtop = restorestack(L, ci->u2.funcidx);
luaD_seterrorobj(L, status, oldtop);
L->ci = ci;
L->allowhook = getoah(ci->callstatus); /* restore original 'allowhook' */
luaD_shrinkstack(L);
L->errfunc = ci->u.c.old_errfunc;
return 1; /* continue running the coroutine */
}
/*
** Signal an error in the call to 'lua_resume', not in the execution
** of the coroutine itself. (Such errors should not be handled by any
** coroutine error handler and should not kill the coroutine.)
*/
static int resume_error (lua_State *L, const char *msg, int narg) {
L->top -= narg; /* remove args from the stack */
setsvalue2s(L, L->top, luaS_new(L, msg)); /* push error message */
api_incr_top(L);
lua_unlock(L);
return LUA_ERRRUN;
}
/*
** Do the work for 'lua_resume' in protected mode. Most of the work
** depends on the status of the coroutine: initial state, suspended
** inside a hook, or regularly suspended (optionally with a continuation
** function), plus erroneous cases: non-suspended coroutine or dead
** coroutine.
*/
static void resume (lua_State *L, void *ud) {
int n = *(cast(int*, ud)); /* number of arguments */
StkId firstArg = L->top - n; /* first argument */
CallInfo *ci = L->ci;
if (L->status == LUA_OK) { /* starting a coroutine? */
luaD_call(L, firstArg - 1, LUA_MULTRET);
}
else { /* resuming from previous yield */
lua_assert(L->status == LUA_YIELD);
L->status = LUA_OK; /* mark that it is running (again) */
if (isLua(ci)) /* yielded inside a hook? */
luaV_execute(L, ci); /* just continue running Lua code */
else { /* 'common' yield */
if (ci->u.c.k != NULL) { /* does it have a continuation function? */
lua_unlock(L);
n = (*ci->u.c.k)(L, LUA_YIELD, ci->u.c.ctx); /* call continuation */
lua_lock(L);
api_checknelems(L, n);
}
luaD_poscall(L, ci, n); /* finish 'luaD_call' */
}
unroll(L, NULL); /* run continuation */
}
}
LUA_API int lua_resume (lua_State *L, lua_State *from, int nargs,
int *nresults) {
int status;
lua_lock(L);
if (L->status == LUA_OK) { /* may be starting a coroutine */
if (L->ci != &L->base_ci) /* not in base level? */
return resume_error(L, "cannot resume non-suspended coroutine", nargs);
else if (L->top - (L->ci->func + 1) == nargs) /* no function? */
return resume_error(L, "cannot resume dead coroutine", nargs);
}
else if (L->status != LUA_YIELD) /* ended with errors? */
return resume_error(L, "cannot resume dead coroutine", nargs);
if (from == NULL)
L->nCcalls = CSTACKTHREAD;
else /* correct 'nCcalls' for this thread */
L->nCcalls = getCcalls(from) + from->nci - L->nci - CSTACKCF;
if (L->nCcalls <= CSTACKERR)
return resume_error(L, "C stack overflow", nargs);
luai_userstateresume(L, nargs);
api_checknelems(L, (L->status == LUA_OK) ? nargs + 1 : nargs);
status = luaD_rawrunprotected(L, resume, &nargs);
/* continue running after recoverable errors */
while (errorstatus(status) && recover(L, status)) {
/* unroll continuation */
status = luaD_rawrunprotected(L, unroll, &status);
}
if (likely(!errorstatus(status)))
lua_assert(status == L->status); /* normal end or yield */
else { /* unrecoverable error */
L->status = cast_byte(status); /* mark thread as 'dead' */
luaD_seterrorobj(L, status, L->top); /* push error message */
L->ci->top = L->top;
}
*nresults = (status == LUA_YIELD) ? L->ci->u2.nyield
: cast_int(L->top - (L->ci->func + 1));
lua_unlock(L);
return status;
}
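/* Usage sketch (illustrative, not part of this file): driving a coroutine
** from C with this API. Error handling is omitted for brevity.
**
**   int nres;
**   lua_State *co = lua_newthread(L);
**   (push a function and 'nargs' arguments onto 'co')
**   int status = lua_resume(co, L, nargs, &nres);
**   if (status == LUA_YIELD)
**     (the 'nres' yielded values are on top of 'co')
**   else if (status == LUA_OK)
**     (the coroutine finished; 'nres' results are on top of 'co')
*/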
LUA_API int lua_isyieldable (lua_State *L) {
return yieldable(L);
}
LUA_API int lua_yieldk (lua_State *L, int nresults, lua_KContext ctx,
lua_KFunction k) {
CallInfo *ci;
luai_userstateyield(L, nresults);
lua_lock(L);
ci = L->ci;
api_checknelems(L, nresults);
if (unlikely(!yieldable(L))) {
if (L != G(L)->mainthread)
luaG_runerror(L, "attempt to yield across a C-call boundary");
else
luaG_runerror(L, "attempt to yield from outside a coroutine");
}
L->status = LUA_YIELD;
if (isLua(ci)) { /* inside a hook? */
lua_assert(!isLuacode(ci));
api_check(L, k == NULL, "hooks cannot continue after yielding");
ci->u2.nyield = 0; /* no results */
}
else {
if ((ci->u.c.k = k) != NULL) /* is there a continuation? */
ci->u.c.ctx = ctx; /* save context */
ci->u2.nyield = nresults; /* save number of results */
luaD_throw(L, LUA_YIELD);
}
lua_assert(ci->callstatus & CIST_HOOKED); /* must be inside a hook */
lua_unlock(L);
return 0; /* return to 'luaD_hook' */
}
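/* Usage sketch (illustrative): a C function that yields with a
** continuation. The names 'cont' and 'mylib_wait' are made up.
**
**   static int cont (lua_State *L, int status, lua_KContext ctx) {
**     return 0;  (execution resumes here; 'status' is LUA_YIELD)
**   }
**   static int mylib_wait (lua_State *L) {
**     return lua_yieldk(L, 0, 0, cont);  (normally does not return)
**   }
*/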
/*
** Call the C function 'func' in protected mode, restoring basic
** thread information ('allowhook', etc.) and in particular
** its stack level in case of errors.
*/
int luaD_pcall (lua_State *L, Pfunc func, void *u,
ptrdiff_t old_top, ptrdiff_t ef) {
int status;
CallInfo *old_ci = L->ci;
lu_byte old_allowhooks = L->allowhook;
ptrdiff_t old_errfunc = L->errfunc;
L->errfunc = ef;
status = luaD_rawrunprotected(L, func, u);
if (unlikely(status != LUA_OK)) { /* an error occurred? */
StkId oldtop = restorestack(L, old_top);
L->ci = old_ci;
L->allowhook = old_allowhooks;
status = luaF_close(L, oldtop, status);
oldtop = restorestack(L, old_top); /* previous call may change stack */
luaD_seterrorobj(L, status, oldtop);
luaD_shrinkstack(L);
}
L->errfunc = old_errfunc;
return status;
}
/*
** Execute a protected parser.
*/
struct SParser { /* data to 'f_parser' */
ZIO *z;
Mbuffer buff; /* dynamic structure used by the scanner */
Dyndata dyd; /* dynamic structures used by the parser */
const char *mode;
const char *name;
};
static void checkmode (lua_State *L, const char *mode, const char *x) {
if (mode && strchr(mode, x[0]) == NULL) {
luaO_pushfstring(L,
"attempt to load a %s chunk (mode is '%s')", x, mode);
luaD_throw(L, LUA_ERRSYNTAX);
}
}
static void f_parser (lua_State *L, void *ud) {
LClosure *cl;
struct SParser *p = cast(struct SParser *, ud);
int c = zgetc(p->z); /* read first character */
if (c == LUA_SIGNATURE[0]) {
checkmode(L, p->mode, "binary");
cl = luaU_undump(L, p->z, p->name);
}
else {
checkmode(L, p->mode, "text");
cl = luaY_parser(L, p->z, &p->buff, &p->dyd, p->name, c);
}
lua_assert(cl->nupvalues == cl->p->sizeupvalues);
luaF_initupvals(L, cl);
}
int luaD_protectedparser (lua_State *L, ZIO *z, const char *name,
const char *mode) {
struct SParser p;
int status;
incnny(L); /* cannot yield during parsing */
p.z = z; p.name = name; p.mode = mode;
p.dyd.actvar.arr = NULL; p.dyd.actvar.size = 0;
p.dyd.gt.arr = NULL; p.dyd.gt.size = 0;
p.dyd.label.arr = NULL; p.dyd.label.size = 0;
luaZ_initbuffer(L, &p.buff);
status = luaD_pcall(L, f_parser, &p, savestack(L, L->top), L->errfunc);
luaZ_freebuffer(L, &p.buff);
luaM_freearray(L, p.dyd.actvar.arr, p.dyd.actvar.size);
luaM_freearray(L, p.dyd.gt.arr, p.dyd.gt.size);
luaM_freearray(L, p.dyd.label.arr, p.dyd.label.size);
decnny(L);
return status;
}
| ./CrossVul/dataset_final_sorted/CWE-416/c/bad_4231_0 |
crossvul-cpp_data_bad_1179_0 | /* load a GIF with giflib
*
* 10/2/16
* - from svgload.c
* 25/4/16
* - add giflib5 support
* 26/7/16
* - transparency was wrong if there was no EXTENSION_RECORD
* - write 1, 2, 3, or 4 bands depending on file contents
* 17/8/16
* - support unicode on win
* 19/8/16
* - better transparency detection, thanks diegocsandrim
* 25/11/16
* - support @n, page-height
* 5/10/17
* - colormap can be missing thanks Kleis
* 21/11/17
* - add "gif-delay", "gif-loop", "gif-comment" metadata
* - add dispose handling
* 13/8/18
* - init pages to 0 before load
* 14/2/19
* - rework as a sequential loader ... simpler, much lower mem use
* 23/8/18
* - allow GIF read errors during header scan
* - better feof() handling
*/
/*
This file is part of VIPS.
VIPS is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
*/
/*
These files are distributed with VIPS - http://www.vips.ecs.soton.ac.uk
*/
/*
#define DEBUG_VERBOSE
#define VIPS_DEBUG
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif /*HAVE_CONFIG_H*/
#include <vips/intl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <ctype.h>
#include <vips/vips.h>
#include <vips/buf.h>
#include <vips/internal.h>
#include <vips/debug.h>
#ifdef HAVE_GIFLIB
#include <gif_lib.h>
/* giflib 5 is rather different :-( functions have error returns and there's
* no LastError().
*
* GIFLIB_MAJOR was introduced in 4.1.6. Use it to test for giflib 5.x.
*/
#ifdef GIFLIB_MAJOR
# if GIFLIB_MAJOR > 4
# define HAVE_GIFLIB_5
# endif
#endif
/* Added in giflib5.
*/
#ifndef HAVE_GIFLIB_5
#define DISPOSAL_UNSPECIFIED 0
#define DISPOSE_DO_NOT 1
#define DISPOSE_BACKGROUND 2
#define DISPOSE_PREVIOUS 3
#endif
#define VIPS_TYPE_FOREIGN_LOAD_GIF (vips_foreign_load_gif_get_type())
#define VIPS_FOREIGN_LOAD_GIF( obj ) \
(G_TYPE_CHECK_INSTANCE_CAST( (obj), \
VIPS_TYPE_FOREIGN_LOAD_GIF, VipsForeignLoadGif ))
#define VIPS_FOREIGN_LOAD_GIF_CLASS( klass ) \
(G_TYPE_CHECK_CLASS_CAST( (klass), \
VIPS_TYPE_FOREIGN_LOAD_GIF, VipsForeignLoadGifClass))
#define VIPS_IS_FOREIGN_LOAD_GIF( obj ) \
(G_TYPE_CHECK_INSTANCE_TYPE( (obj), VIPS_TYPE_FOREIGN_LOAD_GIF ))
#define VIPS_IS_FOREIGN_LOAD_GIF_CLASS( klass ) \
(G_TYPE_CHECK_CLASS_TYPE( (klass), VIPS_TYPE_FOREIGN_LOAD_GIF ))
#define VIPS_FOREIGN_LOAD_GIF_GET_CLASS( obj ) \
(G_TYPE_INSTANCE_GET_CLASS( (obj), \
VIPS_TYPE_FOREIGN_LOAD_GIF, VipsForeignLoadGifClass ))
typedef struct _VipsForeignLoadGif {
VipsForeignLoad parent_object;
/* Load from this page (frame number).
*/
int page;
/* Load this many pages.
*/
int n;
GifFileType *file;
/* We decompress the whole thing to a huge RGBA memory image, and
* as we render, watch for bands and transparency. At the end of
* loading, we copy 1 or 3 bands, with or without transparency to
* output.
*/
gboolean has_transparency;
gboolean has_colour;
/* Delay in 1/100ths of a second. We only track a single delay
* value for the whole file, and we report the first delay we see. Some
* GIFs have a long delay on the final frame.
*/
gboolean has_delay;
int delay;
/* Number of times to loop the animation.
*/
int loop;
/* The GIF comment, if any.
*/
char *comment;
/* The number of pages (frames) in the image.
*/
int n_pages;
/* A memory image the size of one frame ... we accumulate to this as
* we scan the image, and copy lines to the output on generate.
*/
VipsImage *frame;
/* A copy of the previous frame, in case we need a DISPOSE_PREVIOUS.
*/
VipsImage *previous;
/* The position of @frame, in pages.
*/
int current_page;
/* Decompress lines of the gif file to here.
*/
GifPixelType *line;
/* The current dispose method.
*/
int dispose;
/* Set for EOF detected.
*/
gboolean eof;
/* The current cmap unpacked to a simple LUT. Each uint32 is really an
* RGBA pixel ready to be blasted into @frame.
*/
guint32 cmap[256];
/* As we scan the file, the index of the transparent pixel for this
* frame.
*/
int transparency;
/* Params for DGifOpen(). Set by subclasses, called by base class in
* _open().
*/
InputFunc read_func;
} VipsForeignLoadGif;
typedef struct _VipsForeignLoadGifClass {
VipsForeignLoadClass parent_class;
/* Close and reopen gif->file.
*/
int (*open)( VipsForeignLoadGif *gif );
} VipsForeignLoadGifClass;
G_DEFINE_ABSTRACT_TYPE( VipsForeignLoadGif, vips_foreign_load_gif,
VIPS_TYPE_FOREIGN_LOAD );
/* From gif2rgb.c ... offsets and jumps for interlaced GIF images.
*/
static int
InterlacedOffset[] = { 0, 4, 2, 1 },
InterlacedJumps[] = { 8, 8, 4, 2 };
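/* That is: pass 1 writes rows 0, 8, 16, ...; pass 2 rows 4, 12, 20, ...;
 * pass 3 rows 2, 6, 10, ...; pass 4 the odd rows.
 */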
/* giflib4 was missing this.
*/
static const char *
vips_foreign_load_gif_errstr( int error_code )
{
#ifdef HAVE_GIFLIB_5
return( GifErrorString( error_code ) );
#else /*!HAVE_GIFLIB_5*/
switch( error_code ) {
case D_GIF_ERR_OPEN_FAILED:
return( _( "Failed to open given file" ) );
case D_GIF_ERR_READ_FAILED:
return( _( "Failed to read from given file" ) );
case D_GIF_ERR_NOT_GIF_FILE:
return( _( "Data is not a GIF file" ) );
case D_GIF_ERR_NO_SCRN_DSCR:
return( _( "No screen descriptor detected" ) );
case D_GIF_ERR_NO_IMAG_DSCR:
return( _( "No image descriptor detected" ) );
case D_GIF_ERR_NO_COLOR_MAP:
return( _( "Neither global nor local color map" ) );
case D_GIF_ERR_WRONG_RECORD:
return( _( "Wrong record type detected" ) );
case D_GIF_ERR_DATA_TOO_BIG:
return( _( "Number of pixels bigger than width * height" ) );
case D_GIF_ERR_NOT_ENOUGH_MEM:
return( _( "Failed to allocate required memory" ) );
case D_GIF_ERR_CLOSE_FAILED:
return( _( "Failed to close given file" ) );
case D_GIF_ERR_NOT_READABLE:
return( _( "Given file was not opened for read" ) );
case D_GIF_ERR_IMAGE_DEFECT:
return( _( "Image is defective, decoding aborted" ) );
case D_GIF_ERR_EOF_TOO_SOON:
return( _( "Image EOF detected, before image complete" ) );
default:
return( _( "Unknown error" ) );
}
#endif /*HAVE_GIFLIB_5*/
}
static void
vips_foreign_load_gif_error_vips( VipsForeignLoadGif *gif, int error )
{
VipsObjectClass *class = VIPS_OBJECT_GET_CLASS( gif );
const char *message;
if( (message = vips_foreign_load_gif_errstr( error )) )
vips_error( class->nickname, "%s", message );
}
static void
vips_foreign_load_gif_error( VipsForeignLoadGif *gif )
{
int error;
error = 0;
#ifdef HAVE_GIFLIB_5
if( gif->file )
error = gif->file->Error;
#else
error = GifLastError();
#endif
if( error )
vips_foreign_load_gif_error_vips( gif, error );
}
static void
vips_foreign_load_gif_close( VipsForeignLoadGif *gif )
{
#ifdef HAVE_GIFLIB_5
if( gif->file ) {
int error;
if( DGifCloseFile( gif->file, &error ) == GIF_ERROR )
vips_foreign_load_gif_error_vips( gif, error );
gif->file = NULL;
}
#else
if( gif->file ) {
if( DGifCloseFile( gif->file ) == GIF_ERROR )
vips_foreign_load_gif_error_vips( gif, GifLastError() );
gif->file = NULL;
}
#endif
}
static void
vips_foreign_load_gif_dispose( GObject *gobject )
{
VipsForeignLoadGif *gif = (VipsForeignLoadGif *) gobject;
vips_foreign_load_gif_close( gif );
VIPS_UNREF( gif->frame );
VIPS_UNREF( gif->previous );
VIPS_FREE( gif->comment );
VIPS_FREE( gif->line );
G_OBJECT_CLASS( vips_foreign_load_gif_parent_class )->
dispose( gobject );
}
static VipsForeignFlags
vips_foreign_load_gif_get_flags_filename( const char *filename )
{
return( VIPS_FOREIGN_SEQUENTIAL );
}
static VipsForeignFlags
vips_foreign_load_gif_get_flags( VipsForeignLoad *load )
{
return( VIPS_FOREIGN_SEQUENTIAL );
}
static gboolean
vips_foreign_load_gif_is_a_buffer( const void *buf, size_t len )
{
const guchar *str = (const guchar *) buf;
if( len >= 4 &&
str[0] == 'G' &&
str[1] == 'I' &&
str[2] == 'F' &&
str[3] == '8' )
return( 1 );
return( 0 );
}
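/* The check above matches both the GIF87a and GIF89a signatures, since
 * only the first four bytes ("GIF8") are tested.
 */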
static gboolean
vips_foreign_load_gif_is_a( const char *filename )
{
unsigned char buf[4];
if( vips__get_bytes( filename, buf, 4 ) == 4 &&
vips_foreign_load_gif_is_a_buffer( buf, 4 ) )
return( 1 );
return( 0 );
}
static int
vips_foreign_load_gif_ext_next( VipsForeignLoadGif *gif,
GifByteType **extension )
{
if( DGifGetExtensionNext( gif->file, extension ) == GIF_ERROR ) {
vips_foreign_load_gif_error( gif );
return( -1 );
}
if( *extension )
VIPS_DEBUG_MSG( "gifload: EXTENSION_NEXT\n" );
return( 0 );
}
static int
vips_foreign_load_gif_code_next( VipsForeignLoadGif *gif,
GifByteType **extension )
{
if( DGifGetCodeNext( gif->file, extension ) == GIF_ERROR ) {
vips_foreign_load_gif_error( gif );
return( -1 );
}
if( *extension )
VIPS_DEBUG_MSG( "gifload: CODE_NEXT\n" );
return( 0 );
}
/* Quickly scan an image record.
*/
static int
vips_foreign_load_gif_scan_image( VipsForeignLoadGif *gif )
{
VipsObjectClass *class = VIPS_OBJECT_GET_CLASS( gif );
GifFileType *file = gif->file;
ColorMapObject *map = file->Image.ColorMap ?
file->Image.ColorMap : file->SColorMap;
GifByteType *extension;
if( DGifGetImageDesc( gif->file ) == GIF_ERROR ) {
vips_foreign_load_gif_error( gif );
return( -1 );
}
/* Check that the frame looks sane. Perhaps giflib checks
* this for us.
*/
if( file->Image.Left < 0 ||
file->Image.Width < 1 ||
file->Image.Width > 10000 ||
file->Image.Left + file->Image.Width > file->SWidth ||
file->Image.Top < 0 ||
file->Image.Height < 1 ||
file->Image.Height > 10000 ||
file->Image.Top + file->Image.Height > file->SHeight ) {
vips_error( class->nickname, "%s", _( "bad frame size" ) );
return( -1 );
}
/* Test for a non-greyscale colourmap for this frame.
*/
if( !gif->has_colour &&
map ) {
int i;
for( i = 0; i < map->ColorCount; i++ )
if( map->Colors[i].Red != map->Colors[i].Green ||
map->Colors[i].Green != map->Colors[i].Blue ) {
gif->has_colour = TRUE;
break;
}
}
/* Step over compressed image data.
*/
do {
if( vips_foreign_load_gif_code_next( gif, &extension ) )
return( -1 );
} while( extension != NULL );
return( 0 );
}
static int
vips_foreign_load_gif_scan_application_ext( VipsForeignLoadGif *gif,
GifByteType *extension )
{
gboolean have_netscape;
/* The 11-byte NETSCAPE extension.
*/
have_netscape = FALSE;
if( extension[0] == 11 &&
vips_isprefix( "NETSCAPE2.0", (const char*) (extension + 1) ) )
have_netscape = TRUE;
while( extension != NULL ) {
if( vips_foreign_load_gif_ext_next( gif, &extension ) )
return( -1 );
if( have_netscape &&
extension &&
extension[0] == 3 &&
extension[1] == 1 )
gif->loop = extension[2] | (extension[3] << 8);
}
return( 0 );
}
static int
vips_foreign_load_gif_scan_comment_ext( VipsForeignLoadGif *gif,
GifByteType *extension )
{
VIPS_DEBUG_MSG( "gifload: type: comment\n" );
if( !gif->comment ) {
/* 257 bytes: up to 256 bytes of comment data plus a NUL terminator.
*/
char comment[257];
vips_strncpy( comment, (char *) (extension + 1), 256 );
comment[extension[0]] = '\0';
gif->comment = g_strdup( comment );
}
while( extension != NULL )
if( vips_foreign_load_gif_ext_next( gif, &extension ) )
return( -1 );
return( 0 );
}
static int
vips_foreign_load_gif_scan_extension( VipsForeignLoadGif *gif )
{
GifByteType *extension;
int ext_code;
if( DGifGetExtension( gif->file, &ext_code, &extension ) ==
GIF_ERROR ) {
vips_foreign_load_gif_error( gif );
return( -1 );
}
if( extension )
switch( ext_code ) {
case GRAPHICS_EXT_FUNC_CODE:
if( extension[0] == 4 &&
extension[1] & 0x1 ) {
VIPS_DEBUG_MSG( "gifload: has transp.\n" );
gif->has_transparency = TRUE;
}
if( !gif->has_delay ) {
VIPS_DEBUG_MSG( "gifload: has delay\n" );
gif->has_delay = TRUE;
gif->delay = extension[2] | (extension[3] << 8);
}
while( extension != NULL )
if( vips_foreign_load_gif_ext_next( gif,
&extension ) )
return( -1 );
break;
case APPLICATION_EXT_FUNC_CODE:
if( vips_foreign_load_gif_scan_application_ext( gif,
extension ) )
return( -1 );
break;
case COMMENT_EXT_FUNC_CODE:
if( vips_foreign_load_gif_scan_comment_ext( gif,
extension ) )
return( -1 );
break;
default:
/* Step over any NEXT blocks for unknown extensions.
*/
while( extension != NULL )
if( vips_foreign_load_gif_ext_next( gif,
&extension ) )
return( -1 );
break;
}
return( 0 );
}
static int
vips_foreign_load_gif_set_header( VipsForeignLoadGif *gif, VipsImage *image )
{
vips_image_init_fields( image,
gif->file->SWidth, gif->file->SHeight * gif->n,
(gif->has_colour ? 3 : 1) + (gif->has_transparency ? 1 : 0),
VIPS_FORMAT_UCHAR, VIPS_CODING_NONE,
gif->has_colour ?
VIPS_INTERPRETATION_sRGB : VIPS_INTERPRETATION_B_W,
1.0, 1.0 );
vips_image_pipelinev( image, VIPS_DEMAND_STYLE_FATSTRIP, NULL );
if( vips_object_argument_isset( VIPS_OBJECT( gif ), "n" ) )
vips_image_set_int( image,
VIPS_META_PAGE_HEIGHT, gif->file->SHeight );
vips_image_set_int( image, VIPS_META_N_PAGES, gif->n_pages );
vips_image_set_int( image, "gif-delay", gif->delay );
vips_image_set_int( image, "gif-loop", gif->loop );
if( gif->comment )
vips_image_set_string( image, "gif-comment", gif->comment );
return( 0 );
}
/* Attempt to quickly scan a GIF and discover what we need for our header. We
* need to scan the whole file to get n_pages, transparency and colour.
*
* Don't flag errors during header scan. Many GIFs do not follow spec.
*/
static int
vips_foreign_load_gif_header( VipsForeignLoad *load )
{
VipsObjectClass *class = VIPS_OBJECT_GET_CLASS( load );
VipsForeignLoadGifClass *gif_class =
(VipsForeignLoadGifClass *) VIPS_OBJECT_GET_CLASS( load );
VipsForeignLoadGif *gif = (VipsForeignLoadGif *) load;
GifRecordType record;
if( gif_class->open( gif ) )
return( -1 );
gif->n_pages = 0;
do {
if( DGifGetRecordType( gif->file, &record ) == GIF_ERROR )
continue;
switch( record ) {
case IMAGE_DESC_RECORD_TYPE:
(void) vips_foreign_load_gif_scan_image( gif );
gif->n_pages += 1;
break;
case EXTENSION_RECORD_TYPE:
/* We will need to fetch the extensions to check for
* cmaps and transparency.
*/
(void) vips_foreign_load_gif_scan_extension( gif );
break;
case TERMINATE_RECORD_TYPE:
gif->eof = TRUE;
break;
case SCREEN_DESC_RECORD_TYPE:
case UNDEFINED_RECORD_TYPE:
break;
default:
break;
}
} while( !gif->eof );
if( gif->n == -1 )
gif->n = gif->n_pages - gif->page;
if( gif->page < 0 ||
gif->n <= 0 ||
gif->page + gif->n > gif->n_pages ) {
vips_error( class->nickname, "%s", _( "bad page number" ) );
return( -1 );
}
/* And set the output vips header from what we've learned.
*/
if( vips_foreign_load_gif_set_header( gif, load->out ) )
return( -1 );
return( 0 );
}
static void
vips_foreign_load_gif_build_cmap( VipsForeignLoadGif *gif )
{
ColorMapObject *map = gif->file->Image.ColorMap ?
gif->file->Image.ColorMap : gif->file->SColorMap;
int v;
for( v = 0; v < 256; v++ ) {
VipsPel *q = (VipsPel *) &gif->cmap[v];
if( map &&
v < map->ColorCount ) {
q[0] = map->Colors[v].Red;
q[1] = map->Colors[v].Green;
q[2] = map->Colors[v].Blue;
q[3] = 255;
}
else {
/* If there's no map, just save the index.
*/
q[0] = v;
q[1] = v;
q[2] = v;
q[3] = 255;
}
}
}
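/* After this, gif->cmap[v] holds the pixel for colourmap index v as four
 * bytes in memory order R, G, B, A, ready to be copied into @frame.
 */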
static void
vips_foreign_load_gif_render_line( VipsForeignLoadGif *gif,
int width, VipsPel * restrict q, VipsPel * restrict p )
{
guint32 *iq;
int x;
iq = (guint32 *) q;
for( x = 0; x < width; x++ ) {
VipsPel v = p[x];
if( v == gif->transparency ) {
/* In DISPOSE_DO_NOT mode, the previous frame shows
* through (ie. we do nothing). In all other modes,
* it's just transparent.
*/
if( gif->dispose != DISPOSE_DO_NOT )
iq[x] = 0;
}
else
/* Blast in the RGBA for this value.
*/
iq[x] = gif->cmap[v];
}
}
/* Render the current gif frame into an RGBA buffer. GIFs can accumulate,
* depending on the current dispose mode.
*/
static int
vips_foreign_load_gif_render( VipsForeignLoadGif *gif )
{
GifFileType *file = gif->file;
/* Update the colour map for this frame.
*/
vips_foreign_load_gif_build_cmap( gif );
/* BACKGROUND means we reset the frame to 0 (transparent) before we
* render the next set of pixels.
*/
if( gif->dispose == DISPOSE_BACKGROUND )
memset( VIPS_IMAGE_ADDR( gif->frame, 0, 0 ), 0,
VIPS_IMAGE_SIZEOF_IMAGE( gif->frame ) );
/* PREVIOUS means we init the frame with the frame before last, ie. we
* undo the last render.
*
* For anything other than PREVIOUS, we must update the previous buffer.
*/
if( gif->dispose == DISPOSE_PREVIOUS )
memcpy( VIPS_IMAGE_ADDR( gif->frame, 0, 0 ),
VIPS_IMAGE_ADDR( gif->previous, 0, 0 ),
VIPS_IMAGE_SIZEOF_IMAGE( gif->frame ) );
else
memcpy( VIPS_IMAGE_ADDR( gif->previous, 0, 0 ),
VIPS_IMAGE_ADDR( gif->frame, 0, 0 ),
VIPS_IMAGE_SIZEOF_IMAGE( gif->frame ) );
if( file->Image.Interlace ) {
int i;
VIPS_DEBUG_MSG( "vips_foreign_load_gif_render: "
"interlaced frame of %d x %d pixels at %d x %d\n",
file->Image.Width, file->Image.Height,
file->Image.Left, file->Image.Top );
for( i = 0; i < 4; i++ ) {
int y;
for( y = InterlacedOffset[i];
y < file->Image.Height;
y += InterlacedJumps[i] ) {
VipsPel *q = VIPS_IMAGE_ADDR( gif->frame,
file->Image.Left, file->Image.Top + y );
if( DGifGetLine( gif->file, gif->line,
file->Image.Width ) == GIF_ERROR ) {
vips_foreign_load_gif_error( gif );
return( -1 );
}
vips_foreign_load_gif_render_line( gif,
file->Image.Width, q, gif->line );
}
}
}
else {
int y;
VIPS_DEBUG_MSG( "vips_foreign_load_gif_render: "
"non-interlaced frame of %d x %d pixels at %d x %d\n",
file->Image.Width, file->Image.Height,
file->Image.Left, file->Image.Top );
for( y = 0; y < file->Image.Height; y++ ) {
VipsPel *q = VIPS_IMAGE_ADDR( gif->frame,
file->Image.Left, file->Image.Top + y );
if( DGifGetLine( gif->file, gif->line,
file->Image.Width ) == GIF_ERROR ) {
vips_foreign_load_gif_error( gif );
return( -1 );
}
vips_foreign_load_gif_render_line( gif,
file->Image.Width, q, gif->line );
}
}
return( 0 );
}
static int
vips_foreign_load_gif_extension( VipsForeignLoadGif *gif )
{
GifByteType *extension;
int ext_code;
VIPS_DEBUG_MSG( "vips_foreign_load_gif_extension:\n" );
if( DGifGetExtension( gif->file, &ext_code, &extension ) ==
GIF_ERROR ) {
vips_foreign_load_gif_error( gif );
return( -1 );
}
if( extension &&
ext_code == GRAPHICS_EXT_FUNC_CODE &&
extension[0] == 4 ) {
/* Bytes are flags, delay low, delay high,
* transparency. Flag bit 1 means transparency
* is being set.
*/
gif->transparency = -1;
if( extension[1] & 0x1 )
gif->transparency = extension[4];
/* Set the current dispose mode. This is read during frame load
* to set the meaning of background and transparent pixels.
*/
gif->dispose = (extension[1] >> 2) & 0x7;
VIPS_DEBUG_MSG( "vips_foreign_load_gif_extension: "
"dispose = %d\n", gif->dispose );
}
while( extension != NULL )
if( vips_foreign_load_gif_ext_next( gif, &extension ) )
return( -1 );
return( 0 );
}
/* Read the next page from the file into @frame.
*/
static int
vips_foreign_load_gif_next_page( VipsForeignLoadGif *gif )
{
GifRecordType record;
gboolean have_read_frame;
have_read_frame = FALSE;
do {
if( DGifGetRecordType( gif->file, &record ) == GIF_ERROR ) {
vips_foreign_load_gif_error( gif );
return( -1 );
}
switch( record ) {
case IMAGE_DESC_RECORD_TYPE:
VIPS_DEBUG_MSG( "vips_foreign_load_gif_next_page: "
"IMAGE_DESC_RECORD_TYPE\n" );
if( DGifGetImageDesc( gif->file ) == GIF_ERROR ) {
vips_foreign_load_gif_error( gif );
return( -1 );
}
if( vips_foreign_load_gif_render( gif ) )
return( -1 );
have_read_frame = TRUE;
break;
case EXTENSION_RECORD_TYPE:
if( vips_foreign_load_gif_extension( gif ) )
return( -1 );
break;
case TERMINATE_RECORD_TYPE:
VIPS_DEBUG_MSG( "vips_foreign_load_gif_next_page: "
"TERMINATE_RECORD_TYPE\n" );
gif->eof = TRUE;
break;
case SCREEN_DESC_RECORD_TYPE:
VIPS_DEBUG_MSG( "vips_foreign_load_gif_next_page: "
"SCREEN_DESC_RECORD_TYPE\n" );
break;
case UNDEFINED_RECORD_TYPE:
VIPS_DEBUG_MSG( "vips_foreign_load_gif_next_page: "
"UNDEFINED_RECORD_TYPE\n" );
break;
default:
break;
}
} while( !have_read_frame &&
!gif->eof );
return( 0 );
}
static int
vips_foreign_load_gif_generate( VipsRegion *or,
void *seq, void *a, void *b, gboolean *stop )
{
VipsRect *r = &or->valid;
VipsForeignLoadGif *gif = (VipsForeignLoadGif *) a;
int y;
for( y = 0; y < r->height; y++ ) {
/* The page for this output line, and the line number in page.
*/
int page = (r->top + y) / gif->file->SHeight + gif->page;
int line = (r->top + y) % gif->file->SHeight;
VipsPel *p, *q;
int x;
g_assert( line >= 0 && line < gif->frame->Ysize );
g_assert( page >= 0 && page < gif->n_pages );
/* current_page is the number of pages we have loaded so far (0 means
* none yet), so we keep loading until we have loaded past the page we want.
*/
while( gif->current_page <= page ) {
if( vips_foreign_load_gif_next_page( gif ) )
return( -1 );
gif->current_page += 1;
}
/* @frame is always RGBA, but or may be G, GA, RGB or RGBA.
* We have to pick out the values we want.
*/
p = VIPS_IMAGE_ADDR( gif->frame, 0, line );
q = VIPS_REGION_ADDR( or, 0, r->top + y );
switch( or->im->Bands ) {
case 1:
for( x = 0; x < gif->frame->Xsize; x++ ) {
q[0] = p[1];
q += 1;
p += 4;
}
break;
case 2:
for( x = 0; x < gif->frame->Xsize; x++ ) {
q[0] = p[1];
q[1] = p[3];
q += 2;
p += 4;
}
break;
case 3:
for( x = 0; x < gif->frame->Xsize; x++ ) {
q[0] = p[0];
q[1] = p[1];
q[2] = p[2];
q += 3;
p += 4;
}
break;
case 4:
memcpy( q, p, VIPS_IMAGE_SIZEOF_LINE( gif->frame ) );
break;
default:
g_assert_not_reached();
break;
}
}
return( 0 );
}
static int
vips_foreign_load_gif_load( VipsForeignLoad *load )
{
VipsForeignLoadGifClass *class =
(VipsForeignLoadGifClass *) VIPS_OBJECT_GET_CLASS( load );
VipsForeignLoadGif *gif = (VipsForeignLoadGif *) load;
VipsImage **t = (VipsImage **)
vips_object_local_array( VIPS_OBJECT( load ), 4 );
/* Rewind.
*/
if( class->open( gif ) )
return( -1 );
VIPS_DEBUG_MSG( "vips_foreign_load_gif_load:\n" );
/* Make the memory image we accumulate pixels in. We always accumulate
* to RGBA, then trim down to whatever the output image needs on
* _generate.
*/
gif->frame = vips_image_new_memory();
vips_image_init_fields( gif->frame,
gif->file->SWidth, gif->file->SHeight, 4, VIPS_FORMAT_UCHAR,
VIPS_CODING_NONE, VIPS_INTERPRETATION_sRGB, 1.0, 1.0 );
if( vips_image_write_prepare( gif->frame ) )
return( -1 );
/* A copy of the previous state of the frame, in case we have to
* process a DISPOSE_PREVIOUS.
*/
gif->previous = vips_image_new_memory();
vips_image_init_fields( gif->previous,
gif->file->SWidth, gif->file->SHeight, 4, VIPS_FORMAT_UCHAR,
VIPS_CODING_NONE, VIPS_INTERPRETATION_sRGB, 1.0, 1.0 );
if( vips_image_write_prepare( gif->previous ) )
return( -1 );
/* Make the output pipeline.
*/
t[0] = vips_image_new();
if( vips_foreign_load_gif_set_header( gif, t[0] ) )
return( -1 );
/* Strips 8 pixels high to avoid too many tiny regions.
*/
if( vips_image_generate( t[0],
NULL, vips_foreign_load_gif_generate, NULL, gif, NULL ) ||
vips_sequential( t[0], &t[1],
"tile_height", VIPS__FATSTRIP_HEIGHT,
NULL ) ||
vips_image_write( t[1], load->real ) )
return( -1 );
return( 0 );
}
static int
vips_foreign_load_gif_open( VipsForeignLoadGif *gif )
{
#ifdef HAVE_GIFLIB_5
{
int error;
if( !(gif->file = DGifOpen( gif, gif->read_func, &error )) ) {
vips_foreign_load_gif_error_vips( gif, error );
return( -1 );
}
}
#else
if( !(gif->file = DGifOpen( gif, gif->read_func )) ) {
vips_foreign_load_gif_error_vips( gif, GifLastError() );
return( -1 );
}
#endif
gif->eof = FALSE;
gif->current_page = 0;
/* Allocate a line buffer now that we have the GIF width.
*/
VIPS_FREE( gif->line );
if( !(gif->line = VIPS_ARRAY( NULL, gif->file->SWidth, GifPixelType )) )
return( -1 );
return( 0 );
}
static void
vips_foreign_load_gif_class_init( VipsForeignLoadGifClass *class )
{
GObjectClass *gobject_class = G_OBJECT_CLASS( class );
VipsObjectClass *object_class = (VipsObjectClass *) class;
VipsForeignLoadClass *load_class = (VipsForeignLoadClass *) class;
VipsForeignLoadGifClass *gif_class = (VipsForeignLoadGifClass *) class;
gobject_class->dispose = vips_foreign_load_gif_dispose;
gobject_class->set_property = vips_object_set_property;
gobject_class->get_property = vips_object_get_property;
gif_class->open = vips_foreign_load_gif_open;
load_class->header = vips_foreign_load_gif_header;
load_class->load = vips_foreign_load_gif_load;
object_class->nickname = "gifload_base";
object_class->description = _( "load GIF with giflib" );
load_class->get_flags_filename =
vips_foreign_load_gif_get_flags_filename;
load_class->get_flags = vips_foreign_load_gif_get_flags;
VIPS_ARG_INT( class, "page", 20,
_( "Page" ),
_( "Load this page from the file" ),
VIPS_ARGUMENT_OPTIONAL_INPUT,
G_STRUCT_OFFSET( VipsForeignLoadGif, page ),
0, 100000, 0 );
VIPS_ARG_INT( class, "n", 21,
_( "n" ),
_( "Load this many pages" ),
VIPS_ARGUMENT_OPTIONAL_INPUT,
G_STRUCT_OFFSET( VipsForeignLoadGif, n ),
-1, 100000, 1 );
}
static void
vips_foreign_load_gif_init( VipsForeignLoadGif *gif )
{
gif->n = 1;
gif->transparency = -1;
gif->delay = 4;
gif->loop = 0;
gif->comment = NULL;
gif->dispose = 0;
}
typedef struct _VipsForeignLoadGifFile {
VipsForeignLoadGif parent_object;
/* Filename for load.
*/
char *filename;
/* The FILE* we read from.
*/
FILE *fp;
} VipsForeignLoadGifFile;
typedef VipsForeignLoadGifClass VipsForeignLoadGifFileClass;
G_DEFINE_TYPE( VipsForeignLoadGifFile, vips_foreign_load_gif_file,
vips_foreign_load_gif_get_type() );
static void
vips_foreign_load_gif_file_dispose( GObject *gobject )
{
VipsForeignLoadGifFile *file = (VipsForeignLoadGifFile *) gobject;
VIPS_FREEF( fclose, file->fp );
G_OBJECT_CLASS( vips_foreign_load_gif_file_parent_class )->
dispose( gobject );
}
/* Our input function for file open. We can't use DGifOpenFileName(), since
* that just calls open() and won't work with unicode on win32. We can't use
* DGifOpenFileHandle() since that's an fd from open() and you can't pass those
* across DLL boundaries on Windows.
*/
static int
vips_giflib_file_read( GifFileType *gfile, GifByteType *buffer, int n )
{
VipsForeignLoadGif *gif = (VipsForeignLoadGif *) gfile->UserData;
VipsForeignLoadGifFile *file = (VipsForeignLoadGifFile *) gif;
if( feof( file->fp ) )
gif->eof = TRUE;
return( (int) fread( (void *) buffer, 1, n, file->fp ) );
}
static int
vips_foreign_load_gif_file_open( VipsForeignLoadGif *gif )
{
VipsForeignLoad *load = (VipsForeignLoad *) gif;
VipsForeignLoadGifFile *file = (VipsForeignLoadGifFile *) gif;
if( !file->fp ) {
if( !(file->fp =
vips__file_open_read( file->filename, NULL, FALSE )) )
return( -1 );
VIPS_SETSTR( load->out->filename, file->filename );
}
else
rewind( file->fp );
vips_foreign_load_gif_close( gif );
gif->read_func = vips_giflib_file_read;
return( VIPS_FOREIGN_LOAD_GIF_CLASS(
vips_foreign_load_gif_file_parent_class )->open( gif ) );
}
static const char *vips_foreign_gif_suffs[] = {
".gif",
NULL
};
static void
vips_foreign_load_gif_file_class_init(
VipsForeignLoadGifFileClass *class )
{
GObjectClass *gobject_class = G_OBJECT_CLASS( class );
VipsObjectClass *object_class = (VipsObjectClass *) class;
VipsForeignClass *foreign_class = (VipsForeignClass *) class;
VipsForeignLoadClass *load_class = (VipsForeignLoadClass *) class;
VipsForeignLoadGifClass *gif_class = (VipsForeignLoadGifClass *) class;
gobject_class->dispose = vips_foreign_load_gif_file_dispose;
gobject_class->set_property = vips_object_set_property;
gobject_class->get_property = vips_object_get_property;
object_class->nickname = "gifload";
object_class->description = _( "load GIF with giflib" );
foreign_class->suffs = vips_foreign_gif_suffs;
load_class->is_a = vips_foreign_load_gif_is_a;
gif_class->open = vips_foreign_load_gif_file_open;
VIPS_ARG_STRING( class, "filename", 1,
_( "Filename" ),
_( "Filename to load from" ),
VIPS_ARGUMENT_REQUIRED_INPUT,
G_STRUCT_OFFSET( VipsForeignLoadGifFile, filename ),
NULL );
}
static void
vips_foreign_load_gif_file_init( VipsForeignLoadGifFile *file )
{
}
typedef struct _VipsForeignLoadGifBuffer {
VipsForeignLoadGif parent_object;
/* Load from a buffer.
*/
VipsArea *buf;
/* Current read point, bytes left in buffer.
*/
VipsPel *p;
size_t bytes_to_go;
} VipsForeignLoadGifBuffer;
typedef VipsForeignLoadGifClass VipsForeignLoadGifBufferClass;
G_DEFINE_TYPE( VipsForeignLoadGifBuffer, vips_foreign_load_gif_buffer,
vips_foreign_load_gif_get_type() );
/* Callback from the gif loader.
*
* Read up to n bytes into buf, return the number of bytes read, 0 for EOF.
*/
static int
vips_giflib_buffer_read( GifFileType *file, GifByteType *buf, int n )
{
VipsForeignLoadGif *gif = (VipsForeignLoadGif *) file->UserData;
VipsForeignLoadGifBuffer *buffer = (VipsForeignLoadGifBuffer *) gif;
size_t will_read = VIPS_MIN( n, buffer->bytes_to_go );
memcpy( buf, buffer->p, will_read );
buffer->p += will_read;
buffer->bytes_to_go -= will_read;
if( will_read == 0 )
gif->eof = TRUE;
return( will_read );
}
static int
vips_foreign_load_gif_buffer_open( VipsForeignLoadGif *gif )
{
VipsForeignLoadGifBuffer *buffer = (VipsForeignLoadGifBuffer *) gif;
vips_foreign_load_gif_close( gif );
buffer->p = buffer->buf->data;
buffer->bytes_to_go = buffer->buf->length;
gif->read_func = vips_giflib_buffer_read;
return( VIPS_FOREIGN_LOAD_GIF_CLASS(
vips_foreign_load_gif_buffer_parent_class )->open( gif ) );
}
static void
vips_foreign_load_gif_buffer_class_init(
VipsForeignLoadGifBufferClass *class )
{
GObjectClass *gobject_class = G_OBJECT_CLASS( class );
VipsObjectClass *object_class = (VipsObjectClass *) class;
VipsForeignLoadClass *load_class = (VipsForeignLoadClass *) class;
VipsForeignLoadGifClass *gif_class = (VipsForeignLoadGifClass *) class;
gobject_class->set_property = vips_object_set_property;
gobject_class->get_property = vips_object_get_property;
object_class->nickname = "gifload_buffer";
object_class->description = _( "load GIF with giflib" );
load_class->is_a_buffer = vips_foreign_load_gif_is_a_buffer;
gif_class->open = vips_foreign_load_gif_buffer_open;
VIPS_ARG_BOXED( class, "buffer", 1,
_( "Buffer" ),
_( "Buffer to load from" ),
VIPS_ARGUMENT_REQUIRED_INPUT,
G_STRUCT_OFFSET( VipsForeignLoadGifBuffer, buf ),
VIPS_TYPE_BLOB );
}
static void
vips_foreign_load_gif_buffer_init( VipsForeignLoadGifBuffer *buffer )
{
}
#endif /*HAVE_GIFLIB*/
/**
* vips_gifload:
* @filename: file to load
* @out: (out): output image
* @...: %NULL-terminated list of optional named arguments
*
* Optional arguments:
*
* * @page: %gint, page (frame) to read
* * @n: %gint, load this many pages
*
* Read a GIF file into a VIPS image.
*
* Use @page to select a page to render, numbering from zero.
*
* Use @n to select the number of pages to render. The default is 1. Pages are
* rendered in a vertical column, with each individual page aligned to the
* left. Set to -1 to mean "until the end of the document". Use vips_grid()
* to change page layout.
*
* The whole GIF is rendered into memory on header access. The output image
* will be 1, 2, 3 or 4 bands depending on what the reader finds in the file.
*
* See also: vips_image_new_from_file().
*
* Returns: 0 on success, -1 on error.
*/
int
vips_gifload( const char *filename, VipsImage **out, ... )
{
va_list ap;
int result;
va_start( ap, out );
result = vips_call_split( "gifload", ap, filename, out );
va_end( ap );
return( result );
}
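/* Usage sketch (illustrative): load the first two frames of an animated
 * GIF into a single tall image; the filename is made up and error
 * handling is minimal.
 *
 *	VipsImage *out;
 *
 *	if( vips_gifload( "animation.gif", &out, "n", 2, NULL ) )
 *		vips_error_exit( NULL );
 *	(use out, then drop the reference)
 *	g_object_unref( out );
 */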
/**
* vips_gifload_buffer:
* @buf: (array length=len) (element-type guint8): memory area to load
* @len: (type gsize): size of memory area
* @out: (out): image to write
* @...: %NULL-terminated list of optional named arguments
*
* Optional arguments:
*
* * @page: %gint, page (frame) to read
* * @n: %gint, load this many pages
*
* Read a GIF-formatted memory block into a VIPS image. Exactly as
* vips_gifload(), but read from a memory buffer.
*
* You must not free the buffer while @out is active. The
* #VipsObject::postclose signal on @out is a good place to free.
*
* See also: vips_gifload().
*
* Returns: 0 on success, -1 on error.
*/
int
vips_gifload_buffer( void *buf, size_t len, VipsImage **out, ... )
{
va_list ap;
VipsBlob *blob;
int result;
/* We don't take a copy of the data or free it.
*/
blob = vips_blob_new( NULL, buf, len );
va_start( ap, out );
result = vips_call_split( "gifload_buffer", ap, blob, out );
va_end( ap );
vips_area_unref( VIPS_AREA( blob ) );
return( result );
}
| ./CrossVul/dataset_final_sorted/CWE-416/c/bad_1179_0 |
crossvul-cpp_data_good_1058_0 | /*
* fs/cifs/smb2pdu.c
*
* Copyright (C) International Business Machines Corp., 2009, 2013
* Etersoft, 2012
* Author(s): Steve French (sfrench@us.ibm.com)
* Pavel Shilovsky (pshilovsky@samba.org) 2012
*
* Contains the routines for constructing the SMB2 PDUs themselves
*
* This library is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation; either version 2.1 of the License, or
* (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/* SMB2 PDU handling routines here - except for leftovers (eg session setup) */
/* Note that there are handle based routines which must be */
/* treated slightly differently for reconnection purposes since we never */
/* want to reuse a stale file handle and only the caller knows the file info */
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/vfs.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/uaccess.h>
#include <linux/uuid.h>
#include <linux/pagemap.h>
#include <linux/xattr.h>
#include "smb2pdu.h"
#include "cifsglob.h"
#include "cifsacl.h"
#include "cifsproto.h"
#include "smb2proto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "ntlmssp.h"
#include "smb2status.h"
#include "smb2glob.h"
#include "cifspdu.h"
#include "cifs_spnego.h"
#include "smbdirect.h"
#include "trace.h"
#ifdef CONFIG_CIFS_DFS_UPCALL
#include "dfs_cache.h"
#endif
/*
* The following table defines the expected "StructureSize" of SMB2 requests
* in order by SMB2 command. This is similar to "wct" in SMB/CIFS requests.
*
* Note that commands are defined in smb2pdu.h in le16 but the array below is
* indexed by command in host byte order.
*/
static const int smb2_req_struct_sizes[NUMBER_OF_SMB2_COMMANDS] = {
/* SMB2_NEGOTIATE */ 36,
/* SMB2_SESSION_SETUP */ 25,
/* SMB2_LOGOFF */ 4,
/* SMB2_TREE_CONNECT */ 9,
/* SMB2_TREE_DISCONNECT */ 4,
/* SMB2_CREATE */ 57,
/* SMB2_CLOSE */ 24,
/* SMB2_FLUSH */ 24,
/* SMB2_READ */ 49,
/* SMB2_WRITE */ 49,
/* SMB2_LOCK */ 48,
/* SMB2_IOCTL */ 57,
/* SMB2_CANCEL */ 4,
/* SMB2_ECHO */ 4,
/* SMB2_QUERY_DIRECTORY */ 33,
/* SMB2_CHANGE_NOTIFY */ 32,
/* SMB2_QUERY_INFO */ 41,
/* SMB2_SET_INFO */ 33,
/* SMB2_OPLOCK_BREAK */ 24 /* BB this is 36 for LEASE_BREAK variant */
};
int smb3_encryption_required(const struct cifs_tcon *tcon)
{
if (!tcon)
return 0;
if ((tcon->ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA) ||
(tcon->share_flags & SHI1005_FLAGS_ENCRYPT_DATA))
return 1;
if (tcon->seal &&
(tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION))
return 1;
return 0;
}
static void
smb2_hdr_assemble(struct smb2_sync_hdr *shdr, __le16 smb2_cmd,
const struct cifs_tcon *tcon)
{
shdr->ProtocolId = SMB2_PROTO_NUMBER;
shdr->StructureSize = cpu_to_le16(64);
shdr->Command = smb2_cmd;
if (tcon && tcon->ses && tcon->ses->server) {
struct TCP_Server_Info *server = tcon->ses->server;
spin_lock(&server->req_lock);
/* Request up to 10 credits but don't go over the limit. */
if (server->credits >= server->max_credits)
shdr->CreditRequest = cpu_to_le16(0);
else
shdr->CreditRequest = cpu_to_le16(
min_t(int, server->max_credits -
server->credits, 10));
spin_unlock(&server->req_lock);
} else {
shdr->CreditRequest = cpu_to_le16(2);
}
shdr->ProcessId = cpu_to_le32((__u16)current->tgid);
if (!tcon)
goto out;
/* GLOBAL_CAP_LARGE_MTU will only be set if dialect > SMB2.02 */
/* See sections 2.2.4 and 3.2.4.1.5 of MS-SMB2 */
if ((tcon->ses) && (tcon->ses->server) &&
(tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
shdr->CreditCharge = cpu_to_le16(1);
/* else CreditCharge MBZ */
shdr->TreeId = tcon->tid;
/* Uid is not converted */
if (tcon->ses)
shdr->SessionId = tcon->ses->Suid;
/*
* If we would set SMB2_FLAGS_DFS_OPERATIONS on open we also would have
* to pass the path on the Open SMB prefixed by \\server\share.
* Not sure when we would need to do the augmented path (if ever) and
* setting this flag breaks the SMB2 open operation since it is
* illegal to send an empty path name (without \\server\share prefix)
* when the DFS flag is set in the SMB open header. We could
* consider setting the flag on all operations other than open
* but it is safer to not set it for now.
*/
/* if (tcon->share_flags & SHI1005_FLAGS_DFS)
shdr->Flags |= SMB2_FLAGS_DFS_OPERATIONS; */
if (tcon->ses && tcon->ses->server && tcon->ses->server->sign &&
!smb3_encryption_required(tcon))
shdr->Flags |= SMB2_FLAGS_SIGNED;
out:
return;
}
#ifdef CONFIG_CIFS_DFS_UPCALL
static int __smb2_reconnect(const struct nls_table *nlsc,
struct cifs_tcon *tcon)
{
int rc;
struct dfs_cache_tgt_list tl;
struct dfs_cache_tgt_iterator *it = NULL;
char *tree;
const char *tcp_host;
size_t tcp_host_len;
const char *dfs_host;
size_t dfs_host_len;
tree = kzalloc(MAX_TREE_SIZE, GFP_KERNEL);
if (!tree)
return -ENOMEM;
if (tcon->ipc) {
scnprintf(tree, MAX_TREE_SIZE, "\\\\%s\\IPC$",
tcon->ses->server->hostname);
rc = SMB2_tcon(0, tcon->ses, tree, tcon, nlsc);
goto out;
}
if (!tcon->dfs_path) {
rc = SMB2_tcon(0, tcon->ses, tcon->treeName, tcon, nlsc);
goto out;
}
rc = dfs_cache_noreq_find(tcon->dfs_path + 1, NULL, &tl);
if (rc)
goto out;
extract_unc_hostname(tcon->ses->server->hostname, &tcp_host,
&tcp_host_len);
for (it = dfs_cache_get_tgt_iterator(&tl); it;
it = dfs_cache_get_next_tgt(&tl, it)) {
const char *tgt = dfs_cache_get_tgt_name(it);
extract_unc_hostname(tgt, &dfs_host, &dfs_host_len);
if (dfs_host_len != tcp_host_len
|| strncasecmp(dfs_host, tcp_host, dfs_host_len) != 0) {
cifs_dbg(FYI, "%s: skipping %.*s, doesn't match %.*s",
__func__,
(int)dfs_host_len, dfs_host,
(int)tcp_host_len, tcp_host);
continue;
}
scnprintf(tree, MAX_TREE_SIZE, "\\%s", tgt);
rc = SMB2_tcon(0, tcon->ses, tree, tcon, nlsc);
if (!rc)
break;
if (rc == -EREMOTE)
break;
}
if (!rc) {
if (it)
rc = dfs_cache_noreq_update_tgthint(tcon->dfs_path + 1,
it);
else
rc = -ENOENT;
}
dfs_cache_free_tgts(&tl);
out:
kfree(tree);
return rc;
}
#else
static inline int __smb2_reconnect(const struct nls_table *nlsc,
struct cifs_tcon *tcon)
{
return SMB2_tcon(0, tcon->ses, tcon->treeName, tcon, nlsc);
}
#endif
static int
smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
{
int rc;
struct nls_table *nls_codepage;
struct cifs_ses *ses;
struct TCP_Server_Info *server;
int retries;
/*
* SMB2s NegProt, SessSetup, Logoff do not have tcon yet so
* check for tcp and smb session status done differently
* for those three - in the calling routine.
*/
if (tcon == NULL)
return 0;
if (smb2_command == SMB2_TREE_CONNECT)
return 0;
if (tcon->tidStatus == CifsExiting) {
/*
* only tree disconnect, open, and write,
* (and ulogoff which does not have tcon)
* are allowed as we start force umount.
*/
if ((smb2_command != SMB2_WRITE) &&
(smb2_command != SMB2_CREATE) &&
(smb2_command != SMB2_TREE_DISCONNECT)) {
cifs_dbg(FYI, "can not send cmd %d while umounting\n",
smb2_command);
return -ENODEV;
}
}
if ((!tcon->ses) || (tcon->ses->status == CifsExiting) ||
(!tcon->ses->server))
return -EIO;
ses = tcon->ses;
server = ses->server;
retries = server->nr_targets;
/*
* Give the demultiplex thread up to 10 seconds for each target available
* for reconnect -- this should be greater than the cifs socket timeout,
* which is 7
* seconds.
*/
while (server->tcpStatus == CifsNeedReconnect) {
/*
* Return to caller for TREE_DISCONNECT and LOGOFF and CLOSE
* here since they are implicitly done when session drops.
*/
switch (smb2_command) {
/*
* BB Should we keep oplock break and add flush to exceptions?
*/
case SMB2_TREE_DISCONNECT:
case SMB2_CANCEL:
case SMB2_CLOSE:
case SMB2_OPLOCK_BREAK:
return -EAGAIN;
}
rc = wait_event_interruptible_timeout(server->response_q,
(server->tcpStatus != CifsNeedReconnect),
10 * HZ);
if (rc < 0) {
cifs_dbg(FYI, "%s: aborting reconnect due to a received"
" signal by the process\n", __func__);
return -ERESTARTSYS;
}
/* are we still trying to reconnect? */
if (server->tcpStatus != CifsNeedReconnect)
break;
if (--retries)
continue;
/*
* on "soft" mounts we wait once. Hard mounts keep
* retrying until process is killed or server comes
* back on-line
*/
if (!tcon->retry) {
cifs_dbg(FYI, "gave up waiting on reconnect in smb_init\n");
return -EHOSTDOWN;
}
retries = server->nr_targets;
}
if (!tcon->ses->need_reconnect && !tcon->need_reconnect)
return 0;
nls_codepage = load_nls_default();
/*
* need to prevent multiple threads trying to simultaneously reconnect
* the same SMB session
*/
mutex_lock(&tcon->ses->session_mutex);
/*
* Recheck after acquire mutex. If another thread is negotiating
* and the server never sends an answer the socket will be closed
* and tcpStatus set to reconnect.
*/
if (server->tcpStatus == CifsNeedReconnect) {
rc = -EHOSTDOWN;
mutex_unlock(&tcon->ses->session_mutex);
goto out;
}
rc = cifs_negotiate_protocol(0, tcon->ses);
if (!rc && tcon->ses->need_reconnect)
rc = cifs_setup_session(0, tcon->ses, nls_codepage);
if (rc || !tcon->need_reconnect) {
mutex_unlock(&tcon->ses->session_mutex);
goto out;
}
cifs_mark_open_files_invalid(tcon);
if (tcon->use_persistent)
tcon->need_reopen_files = true;
rc = __smb2_reconnect(nls_codepage, tcon);
mutex_unlock(&tcon->ses->session_mutex);
cifs_dbg(FYI, "reconnect tcon rc = %d\n", rc);
if (rc) {
/* If sess reconnected but tcon didn't, something strange ... */
printk_once(KERN_WARNING "reconnect tcon failed rc = %d\n", rc);
goto out;
}
if (smb2_command != SMB2_INTERNAL_CMD)
queue_delayed_work(cifsiod_wq, &server->reconnect, 0);
atomic_inc(&tconInfoReconnectCount);
out:
/*
* Check if handle based operation so we know whether we can continue
* or not without returning to caller to reset file handle.
*/
/*
* BB Is flush done by server on drop of tcp session? Should we special
* case it and skip above?
*/
switch (smb2_command) {
case SMB2_FLUSH:
case SMB2_READ:
case SMB2_WRITE:
case SMB2_LOCK:
case SMB2_IOCTL:
case SMB2_QUERY_DIRECTORY:
case SMB2_CHANGE_NOTIFY:
case SMB2_QUERY_INFO:
case SMB2_SET_INFO:
rc = -EAGAIN;
}
unload_nls(nls_codepage);
return rc;
}
static void
fill_small_buf(__le16 smb2_command, struct cifs_tcon *tcon, void *buf,
unsigned int *total_len)
{
struct smb2_sync_pdu *spdu = (struct smb2_sync_pdu *)buf;
/* lookup word count ie StructureSize from table */
__u16 parmsize = smb2_req_struct_sizes[le16_to_cpu(smb2_command)];
/*
* smaller than SMALL_BUFFER_SIZE but bigger than fixed area of
* largest operations (Create)
*/
memset(buf, 0, 256);
smb2_hdr_assemble(&spdu->sync_hdr, smb2_command, tcon);
spdu->StructureSize2 = cpu_to_le16(parmsize);
*total_len = parmsize + sizeof(struct smb2_sync_hdr);
}
/*
* Allocate and return pointer to an SMB request hdr, and set basic
* SMB information in the SMB header. If the return code is zero, this
* function must have filled in request_buf pointer.
*/
static int
smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon,
void **request_buf, unsigned int *total_len)
{
int rc;
rc = smb2_reconnect(smb2_command, tcon);
if (rc)
return rc;
/* BB eventually switch this to SMB2 specific small buf size */
if (smb2_command == SMB2_SET_INFO)
*request_buf = cifs_buf_get();
else
*request_buf = cifs_small_buf_get();
if (*request_buf == NULL) {
/* BB should we add a retry in here if not a writepage? */
return -ENOMEM;
}
fill_small_buf(smb2_command, tcon,
(struct smb2_sync_hdr *)(*request_buf),
total_len);
if (tcon != NULL) {
uint16_t com_code = le16_to_cpu(smb2_command);
cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_sent[com_code]);
cifs_stats_inc(&tcon->num_smbs_sent);
}
return rc;
}
#define SMB2_PREAUTH_INTEGRITY_CAPABILITIES cpu_to_le16(1)
#define SMB2_ENCRYPTION_CAPABILITIES cpu_to_le16(2)
#define SMB2_POSIX_EXTENSIONS_AVAILABLE cpu_to_le16(0x100)
static void
build_preauth_ctxt(struct smb2_preauth_neg_context *pneg_ctxt)
{
pneg_ctxt->ContextType = SMB2_PREAUTH_INTEGRITY_CAPABILITIES;
pneg_ctxt->DataLength = cpu_to_le16(38);
pneg_ctxt->HashAlgorithmCount = cpu_to_le16(1);
pneg_ctxt->SaltLength = cpu_to_le16(SMB311_SALT_SIZE);
get_random_bytes(pneg_ctxt->Salt, SMB311_SALT_SIZE);
pneg_ctxt->HashAlgorithms = SMB2_PREAUTH_INTEGRITY_SHA512;
}
static void
build_encrypt_ctxt(struct smb2_encryption_neg_context *pneg_ctxt)
{
pneg_ctxt->ContextType = SMB2_ENCRYPTION_CAPABILITIES;
pneg_ctxt->DataLength = cpu_to_le16(4); /* Cipher Count + le16 cipher */
pneg_ctxt->CipherCount = cpu_to_le16(1);
/* pneg_ctxt->Ciphers[0] = SMB2_ENCRYPTION_AES128_GCM;*/ /* not supported yet */
pneg_ctxt->Ciphers[0] = SMB2_ENCRYPTION_AES128_CCM;
}
static void
build_posix_ctxt(struct smb2_posix_neg_context *pneg_ctxt)
{
pneg_ctxt->ContextType = SMB2_POSIX_EXTENSIONS_AVAILABLE;
pneg_ctxt->DataLength = cpu_to_le16(POSIX_CTXT_DATA_LEN);
/* SMB2_CREATE_TAG_POSIX is "0x93AD25509CB411E7B42383DE968BCD7C" */
pneg_ctxt->Name[0] = 0x93;
pneg_ctxt->Name[1] = 0xAD;
pneg_ctxt->Name[2] = 0x25;
pneg_ctxt->Name[3] = 0x50;
pneg_ctxt->Name[4] = 0x9C;
pneg_ctxt->Name[5] = 0xB4;
pneg_ctxt->Name[6] = 0x11;
pneg_ctxt->Name[7] = 0xE7;
pneg_ctxt->Name[8] = 0xB4;
pneg_ctxt->Name[9] = 0x23;
pneg_ctxt->Name[10] = 0x83;
pneg_ctxt->Name[11] = 0xDE;
pneg_ctxt->Name[12] = 0x96;
pneg_ctxt->Name[13] = 0x8B;
pneg_ctxt->Name[14] = 0xCD;
pneg_ctxt->Name[15] = 0x7C;
}
static void
assemble_neg_contexts(struct smb2_negotiate_req *req,
unsigned int *total_len)
{
char *pneg_ctxt = (char *)req;
unsigned int ctxt_len;
if (*total_len > 200) {
/* In case the length is corrupted, don't overrun the SMB buffer */
cifs_dbg(VFS, "Bad frame length assembling neg contexts\n");
return;
}
/*
* round up total_len of fixed part of SMB3 negotiate request to 8
* byte boundary before adding negotiate contexts
*/
*total_len = roundup(*total_len, 8);
pneg_ctxt = (*total_len) + (char *)req;
req->NegotiateContextOffset = cpu_to_le32(*total_len);
build_preauth_ctxt((struct smb2_preauth_neg_context *)pneg_ctxt);
ctxt_len = DIV_ROUND_UP(sizeof(struct smb2_preauth_neg_context), 8) * 8;
*total_len += ctxt_len;
pneg_ctxt += ctxt_len;
build_encrypt_ctxt((struct smb2_encryption_neg_context *)pneg_ctxt);
ctxt_len = DIV_ROUND_UP(sizeof(struct smb2_encryption_neg_context), 8) * 8;
*total_len += ctxt_len;
pneg_ctxt += ctxt_len;
build_posix_ctxt((struct smb2_posix_neg_context *)pneg_ctxt);
*total_len += sizeof(struct smb2_posix_neg_context);
req->NegotiateContextCount = cpu_to_le16(3);
}
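/*
 * Resulting buffer layout (sketch, sizes illustrative):
 *
 *	[ fixed negotiate request        ]
 *	[ pad to 8 byte boundary         ] <- NegotiateContextOffset
 *	[ preauth ctxt, rounded up to 8  ]
 *	[ encryption ctxt, rounded up    ]
 *	[ posix ctxt (last, not rounded) ]
 *
 * NegotiateContextCount is 3 and each context after the first starts
 * on an 8 byte boundary, as MS-SMB2 requires for negotiate contexts.
 */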
static void decode_preauth_context(struct smb2_preauth_neg_context *ctxt)
{
unsigned int len = le16_to_cpu(ctxt->DataLength);
/* If invalid preauth context warn but use what we requested, SHA-512 */
if (len < MIN_PREAUTH_CTXT_DATA_LEN) {
printk_once(KERN_WARNING "server sent bad preauth context\n");
return;
}
if (le16_to_cpu(ctxt->HashAlgorithmCount) != 1)
printk_once(KERN_WARNING "illegal SMB3 hash algorithm count\n");
if (ctxt->HashAlgorithms != SMB2_PREAUTH_INTEGRITY_SHA512)
printk_once(KERN_WARNING "unknown SMB3 hash algorithm\n");
}
static int decode_encrypt_ctx(struct TCP_Server_Info *server,
struct smb2_encryption_neg_context *ctxt)
{
unsigned int len = le16_to_cpu(ctxt->DataLength);
cifs_dbg(FYI, "decode SMB3.11 encryption neg context of len %d\n", len);
if (len < MIN_ENCRYPT_CTXT_DATA_LEN) {
printk_once(KERN_WARNING "server sent bad crypto ctxt len\n");
return -EINVAL;
}
if (le16_to_cpu(ctxt->CipherCount) != 1) {
printk_once(KERN_WARNING "illegal SMB3.11 cipher count\n");
return -EINVAL;
}
cifs_dbg(FYI, "SMB311 cipher type:%d\n", le16_to_cpu(ctxt->Ciphers[0]));
if ((ctxt->Ciphers[0] != SMB2_ENCRYPTION_AES128_CCM) &&
(ctxt->Ciphers[0] != SMB2_ENCRYPTION_AES128_GCM)) {
printk_once(KERN_WARNING "invalid SMB3.11 cipher returned\n");
return -EINVAL;
}
server->cipher_type = ctxt->Ciphers[0];
server->capabilities |= SMB2_GLOBAL_CAP_ENCRYPTION;
return 0;
}
static int smb311_decode_neg_context(struct smb2_negotiate_rsp *rsp,
struct TCP_Server_Info *server,
unsigned int len_of_smb)
{
struct smb2_neg_context *pctx;
unsigned int offset = le32_to_cpu(rsp->NegotiateContextOffset);
unsigned int ctxt_cnt = le16_to_cpu(rsp->NegotiateContextCount);
unsigned int len_of_ctxts, i;
int rc = 0;
cifs_dbg(FYI, "decoding %d negotiate contexts\n", ctxt_cnt);
if (len_of_smb <= offset) {
cifs_dbg(VFS, "Invalid response: negotiate context offset\n");
return -EINVAL;
}
len_of_ctxts = len_of_smb - offset;
for (i = 0; i < ctxt_cnt; i++) {
int clen;
/* check that offset is not beyond end of SMB */
if (len_of_ctxts == 0)
break;
if (len_of_ctxts < sizeof(struct smb2_neg_context))
break;
pctx = (struct smb2_neg_context *)(offset + (char *)rsp);
clen = le16_to_cpu(pctx->DataLength);
if (clen > len_of_ctxts)
break;
if (pctx->ContextType == SMB2_PREAUTH_INTEGRITY_CAPABILITIES)
decode_preauth_context(
(struct smb2_preauth_neg_context *)pctx);
else if (pctx->ContextType == SMB2_ENCRYPTION_CAPABILITIES)
rc = decode_encrypt_ctx(server,
(struct smb2_encryption_neg_context *)pctx);
else if (pctx->ContextType == SMB2_POSIX_EXTENSIONS_AVAILABLE)
server->posix_ext_supported = true;
else
cifs_dbg(VFS, "unknown negcontext of type %d ignored\n",
le16_to_cpu(pctx->ContextType));
if (rc)
break;
/* offsets must be 8 byte aligned */
clen = (clen + 7) & ~0x7;
offset += clen + sizeof(struct smb2_neg_context);
len_of_ctxts -= clen;
}
return rc;
}
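/*
 * Example of the alignment step above (illustrative): a context with
 * DataLength 6 is padded out before the cursor advances, i.e.
 *
 *	clen = (6 + 7) & ~0x7;	-> 8
 *	offset += 8 + sizeof(struct smb2_neg_context);
 *
 * so each subsequent context is read from an 8 byte aligned offset.
 */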
static struct create_posix *
create_posix_buf(umode_t mode)
{
struct create_posix *buf;
buf = kzalloc(sizeof(struct create_posix),
GFP_KERNEL);
if (!buf)
return NULL;
buf->ccontext.DataOffset =
cpu_to_le16(offsetof(struct create_posix, Mode));
buf->ccontext.DataLength = cpu_to_le32(4);
buf->ccontext.NameOffset =
cpu_to_le16(offsetof(struct create_posix, Name));
buf->ccontext.NameLength = cpu_to_le16(16);
/* SMB2_CREATE_TAG_POSIX is "0x93AD25509CB411E7B42383DE968BCD7C" */
buf->Name[0] = 0x93;
buf->Name[1] = 0xAD;
buf->Name[2] = 0x25;
buf->Name[3] = 0x50;
buf->Name[4] = 0x9C;
buf->Name[5] = 0xB4;
buf->Name[6] = 0x11;
buf->Name[7] = 0xE7;
buf->Name[8] = 0xB4;
buf->Name[9] = 0x23;
buf->Name[10] = 0x83;
buf->Name[11] = 0xDE;
buf->Name[12] = 0x96;
buf->Name[13] = 0x8B;
buf->Name[14] = 0xCD;
buf->Name[15] = 0x7C;
buf->Mode = cpu_to_le32(mode);
cifs_dbg(FYI, "mode on posix create 0%o", mode);
return buf;
}
static int
add_posix_context(struct kvec *iov, unsigned int *num_iovec, umode_t mode)
{
struct smb2_create_req *req = iov[0].iov_base;
unsigned int num = *num_iovec;
iov[num].iov_base = create_posix_buf(mode);
if (iov[num].iov_base == NULL)
return -ENOMEM;
iov[num].iov_len = sizeof(struct create_posix);
if (!req->CreateContextsOffset)
req->CreateContextsOffset = cpu_to_le32(
sizeof(struct smb2_create_req) +
iov[num - 1].iov_len);
le32_add_cpu(&req->CreateContextsLength, sizeof(struct create_posix));
*num_iovec = num + 1;
return 0;
}
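/*
 * All add_*_context() helpers in this file follow the pattern above
 * (sketch): iov[0] carries the fixed create request, iov[1] the UTF-16
 * path, and each context becomes its own vector, e.g.
 *
 *	iov[2].iov_base = create_posix_buf(mode);
 *	iov[2].iov_len  = sizeof(struct create_posix);
 *
 * CreateContextsOffset is set only when the first context is added;
 * CreateContextsLength then grows by the size of each context.
 */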
/*
*
* SMB2 Worker functions follow:
*
* The general structure of the worker functions is:
* 1) Call smb2_init (assembles SMB2 header)
* 2) Initialize SMB2 command specific fields in fixed length area of SMB
* 3) Call smb_sendrcv2 (sends request on socket and waits for response)
* 4) Decode SMB2 command specific fields in the fixed length area
* 5) Decode variable length data area (if any for this SMB2 command type)
* 6) Call free smb buffer
* 7) return
*
*/
int
SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
{
struct smb_rqst rqst;
struct smb2_negotiate_req *req;
struct smb2_negotiate_rsp *rsp;
struct kvec iov[1];
struct kvec rsp_iov;
int rc = 0;
int resp_buftype;
struct TCP_Server_Info *server = ses->server;
int blob_offset, blob_length;
char *security_blob;
int flags = CIFS_NEG_OP;
unsigned int total_len;
cifs_dbg(FYI, "Negotiate protocol\n");
if (!server) {
WARN(1, "%s: server is NULL!\n", __func__);
return -EIO;
}
rc = smb2_plain_req_init(SMB2_NEGOTIATE, NULL, (void **) &req, &total_len);
if (rc)
return rc;
req->sync_hdr.SessionId = 0;
memset(server->preauth_sha_hash, 0, SMB2_PREAUTH_HASH_SIZE);
memset(ses->preauth_sha_hash, 0, SMB2_PREAUTH_HASH_SIZE);
if (strcmp(ses->server->vals->version_string,
SMB3ANY_VERSION_STRING) == 0) {
req->Dialects[0] = cpu_to_le16(SMB30_PROT_ID);
req->Dialects[1] = cpu_to_le16(SMB302_PROT_ID);
req->DialectCount = cpu_to_le16(2);
total_len += 4;
} else if (strcmp(ses->server->vals->version_string,
SMBDEFAULT_VERSION_STRING) == 0) {
req->Dialects[0] = cpu_to_le16(SMB21_PROT_ID);
req->Dialects[1] = cpu_to_le16(SMB30_PROT_ID);
req->Dialects[2] = cpu_to_le16(SMB302_PROT_ID);
req->Dialects[3] = cpu_to_le16(SMB311_PROT_ID);
req->DialectCount = cpu_to_le16(4);
total_len += 8;
} else {
/* otherwise send specific dialect */
req->Dialects[0] = cpu_to_le16(ses->server->vals->protocol_id);
req->DialectCount = cpu_to_le16(1);
total_len += 2;
}
/* only one of SMB2 signing flags may be set in SMB2 request */
if (ses->sign)
req->SecurityMode = cpu_to_le16(SMB2_NEGOTIATE_SIGNING_REQUIRED);
else if (global_secflags & CIFSSEC_MAY_SIGN)
req->SecurityMode = cpu_to_le16(SMB2_NEGOTIATE_SIGNING_ENABLED);
else
req->SecurityMode = 0;
req->Capabilities = cpu_to_le32(ses->server->vals->req_capabilities);
/* ClientGUID must be zero for SMB2.02 dialect */
if (ses->server->vals->protocol_id == SMB20_PROT_ID)
memset(req->ClientGUID, 0, SMB2_CLIENT_GUID_SIZE);
else {
memcpy(req->ClientGUID, server->client_guid,
SMB2_CLIENT_GUID_SIZE);
if ((ses->server->vals->protocol_id == SMB311_PROT_ID) ||
(strcmp(ses->server->vals->version_string,
SMBDEFAULT_VERSION_STRING) == 0))
assemble_neg_contexts(req, &total_len);
}
iov[0].iov_base = (char *)req;
iov[0].iov_len = total_len;
memset(&rqst, 0, sizeof(struct smb_rqst));
rqst.rq_iov = iov;
rqst.rq_nvec = 1;
rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
cifs_small_buf_release(req);
rsp = (struct smb2_negotiate_rsp *)rsp_iov.iov_base;
/*
* No tcon so can't do
* cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]);
*/
if (rc == -EOPNOTSUPP) {
cifs_dbg(VFS, "Dialect not supported by server. Consider "
"specifying vers=1.0 or vers=2.0 on mount for accessing"
" older servers\n");
goto neg_exit;
} else if (rc != 0)
goto neg_exit;
if (strcmp(ses->server->vals->version_string,
SMB3ANY_VERSION_STRING) == 0) {
if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID)) {
cifs_dbg(VFS,
"SMB2 dialect returned but not requested\n");
rc = -EIO;
goto neg_exit;
} else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID)) {
cifs_dbg(VFS,
"SMB2.1 dialect returned but not requested\n");
rc = -EIO;
goto neg_exit;
}
} else if (strcmp(ses->server->vals->version_string,
SMBDEFAULT_VERSION_STRING) == 0) {
if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID)) {
cifs_dbg(VFS,
"SMB2 dialect returned but not requested\n");
rc = -EIO;
goto neg_exit;
} else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID)) {
/* ops default to 3.0 for the default dialect, so update */
ses->server->ops = &smb21_operations;
} else if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID))
ses->server->ops = &smb311_operations;
} else if (le16_to_cpu(rsp->DialectRevision) !=
ses->server->vals->protocol_id) {
/* if a single dialect was requested, ensure the returned one matches */
cifs_dbg(VFS, "Illegal 0x%x dialect returned: not requested\n",
le16_to_cpu(rsp->DialectRevision));
rc = -EIO;
goto neg_exit;
}
cifs_dbg(FYI, "mode 0x%x\n", rsp->SecurityMode);
if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID))
cifs_dbg(FYI, "negotiated smb2.0 dialect\n");
else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID))
cifs_dbg(FYI, "negotiated smb2.1 dialect\n");
else if (rsp->DialectRevision == cpu_to_le16(SMB30_PROT_ID))
cifs_dbg(FYI, "negotiated smb3.0 dialect\n");
else if (rsp->DialectRevision == cpu_to_le16(SMB302_PROT_ID))
cifs_dbg(FYI, "negotiated smb3.02 dialect\n");
else if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID))
cifs_dbg(FYI, "negotiated smb3.1.1 dialect\n");
else {
cifs_dbg(VFS, "Illegal dialect returned by server 0x%x\n",
le16_to_cpu(rsp->DialectRevision));
rc = -EIO;
goto neg_exit;
}
server->dialect = le16_to_cpu(rsp->DialectRevision);
/*
* Keep a copy of the hash after negprot. This hash will be
* the starting hash value for all sessions made from this
* server.
*/
memcpy(server->preauth_sha_hash, ses->preauth_sha_hash,
SMB2_PREAUTH_HASH_SIZE);
/* SMB2 only has an extended negflavor */
server->negflavor = CIFS_NEGFLAVOR_EXTENDED;
/* set it to the maximum buffer size value we can send with 1 credit */
server->maxBuf = min_t(unsigned int, le32_to_cpu(rsp->MaxTransactSize),
SMB2_MAX_BUFFER_SIZE);
server->max_read = le32_to_cpu(rsp->MaxReadSize);
server->max_write = le32_to_cpu(rsp->MaxWriteSize);
server->sec_mode = le16_to_cpu(rsp->SecurityMode);
if ((server->sec_mode & SMB2_SEC_MODE_FLAGS_ALL) != server->sec_mode)
cifs_dbg(FYI, "Server returned unexpected security mode 0x%x\n",
server->sec_mode);
server->capabilities = le32_to_cpu(rsp->Capabilities);
/* Internal types */
server->capabilities |= SMB2_NT_FIND | SMB2_LARGE_FILES;
security_blob = smb2_get_data_area_len(&blob_offset, &blob_length,
(struct smb2_sync_hdr *)rsp);
/*
* See MS-SMB2 section 2.2.4: if no blob, client picks default which
* for us will be
* ses->sectype = RawNTLMSSP;
* but for the time being this is our only auth choice so it doesn't matter.
* We just found a server which sets blob length to zero expecting raw.
*/
if (blob_length == 0) {
cifs_dbg(FYI, "missing security blob on negprot\n");
server->sec_ntlmssp = true;
}
rc = cifs_enable_signing(server, ses->sign);
if (rc)
goto neg_exit;
if (blob_length) {
rc = decode_negTokenInit(security_blob, blob_length, server);
if (rc == 1)
rc = 0;
else if (rc == 0)
rc = -EIO;
}
if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID)) {
if (rsp->NegotiateContextCount)
rc = smb311_decode_neg_context(rsp, server,
rsp_iov.iov_len);
else
cifs_dbg(VFS, "Missing expected negotiate contexts\n");
}
neg_exit:
free_rsp_buf(resp_buftype, rsp);
return rc;
}
int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
{
int rc;
struct validate_negotiate_info_req *pneg_inbuf;
struct validate_negotiate_info_rsp *pneg_rsp = NULL;
u32 rsplen;
u32 inbuflen; /* max of 4 dialects */
cifs_dbg(FYI, "validate negotiate\n");
/* In SMB3.11 preauth integrity supersedes validate negotiate */
if (tcon->ses->server->dialect == SMB311_PROT_ID)
return 0;
/*
* validation ioctl must be signed, so no point sending this if we
* cannot sign it (i.e. we are not a known user). Even if signing is not
* required (enabled but not negotiated), in those cases we selectively
* sign just this, the first and only signed request on a connection.
* Having validation of negotiate info helps reduce attack vectors.
*/
if (tcon->ses->session_flags & SMB2_SESSION_FLAG_IS_GUEST)
return 0; /* validation requires signing */
if (tcon->ses->user_name == NULL) {
cifs_dbg(FYI, "Can't validate negotiate: null user mount\n");
return 0; /* validation requires signing */
}
if (tcon->ses->session_flags & SMB2_SESSION_FLAG_IS_NULL)
cifs_dbg(VFS, "Unexpected null user (anonymous) auth flag sent by server\n");
pneg_inbuf = kmalloc(sizeof(*pneg_inbuf), GFP_NOFS);
if (!pneg_inbuf)
return -ENOMEM;
pneg_inbuf->Capabilities =
cpu_to_le32(tcon->ses->server->vals->req_capabilities);
memcpy(pneg_inbuf->Guid, tcon->ses->server->client_guid,
SMB2_CLIENT_GUID_SIZE);
if (tcon->ses->sign)
pneg_inbuf->SecurityMode =
cpu_to_le16(SMB2_NEGOTIATE_SIGNING_REQUIRED);
else if (global_secflags & CIFSSEC_MAY_SIGN)
pneg_inbuf->SecurityMode =
cpu_to_le16(SMB2_NEGOTIATE_SIGNING_ENABLED);
else
pneg_inbuf->SecurityMode = 0;
if (strcmp(tcon->ses->server->vals->version_string,
SMB3ANY_VERSION_STRING) == 0) {
pneg_inbuf->Dialects[0] = cpu_to_le16(SMB30_PROT_ID);
pneg_inbuf->Dialects[1] = cpu_to_le16(SMB302_PROT_ID);
pneg_inbuf->DialectCount = cpu_to_le16(2);
/* structure is big enough for 4 dialects, sending only 2 */
inbuflen = sizeof(*pneg_inbuf) -
(2 * sizeof(pneg_inbuf->Dialects[0]));
} else if (strcmp(tcon->ses->server->vals->version_string,
SMBDEFAULT_VERSION_STRING) == 0) {
pneg_inbuf->Dialects[0] = cpu_to_le16(SMB21_PROT_ID);
pneg_inbuf->Dialects[1] = cpu_to_le16(SMB30_PROT_ID);
pneg_inbuf->Dialects[2] = cpu_to_le16(SMB302_PROT_ID);
pneg_inbuf->Dialects[3] = cpu_to_le16(SMB311_PROT_ID);
pneg_inbuf->DialectCount = cpu_to_le16(4);
/* structure is big enough for 4 dialects, sending all 4 */
inbuflen = sizeof(*pneg_inbuf);
} else {
/* otherwise specific dialect was requested */
pneg_inbuf->Dialects[0] =
cpu_to_le16(tcon->ses->server->vals->protocol_id);
pneg_inbuf->DialectCount = cpu_to_le16(1);
/* structure is big enough for 4 dialects, sending only 1 */
inbuflen = sizeof(*pneg_inbuf) -
sizeof(pneg_inbuf->Dialects[0]) * 3;
}
rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
FSCTL_VALIDATE_NEGOTIATE_INFO, true /* is_fsctl */,
(char *)pneg_inbuf, inbuflen, CIFSMaxBufSize,
(char **)&pneg_rsp, &rsplen);
if (rc == -EOPNOTSUPP) {
/*
* Old Windows versions or a Netapp SMB server can return a
* not supported error. The client should accept it.
*/
cifs_dbg(VFS, "Server does not support validate negotiate\n");
rc = 0;
goto out_free_inbuf;
} else if (rc != 0) {
cifs_dbg(VFS, "validate protocol negotiate failed: %d\n", rc);
rc = -EIO;
goto out_free_inbuf;
}
rc = -EIO;
if (rsplen != sizeof(*pneg_rsp)) {
cifs_dbg(VFS, "invalid protocol negotiate response size: %d\n",
rsplen);
/* relax check since Mac returns max bufsize allowed on ioctl */
if (rsplen > CIFSMaxBufSize || rsplen < sizeof(*pneg_rsp))
goto out_free_rsp;
}
/* check validate negotiate info response matches what we got earlier */
if (pneg_rsp->Dialect != cpu_to_le16(tcon->ses->server->dialect))
goto vneg_out;
if (pneg_rsp->SecurityMode != cpu_to_le16(tcon->ses->server->sec_mode))
goto vneg_out;
/* do not validate server guid because not saved at negprot time yet */
if ((le32_to_cpu(pneg_rsp->Capabilities) | SMB2_NT_FIND |
SMB2_LARGE_FILES) != tcon->ses->server->capabilities)
goto vneg_out;
/* validate negotiate successful */
rc = 0;
cifs_dbg(FYI, "validate negotiate info successful\n");
goto out_free_rsp;
vneg_out:
cifs_dbg(VFS, "protocol revalidation - security settings mismatch\n");
out_free_rsp:
kfree(pneg_rsp);
out_free_inbuf:
kfree(pneg_inbuf);
return rc;
}
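/*
 * Worked example of the inbuflen math above (assuming Dialects[4] in
 * struct validate_negotiate_info_req): when only two dialects are sent
 * the two unused trailing __le16 slots are trimmed off, i.e.
 *
 *	inbuflen = sizeof(*pneg_inbuf) - 2 * sizeof(__le16);
 *
 * which matters because pneg_inbuf is kmalloc'ed, so the trailing
 * slots would otherwise leak uninitialized bytes to the server.
 */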
enum securityEnum
smb2_select_sectype(struct TCP_Server_Info *server, enum securityEnum requested)
{
switch (requested) {
case Kerberos:
case RawNTLMSSP:
return requested;
case NTLMv2:
return RawNTLMSSP;
case Unspecified:
if (server->sec_ntlmssp &&
(global_secflags & CIFSSEC_MAY_NTLMSSP))
return RawNTLMSSP;
if ((server->sec_kerberos || server->sec_mskerberos) &&
(global_secflags & CIFSSEC_MAY_KRB5))
return Kerberos;
/* Fallthrough */
default:
return Unspecified;
}
}
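/*
 * Mapping examples for the switch above (illustrative):
 *
 *	smb2_select_sectype(server, NTLMv2)      -> RawNTLMSSP
 *	smb2_select_sectype(server, Kerberos)    -> Kerberos
 *	smb2_select_sectype(server, Unspecified) -> whatever the server
 *	    advertised at negprot (RawNTLMSSP preferred, then Kerberos)
 */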
struct SMB2_sess_data {
unsigned int xid;
struct cifs_ses *ses;
struct nls_table *nls_cp;
void (*func)(struct SMB2_sess_data *);
int result;
u64 previous_session;
/* we will send the SMB in two pieces:
 * a fixed length beginning part and an optional
 * SPNEGO blob (which can be zero length). This
 * allows us to avoid a large 17K buffer allocation
 */
int buf0_type;
struct kvec iov[2];
};
static int
SMB2_sess_alloc_buffer(struct SMB2_sess_data *sess_data)
{
int rc;
struct cifs_ses *ses = sess_data->ses;
struct smb2_sess_setup_req *req;
struct TCP_Server_Info *server = ses->server;
unsigned int total_len;
rc = smb2_plain_req_init(SMB2_SESSION_SETUP, NULL, (void **) &req,
&total_len);
if (rc)
return rc;
/* First session, not a reauthenticate */
req->sync_hdr.SessionId = 0;
/* if reconnect, we need to send previous sess id, otherwise it is 0 */
req->PreviousSessionId = sess_data->previous_session;
req->Flags = 0; /* MBZ */
/* enough to enable echos and oplocks and one max size write */
req->sync_hdr.CreditRequest = cpu_to_le16(130);
/* only one of SMB2 signing flags may be set in SMB2 request */
if (server->sign)
req->SecurityMode = SMB2_NEGOTIATE_SIGNING_REQUIRED;
else if (global_secflags & CIFSSEC_MAY_SIGN) /* one flag unlike MUST_ */
req->SecurityMode = SMB2_NEGOTIATE_SIGNING_ENABLED;
else
req->SecurityMode = 0;
req->Capabilities = 0;
req->Channel = 0; /* MBZ */
sess_data->iov[0].iov_base = (char *)req;
/* 1 for pad */
sess_data->iov[0].iov_len = total_len - 1;
/*
* This variable will be used to clear the buffer
* allocated above in case of any error in the calling function.
*/
sess_data->buf0_type = CIFS_SMALL_BUFFER;
return 0;
}
static void
SMB2_sess_free_buffer(struct SMB2_sess_data *sess_data)
{
free_rsp_buf(sess_data->buf0_type, sess_data->iov[0].iov_base);
sess_data->buf0_type = CIFS_NO_BUFFER;
}
static int
SMB2_sess_sendreceive(struct SMB2_sess_data *sess_data)
{
int rc;
struct smb_rqst rqst;
struct smb2_sess_setup_req *req = sess_data->iov[0].iov_base;
struct kvec rsp_iov = { NULL, 0 };
/* Testing shows that buffer offset must be at location of Buffer[0] */
req->SecurityBufferOffset =
cpu_to_le16(sizeof(struct smb2_sess_setup_req) - 1 /* pad */);
req->SecurityBufferLength = cpu_to_le16(sess_data->iov[1].iov_len);
memset(&rqst, 0, sizeof(struct smb_rqst));
rqst.rq_iov = sess_data->iov;
rqst.rq_nvec = 2;
/* BB add code to build os and lm fields */
rc = cifs_send_recv(sess_data->xid, sess_data->ses,
&rqst,
&sess_data->buf0_type,
CIFS_LOG_ERROR | CIFS_NEG_OP, &rsp_iov);
cifs_small_buf_release(sess_data->iov[0].iov_base);
memcpy(&sess_data->iov[0], &rsp_iov, sizeof(struct kvec));
return rc;
}
static int
SMB2_sess_establish_session(struct SMB2_sess_data *sess_data)
{
int rc = 0;
struct cifs_ses *ses = sess_data->ses;
mutex_lock(&ses->server->srv_mutex);
if (ses->server->ops->generate_signingkey) {
rc = ses->server->ops->generate_signingkey(ses);
if (rc) {
cifs_dbg(FYI,
"SMB3 session key generation failed\n");
mutex_unlock(&ses->server->srv_mutex);
return rc;
}
}
if (!ses->server->session_estab) {
ses->server->sequence_number = 0x2;
ses->server->session_estab = true;
}
mutex_unlock(&ses->server->srv_mutex);
cifs_dbg(FYI, "SMB2/3 session established successfully\n");
spin_lock(&GlobalMid_Lock);
ses->status = CifsGood;
ses->need_reconnect = false;
spin_unlock(&GlobalMid_Lock);
return rc;
}
#ifdef CONFIG_CIFS_UPCALL
static void
SMB2_auth_kerberos(struct SMB2_sess_data *sess_data)
{
int rc;
struct cifs_ses *ses = sess_data->ses;
struct cifs_spnego_msg *msg;
struct key *spnego_key = NULL;
struct smb2_sess_setup_rsp *rsp = NULL;
rc = SMB2_sess_alloc_buffer(sess_data);
if (rc)
goto out;
spnego_key = cifs_get_spnego_key(ses);
if (IS_ERR(spnego_key)) {
rc = PTR_ERR(spnego_key);
spnego_key = NULL;
goto out;
}
msg = spnego_key->payload.data[0];
/*
* check version field to make sure that cifs.upcall is
* sending us a response in an expected form
*/
if (msg->version != CIFS_SPNEGO_UPCALL_VERSION) {
cifs_dbg(VFS,
"bad cifs.upcall version. Expected %d got %d",
CIFS_SPNEGO_UPCALL_VERSION, msg->version);
rc = -EKEYREJECTED;
goto out_put_spnego_key;
}
ses->auth_key.response = kmemdup(msg->data, msg->sesskey_len,
GFP_KERNEL);
if (!ses->auth_key.response) {
cifs_dbg(VFS,
"Kerberos can't allocate (%u bytes) memory",
msg->sesskey_len);
rc = -ENOMEM;
goto out_put_spnego_key;
}
ses->auth_key.len = msg->sesskey_len;
sess_data->iov[1].iov_base = msg->data + msg->sesskey_len;
sess_data->iov[1].iov_len = msg->secblob_len;
rc = SMB2_sess_sendreceive(sess_data);
if (rc)
goto out_put_spnego_key;
rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base;
ses->Suid = rsp->sync_hdr.SessionId;
ses->session_flags = le16_to_cpu(rsp->SessionFlags);
rc = SMB2_sess_establish_session(sess_data);
out_put_spnego_key:
key_invalidate(spnego_key);
key_put(spnego_key);
out:
sess_data->result = rc;
sess_data->func = NULL;
SMB2_sess_free_buffer(sess_data);
}
#else
static void
SMB2_auth_kerberos(struct SMB2_sess_data *sess_data)
{
cifs_dbg(VFS, "Kerberos negotiated but upcall support disabled!\n");
sess_data->result = -EOPNOTSUPP;
sess_data->func = NULL;
}
#endif
static void
SMB2_sess_auth_rawntlmssp_authenticate(struct SMB2_sess_data *sess_data);
static void
SMB2_sess_auth_rawntlmssp_negotiate(struct SMB2_sess_data *sess_data)
{
int rc;
struct cifs_ses *ses = sess_data->ses;
struct smb2_sess_setup_rsp *rsp = NULL;
char *ntlmssp_blob = NULL;
bool use_spnego = false; /* else use raw ntlmssp */
u16 blob_length = 0;
/*
* If memory allocation is successful, caller of this function
* frees it.
*/
ses->ntlmssp = kmalloc(sizeof(struct ntlmssp_auth), GFP_KERNEL);
if (!ses->ntlmssp) {
rc = -ENOMEM;
goto out_err;
}
ses->ntlmssp->sesskey_per_smbsess = true;
rc = SMB2_sess_alloc_buffer(sess_data);
if (rc)
goto out_err;
ntlmssp_blob = kmalloc(sizeof(struct _NEGOTIATE_MESSAGE),
GFP_KERNEL);
if (ntlmssp_blob == NULL) {
rc = -ENOMEM;
goto out;
}
build_ntlmssp_negotiate_blob(ntlmssp_blob, ses);
if (use_spnego) {
/* BB eventually need to add this */
cifs_dbg(VFS, "spnego not supported for SMB2 yet\n");
rc = -EOPNOTSUPP;
goto out;
} else {
blob_length = sizeof(struct _NEGOTIATE_MESSAGE);
/* with raw NTLMSSP we don't encapsulate in SPNEGO */
}
sess_data->iov[1].iov_base = ntlmssp_blob;
sess_data->iov[1].iov_len = blob_length;
rc = SMB2_sess_sendreceive(sess_data);
rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base;
/* If true, rc here is expected and not an error */
if (sess_data->buf0_type != CIFS_NO_BUFFER &&
rsp->sync_hdr.Status == STATUS_MORE_PROCESSING_REQUIRED)
rc = 0;
if (rc)
goto out;
if (offsetof(struct smb2_sess_setup_rsp, Buffer) !=
le16_to_cpu(rsp->SecurityBufferOffset)) {
cifs_dbg(VFS, "Invalid security buffer offset %d\n",
le16_to_cpu(rsp->SecurityBufferOffset));
rc = -EIO;
goto out;
}
rc = decode_ntlmssp_challenge(rsp->Buffer,
le16_to_cpu(rsp->SecurityBufferLength), ses);
if (rc)
goto out;
cifs_dbg(FYI, "rawntlmssp session setup challenge phase\n");
ses->Suid = rsp->sync_hdr.SessionId;
ses->session_flags = le16_to_cpu(rsp->SessionFlags);
out:
kfree(ntlmssp_blob);
SMB2_sess_free_buffer(sess_data);
if (!rc) {
sess_data->result = 0;
sess_data->func = SMB2_sess_auth_rawntlmssp_authenticate;
return;
}
out_err:
kfree(ses->ntlmssp);
ses->ntlmssp = NULL;
sess_data->result = rc;
sess_data->func = NULL;
}
static void
SMB2_sess_auth_rawntlmssp_authenticate(struct SMB2_sess_data *sess_data)
{
int rc;
struct cifs_ses *ses = sess_data->ses;
struct smb2_sess_setup_req *req;
struct smb2_sess_setup_rsp *rsp = NULL;
unsigned char *ntlmssp_blob = NULL;
bool use_spnego = false; /* else use raw ntlmssp */
u16 blob_length = 0;
rc = SMB2_sess_alloc_buffer(sess_data);
if (rc)
goto out;
req = (struct smb2_sess_setup_req *) sess_data->iov[0].iov_base;
req->sync_hdr.SessionId = ses->Suid;
rc = build_ntlmssp_auth_blob(&ntlmssp_blob, &blob_length, ses,
sess_data->nls_cp);
if (rc) {
cifs_dbg(FYI, "build_ntlmssp_auth_blob failed %d\n", rc);
goto out;
}
if (use_spnego) {
/* BB eventually need to add this */
cifs_dbg(VFS, "spnego not supported for SMB2 yet\n");
rc = -EOPNOTSUPP;
goto out;
}
sess_data->iov[1].iov_base = ntlmssp_blob;
sess_data->iov[1].iov_len = blob_length;
rc = SMB2_sess_sendreceive(sess_data);
if (rc)
goto out;
rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base;
ses->Suid = rsp->sync_hdr.SessionId;
ses->session_flags = le16_to_cpu(rsp->SessionFlags);
rc = SMB2_sess_establish_session(sess_data);
out:
kfree(ntlmssp_blob);
SMB2_sess_free_buffer(sess_data);
kfree(ses->ntlmssp);
ses->ntlmssp = NULL;
sess_data->result = rc;
sess_data->func = NULL;
}
static int
SMB2_select_sec(struct cifs_ses *ses, struct SMB2_sess_data *sess_data)
{
int type;
type = smb2_select_sectype(ses->server, ses->sectype);
cifs_dbg(FYI, "sess setup type %d\n", type);
if (type == Unspecified) {
cifs_dbg(VFS,
"Unable to select appropriate authentication method!");
return -EINVAL;
}
switch (type) {
case Kerberos:
sess_data->func = SMB2_auth_kerberos;
break;
case RawNTLMSSP:
sess_data->func = SMB2_sess_auth_rawntlmssp_negotiate;
break;
default:
cifs_dbg(VFS, "secType %d not supported!\n", type);
return -EOPNOTSUPP;
}
return 0;
}
int
SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
const struct nls_table *nls_cp)
{
int rc = 0;
struct TCP_Server_Info *server = ses->server;
struct SMB2_sess_data *sess_data;
cifs_dbg(FYI, "Session Setup\n");
if (!server) {
WARN(1, "%s: server is NULL!\n", __func__);
return -EIO;
}
sess_data = kzalloc(sizeof(struct SMB2_sess_data), GFP_KERNEL);
if (!sess_data)
return -ENOMEM;
rc = SMB2_select_sec(ses, sess_data);
if (rc)
goto out;
sess_data->xid = xid;
sess_data->ses = ses;
sess_data->buf0_type = CIFS_NO_BUFFER;
sess_data->nls_cp = (struct nls_table *) nls_cp;
sess_data->previous_session = ses->Suid;
/*
* Initialize the session hash with the server one.
*/
memcpy(ses->preauth_sha_hash, ses->server->preauth_sha_hash,
SMB2_PREAUTH_HASH_SIZE);
while (sess_data->func)
sess_data->func(sess_data);
if ((ses->session_flags & SMB2_SESSION_FLAG_IS_GUEST) && (ses->sign))
cifs_dbg(VFS, "signing requested but authenticated as guest\n");
rc = sess_data->result;
out:
kfree(sess_data);
return rc;
}
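/*
 * The sess_data->func loop above drives a small state machine. For raw
 * NTLMSSP the sequence is (sketch):
 *
 *	SMB2_select_sec()             sets func = ..._rawntlmssp_negotiate
 *	..._rawntlmssp_negotiate()    sets func = ..._rawntlmssp_authenticate
 *	..._rawntlmssp_authenticate() sets func = NULL and stores result
 *
 * Kerberos completes in a single SMB2_auth_kerberos() step.
 */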
int
SMB2_logoff(const unsigned int xid, struct cifs_ses *ses)
{
struct smb_rqst rqst;
struct smb2_logoff_req *req; /* response is also trivial struct */
int rc = 0;
struct TCP_Server_Info *server;
int flags = 0;
unsigned int total_len;
struct kvec iov[1];
struct kvec rsp_iov;
int resp_buf_type;
cifs_dbg(FYI, "disconnect session %p\n", ses);
if (ses && (ses->server))
server = ses->server;
else
return -EIO;
/* no need to send SMB logoff if uid already closed due to reconnect */
if (ses->need_reconnect)
goto smb2_session_already_dead;
rc = smb2_plain_req_init(SMB2_LOGOFF, NULL, (void **) &req, &total_len);
if (rc)
return rc;
/* since no tcon, smb2_plain_req_init cannot do this, so do it here */
req->sync_hdr.SessionId = ses->Suid;
if (ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA)
flags |= CIFS_TRANSFORM_REQ;
else if (server->sign)
req->sync_hdr.Flags |= SMB2_FLAGS_SIGNED;
flags |= CIFS_NO_RESP;
iov[0].iov_base = (char *)req;
iov[0].iov_len = total_len;
memset(&rqst, 0, sizeof(struct smb_rqst));
rqst.rq_iov = iov;
rqst.rq_nvec = 1;
rc = cifs_send_recv(xid, ses, &rqst, &resp_buf_type, flags, &rsp_iov);
cifs_small_buf_release(req);
/*
* No tcon so can't do
* cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]);
*/
smb2_session_already_dead:
return rc;
}
static inline void cifs_stats_fail_inc(struct cifs_tcon *tcon, uint16_t code)
{
cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_failed[code]);
}
#define MAX_SHARENAME_LENGTH (255 /* server */ + 80 /* share */ + 1 /* NULL */)
/* These are similar values to what Windows uses */
static inline void init_copy_chunk_defaults(struct cifs_tcon *tcon)
{
tcon->max_chunks = 256;
tcon->max_bytes_chunk = 1048576;
tcon->max_bytes_copy = 16777216;
}
int
SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
struct cifs_tcon *tcon, const struct nls_table *cp)
{
struct smb_rqst rqst;
struct smb2_tree_connect_req *req;
struct smb2_tree_connect_rsp *rsp = NULL;
struct kvec iov[2];
struct kvec rsp_iov = { NULL, 0 };
int rc = 0;
int resp_buftype;
int unc_path_len;
__le16 *unc_path = NULL;
int flags = 0;
unsigned int total_len;
cifs_dbg(FYI, "TCON\n");
if (!(ses->server) || !tree)
return -EIO;
unc_path = kmalloc(MAX_SHARENAME_LENGTH * 2, GFP_KERNEL);
if (unc_path == NULL)
return -ENOMEM;
unc_path_len = cifs_strtoUTF16(unc_path, tree, strlen(tree), cp) + 1;
unc_path_len *= 2;
if (unc_path_len < 2) {
kfree(unc_path);
return -EINVAL;
}
/* SMB2 TREE_CONNECT request must be called with TreeId == 0 */
tcon->tid = 0;
atomic_set(&tcon->num_remote_opens, 0);
rc = smb2_plain_req_init(SMB2_TREE_CONNECT, tcon, (void **) &req,
&total_len);
if (rc) {
kfree(unc_path);
return rc;
}
if (smb3_encryption_required(tcon))
flags |= CIFS_TRANSFORM_REQ;
iov[0].iov_base = (char *)req;
/* 1 for pad */
iov[0].iov_len = total_len - 1;
/* Testing shows that buffer offset must be at location of Buffer[0] */
req->PathOffset = cpu_to_le16(sizeof(struct smb2_tree_connect_req)
- 1 /* pad */);
req->PathLength = cpu_to_le16(unc_path_len - 2);
iov[1].iov_base = unc_path;
iov[1].iov_len = unc_path_len;
/*
* 3.11 tcon req must be signed if not encrypted. See MS-SMB2 3.2.4.1.1
* unless it is guest or anonymous user. See MS-SMB2 3.2.5.3.1
* (Samba servers don't always set the flag so also check if null user)
*/
if ((ses->server->dialect == SMB311_PROT_ID) &&
!smb3_encryption_required(tcon) &&
!(ses->session_flags &
(SMB2_SESSION_FLAG_IS_GUEST|SMB2_SESSION_FLAG_IS_NULL)) &&
((ses->user_name != NULL) || (ses->sectype == Kerberos)))
req->sync_hdr.Flags |= SMB2_FLAGS_SIGNED;
memset(&rqst, 0, sizeof(struct smb_rqst));
rqst.rq_iov = iov;
rqst.rq_nvec = 2;
/* Need 64 for max size write so ask for more in case not there yet */
req->sync_hdr.CreditRequest = cpu_to_le16(64);
rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
cifs_small_buf_release(req);
rsp = (struct smb2_tree_connect_rsp *)rsp_iov.iov_base;
trace_smb3_tcon(xid, tcon->tid, ses->Suid, tree, rc);
if (rc != 0) {
if (tcon) {
cifs_stats_fail_inc(tcon, SMB2_TREE_CONNECT_HE);
tcon->need_reconnect = true;
}
goto tcon_error_exit;
}
switch (rsp->ShareType) {
case SMB2_SHARE_TYPE_DISK:
cifs_dbg(FYI, "connection to disk share\n");
break;
case SMB2_SHARE_TYPE_PIPE:
tcon->pipe = true;
cifs_dbg(FYI, "connection to pipe share\n");
break;
case SMB2_SHARE_TYPE_PRINT:
tcon->print = true;
cifs_dbg(FYI, "connection to printer\n");
break;
default:
cifs_dbg(VFS, "unknown share type %d\n", rsp->ShareType);
rc = -EOPNOTSUPP;
goto tcon_error_exit;
}
tcon->share_flags = le32_to_cpu(rsp->ShareFlags);
tcon->capabilities = rsp->Capabilities; /* we keep caps little endian */
tcon->maximal_access = le32_to_cpu(rsp->MaximalAccess);
tcon->tidStatus = CifsGood;
tcon->need_reconnect = false;
tcon->tid = rsp->sync_hdr.TreeId;
strlcpy(tcon->treeName, tree, sizeof(tcon->treeName));
if ((rsp->Capabilities & SMB2_SHARE_CAP_DFS) &&
((tcon->share_flags & SHI1005_FLAGS_DFS) == 0))
cifs_dbg(VFS, "DFS capability contradicts DFS flag\n");
if (tcon->seal &&
!(tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION))
cifs_dbg(VFS, "Encryption is requested but not supported\n");
init_copy_chunk_defaults(tcon);
if (tcon->ses->server->ops->validate_negotiate)
rc = tcon->ses->server->ops->validate_negotiate(xid, tcon);
tcon_exit:
free_rsp_buf(resp_buftype, rsp);
kfree(unc_path);
return rc;
tcon_error_exit:
if (rsp && rsp->sync_hdr.Status == STATUS_BAD_NETWORK_NAME) {
cifs_dbg(VFS, "BAD_NETWORK_NAME: %s\n", tree);
}
goto tcon_exit;
}
int
SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon)
{
struct smb_rqst rqst;
struct smb2_tree_disconnect_req *req; /* response is trivial */
int rc = 0;
struct cifs_ses *ses = tcon->ses;
int flags = 0;
unsigned int total_len;
struct kvec iov[1];
struct kvec rsp_iov;
int resp_buf_type;
cifs_dbg(FYI, "Tree Disconnect\n");
if (!ses || !(ses->server))
return -EIO;
if ((tcon->need_reconnect) || (tcon->ses->need_reconnect))
return 0;
rc = smb2_plain_req_init(SMB2_TREE_DISCONNECT, tcon, (void **) &req,
&total_len);
if (rc)
return rc;
if (smb3_encryption_required(tcon))
flags |= CIFS_TRANSFORM_REQ;
flags |= CIFS_NO_RESP;
iov[0].iov_base = (char *)req;
iov[0].iov_len = total_len;
memset(&rqst, 0, sizeof(struct smb_rqst));
rqst.rq_iov = iov;
rqst.rq_nvec = 1;
rc = cifs_send_recv(xid, ses, &rqst, &resp_buf_type, flags, &rsp_iov);
cifs_small_buf_release(req);
if (rc)
cifs_stats_fail_inc(tcon, SMB2_TREE_DISCONNECT_HE);
return rc;
}
static struct create_durable *
create_durable_buf(void)
{
struct create_durable *buf;
buf = kzalloc(sizeof(struct create_durable), GFP_KERNEL);
if (!buf)
return NULL;
buf->ccontext.DataOffset = cpu_to_le16(offsetof
(struct create_durable, Data));
buf->ccontext.DataLength = cpu_to_le32(16);
buf->ccontext.NameOffset = cpu_to_le16(offsetof
(struct create_durable, Name));
buf->ccontext.NameLength = cpu_to_le16(4);
/* SMB2_CREATE_DURABLE_HANDLE_REQUEST is "DHnQ" */
buf->Name[0] = 'D';
buf->Name[1] = 'H';
buf->Name[2] = 'n';
buf->Name[3] = 'Q';
return buf;
}
static struct create_durable *
create_reconnect_durable_buf(struct cifs_fid *fid)
{
struct create_durable *buf;
buf = kzalloc(sizeof(struct create_durable), GFP_KERNEL);
if (!buf)
return NULL;
buf->ccontext.DataOffset = cpu_to_le16(offsetof
(struct create_durable, Data));
buf->ccontext.DataLength = cpu_to_le32(16);
buf->ccontext.NameOffset = cpu_to_le16(offsetof
(struct create_durable, Name));
buf->ccontext.NameLength = cpu_to_le16(4);
buf->Data.Fid.PersistentFileId = fid->persistent_fid;
buf->Data.Fid.VolatileFileId = fid->volatile_fid;
/* SMB2_CREATE_DURABLE_HANDLE_RECONNECT is "DHnC" */
buf->Name[0] = 'D';
buf->Name[1] = 'H';
buf->Name[2] = 'n';
buf->Name[3] = 'C';
return buf;
}
__u8
smb2_parse_lease_state(struct TCP_Server_Info *server,
struct smb2_create_rsp *rsp,
unsigned int *epoch, char *lease_key)
{
char *data_offset;
struct create_context *cc;
unsigned int next;
unsigned int remaining;
char *name;
data_offset = (char *)rsp + le32_to_cpu(rsp->CreateContextsOffset);
remaining = le32_to_cpu(rsp->CreateContextsLength);
cc = (struct create_context *)data_offset;
while (remaining >= sizeof(struct create_context)) {
name = le16_to_cpu(cc->NameOffset) + (char *)cc;
if (le16_to_cpu(cc->NameLength) == 4 &&
strncmp(name, "RqLs", 4) == 0)
return server->ops->parse_lease_buf(cc, epoch,
lease_key);
next = le32_to_cpu(cc->Next);
if (!next)
break;
remaining -= next;
cc = (struct create_context *)((char *)cc + next);
}
return 0;
}
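/*
 * Walk example (illustrative): a response carrying a durable handle
 * context followed by a lease context would be visited as
 *
 *	cc #1: name "DHnQ" (no match), cc->Next != 0, advance
 *	cc #2: name "RqLs" (match), parse_lease_buf() returns the state
 *
 * and the loop also stops early when cc->Next is 0 or fewer than
 * sizeof(struct create_context) bytes remain.
 */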
static int
add_lease_context(struct TCP_Server_Info *server, struct kvec *iov,
unsigned int *num_iovec, u8 *lease_key, __u8 *oplock)
{
struct smb2_create_req *req = iov[0].iov_base;
unsigned int num = *num_iovec;
iov[num].iov_base = server->ops->create_lease_buf(lease_key, *oplock);
if (iov[num].iov_base == NULL)
return -ENOMEM;
iov[num].iov_len = server->vals->create_lease_size;
req->RequestedOplockLevel = SMB2_OPLOCK_LEVEL_LEASE;
if (!req->CreateContextsOffset)
req->CreateContextsOffset = cpu_to_le32(
sizeof(struct smb2_create_req) +
iov[num - 1].iov_len);
le32_add_cpu(&req->CreateContextsLength,
server->vals->create_lease_size);
*num_iovec = num + 1;
return 0;
}
static struct create_durable_v2 *
create_durable_v2_buf(struct cifs_open_parms *oparms)
{
struct cifs_fid *pfid = oparms->fid;
struct create_durable_v2 *buf;
buf = kzalloc(sizeof(struct create_durable_v2), GFP_KERNEL);
if (!buf)
return NULL;
buf->ccontext.DataOffset = cpu_to_le16(offsetof
(struct create_durable_v2, dcontext));
buf->ccontext.DataLength = cpu_to_le32(sizeof(struct durable_context_v2));
buf->ccontext.NameOffset = cpu_to_le16(offsetof
(struct create_durable_v2, Name));
buf->ccontext.NameLength = cpu_to_le16(4);
/*
* NB: Handle timeout defaults to 0, which allows server to choose
* (most servers default to 120 seconds) and most clients default to 0.
* This can be overridden at mount ("handletimeout=") if the user wants
* a different persistent (or resilient) handle timeout for all opens
* on a particular SMB3 mount.
*/
buf->dcontext.Timeout = cpu_to_le32(oparms->tcon->handle_timeout);
buf->dcontext.Flags = cpu_to_le32(SMB2_DHANDLE_FLAG_PERSISTENT);
generate_random_uuid(buf->dcontext.CreateGuid);
memcpy(pfid->create_guid, buf->dcontext.CreateGuid, 16);
/* SMB2_CREATE_DURABLE_HANDLE_REQUEST is "DH2Q" */
buf->Name[0] = 'D';
buf->Name[1] = 'H';
buf->Name[2] = '2';
buf->Name[3] = 'Q';
return buf;
}
static struct create_durable_handle_reconnect_v2 *
create_reconnect_durable_v2_buf(struct cifs_fid *fid)
{
struct create_durable_handle_reconnect_v2 *buf;
buf = kzalloc(sizeof(struct create_durable_handle_reconnect_v2),
GFP_KERNEL);
if (!buf)
return NULL;
buf->ccontext.DataOffset =
cpu_to_le16(offsetof(struct create_durable_handle_reconnect_v2,
dcontext));
buf->ccontext.DataLength =
cpu_to_le32(sizeof(struct durable_reconnect_context_v2));
buf->ccontext.NameOffset =
cpu_to_le16(offsetof(struct create_durable_handle_reconnect_v2,
Name));
buf->ccontext.NameLength = cpu_to_le16(4);
buf->dcontext.Fid.PersistentFileId = fid->persistent_fid;
buf->dcontext.Fid.VolatileFileId = fid->volatile_fid;
buf->dcontext.Flags = cpu_to_le32(SMB2_DHANDLE_FLAG_PERSISTENT);
memcpy(buf->dcontext.CreateGuid, fid->create_guid, 16);
/* SMB2_CREATE_DURABLE_HANDLE_RECONNECT_V2 is "DH2C" */
buf->Name[0] = 'D';
buf->Name[1] = 'H';
buf->Name[2] = '2';
buf->Name[3] = 'C';
return buf;
}
static int
add_durable_v2_context(struct kvec *iov, unsigned int *num_iovec,
struct cifs_open_parms *oparms)
{
struct smb2_create_req *req = iov[0].iov_base;
unsigned int num = *num_iovec;
iov[num].iov_base = create_durable_v2_buf(oparms);
if (iov[num].iov_base == NULL)
return -ENOMEM;
iov[num].iov_len = sizeof(struct create_durable_v2);
if (!req->CreateContextsOffset)
req->CreateContextsOffset =
cpu_to_le32(sizeof(struct smb2_create_req) +
iov[1].iov_len);
le32_add_cpu(&req->CreateContextsLength, sizeof(struct create_durable_v2));
*num_iovec = num + 1;
return 0;
}
static int
add_durable_reconnect_v2_context(struct kvec *iov, unsigned int *num_iovec,
struct cifs_open_parms *oparms)
{
struct smb2_create_req *req = iov[0].iov_base;
unsigned int num = *num_iovec;
/* indicate that we don't need to relock the file */
oparms->reconnect = false;
iov[num].iov_base = create_reconnect_durable_v2_buf(oparms->fid);
if (iov[num].iov_base == NULL)
return -ENOMEM;
iov[num].iov_len = sizeof(struct create_durable_handle_reconnect_v2);
if (!req->CreateContextsOffset)
req->CreateContextsOffset =
cpu_to_le32(sizeof(struct smb2_create_req) +
iov[1].iov_len);
le32_add_cpu(&req->CreateContextsLength,
sizeof(struct create_durable_handle_reconnect_v2));
*num_iovec = num + 1;
return 0;
}
static int
add_durable_context(struct kvec *iov, unsigned int *num_iovec,
struct cifs_open_parms *oparms, bool use_persistent)
{
struct smb2_create_req *req = iov[0].iov_base;
unsigned int num = *num_iovec;
if (use_persistent) {
if (oparms->reconnect)
return add_durable_reconnect_v2_context(iov, num_iovec,
oparms);
else
return add_durable_v2_context(iov, num_iovec, oparms);
}
if (oparms->reconnect) {
iov[num].iov_base = create_reconnect_durable_buf(oparms->fid);
/* indicate that we don't need to relock the file */
oparms->reconnect = false;
} else
iov[num].iov_base = create_durable_buf();
if (iov[num].iov_base == NULL)
return -ENOMEM;
iov[num].iov_len = sizeof(struct create_durable);
if (!req->CreateContextsOffset)
req->CreateContextsOffset =
cpu_to_le32(sizeof(struct smb2_create_req) +
iov[1].iov_len);
le32_add_cpu(&req->CreateContextsLength, sizeof(struct create_durable));
*num_iovec = num + 1;
return 0;
}
/* See MS-SMB2 2.2.13.2.7 */
static struct crt_twarp_ctxt *
create_twarp_buf(__u64 timewarp)
{
struct crt_twarp_ctxt *buf;
buf = kzalloc(sizeof(struct crt_twarp_ctxt), GFP_KERNEL);
if (!buf)
return NULL;
buf->ccontext.DataOffset = cpu_to_le16(offsetof
(struct crt_twarp_ctxt, Timestamp));
buf->ccontext.DataLength = cpu_to_le32(8);
buf->ccontext.NameOffset = cpu_to_le16(offsetof
(struct crt_twarp_ctxt, Name));
buf->ccontext.NameLength = cpu_to_le16(4);
/* SMB2_CREATE_TIMEWARP_TOKEN is "TWrp" */
buf->Name[0] = 'T';
buf->Name[1] = 'W';
buf->Name[2] = 'r';
buf->Name[3] = 'p';
buf->Timestamp = cpu_to_le64(timewarp);
return buf;
}
/* See MS-SMB2 2.2.13.2.7 */
static int
add_twarp_context(struct kvec *iov, unsigned int *num_iovec, __u64 timewarp)
{
struct smb2_create_req *req = iov[0].iov_base;
unsigned int num = *num_iovec;
iov[num].iov_base = create_twarp_buf(timewarp);
if (iov[num].iov_base == NULL)
return -ENOMEM;
iov[num].iov_len = sizeof(struct crt_twarp_ctxt);
if (!req->CreateContextsOffset)
req->CreateContextsOffset = cpu_to_le32(
sizeof(struct smb2_create_req) +
iov[num - 1].iov_len);
le32_add_cpu(&req->CreateContextsLength, sizeof(struct crt_twarp_ctxt));
*num_iovec = num + 1;
return 0;
}
static int
alloc_path_with_tree_prefix(__le16 **out_path, int *out_size, int *out_len,
const char *treename, const __le16 *path)
{
int treename_len, path_len;
struct nls_table *cp;
const __le16 sep[] = {cpu_to_le16('\\'), cpu_to_le16(0x0000)};
/*
* skip leading "\\"
*/
treename_len = strlen(treename);
if (treename_len < 2 || !(treename[0] == '\\' && treename[1] == '\\'))
return -EINVAL;
treename += 2;
treename_len -= 2;
path_len = UniStrnlen((wchar_t *)path, PATH_MAX);
/*
* make room for one path separator between the treename and
* path
*/
*out_len = treename_len + 1 + path_len;
/*
* final path needs to be null-terminated UTF16 with a
* size aligned to 8
*/
*out_size = roundup((*out_len+1)*2, 8);
*out_path = kzalloc(*out_size, GFP_KERNEL);
if (!*out_path)
return -ENOMEM;
cp = load_nls_default();
cifs_strtoUTF16(*out_path, treename, treename_len, cp);
UniStrcat(*out_path, sep);
UniStrcat(*out_path, path);
unload_nls(cp);
return 0;
}
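/*
 * Sizing example (illustrative): with treename "\\srv\share" and a two
 * character path, treename_len is 9 after skipping the leading "\\":
 *
 *	*out_len  = 9 + 1 + 2;                  -> 12 UTF-16 units
 *	*out_size = roundup((12 + 1) * 2, 8);   -> 32 bytes
 *
 * so the returned buffer is always NUL terminated and its size is a
 * multiple of 8, as the create request layout expects.
 */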
int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
umode_t mode, struct cifs_tcon *tcon,
const char *full_path,
struct cifs_sb_info *cifs_sb)
{
struct smb_rqst rqst;
struct smb2_create_req *req;
struct smb2_create_rsp *rsp = NULL;
struct cifs_ses *ses = tcon->ses;
struct kvec iov[3]; /* make sure at least one for each open context */
struct kvec rsp_iov = {NULL, 0};
int resp_buftype;
int uni_path_len;
__le16 *copy_path = NULL;
int copy_size;
int rc = 0;
unsigned int n_iov = 2;
__u32 file_attributes = 0;
char *pc_buf = NULL;
int flags = 0;
unsigned int total_len;
__le16 *utf16_path = NULL;
cifs_dbg(FYI, "mkdir\n");
/* resource #1: path allocation */
utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
if (!utf16_path)
return -ENOMEM;
if (!ses || !(ses->server)) {
rc = -EIO;
goto err_free_path;
}
/* resource #2: request */
rc = smb2_plain_req_init(SMB2_CREATE, tcon, (void **) &req, &total_len);
if (rc)
goto err_free_path;
if (smb3_encryption_required(tcon))
flags |= CIFS_TRANSFORM_REQ;
req->ImpersonationLevel = IL_IMPERSONATION;
req->DesiredAccess = cpu_to_le32(FILE_WRITE_ATTRIBUTES);
/* File attributes ignored on open (used in create though) */
req->FileAttributes = cpu_to_le32(file_attributes);
req->ShareAccess = FILE_SHARE_ALL_LE;
req->CreateDisposition = cpu_to_le32(FILE_CREATE);
req->CreateOptions = cpu_to_le32(CREATE_NOT_FILE);
iov[0].iov_base = (char *)req;
/* -1 since last byte is buf[0] which is sent below (path) */
iov[0].iov_len = total_len - 1;
req->NameOffset = cpu_to_le16(sizeof(struct smb2_create_req));
/* [MS-SMB2] 2.2.13 NameOffset:
* If SMB2_FLAGS_DFS_OPERATIONS is set in the Flags field of
* the SMB2 header, the file name includes a prefix that will
* be processed during DFS name normalization as specified in
* section 3.3.5.9. Otherwise, the file name is relative to
* the share that is identified by the TreeId in the SMB2
* header.
*/
if (tcon->share_flags & SHI1005_FLAGS_DFS) {
int name_len;
req->sync_hdr.Flags |= SMB2_FLAGS_DFS_OPERATIONS;
rc = alloc_path_with_tree_prefix(&copy_path, &copy_size,
&name_len,
tcon->treeName, utf16_path);
if (rc)
goto err_free_req;
req->NameLength = cpu_to_le16(name_len * 2);
uni_path_len = copy_size;
/* free before overwriting resource */
kfree(utf16_path);
utf16_path = copy_path;
} else {
uni_path_len = (2 * UniStrnlen((wchar_t *)utf16_path, PATH_MAX)) + 2;
/* MUST set path len (NameLength) to 0 when opening root of share */
req->NameLength = cpu_to_le16(uni_path_len - 2);
if (uni_path_len % 8 != 0) {
copy_size = roundup(uni_path_len, 8);
copy_path = kzalloc(copy_size, GFP_KERNEL);
if (!copy_path) {
rc = -ENOMEM;
goto err_free_req;
}
memcpy((char *)copy_path, (const char *)utf16_path,
uni_path_len);
uni_path_len = copy_size;
/* free before overwriting resource */
kfree(utf16_path);
utf16_path = copy_path;
}
}
iov[1].iov_len = uni_path_len;
iov[1].iov_base = utf16_path;
req->RequestedOplockLevel = SMB2_OPLOCK_LEVEL_NONE;
if (tcon->posix_extensions) {
/* resource #3: posix buf */
rc = add_posix_context(iov, &n_iov, mode);
if (rc)
goto err_free_req;
pc_buf = iov[n_iov-1].iov_base;
}
memset(&rqst, 0, sizeof(struct smb_rqst));
rqst.rq_iov = iov;
rqst.rq_nvec = n_iov;
trace_smb3_posix_mkdir_enter(xid, tcon->tid, ses->Suid, CREATE_NOT_FILE,
FILE_WRITE_ATTRIBUTES);
/* resource #4: response buffer */
rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
if (rc) {
cifs_stats_fail_inc(tcon, SMB2_CREATE_HE);
trace_smb3_posix_mkdir_err(xid, tcon->tid, ses->Suid,
CREATE_NOT_FILE,
FILE_WRITE_ATTRIBUTES, rc);
goto err_free_rsp_buf;
}
rsp = (struct smb2_create_rsp *)rsp_iov.iov_base;
trace_smb3_posix_mkdir_done(xid, rsp->PersistentFileId, tcon->tid,
ses->Suid, CREATE_NOT_FILE,
FILE_WRITE_ATTRIBUTES);
SMB2_close(xid, tcon, rsp->PersistentFileId, rsp->VolatileFileId);
/* Eventually save off posix specific response info and timestamps */
err_free_rsp_buf:
free_rsp_buf(resp_buftype, rsp);
kfree(pc_buf);
err_free_req:
cifs_small_buf_release(req);
err_free_path:
kfree(utf16_path);
return rc;
}
int
SMB2_open_init(struct cifs_tcon *tcon, struct smb_rqst *rqst, __u8 *oplock,
struct cifs_open_parms *oparms, __le16 *path)
{
struct TCP_Server_Info *server = tcon->ses->server;
struct smb2_create_req *req;
unsigned int n_iov = 2;
__u32 file_attributes = 0;
int copy_size;
int uni_path_len;
unsigned int total_len;
struct kvec *iov = rqst->rq_iov;
__le16 *copy_path;
int rc;
rc = smb2_plain_req_init(SMB2_CREATE, tcon, (void **) &req, &total_len);
if (rc)
return rc;
iov[0].iov_base = (char *)req;
/* -1 since last byte is buf[0] which is sent below (path) */
iov[0].iov_len = total_len - 1;
if (oparms->create_options & CREATE_OPTION_READONLY)
file_attributes |= ATTR_READONLY;
if (oparms->create_options & CREATE_OPTION_SPECIAL)
file_attributes |= ATTR_SYSTEM;
req->ImpersonationLevel = IL_IMPERSONATION;
req->DesiredAccess = cpu_to_le32(oparms->desired_access);
/* File attributes ignored on open (used in create though) */
req->FileAttributes = cpu_to_le32(file_attributes);
req->ShareAccess = FILE_SHARE_ALL_LE;
req->CreateDisposition = cpu_to_le32(oparms->disposition);
req->CreateOptions = cpu_to_le32(oparms->create_options & CREATE_OPTIONS_MASK);
req->NameOffset = cpu_to_le16(sizeof(struct smb2_create_req));
/* [MS-SMB2] 2.2.13 NameOffset:
* If SMB2_FLAGS_DFS_OPERATIONS is set in the Flags field of
* the SMB2 header, the file name includes a prefix that will
* be processed during DFS name normalization as specified in
* section 3.3.5.9. Otherwise, the file name is relative to
* the share that is identified by the TreeId in the SMB2
* header.
*/
if (tcon->share_flags & SHI1005_FLAGS_DFS) {
int name_len;
req->sync_hdr.Flags |= SMB2_FLAGS_DFS_OPERATIONS;
rc = alloc_path_with_tree_prefix(&copy_path, &copy_size,
&name_len,
tcon->treeName, path);
if (rc)
return rc;
req->NameLength = cpu_to_le16(name_len * 2);
uni_path_len = copy_size;
path = copy_path;
} else {
uni_path_len = (2 * UniStrnlen((wchar_t *)path, PATH_MAX)) + 2;
/* MUST set path len (NameLength) to 0 when opening root of share */
req->NameLength = cpu_to_le16(uni_path_len - 2);
copy_size = uni_path_len;
if (copy_size % 8 != 0)
copy_size = roundup(copy_size, 8);
copy_path = kzalloc(copy_size, GFP_KERNEL);
if (!copy_path)
return -ENOMEM;
memcpy((char *)copy_path, (const char *)path,
uni_path_len);
uni_path_len = copy_size;
path = copy_path;
}
iov[1].iov_len = uni_path_len;
iov[1].iov_base = path;
if (!server->oplocks)
*oplock = SMB2_OPLOCK_LEVEL_NONE;
if (!(server->capabilities & SMB2_GLOBAL_CAP_LEASING) ||
*oplock == SMB2_OPLOCK_LEVEL_NONE)
req->RequestedOplockLevel = *oplock;
else if (!(server->capabilities & SMB2_GLOBAL_CAP_DIRECTORY_LEASING) &&
(oparms->create_options & CREATE_NOT_FILE))
req->RequestedOplockLevel = *oplock; /* no srv lease support */
else {
rc = add_lease_context(server, iov, &n_iov,
oparms->fid->lease_key, oplock);
if (rc)
return rc;
}
if (*oplock == SMB2_OPLOCK_LEVEL_BATCH) {
/* need to set Next field of lease context if we request it */
if (server->capabilities & SMB2_GLOBAL_CAP_LEASING) {
struct create_context *ccontext =
(struct create_context *)iov[n_iov-1].iov_base;
ccontext->Next =
cpu_to_le32(server->vals->create_lease_size);
}
rc = add_durable_context(iov, &n_iov, oparms,
tcon->use_persistent);
if (rc)
return rc;
}
if (tcon->posix_extensions) {
if (n_iov > 2) {
struct create_context *ccontext =
(struct create_context *)iov[n_iov-1].iov_base;
ccontext->Next =
cpu_to_le32(iov[n_iov-1].iov_len);
}
rc = add_posix_context(iov, &n_iov, oparms->mode);
if (rc)
return rc;
}
if (tcon->snapshot_time) {
cifs_dbg(FYI, "adding snapshot context\n");
if (n_iov > 2) {
struct create_context *ccontext =
(struct create_context *)iov[n_iov-1].iov_base;
ccontext->Next =
cpu_to_le32(iov[n_iov-1].iov_len);
}
rc = add_twarp_context(iov, &n_iov, tcon->snapshot_time);
if (rc)
return rc;
}
rqst->rq_nvec = n_iov;
return 0;
}
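/*
 * Note on the ccontext->Next fixups above (sketch): create contexts
 * are chained, so each context that is followed by another must state
 * how far to jump. With a lease context followed by a posix context:
 *
 *	lease->Next = server->vals->create_lease_size;
 *	posix->Next = 0;	(last context terminates the chain)
 *
 * which is why Next is patched only when a further context is added.
 */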
/* rq_iov[0] is the request and is released by cifs_small_buf_release().
* All other vectors are freed by kfree().
*/
void
SMB2_open_free(struct smb_rqst *rqst)
{
int i;
if (rqst && rqst->rq_iov) {
cifs_small_buf_release(rqst->rq_iov[0].iov_base);
for (i = 1; i < rqst->rq_nvec; i++)
if (rqst->rq_iov[i].iov_base != smb2_padding)
kfree(rqst->rq_iov[i].iov_base);
}
}
int
SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
__u8 *oplock, struct smb2_file_all_info *buf,
struct kvec *err_iov, int *buftype)
{
struct smb_rqst rqst;
struct smb2_create_rsp *rsp = NULL;
struct TCP_Server_Info *server;
struct cifs_tcon *tcon = oparms->tcon;
struct cifs_ses *ses = tcon->ses;
struct kvec iov[SMB2_CREATE_IOV_SIZE];
struct kvec rsp_iov = {NULL, 0};
int resp_buftype = CIFS_NO_BUFFER;
int rc = 0;
int flags = 0;
cifs_dbg(FYI, "create/open\n");
if (ses && (ses->server))
server = ses->server;
else
return -EIO;
if (smb3_encryption_required(tcon))
flags |= CIFS_TRANSFORM_REQ;
memset(&rqst, 0, sizeof(struct smb_rqst));
memset(&iov, 0, sizeof(iov));
rqst.rq_iov = iov;
rqst.rq_nvec = SMB2_CREATE_IOV_SIZE;
rc = SMB2_open_init(tcon, &rqst, oplock, oparms, path);
if (rc)
goto creat_exit;
trace_smb3_open_enter(xid, tcon->tid, tcon->ses->Suid,
oparms->create_options, oparms->desired_access);
rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags,
&rsp_iov);
rsp = (struct smb2_create_rsp *)rsp_iov.iov_base;
if (rc != 0) {
cifs_stats_fail_inc(tcon, SMB2_CREATE_HE);
if (err_iov && rsp) {
*err_iov = rsp_iov;
*buftype = resp_buftype;
resp_buftype = CIFS_NO_BUFFER;
rsp = NULL;
}
trace_smb3_open_err(xid, tcon->tid, ses->Suid,
oparms->create_options, oparms->desired_access, rc);
goto creat_exit;
} else
trace_smb3_open_done(xid, rsp->PersistentFileId, tcon->tid,
ses->Suid, oparms->create_options,
oparms->desired_access);
atomic_inc(&tcon->num_remote_opens);
oparms->fid->persistent_fid = rsp->PersistentFileId;
oparms->fid->volatile_fid = rsp->VolatileFileId;
#ifdef CONFIG_CIFS_DEBUG2
oparms->fid->mid = le64_to_cpu(rsp->sync_hdr.MessageId);
#endif /* CIFS_DEBUG2 */
if (buf) {
memcpy(buf, &rsp->CreationTime, 32);
buf->AllocationSize = rsp->AllocationSize;
buf->EndOfFile = rsp->EndofFile;
buf->Attributes = rsp->FileAttributes;
buf->NumberOfLinks = cpu_to_le32(1);
buf->DeletePending = 0;
}
if (rsp->OplockLevel == SMB2_OPLOCK_LEVEL_LEASE)
*oplock = smb2_parse_lease_state(server, rsp,
&oparms->fid->epoch,
oparms->fid->lease_key);
else
*oplock = rsp->OplockLevel;
creat_exit:
SMB2_open_free(&rqst);
free_rsp_buf(resp_buftype, rsp);
return rc;
}
int
SMB2_ioctl_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
u64 persistent_fid, u64 volatile_fid, u32 opcode,
bool is_fsctl, char *in_data, u32 indatalen,
__u32 max_response_size)
{
struct smb2_ioctl_req *req;
struct kvec *iov = rqst->rq_iov;
unsigned int total_len;
int rc;
rc = smb2_plain_req_init(SMB2_IOCTL, tcon, (void **) &req, &total_len);
if (rc)
return rc;
req->CtlCode = cpu_to_le32(opcode);
req->PersistentFileId = persistent_fid;
req->VolatileFileId = volatile_fid;
iov[0].iov_base = (char *)req;
/*
* If there is no input data, the size of the ioctl struct in the
* protocol spec still includes a 1 byte data buffer. But if input
* data is passed to the ioctl (in iovec[1]), we do not want to
* count that byte twice, so we do not send the dummy one byte of
* data in iovec[0] when sending input data.
*/
if (indatalen) {
req->InputCount = cpu_to_le32(indatalen);
/* do not set InputOffset if no input data */
req->InputOffset =
cpu_to_le32(offsetof(struct smb2_ioctl_req, Buffer));
rqst->rq_nvec = 2;
iov[0].iov_len = total_len - 1;
iov[1].iov_base = in_data;
iov[1].iov_len = indatalen;
} else {
rqst->rq_nvec = 1;
iov[0].iov_len = total_len;
}
req->OutputOffset = 0;
req->OutputCount = 0; /* MBZ */
/*
* In most cases max_response_size is set to 16K (CIFSMaxBufSize).
* We could increase the default MaxOutputResponse, but that could
* require more credits. Windows typically sets this smaller, but for
* some ioctls it may be useful to allow the server to send more. There
* is no point limiting what the server can send as long as it fits in
* one credit. We cannot handle more than CIFS_MAX_BUF_SIZE yet but may
* want to increase this limit in the future.
* Note that for snapshot queries, servers like Azure expect the first
* query to be minimal size (and just used to get the number/size of
* previous versions), so the response size must be specified as
* EXACTLY sizeof(struct snapshot_array), which is 16 when rounded up
* to a multiple of eight bytes. Currently that is the only case where
* we set the max response size smaller.
*/
req->MaxOutputResponse = cpu_to_le32(max_response_size);
if (is_fsctl)
req->Flags = cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL);
else
req->Flags = 0;
/* validate negotiate request must be signed - see MS-SMB2 3.2.5.5 */
if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO)
req->sync_hdr.Flags |= SMB2_FLAGS_SIGNED;
return 0;
}
void
SMB2_ioctl_free(struct smb_rqst *rqst)
{
if (rqst && rqst->rq_iov)
cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
}
/*
* SMB2 IOCTL is used for both IOCTLs and FSCTLs
*/
int
SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
u64 volatile_fid, u32 opcode, bool is_fsctl,
char *in_data, u32 indatalen, u32 max_out_data_len,
char **out_data, u32 *plen /* returned data len */)
{
struct smb_rqst rqst;
struct smb2_ioctl_rsp *rsp = NULL;
struct cifs_ses *ses;
struct kvec iov[SMB2_IOCTL_IOV_SIZE];
struct kvec rsp_iov = {NULL, 0};
int resp_buftype = CIFS_NO_BUFFER;
int rc = 0;
int flags = 0;
cifs_dbg(FYI, "SMB2 IOCTL\n");
if (out_data != NULL)
*out_data = NULL;
/* zero out returned data len, in case of error */
if (plen)
*plen = 0;
if (tcon)
ses = tcon->ses;
else
return -EIO;
if (!ses || !(ses->server))
return -EIO;
if (smb3_encryption_required(tcon))
flags |= CIFS_TRANSFORM_REQ;
memset(&rqst, 0, sizeof(struct smb_rqst));
memset(&iov, 0, sizeof(iov));
rqst.rq_iov = iov;
rqst.rq_nvec = SMB2_IOCTL_IOV_SIZE;
rc = SMB2_ioctl_init(tcon, &rqst, persistent_fid, volatile_fid, opcode,
is_fsctl, in_data, indatalen, max_out_data_len);
if (rc)
goto ioctl_exit;
rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags,
&rsp_iov);
rsp = (struct smb2_ioctl_rsp *)rsp_iov.iov_base;
if (rc != 0)
trace_smb3_fsctl_err(xid, persistent_fid, tcon->tid,
ses->Suid, 0, opcode, rc);
if ((rc != 0) && (rc != -EINVAL)) {
cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
goto ioctl_exit;
} else if (rc == -EINVAL) {
if ((opcode != FSCTL_SRV_COPYCHUNK_WRITE) &&
(opcode != FSCTL_SRV_COPYCHUNK)) {
cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
goto ioctl_exit;
}
}
/* check if caller wants to look at return data or just return rc */
if ((plen == NULL) || (out_data == NULL))
goto ioctl_exit;
*plen = le32_to_cpu(rsp->OutputCount);
/* We check for obvious errors in the output buffer length and offset */
if (*plen == 0)
goto ioctl_exit; /* server returned no data */
else if (*plen > rsp_iov.iov_len || *plen > 0xFF00) {
cifs_dbg(VFS, "srv returned invalid ioctl length: %d\n", *plen);
*plen = 0;
rc = -EIO;
goto ioctl_exit;
}
if (rsp_iov.iov_len - *plen < le32_to_cpu(rsp->OutputOffset)) {
cifs_dbg(VFS, "Malformed ioctl resp: len %d offset %d\n", *plen,
le32_to_cpu(rsp->OutputOffset));
*plen = 0;
rc = -EIO;
goto ioctl_exit;
}
*out_data = kmemdup((char *)rsp + le32_to_cpu(rsp->OutputOffset),
*plen, GFP_KERNEL);
if (*out_data == NULL) {
rc = -ENOMEM;
goto ioctl_exit;
}
ioctl_exit:
SMB2_ioctl_free(&rqst);
free_rsp_buf(resp_buftype, rsp);
return rc;
}
/*
* Individual callers to the ioctl worker function follow
*/
int
SMB2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid)
{
int rc;
struct compress_ioctl fsctl_input;
char *ret_data = NULL;
fsctl_input.CompressionState =
cpu_to_le16(COMPRESSION_FORMAT_DEFAULT);
rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid,
FSCTL_SET_COMPRESSION, true /* is_fsctl */,
(char *)&fsctl_input /* data input */,
2 /* in data len */, CIFSMaxBufSize /* max out data */,
&ret_data /* out data */, NULL);
cifs_dbg(FYI, "set compression rc %d\n", rc);
return rc;
}
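/* Marshal an SMB2 CLOSE request for the given file handle into iov[0]. */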
int
SMB2_close_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
u64 persistent_fid, u64 volatile_fid)
{
struct smb2_close_req *req;
struct kvec *iov = rqst->rq_iov;
unsigned int total_len;
int rc;
rc = smb2_plain_req_init(SMB2_CLOSE, tcon, (void **) &req, &total_len);
if (rc)
return rc;
req->PersistentFileId = persistent_fid;
req->VolatileFileId = volatile_fid;
iov[0].iov_base = (char *)req;
iov[0].iov_len = total_len;
return 0;
}
void
SMB2_close_free(struct smb_rqst *rqst)
{
if (rqst && rqst->rq_iov)
cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
}
int
SMB2_close_flags(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid, int flags)
{
struct smb_rqst rqst;
struct smb2_close_rsp *rsp = NULL;
struct cifs_ses *ses = tcon->ses;
struct kvec iov[1];
struct kvec rsp_iov;
int resp_buftype = CIFS_NO_BUFFER;
int rc = 0;
cifs_dbg(FYI, "Close\n");
if (!ses || !(ses->server))
return -EIO;
if (smb3_encryption_required(tcon))
flags |= CIFS_TRANSFORM_REQ;
memset(&rqst, 0, sizeof(struct smb_rqst));
memset(&iov, 0, sizeof(iov));
rqst.rq_iov = iov;
rqst.rq_nvec = 1;
rc = SMB2_close_init(tcon, &rqst, persistent_fid, volatile_fid);
if (rc)
goto close_exit;
rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
rsp = (struct smb2_close_rsp *)rsp_iov.iov_base;
if (rc != 0) {
cifs_stats_fail_inc(tcon, SMB2_CLOSE_HE);
trace_smb3_close_err(xid, persistent_fid, tcon->tid, ses->Suid,
rc);
goto close_exit;
}
atomic_dec(&tcon->num_remote_opens);
/* BB FIXME - decode close response, update inode for caching */
close_exit:
SMB2_close_free(&rqst);
free_rsp_buf(resp_buftype, rsp);
return rc;
}
int
SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid)
{
return SMB2_close_flags(xid, tcon, persistent_fid, volatile_fid, 0);
}
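/*
* Sanity-check that a variable-length field in a server response lies
* entirely within the received SMB and meets the minimum expected size.
*/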
int
smb2_validate_iov(unsigned int offset, unsigned int buffer_length,
struct kvec *iov, unsigned int min_buf_size)
{
unsigned int smb_len = iov->iov_len;
char *end_of_smb = smb_len + (char *)iov->iov_base;
char *begin_of_buf = offset + (char *)iov->iov_base;
char *end_of_buf = begin_of_buf + buffer_length;
if (buffer_length < min_buf_size) {
cifs_dbg(VFS, "buffer length %d smaller than minimum size %d\n",
buffer_length, min_buf_size);
return -EINVAL;
}
/* check if beyond RFC1001 maximum length */
if ((smb_len > 0x7FFFFF) || (buffer_length > 0x7FFFFF)) {
cifs_dbg(VFS, "buffer length %d or smb length %d too large\n",
buffer_length, smb_len);
return -EINVAL;
}
if ((begin_of_buf > end_of_smb) || (end_of_buf > end_of_smb)) {
cifs_dbg(VFS, "illegal server response, bad offset to data\n");
return -EINVAL;
}
return 0;
}
/*
* If SMB buffer fields are valid, copy into temporary buffer to hold result.
* Caller must free buffer.
*/
int
smb2_validate_and_copy_iov(unsigned int offset, unsigned int buffer_length,
struct kvec *iov, unsigned int minbufsize,
char *data)
{
char *begin_of_buf = offset + (char *)iov->iov_base;
int rc;
if (!data)
return -EINVAL;
rc = smb2_validate_iov(offset, buffer_length, iov, minbufsize);
if (rc)
return rc;
memcpy(data, begin_of_buf, buffer_length);
return 0;
}
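/*
* Marshal an SMB2 QUERY_INFO request; optional input data, when present,
* is appended to the request buffer.
*/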
int
SMB2_query_info_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
u64 persistent_fid, u64 volatile_fid,
u8 info_class, u8 info_type, u32 additional_info,
size_t output_len, size_t input_len, void *input)
{
struct smb2_query_info_req *req;
struct kvec *iov = rqst->rq_iov;
unsigned int total_len;
int rc;
rc = smb2_plain_req_init(SMB2_QUERY_INFO, tcon, (void **) &req,
&total_len);
if (rc)
return rc;
req->InfoType = info_type;
req->FileInfoClass = info_class;
req->PersistentFileId = persistent_fid;
req->VolatileFileId = volatile_fid;
req->AdditionalInformation = cpu_to_le32(additional_info);
req->OutputBufferLength = cpu_to_le32(output_len);
if (input_len) {
req->InputBufferLength = cpu_to_le32(input_len);
/* total_len for smb query request never close to le16 max */
req->InputBufferOffset = cpu_to_le16(total_len - 1);
memcpy(req->Buffer, input, input_len);
}
iov[0].iov_base = (char *)req;
/* 1 for Buffer */
iov[0].iov_len = total_len - 1 + input_len;
return 0;
}
void
SMB2_query_info_free(struct smb_rqst *rqst)
{
if (rqst && rqst->rq_iov)
cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
}
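/*
* Worker for the SMB2_query_* wrappers below: sends a QUERY_INFO request,
* allocating the result buffer from the reply size when the caller passed
* a NULL *data, then copies the validated output into it.
*/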
static int
query_info(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid, u8 info_class, u8 info_type,
u32 additional_info, size_t output_len, size_t min_len, void **data,
u32 *dlen)
{
struct smb_rqst rqst;
struct smb2_query_info_rsp *rsp = NULL;
struct kvec iov[1];
struct kvec rsp_iov;
int rc = 0;
int resp_buftype = CIFS_NO_BUFFER;
struct cifs_ses *ses = tcon->ses;
int flags = 0;
bool allocated = false;
cifs_dbg(FYI, "Query Info\n");
if (!ses || !(ses->server))
return -EIO;
if (smb3_encryption_required(tcon))
flags |= CIFS_TRANSFORM_REQ;
memset(&rqst, 0, sizeof(struct smb_rqst));
memset(&iov, 0, sizeof(iov));
rqst.rq_iov = iov;
rqst.rq_nvec = 1;
rc = SMB2_query_info_init(tcon, &rqst, persistent_fid, volatile_fid,
info_class, info_type, additional_info,
output_len, 0, NULL);
if (rc)
goto qinf_exit;
trace_smb3_query_info_enter(xid, persistent_fid, tcon->tid,
ses->Suid, info_class, (__u32)info_type);
rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
if (rc) {
cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
trace_smb3_query_info_err(xid, persistent_fid, tcon->tid,
ses->Suid, info_class, (__u32)info_type, rc);
goto qinf_exit;
}
trace_smb3_query_info_done(xid, persistent_fid, tcon->tid,
ses->Suid, info_class, (__u32)info_type);
if (dlen) {
*dlen = le32_to_cpu(rsp->OutputBufferLength);
if (!*data) {
*data = kmalloc(*dlen, GFP_KERNEL);
if (!*data) {
cifs_dbg(VFS,
"Error %d allocating memory for acl\n",
rc);
*dlen = 0;
rc = -ENOMEM;
goto qinf_exit;
}
allocated = true;
}
}
rc = smb2_validate_and_copy_iov(le16_to_cpu(rsp->OutputBufferOffset),
le32_to_cpu(rsp->OutputBufferLength),
&rsp_iov, min_len, *data);
if (rc && allocated) {
kfree(*data);
*data = NULL;
*dlen = 0;
}
qinf_exit:
SMB2_query_info_free(&rqst);
free_rsp_buf(resp_buftype, rsp);
return rc;
}
int SMB2_query_info(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid, struct smb2_file_all_info *data)
{
return query_info(xid, tcon, persistent_fid, volatile_fid,
FILE_ALL_INFORMATION, SMB2_O_INFO_FILE, 0,
sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
sizeof(struct smb2_file_all_info), (void **)&data,
NULL);
}
int
SMB2_query_acl(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid,
void **data, u32 *plen)
{
__u32 additional_info = OWNER_SECINFO | GROUP_SECINFO | DACL_SECINFO;
*plen = 0;
return query_info(xid, tcon, persistent_fid, volatile_fid,
0, SMB2_O_INFO_SECURITY, additional_info,
SMB2_MAX_BUFFER_SIZE, MIN_SEC_DESC_LEN, data, plen);
}
int
SMB2_get_srv_num(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid, __le64 *uniqueid)
{
return query_info(xid, tcon, persistent_fid, volatile_fid,
FILE_INTERNAL_INFORMATION, SMB2_O_INFO_FILE, 0,
sizeof(struct smb2_file_internal_info),
sizeof(struct smb2_file_internal_info),
(void **)&uniqueid, NULL);
}
/*
* This is a no-op for now. We're not really interested in the reply, but
* rather in the fact that the server sent one and that server->lstrp
* gets updated.
*
* FIXME: maybe we should consider checking that the reply matches the request?
*/
static void
smb2_echo_callback(struct mid_q_entry *mid)
{
struct TCP_Server_Info *server = mid->callback_data;
struct smb2_echo_rsp *rsp = (struct smb2_echo_rsp *)mid->resp_buf;
struct cifs_credits credits = { .value = 0, .instance = 0 };
if (mid->mid_state == MID_RESPONSE_RECEIVED
|| mid->mid_state == MID_RESPONSE_MALFORMED) {
credits.value = le16_to_cpu(rsp->sync_hdr.CreditRequest);
credits.instance = server->reconnect_instance;
}
DeleteMidQEntry(mid);
add_credits(server, &credits, CIFS_ECHO_OP);
}
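/*
* Delayed work that re-establishes tree connections (and reopens
* persistent handles) after a transport reconnect; reschedules itself if
* any tcon still fails to reconnect.
*/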
void smb2_reconnect_server(struct work_struct *work)
{
struct TCP_Server_Info *server = container_of(work,
struct TCP_Server_Info, reconnect.work);
struct cifs_ses *ses;
struct cifs_tcon *tcon, *tcon2;
struct list_head tmp_list;
int tcon_exist = false;
int rc;
int resched = false;
/* Prevent simultaneous reconnects that can corrupt tcon->rlist list */
mutex_lock(&server->reconnect_mutex);
INIT_LIST_HEAD(&tmp_list);
cifs_dbg(FYI, "Need negotiate, reconnecting tcons\n");
spin_lock(&cifs_tcp_ses_lock);
list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
if (tcon->need_reconnect || tcon->need_reopen_files) {
tcon->tc_count++;
list_add_tail(&tcon->rlist, &tmp_list);
tcon_exist = true;
}
}
if (ses->tcon_ipc && ses->tcon_ipc->need_reconnect) {
list_add_tail(&ses->tcon_ipc->rlist, &tmp_list);
tcon_exist = true;
}
}
/*
* Get the reference to server struct to be sure that the last call of
* cifs_put_tcon() in the loop below won't release the server pointer.
*/
if (tcon_exist)
server->srv_count++;
spin_unlock(&cifs_tcp_ses_lock);
list_for_each_entry_safe(tcon, tcon2, &tmp_list, rlist) {
rc = smb2_reconnect(SMB2_INTERNAL_CMD, tcon);
if (!rc)
cifs_reopen_persistent_handles(tcon);
else
resched = true;
list_del_init(&tcon->rlist);
cifs_put_tcon(tcon);
}
cifs_dbg(FYI, "Reconnecting tcons finished\n");
if (resched)
queue_delayed_work(cifsiod_wq, &server->reconnect, 2 * HZ);
mutex_unlock(&server->reconnect_mutex);
/* now we can safely release srv struct */
if (tcon_exist)
cifs_put_tcp_session(server, 1);
}
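/*
* Send an asynchronous ECHO to keep the session alive; the reply is
* handled by smb2_echo_callback() above.
*/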
int
SMB2_echo(struct TCP_Server_Info *server)
{
struct smb2_echo_req *req;
int rc = 0;
struct kvec iov[1];
struct smb_rqst rqst = { .rq_iov = iov,
.rq_nvec = 1 };
unsigned int total_len;
cifs_dbg(FYI, "In echo request\n");
if (server->tcpStatus == CifsNeedNegotiate) {
/* No need to send echo on newly established connections */
queue_delayed_work(cifsiod_wq, &server->reconnect, 0);
return rc;
}
rc = smb2_plain_req_init(SMB2_ECHO, NULL, (void **)&req, &total_len);
if (rc)
return rc;
req->sync_hdr.CreditRequest = cpu_to_le16(1);
iov[0].iov_len = total_len;
iov[0].iov_base = (char *)req;
rc = cifs_call_async(server, &rqst, NULL, smb2_echo_callback, NULL,
server, CIFS_ECHO_OP, NULL);
if (rc)
cifs_dbg(FYI, "Echo request failed: %d\n", rc);
cifs_small_buf_release(req);
return rc;
}
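/* Send a synchronous SMB2 FLUSH for the given file handle. */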
int
SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
u64 volatile_fid)
{
struct smb_rqst rqst;
struct smb2_flush_req *req;
struct cifs_ses *ses = tcon->ses;
struct kvec iov[1];
struct kvec rsp_iov;
int resp_buftype;
int rc = 0;
int flags = 0;
unsigned int total_len;
cifs_dbg(FYI, "Flush\n");
if (!ses || !(ses->server))
return -EIO;
rc = smb2_plain_req_init(SMB2_FLUSH, tcon, (void **) &req, &total_len);
if (rc)
return rc;
if (smb3_encryption_required(tcon))
flags |= CIFS_TRANSFORM_REQ;
req->PersistentFileId = persistent_fid;
req->VolatileFileId = volatile_fid;
iov[0].iov_base = (char *)req;
iov[0].iov_len = total_len;
memset(&rqst, 0, sizeof(struct smb_rqst));
rqst.rq_iov = iov;
rqst.rq_nvec = 1;
rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
cifs_small_buf_release(req);
if (rc != 0) {
cifs_stats_fail_inc(tcon, SMB2_FLUSH_HE);
trace_smb3_flush_err(xid, persistent_fid, tcon->tid, ses->Suid,
rc);
}
free_rsp_buf(resp_buftype, rsp_iov.iov_base);
return rc;
}
/*
* To form a chain of read requests, any read requests after the first should
* have the end_of_chain boolean set to true.
*/
static int
smb2_new_read_req(void **buf, unsigned int *total_len,
struct cifs_io_parms *io_parms, struct cifs_readdata *rdata,
unsigned int remaining_bytes, int request_type)
{
int rc = -EACCES;
struct smb2_read_plain_req *req = NULL;
struct smb2_sync_hdr *shdr;
struct TCP_Server_Info *server;
rc = smb2_plain_req_init(SMB2_READ, io_parms->tcon, (void **) &req,
total_len);
if (rc)
return rc;
server = io_parms->tcon->ses->server;
if (server == NULL)
return -ECONNABORTED;
shdr = &req->sync_hdr;
shdr->ProcessId = cpu_to_le32(io_parms->pid);
req->PersistentFileId = io_parms->persistent_fid;
req->VolatileFileId = io_parms->volatile_fid;
req->ReadChannelInfoOffset = 0; /* reserved */
req->ReadChannelInfoLength = 0; /* reserved */
req->Channel = 0; /* reserved */
req->MinimumCount = 0;
req->Length = cpu_to_le32(io_parms->length);
req->Offset = cpu_to_le64(io_parms->offset);
trace_smb3_read_enter(0 /* xid */,
io_parms->persistent_fid,
io_parms->tcon->tid, io_parms->tcon->ses->Suid,
io_parms->offset, io_parms->length);
#ifdef CONFIG_CIFS_SMB_DIRECT
/*
* If we want to do a RDMA write, fill in and append
* smbd_buffer_descriptor_v1 to the end of read request
*/
if (server->rdma && rdata && !server->sign &&
rdata->bytes >= server->smbd_conn->rdma_readwrite_threshold) {
struct smbd_buffer_descriptor_v1 *v1;
bool need_invalidate =
io_parms->tcon->ses->server->dialect == SMB30_PROT_ID;
rdata->mr = smbd_register_mr(
server->smbd_conn, rdata->pages,
rdata->nr_pages, rdata->page_offset,
rdata->tailsz, true, need_invalidate);
if (!rdata->mr)
return -ENOBUFS;
req->Channel = SMB2_CHANNEL_RDMA_V1_INVALIDATE;
if (need_invalidate)
req->Channel = SMB2_CHANNEL_RDMA_V1;
req->ReadChannelInfoOffset =
cpu_to_le16(offsetof(struct smb2_read_plain_req, Buffer));
req->ReadChannelInfoLength =
cpu_to_le16(sizeof(struct smbd_buffer_descriptor_v1));
v1 = (struct smbd_buffer_descriptor_v1 *) &req->Buffer[0];
v1->offset = cpu_to_le64(rdata->mr->mr->iova);
v1->token = cpu_to_le32(rdata->mr->mr->rkey);
v1->length = cpu_to_le32(rdata->mr->mr->length);
*total_len += sizeof(*v1) - 1;
}
#endif
if (request_type & CHAINED_REQUEST) {
if (!(request_type & END_OF_CHAIN)) {
/* next 8-byte aligned request */
*total_len = DIV_ROUND_UP(*total_len, 8) * 8;
shdr->NextCommand = cpu_to_le32(*total_len);
} else /* END_OF_CHAIN */
shdr->NextCommand = 0;
if (request_type & RELATED_REQUEST) {
shdr->Flags |= SMB2_FLAGS_RELATED_OPERATIONS;
/*
* Related requests use info from previous read request
* in chain.
*/
shdr->SessionId = 0xFFFFFFFF;
shdr->TreeId = 0xFFFFFFFF;
req->PersistentFileId = 0xFFFFFFFF;
req->VolatileFileId = 0xFFFFFFFF;
}
}
if (remaining_bytes > io_parms->length)
req->RemainingBytes = cpu_to_le32(remaining_bytes);
else
req->RemainingBytes = 0;
*buf = req;
return rc;
}
static void
smb2_readv_callback(struct mid_q_entry *mid)
{
struct cifs_readdata *rdata = mid->callback_data;
struct cifs_tcon *tcon = tlink_tcon(rdata->cfile->tlink);
struct TCP_Server_Info *server = tcon->ses->server;
struct smb2_sync_hdr *shdr =
(struct smb2_sync_hdr *)rdata->iov[0].iov_base;
struct cifs_credits credits = { .value = 0, .instance = 0 };
struct smb_rqst rqst = { .rq_iov = rdata->iov,
.rq_nvec = 2,
.rq_pages = rdata->pages,
.rq_offset = rdata->page_offset,
.rq_npages = rdata->nr_pages,
.rq_pagesz = rdata->pagesz,
.rq_tailsz = rdata->tailsz };
cifs_dbg(FYI, "%s: mid=%llu state=%d result=%d bytes=%u\n",
__func__, mid->mid, mid->mid_state, rdata->result,
rdata->bytes);
switch (mid->mid_state) {
case MID_RESPONSE_RECEIVED:
credits.value = le16_to_cpu(shdr->CreditRequest);
credits.instance = server->reconnect_instance;
/* result already set, check signature */
if (server->sign && !mid->decrypted) {
int rc;
rc = smb2_verify_signature(&rqst, server);
if (rc)
cifs_dbg(VFS, "SMB signature verification returned error = %d\n",
rc);
}
/* FIXME: should this be counted toward the initiating task? */
task_io_account_read(rdata->got_bytes);
cifs_stats_bytes_read(tcon, rdata->got_bytes);
break;
case MID_REQUEST_SUBMITTED:
case MID_RETRY_NEEDED:
rdata->result = -EAGAIN;
if (server->sign && rdata->got_bytes)
/* reset bytes number since we can not check a sign */
rdata->got_bytes = 0;
/* FIXME: should this be counted toward the initiating task? */
task_io_account_read(rdata->got_bytes);
cifs_stats_bytes_read(tcon, rdata->got_bytes);
break;
case MID_RESPONSE_MALFORMED:
credits.value = le16_to_cpu(shdr->CreditRequest);
credits.instance = server->reconnect_instance;
/* fall through */
default:
rdata->result = -EIO;
}
#ifdef CONFIG_CIFS_SMB_DIRECT
/*
* If this rdata has a memory region registered, the MR can be freed.
* The MR needs to be freed as soon as the I/O finishes to prevent
* deadlock, because MRs are limited in number and are reused for
* future I/Os.
*/
if (rdata->mr) {
smbd_deregister_mr(rdata->mr);
rdata->mr = NULL;
}
#endif
if (rdata->result && rdata->result != -ENODATA) {
cifs_stats_fail_inc(tcon, SMB2_READ_HE);
trace_smb3_read_err(0 /* xid */,
rdata->cfile->fid.persistent_fid,
tcon->tid, tcon->ses->Suid, rdata->offset,
rdata->bytes, rdata->result);
} else
trace_smb3_read_done(0 /* xid */,
rdata->cfile->fid.persistent_fid,
tcon->tid, tcon->ses->Suid,
rdata->offset, rdata->got_bytes);
queue_work(cifsiod_wq, &rdata->work);
DeleteMidQEntry(mid);
add_credits(server, &credits, 0);
}
/* smb2_async_readv - send an async read, and set up mid to handle result */
int
smb2_async_readv(struct cifs_readdata *rdata)
{
int rc, flags = 0;
char *buf;
struct smb2_sync_hdr *shdr;
struct cifs_io_parms io_parms;
struct smb_rqst rqst = { .rq_iov = rdata->iov,
.rq_nvec = 1 };
struct TCP_Server_Info *server;
unsigned int total_len;
cifs_dbg(FYI, "%s: offset=%llu bytes=%u\n",
__func__, rdata->offset, rdata->bytes);
io_parms.tcon = tlink_tcon(rdata->cfile->tlink);
io_parms.offset = rdata->offset;
io_parms.length = rdata->bytes;
io_parms.persistent_fid = rdata->cfile->fid.persistent_fid;
io_parms.volatile_fid = rdata->cfile->fid.volatile_fid;
io_parms.pid = rdata->pid;
server = io_parms.tcon->ses->server;
rc = smb2_new_read_req(
(void **) &buf, &total_len, &io_parms, rdata, 0, 0);
if (rc)
return rc;
if (smb3_encryption_required(io_parms.tcon))
flags |= CIFS_TRANSFORM_REQ;
rdata->iov[0].iov_base = buf;
rdata->iov[0].iov_len = total_len;
shdr = (struct smb2_sync_hdr *)buf;
if (rdata->credits.value > 0) {
shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(rdata->bytes,
SMB2_MAX_BUFFER_SIZE));
shdr->CreditRequest =
cpu_to_le16(le16_to_cpu(shdr->CreditCharge) + 1);
rc = adjust_credits(server, &rdata->credits, rdata->bytes);
if (rc)
goto async_readv_out;
flags |= CIFS_HAS_CREDITS;
}
kref_get(&rdata->refcount);
rc = cifs_call_async(io_parms.tcon->ses->server, &rqst,
cifs_readv_receive, smb2_readv_callback,
smb3_handle_read_data, rdata, flags,
&rdata->credits);
if (rc) {
kref_put(&rdata->refcount, cifs_readdata_release);
cifs_stats_fail_inc(io_parms.tcon, SMB2_READ_HE);
trace_smb3_read_err(0 /* xid */, io_parms.persistent_fid,
io_parms.tcon->tid,
io_parms.tcon->ses->Suid,
io_parms.offset, io_parms.length, rc);
}
async_readv_out:
cifs_small_buf_release(buf);
return rc;
}
int
SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
unsigned int *nbytes, char **buf, int *buf_type)
{
struct smb_rqst rqst;
int resp_buftype, rc = -EACCES;
struct smb2_read_plain_req *req = NULL;
struct smb2_read_rsp *rsp = NULL;
struct kvec iov[1];
struct kvec rsp_iov;
unsigned int total_len;
int flags = CIFS_LOG_ERROR;
struct cifs_ses *ses = io_parms->tcon->ses;
*nbytes = 0;
rc = smb2_new_read_req((void **)&req, &total_len, io_parms, NULL, 0, 0);
if (rc)
return rc;
if (smb3_encryption_required(io_parms->tcon))
flags |= CIFS_TRANSFORM_REQ;
iov[0].iov_base = (char *)req;
iov[0].iov_len = total_len;
memset(&rqst, 0, sizeof(struct smb_rqst));
rqst.rq_iov = iov;
rqst.rq_nvec = 1;
rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
rsp = (struct smb2_read_rsp *)rsp_iov.iov_base;
if (rc) {
if (rc != -ENODATA) {
cifs_stats_fail_inc(io_parms->tcon, SMB2_READ_HE);
cifs_dbg(VFS, "Send error in read = %d\n", rc);
trace_smb3_read_err(xid, req->PersistentFileId,
io_parms->tcon->tid, ses->Suid,
io_parms->offset, io_parms->length,
rc);
} else
trace_smb3_read_done(xid, req->PersistentFileId,
io_parms->tcon->tid, ses->Suid,
io_parms->offset, 0);
free_rsp_buf(resp_buftype, rsp_iov.iov_base);
cifs_small_buf_release(req);
return rc == -ENODATA ? 0 : rc;
} else
trace_smb3_read_done(xid, req->PersistentFileId,
io_parms->tcon->tid, ses->Suid,
io_parms->offset, io_parms->length);
cifs_small_buf_release(req);
*nbytes = le32_to_cpu(rsp->DataLength);
if ((*nbytes > CIFS_MAX_MSGSIZE) ||
(*nbytes > io_parms->length)) {
cifs_dbg(FYI, "bad length %d for count %d\n",
*nbytes, io_parms->length);
rc = -EIO;
*nbytes = 0;
}
if (*buf) {
memcpy(*buf, (char *)rsp + rsp->DataOffset, *nbytes);
free_rsp_buf(resp_buftype, rsp_iov.iov_base);
} else if (resp_buftype != CIFS_NO_BUFFER) {
*buf = rsp_iov.iov_base;
if (resp_buftype == CIFS_SMALL_BUFFER)
*buf_type = CIFS_SMALL_BUFFER;
else if (resp_buftype == CIFS_LARGE_BUFFER)
*buf_type = CIFS_LARGE_BUFFER;
}
return rc;
}
/*
* Check the mid_state and signature on received buffer (if any), and queue the
* workqueue completion task.
*/
static void
smb2_writev_callback(struct mid_q_entry *mid)
{
struct cifs_writedata *wdata = mid->callback_data;
struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
struct TCP_Server_Info *server = tcon->ses->server;
unsigned int written;
struct smb2_write_rsp *rsp = (struct smb2_write_rsp *)mid->resp_buf;
struct cifs_credits credits = { .value = 0, .instance = 0 };
switch (mid->mid_state) {
case MID_RESPONSE_RECEIVED:
credits.value = le16_to_cpu(rsp->sync_hdr.CreditRequest);
credits.instance = server->reconnect_instance;
wdata->result = smb2_check_receive(mid, server, 0);
if (wdata->result != 0)
break;
written = le32_to_cpu(rsp->DataLength);
/*
* Mask off the high 16 bits when the byte count returned by the
* server is greater than the number of bytes requested by the
* client. OS/2 servers are known to set incorrect CountHigh values.
*/
if (written > wdata->bytes)
written &= 0xFFFF;
if (written < wdata->bytes)
wdata->result = -ENOSPC;
else
wdata->bytes = written;
break;
case MID_REQUEST_SUBMITTED:
case MID_RETRY_NEEDED:
wdata->result = -EAGAIN;
break;
case MID_RESPONSE_MALFORMED:
credits.value = le16_to_cpu(rsp->sync_hdr.CreditRequest);
credits.instance = server->reconnect_instance;
/* fall through */
default:
wdata->result = -EIO;
break;
}
#ifdef CONFIG_CIFS_SMB_DIRECT
/*
* If this wdata has a memory region registered, the MR can be freed.
* The number of MRs available is limited, so it is important to
* recover a used MR as soon as the I/O finishes. Holding an MR longer
* into the I/O process can result in an I/O deadlock, due to the lack
* of an MR with which to send a request on I/O retry.
*/
if (wdata->mr) {
smbd_deregister_mr(wdata->mr);
wdata->mr = NULL;
}
#endif
if (wdata->result) {
cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
trace_smb3_write_err(0 /* no xid */,
wdata->cfile->fid.persistent_fid,
tcon->tid, tcon->ses->Suid, wdata->offset,
wdata->bytes, wdata->result);
} else
trace_smb3_write_done(0 /* no xid */,
wdata->cfile->fid.persistent_fid,
tcon->tid, tcon->ses->Suid,
wdata->offset, wdata->bytes);
queue_work(cifsiod_wq, &wdata->work);
DeleteMidQEntry(mid);
add_credits(server, &credits, 0);
}
/* smb2_async_writev - send an async write, and set up mid to handle result */
int
smb2_async_writev(struct cifs_writedata *wdata,
void (*release)(struct kref *kref))
{
int rc = -EACCES, flags = 0;
struct smb2_write_req *req = NULL;
struct smb2_sync_hdr *shdr;
struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
struct TCP_Server_Info *server = tcon->ses->server;
struct kvec iov[1];
struct smb_rqst rqst = { };
unsigned int total_len;
rc = smb2_plain_req_init(SMB2_WRITE, tcon, (void **) &req, &total_len);
if (rc)
return rc;
if (smb3_encryption_required(tcon))
flags |= CIFS_TRANSFORM_REQ;
shdr = (struct smb2_sync_hdr *)req;
shdr->ProcessId = cpu_to_le32(wdata->cfile->pid);
req->PersistentFileId = wdata->cfile->fid.persistent_fid;
req->VolatileFileId = wdata->cfile->fid.volatile_fid;
req->WriteChannelInfoOffset = 0;
req->WriteChannelInfoLength = 0;
req->Channel = 0;
req->Offset = cpu_to_le64(wdata->offset);
req->DataOffset = cpu_to_le16(
offsetof(struct smb2_write_req, Buffer));
req->RemainingBytes = 0;
trace_smb3_write_enter(0 /* xid */, wdata->cfile->fid.persistent_fid,
tcon->tid, tcon->ses->Suid, wdata->offset, wdata->bytes);
#ifdef CONFIG_CIFS_SMB_DIRECT
/*
* If we want to do a server RDMA read, fill in and append
* smbd_buffer_descriptor_v1 to the end of write request
*/
if (server->rdma && !server->sign && wdata->bytes >=
server->smbd_conn->rdma_readwrite_threshold) {
struct smbd_buffer_descriptor_v1 *v1;
bool need_invalidate = server->dialect == SMB30_PROT_ID;
wdata->mr = smbd_register_mr(
server->smbd_conn, wdata->pages,
wdata->nr_pages, wdata->page_offset,
wdata->tailsz, false, need_invalidate);
if (!wdata->mr) {
rc = -ENOBUFS;
goto async_writev_out;
}
req->Length = 0;
req->DataOffset = 0;
if (wdata->nr_pages > 1)
req->RemainingBytes =
cpu_to_le32(
(wdata->nr_pages - 1) * wdata->pagesz -
wdata->page_offset + wdata->tailsz
);
else
req->RemainingBytes = cpu_to_le32(wdata->tailsz);
req->Channel = SMB2_CHANNEL_RDMA_V1_INVALIDATE;
if (need_invalidate)
req->Channel = SMB2_CHANNEL_RDMA_V1;
req->WriteChannelInfoOffset =
cpu_to_le16(offsetof(struct smb2_write_req, Buffer));
req->WriteChannelInfoLength =
cpu_to_le16(sizeof(struct smbd_buffer_descriptor_v1));
v1 = (struct smbd_buffer_descriptor_v1 *) &req->Buffer[0];
v1->offset = cpu_to_le64(wdata->mr->mr->iova);
v1->token = cpu_to_le32(wdata->mr->mr->rkey);
v1->length = cpu_to_le32(wdata->mr->mr->length);
}
#endif
iov[0].iov_len = total_len - 1;
iov[0].iov_base = (char *)req;
rqst.rq_iov = iov;
rqst.rq_nvec = 1;
rqst.rq_pages = wdata->pages;
rqst.rq_offset = wdata->page_offset;
rqst.rq_npages = wdata->nr_pages;
rqst.rq_pagesz = wdata->pagesz;
rqst.rq_tailsz = wdata->tailsz;
#ifdef CONFIG_CIFS_SMB_DIRECT
if (wdata->mr) {
iov[0].iov_len += sizeof(struct smbd_buffer_descriptor_v1);
rqst.rq_npages = 0;
}
#endif
cifs_dbg(FYI, "async write at %llu %u bytes\n",
wdata->offset, wdata->bytes);
#ifdef CONFIG_CIFS_SMB_DIRECT
/* For RDMA read, I/O size is in RemainingBytes not in Length */
if (!wdata->mr)
req->Length = cpu_to_le32(wdata->bytes);
#else
req->Length = cpu_to_le32(wdata->bytes);
#endif
if (wdata->credits.value > 0) {
shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(wdata->bytes,
SMB2_MAX_BUFFER_SIZE));
shdr->CreditRequest =
cpu_to_le16(le16_to_cpu(shdr->CreditCharge) + 1);
rc = adjust_credits(server, &wdata->credits, wdata->bytes);
if (rc)
goto async_writev_out;
flags |= CIFS_HAS_CREDITS;
}
kref_get(&wdata->refcount);
rc = cifs_call_async(server, &rqst, NULL, smb2_writev_callback, NULL,
wdata, flags, &wdata->credits);
if (rc) {
trace_smb3_write_err(0 /* no xid */, req->PersistentFileId,
tcon->tid, tcon->ses->Suid, wdata->offset,
wdata->bytes, rc);
kref_put(&wdata->refcount, release);
cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
}
async_writev_out:
cifs_small_buf_release(req);
return rc;
}
/*
* SMB2_write is passed an iov pointer to a kvec array of length n_vec.
* n_vec must be at least 1; the elements holding the data to write
* begin at position 1 of the iov array (slot 0 is reserved for the
* request header), and the total data length is given by
* io_parms->length.
*/
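/*
* Illustrative caller sketch (hypothetical; not a function in this file):
*
*   struct kvec iov[2];   // iov[0] is filled in by SMB2_write()
*   unsigned int nbytes;
*
*   iov[1].iov_base = data;
*   iov[1].iov_len = io_parms.length;
*   rc = SMB2_write(xid, &io_parms, &nbytes, iov, 1);
*/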
int
SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
unsigned int *nbytes, struct kvec *iov, int n_vec)
{
struct smb_rqst rqst;
int rc = 0;
struct smb2_write_req *req = NULL;
struct smb2_write_rsp *rsp = NULL;
int resp_buftype;
struct kvec rsp_iov;
int flags = 0;
unsigned int total_len;
*nbytes = 0;
if (n_vec < 1)
return rc;
rc = smb2_plain_req_init(SMB2_WRITE, io_parms->tcon, (void **) &req,
&total_len);
if (rc)
return rc;
if (io_parms->tcon->ses->server == NULL)
return -ECONNABORTED;
if (smb3_encryption_required(io_parms->tcon))
flags |= CIFS_TRANSFORM_REQ;
req->sync_hdr.ProcessId = cpu_to_le32(io_parms->pid);
req->PersistentFileId = io_parms->persistent_fid;
req->VolatileFileId = io_parms->volatile_fid;
req->WriteChannelInfoOffset = 0;
req->WriteChannelInfoLength = 0;
req->Channel = 0;
req->Length = cpu_to_le32(io_parms->length);
req->Offset = cpu_to_le64(io_parms->offset);
req->DataOffset = cpu_to_le16(
offsetof(struct smb2_write_req, Buffer));
req->RemainingBytes = 0;
trace_smb3_write_enter(xid, io_parms->persistent_fid,
io_parms->tcon->tid, io_parms->tcon->ses->Suid,
io_parms->offset, io_parms->length);
iov[0].iov_base = (char *)req;
/* 1 for Buffer */
iov[0].iov_len = total_len - 1;
memset(&rqst, 0, sizeof(struct smb_rqst));
rqst.rq_iov = iov;
rqst.rq_nvec = n_vec + 1;
rc = cifs_send_recv(xid, io_parms->tcon->ses, &rqst,
&resp_buftype, flags, &rsp_iov);
rsp = (struct smb2_write_rsp *)rsp_iov.iov_base;
if (rc) {
trace_smb3_write_err(xid, req->PersistentFileId,
io_parms->tcon->tid,
io_parms->tcon->ses->Suid,
io_parms->offset, io_parms->length, rc);
cifs_stats_fail_inc(io_parms->tcon, SMB2_WRITE_HE);
cifs_dbg(VFS, "Send error in write = %d\n", rc);
} else {
*nbytes = le32_to_cpu(rsp->DataLength);
trace_smb3_write_done(xid, req->PersistentFileId,
io_parms->tcon->tid,
io_parms->tcon->ses->Suid,
io_parms->offset, *nbytes);
}
cifs_small_buf_release(req);
free_rsp_buf(resp_buftype, rsp);
return rc;
}
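/*
* Count the directory entries in a search response buffer, bounds-checking
* each entry so a malformed reply cannot walk past end_of_buf. On return
* *lastentry points at the last valid entry.
*/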
static unsigned int
num_entries(char *bufstart, char *end_of_buf, char **lastentry, size_t size)
{
int len;
unsigned int entrycount = 0;
unsigned int next_offset = 0;
char *entryptr;
FILE_DIRECTORY_INFO *dir_info;
if (bufstart == NULL)
return 0;
entryptr = bufstart;
while (1) {
if (entryptr + next_offset < entryptr ||
entryptr + next_offset > end_of_buf ||
entryptr + next_offset + size > end_of_buf) {
cifs_dbg(VFS, "malformed search entry would overflow\n");
break;
}
entryptr = entryptr + next_offset;
dir_info = (FILE_DIRECTORY_INFO *)entryptr;
len = le32_to_cpu(dir_info->FileNameLength);
if (entryptr + len < entryptr ||
entryptr + len > end_of_buf ||
entryptr + len + size > end_of_buf) {
cifs_dbg(VFS, "directory entry name would overflow frame end of buf %p\n",
end_of_buf);
break;
}
*lastentry = entryptr;
entrycount++;
next_offset = le32_to_cpu(dir_info->NextEntryOffset);
if (!next_offset)
break;
}
return entrycount;
}
/*
* Readdir/FindFirst
*/
int
SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid, int index,
struct cifs_search_info *srch_inf)
{
struct smb_rqst rqst;
struct smb2_query_directory_req *req;
struct smb2_query_directory_rsp *rsp = NULL;
struct kvec iov[2];
struct kvec rsp_iov;
int rc = 0;
int len;
int resp_buftype = CIFS_NO_BUFFER;
unsigned char *bufptr;
struct TCP_Server_Info *server;
struct cifs_ses *ses = tcon->ses;
__le16 asterisk = cpu_to_le16('*');
char *end_of_smb;
unsigned int output_size = CIFSMaxBufSize;
size_t info_buf_size;
int flags = 0;
unsigned int total_len;
if (ses && (ses->server))
server = ses->server;
else
return -EIO;
rc = smb2_plain_req_init(SMB2_QUERY_DIRECTORY, tcon, (void **) &req,
&total_len);
if (rc)
return rc;
if (smb3_encryption_required(tcon))
flags |= CIFS_TRANSFORM_REQ;
switch (srch_inf->info_level) {
case SMB_FIND_FILE_DIRECTORY_INFO:
req->FileInformationClass = FILE_DIRECTORY_INFORMATION;
info_buf_size = sizeof(FILE_DIRECTORY_INFO) - 1;
break;
case SMB_FIND_FILE_ID_FULL_DIR_INFO:
req->FileInformationClass = FILEID_FULL_DIRECTORY_INFORMATION;
info_buf_size = sizeof(SEARCH_ID_FULL_DIR_INFO) - 1;
break;
default:
cifs_dbg(VFS, "info level %u isn't supported\n",
srch_inf->info_level);
rc = -EINVAL;
goto qdir_exit;
}
req->FileIndex = cpu_to_le32(index);
req->PersistentFileId = persistent_fid;
req->VolatileFileId = volatile_fid;
len = 0x2;
bufptr = req->Buffer;
memcpy(bufptr, &asterisk, len);
req->FileNameOffset =
cpu_to_le16(sizeof(struct smb2_query_directory_req) - 1);
req->FileNameLength = cpu_to_le16(len);
/*
* BB could be 30 bytes or so longer if we used SMB2 specific
* buffer lengths, but this is safe and close enough.
*/
output_size = min_t(unsigned int, output_size, server->maxBuf);
output_size = min_t(unsigned int, output_size, 2 << 15);
req->OutputBufferLength = cpu_to_le32(output_size);
iov[0].iov_base = (char *)req;
/* 1 for Buffer */
iov[0].iov_len = total_len - 1;
iov[1].iov_base = (char *)(req->Buffer);
iov[1].iov_len = len;
memset(&rqst, 0, sizeof(struct smb_rqst));
rqst.rq_iov = iov;
rqst.rq_nvec = 2;
trace_smb3_query_dir_enter(xid, persistent_fid, tcon->tid,
tcon->ses->Suid, index, output_size);
rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
cifs_small_buf_release(req);
rsp = (struct smb2_query_directory_rsp *)rsp_iov.iov_base;
if (rc) {
if (rc == -ENODATA &&
rsp->sync_hdr.Status == STATUS_NO_MORE_FILES) {
trace_smb3_query_dir_done(xid, persistent_fid,
tcon->tid, tcon->ses->Suid, index, 0);
srch_inf->endOfSearch = true;
rc = 0;
} else {
trace_smb3_query_dir_err(xid, persistent_fid, tcon->tid,
tcon->ses->Suid, index, 0, rc);
cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE);
}
goto qdir_exit;
}
rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
le32_to_cpu(rsp->OutputBufferLength), &rsp_iov,
info_buf_size);
if (rc) {
trace_smb3_query_dir_err(xid, persistent_fid, tcon->tid,
tcon->ses->Suid, index, 0, rc);
goto qdir_exit;
}
srch_inf->unicode = true;
if (srch_inf->ntwrk_buf_start) {
if (srch_inf->smallBuf)
cifs_small_buf_release(srch_inf->ntwrk_buf_start);
else
cifs_buf_release(srch_inf->ntwrk_buf_start);
}
srch_inf->ntwrk_buf_start = (char *)rsp;
srch_inf->srch_entries_start = srch_inf->last_entry =
(char *)rsp + le16_to_cpu(rsp->OutputBufferOffset);
end_of_smb = rsp_iov.iov_len + (char *)rsp;
srch_inf->entries_in_buffer =
num_entries(srch_inf->srch_entries_start, end_of_smb,
&srch_inf->last_entry, info_buf_size);
srch_inf->index_of_last_entry += srch_inf->entries_in_buffer;
cifs_dbg(FYI, "num entries %d last_index %lld srch start %p srch end %p\n",
srch_inf->entries_in_buffer, srch_inf->index_of_last_entry,
srch_inf->srch_entries_start, srch_inf->last_entry);
if (resp_buftype == CIFS_LARGE_BUFFER)
srch_inf->smallBuf = false;
else if (resp_buftype == CIFS_SMALL_BUFFER)
srch_inf->smallBuf = true;
else
cifs_dbg(VFS, "illegal search buffer type\n");
trace_smb3_query_dir_done(xid, persistent_fid, tcon->tid,
tcon->ses->Suid, index, srch_inf->entries_in_buffer);
return rc;
qdir_exit:
free_rsp_buf(resp_buftype, rsp);
return rc;
}
int
SMB2_set_info_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
u64 persistent_fid, u64 volatile_fid, u32 pid, u8 info_class,
u8 info_type, u32 additional_info,
void **data, unsigned int *size)
{
struct smb2_set_info_req *req;
struct kvec *iov = rqst->rq_iov;
unsigned int i, total_len;
int rc;
rc = smb2_plain_req_init(SMB2_SET_INFO, tcon, (void **) &req, &total_len);
if (rc)
return rc;
req->sync_hdr.ProcessId = cpu_to_le32(pid);
req->InfoType = info_type;
req->FileInfoClass = info_class;
req->PersistentFileId = persistent_fid;
req->VolatileFileId = volatile_fid;
req->AdditionalInformation = cpu_to_le32(additional_info);
req->BufferOffset =
cpu_to_le16(sizeof(struct smb2_set_info_req) - 1);
req->BufferLength = cpu_to_le32(*size);
memcpy(req->Buffer, *data, *size);
total_len += *size;
iov[0].iov_base = (char *)req;
/* 1 for Buffer */
iov[0].iov_len = total_len - 1;
for (i = 1; i < rqst->rq_nvec; i++) {
le32_add_cpu(&req->BufferLength, size[i]);
iov[i].iov_base = (char *)data[i];
iov[i].iov_len = size[i];
}
return 0;
}
void
SMB2_set_info_free(struct smb_rqst *rqst)
{
if (rqst && rqst->rq_iov)
cifs_buf_release(rqst->rq_iov[0].iov_base); /* request */
}
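/*
* Worker for the SMB2_set_* wrappers below: marshals a SET_INFO request
* from the (data, size) arrays and sends it synchronously.
*/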
static int
send_set_info(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid, u32 pid, u8 info_class,
u8 info_type, u32 additional_info, unsigned int num,
void **data, unsigned int *size)
{
struct smb_rqst rqst;
struct smb2_set_info_rsp *rsp = NULL;
struct kvec *iov;
struct kvec rsp_iov;
int rc = 0;
int resp_buftype;
struct cifs_ses *ses = tcon->ses;
int flags = 0;
if (!ses || !(ses->server))
return -EIO;
if (!num)
return -EINVAL;
if (smb3_encryption_required(tcon))
flags |= CIFS_TRANSFORM_REQ;
iov = kmalloc_array(num, sizeof(struct kvec), GFP_KERNEL);
if (!iov)
return -ENOMEM;
memset(&rqst, 0, sizeof(struct smb_rqst));
rqst.rq_iov = iov;
rqst.rq_nvec = num;
rc = SMB2_set_info_init(tcon, &rqst, persistent_fid, volatile_fid, pid,
info_class, info_type, additional_info,
data, size);
if (rc) {
kfree(iov);
return rc;
}
rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags,
&rsp_iov);
SMB2_set_info_free(&rqst);
rsp = (struct smb2_set_info_rsp *)rsp_iov.iov_base;
if (rc != 0) {
cifs_stats_fail_inc(tcon, SMB2_SET_INFO_HE);
trace_smb3_set_info_err(xid, persistent_fid, tcon->tid,
ses->Suid, info_class, (__u32)info_type, rc);
}
free_rsp_buf(resp_buftype, rsp);
kfree(iov);
return rc;
}
int
SMB2_set_eof(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
u64 volatile_fid, u32 pid, __le64 *eof)
{
struct smb2_file_eof_info info;
void *data;
unsigned int size;
info.EndOfFile = *eof;
data = &info;
size = sizeof(struct smb2_file_eof_info);
return send_set_info(xid, tcon, persistent_fid, volatile_fid,
pid, FILE_END_OF_FILE_INFORMATION, SMB2_O_INFO_FILE,
0, 1, &data, &size);
}
int
SMB2_set_acl(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid,
struct cifs_ntsd *pnntsd, int pacllen, int aclflag)
{
return send_set_info(xid, tcon, persistent_fid, volatile_fid,
current->tgid, 0, SMB2_O_INFO_SECURITY, aclflag,
1, (void **)&pnntsd, &pacllen);
}
int
SMB2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid,
struct smb2_file_full_ea_info *buf, int len)
{
return send_set_info(xid, tcon, persistent_fid, volatile_fid,
current->tgid, FILE_FULL_EA_INFORMATION, SMB2_O_INFO_FILE,
0, 1, (void **)&buf, &len);
}
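/* Acknowledge an oplock break by sending the new oplock level to the server. */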
int
SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
const u64 persistent_fid, const u64 volatile_fid,
__u8 oplock_level)
{
struct smb_rqst rqst;
int rc;
struct smb2_oplock_break *req = NULL;
struct cifs_ses *ses = tcon->ses;
int flags = CIFS_OBREAK_OP;
unsigned int total_len;
struct kvec iov[1];
struct kvec rsp_iov;
int resp_buf_type;
cifs_dbg(FYI, "SMB2_oplock_break\n");
rc = smb2_plain_req_init(SMB2_OPLOCK_BREAK, tcon, (void **) &req,
&total_len);
if (rc)
return rc;
if (smb3_encryption_required(tcon))
flags |= CIFS_TRANSFORM_REQ;
req->VolatileFid = volatile_fid;
req->PersistentFid = persistent_fid;
req->OplockLevel = oplock_level;
req->sync_hdr.CreditRequest = cpu_to_le16(1);
flags |= CIFS_NO_RESP;
iov[0].iov_base = (char *)req;
iov[0].iov_len = total_len;
memset(&rqst, 0, sizeof(struct smb_rqst));
rqst.rq_iov = iov;
rqst.rq_nvec = 1;
rc = cifs_send_recv(xid, ses, &rqst, &resp_buf_type, flags, &rsp_iov);
cifs_small_buf_release(req);
if (rc) {
cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE);
cifs_dbg(FYI, "Send error in Oplock Break = %d\n", rc);
}
return rc;
}
void
smb2_copy_fs_info_to_kstatfs(struct smb2_fs_full_size_info *pfs_inf,
struct kstatfs *kst)
{
kst->f_bsize = le32_to_cpu(pfs_inf->BytesPerSector) *
le32_to_cpu(pfs_inf->SectorsPerAllocationUnit);
kst->f_blocks = le64_to_cpu(pfs_inf->TotalAllocationUnits);
kst->f_bfree = kst->f_bavail =
le64_to_cpu(pfs_inf->CallerAvailableAllocationUnits);
return;
}
static void
copy_posix_fs_info_to_kstatfs(FILE_SYSTEM_POSIX_INFO *response_data,
struct kstatfs *kst)
{
kst->f_bsize = le32_to_cpu(response_data->BlockSize);
kst->f_blocks = le64_to_cpu(response_data->TotalBlocks);
kst->f_bfree = le64_to_cpu(response_data->BlocksAvail);
if (response_data->UserBlocksAvail == cpu_to_le64(-1))
kst->f_bavail = kst->f_bfree;
else
kst->f_bavail = le64_to_cpu(response_data->UserBlocksAvail);
if (response_data->TotalFileNodes != cpu_to_le64(-1))
kst->f_files = le64_to_cpu(response_data->TotalFileNodes);
if (response_data->FreeFileNodes != cpu_to_le64(-1))
kst->f_ffree = le64_to_cpu(response_data->FreeFileNodes);
return;
}
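/* Build a QUERY_INFO request for filesystem-level (SMB2_O_INFO_FILESYSTEM) info. */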
static int
build_qfs_info_req(struct kvec *iov, struct cifs_tcon *tcon, int level,
int outbuf_len, u64 persistent_fid, u64 volatile_fid)
{
int rc;
struct smb2_query_info_req *req;
unsigned int total_len;
cifs_dbg(FYI, "Query FSInfo level %d\n", level);
if ((tcon->ses == NULL) || (tcon->ses->server == NULL))
return -EIO;
rc = smb2_plain_req_init(SMB2_QUERY_INFO, tcon, (void **) &req,
&total_len);
if (rc)
return rc;
req->InfoType = SMB2_O_INFO_FILESYSTEM;
req->FileInfoClass = level;
req->PersistentFileId = persistent_fid;
req->VolatileFileId = volatile_fid;
/* 1 for pad */
req->InputBufferOffset =
cpu_to_le16(sizeof(struct smb2_query_info_req) - 1);
req->OutputBufferLength = cpu_to_le32(
outbuf_len + sizeof(struct smb2_query_info_rsp) - 1);
iov->iov_base = (char *)req;
iov->iov_len = total_len;
return 0;
}
int
SMB311_posix_qfs_info(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid, struct kstatfs *fsdata)
{
struct smb_rqst rqst;
struct smb2_query_info_rsp *rsp = NULL;
struct kvec iov;
struct kvec rsp_iov;
int rc = 0;
int resp_buftype;
struct cifs_ses *ses = tcon->ses;
FILE_SYSTEM_POSIX_INFO *info = NULL;
int flags = 0;
rc = build_qfs_info_req(&iov, tcon, FS_POSIX_INFORMATION,
sizeof(FILE_SYSTEM_POSIX_INFO),
persistent_fid, volatile_fid);
if (rc)
return rc;
if (smb3_encryption_required(tcon))
flags |= CIFS_TRANSFORM_REQ;
memset(&rqst, 0, sizeof(struct smb_rqst));
rqst.rq_iov = &iov;
rqst.rq_nvec = 1;
rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
cifs_small_buf_release(iov.iov_base);
if (rc) {
cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
goto posix_qfsinf_exit;
}
rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
info = (FILE_SYSTEM_POSIX_INFO *)(
le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
le32_to_cpu(rsp->OutputBufferLength), &rsp_iov,
sizeof(FILE_SYSTEM_POSIX_INFO));
if (!rc)
copy_posix_fs_info_to_kstatfs(info, fsdata);
posix_qfsinf_exit:
free_rsp_buf(resp_buftype, rsp_iov.iov_base);
return rc;
}
int
SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid, struct kstatfs *fsdata)
{
struct smb_rqst rqst;
struct smb2_query_info_rsp *rsp = NULL;
struct kvec iov;
struct kvec rsp_iov;
int rc = 0;
int resp_buftype;
struct cifs_ses *ses = tcon->ses;
struct smb2_fs_full_size_info *info = NULL;
int flags = 0;
rc = build_qfs_info_req(&iov, tcon, FS_FULL_SIZE_INFORMATION,
sizeof(struct smb2_fs_full_size_info),
persistent_fid, volatile_fid);
if (rc)
return rc;
if (smb3_encryption_required(tcon))
flags |= CIFS_TRANSFORM_REQ;
memset(&rqst, 0, sizeof(struct smb_rqst));
rqst.rq_iov = &iov;
rqst.rq_nvec = 1;
rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
cifs_small_buf_release(iov.iov_base);
if (rc) {
cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
goto qfsinf_exit;
}
rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
info = (struct smb2_fs_full_size_info *)(
le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
le32_to_cpu(rsp->OutputBufferLength), &rsp_iov,
sizeof(struct smb2_fs_full_size_info));
if (!rc)
smb2_copy_fs_info_to_kstatfs(info, fsdata);
qfsinf_exit:
free_rsp_buf(resp_buftype, rsp_iov.iov_base);
return rc;
}
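/*
* Query filesystem attribute/device/sector-size/volume information and
* cache the result in the tcon.
*/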
int
SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid, int level)
{
struct smb_rqst rqst;
struct smb2_query_info_rsp *rsp = NULL;
struct kvec iov;
struct kvec rsp_iov;
int rc = 0;
int resp_buftype, max_len, min_len;
struct cifs_ses *ses = tcon->ses;
unsigned int rsp_len, offset;
int flags = 0;
if (level == FS_DEVICE_INFORMATION) {
max_len = sizeof(FILE_SYSTEM_DEVICE_INFO);
min_len = sizeof(FILE_SYSTEM_DEVICE_INFO);
} else if (level == FS_ATTRIBUTE_INFORMATION) {
max_len = sizeof(FILE_SYSTEM_ATTRIBUTE_INFO);
min_len = MIN_FS_ATTR_INFO_SIZE;
} else if (level == FS_SECTOR_SIZE_INFORMATION) {
max_len = sizeof(struct smb3_fs_ss_info);
min_len = sizeof(struct smb3_fs_ss_info);
} else if (level == FS_VOLUME_INFORMATION) {
max_len = sizeof(struct smb3_fs_vol_info) + MAX_VOL_LABEL_LEN;
min_len = sizeof(struct smb3_fs_vol_info);
} else {
cifs_dbg(FYI, "Invalid qfsinfo level %d\n", level);
return -EINVAL;
}
rc = build_qfs_info_req(&iov, tcon, level, max_len,
persistent_fid, volatile_fid);
if (rc)
return rc;
if (smb3_encryption_required(tcon))
flags |= CIFS_TRANSFORM_REQ;
memset(&rqst, 0, sizeof(struct smb_rqst));
rqst.rq_iov = &iov;
rqst.rq_nvec = 1;
rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
cifs_small_buf_release(iov.iov_base);
if (rc) {
cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
goto qfsattr_exit;
}
rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
rsp_len = le32_to_cpu(rsp->OutputBufferLength);
offset = le16_to_cpu(rsp->OutputBufferOffset);
rc = smb2_validate_iov(offset, rsp_len, &rsp_iov, min_len);
if (rc)
goto qfsattr_exit;
if (level == FS_ATTRIBUTE_INFORMATION)
memcpy(&tcon->fsAttrInfo, offset
+ (char *)rsp, min_t(unsigned int,
rsp_len, max_len));
else if (level == FS_DEVICE_INFORMATION)
memcpy(&tcon->fsDevInfo, offset
+ (char *)rsp, sizeof(FILE_SYSTEM_DEVICE_INFO));
else if (level == FS_SECTOR_SIZE_INFORMATION) {
struct smb3_fs_ss_info *ss_info = (struct smb3_fs_ss_info *)
(offset + (char *)rsp);
tcon->ss_flags = le32_to_cpu(ss_info->Flags);
tcon->perf_sector_size =
le32_to_cpu(ss_info->PhysicalBytesPerSectorForPerf);
} else if (level == FS_VOLUME_INFORMATION) {
struct smb3_fs_vol_info *vol_info = (struct smb3_fs_vol_info *)
(offset + (char *)rsp);
tcon->vol_serial_number = vol_info->VolumeSerialNumber;
tcon->vol_create_time = vol_info->VolumeCreationTime;
}
qfsattr_exit:
free_rsp_buf(resp_buftype, rsp_iov.iov_base);
return rc;
}
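/*
* Send an SMB2 LOCK request covering num_lock lock elements; the lock
* array is sent as a second iovec after the fixed-size request.
*/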
int
smb2_lockv(const unsigned int xid, struct cifs_tcon *tcon,
const __u64 persist_fid, const __u64 volatile_fid, const __u32 pid,
const __u32 num_lock, struct smb2_lock_element *buf)
{
struct smb_rqst rqst;
int rc = 0;
struct smb2_lock_req *req = NULL;
struct kvec iov[2];
struct kvec rsp_iov;
int resp_buf_type;
unsigned int count;
int flags = CIFS_NO_RESP;
unsigned int total_len;
cifs_dbg(FYI, "smb2_lockv num lock %d\n", num_lock);
rc = smb2_plain_req_init(SMB2_LOCK, tcon, (void **) &req, &total_len);
if (rc)
return rc;
if (smb3_encryption_required(tcon))
flags |= CIFS_TRANSFORM_REQ;
req->sync_hdr.ProcessId = cpu_to_le32(pid);
req->LockCount = cpu_to_le16(num_lock);
req->PersistentFileId = persist_fid;
req->VolatileFileId = volatile_fid;
count = num_lock * sizeof(struct smb2_lock_element);
iov[0].iov_base = (char *)req;
iov[0].iov_len = total_len - sizeof(struct smb2_lock_element);
iov[1].iov_base = (char *)buf;
iov[1].iov_len = count;
cifs_stats_inc(&tcon->stats.cifs_stats.num_locks);
memset(&rqst, 0, sizeof(struct smb_rqst));
rqst.rq_iov = iov;
rqst.rq_nvec = 2;
rc = cifs_send_recv(xid, tcon->ses, &rqst, &resp_buf_type, flags,
&rsp_iov);
cifs_small_buf_release(req);
if (rc) {
cifs_dbg(FYI, "Send error in smb2_lockv = %d\n", rc);
cifs_stats_fail_inc(tcon, SMB2_LOCK_HE);
trace_smb3_lock_err(xid, persist_fid, tcon->tid,
tcon->ses->Suid, rc);
}
return rc;
}
int
SMB2_lock(const unsigned int xid, struct cifs_tcon *tcon,
const __u64 persist_fid, const __u64 volatile_fid, const __u32 pid,
const __u64 length, const __u64 offset, const __u32 lock_flags,
const bool wait)
{
struct smb2_lock_element lock;
lock.Offset = cpu_to_le64(offset);
lock.Length = cpu_to_le64(length);
lock.Flags = cpu_to_le32(lock_flags);
if (!wait && lock_flags != SMB2_LOCKFLAG_UNLOCK)
lock.Flags |= cpu_to_le32(SMB2_LOCKFLAG_FAIL_IMMEDIATELY);
return smb2_lockv(xid, tcon, persist_fid, volatile_fid, pid, 1, &lock);
}
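/* Acknowledge a lease break by sending the new lease state to the server. */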
int
SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon,
__u8 *lease_key, const __le32 lease_state)
{
struct smb_rqst rqst;
int rc;
struct smb2_lease_ack *req = NULL;
struct cifs_ses *ses = tcon->ses;
int flags = CIFS_OBREAK_OP;
unsigned int total_len;
struct kvec iov[1];
struct kvec rsp_iov;
int resp_buf_type;
__u64 *please_key_high;
__u64 *please_key_low;
cifs_dbg(FYI, "SMB2_lease_break\n");
rc = smb2_plain_req_init(SMB2_OPLOCK_BREAK, tcon, (void **) &req,
&total_len);
if (rc)
return rc;
if (smb3_encryption_required(tcon))
flags |= CIFS_TRANSFORM_REQ;
req->sync_hdr.CreditRequest = cpu_to_le16(1);
req->StructureSize = cpu_to_le16(36);
total_len += 12;
memcpy(req->LeaseKey, lease_key, 16);
req->LeaseState = lease_state;
flags |= CIFS_NO_RESP;
iov[0].iov_base = (char *)req;
iov[0].iov_len = total_len;
memset(&rqst, 0, sizeof(struct smb_rqst));
rqst.rq_iov = iov;
rqst.rq_nvec = 1;
rc = cifs_send_recv(xid, ses, &rqst, &resp_buf_type, flags, &rsp_iov);
cifs_small_buf_release(req);
please_key_low = (__u64 *)lease_key;
please_key_high = (__u64 *)(lease_key+8);
if (rc) {
cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE);
trace_smb3_lease_err(le32_to_cpu(lease_state), tcon->tid,
ses->Suid, *please_key_low, *please_key_high, rc);
cifs_dbg(FYI, "Send error in Lease Break = %d\n", rc);
} else
trace_smb3_lease_done(le32_to_cpu(lease_state), tcon->tid,
ses->Suid, *please_key_low, *please_key_high);
return rc;
}
| ./CrossVul/dataset_final_sorted/CWE-416/c/good_1058_0 |
crossvul-cpp_data_bad_1342_1 | /* vi:set ts=8 sts=4 sw=4 noet:
*
* VIM - Vi IMproved by Bram Moolenaar
*
* Do ":help uganda" in Vim to read copying and usage conditions.
* Do ":help credits" in Vim to see a list of people who contributed.
* See README.txt for an overview of the Vim source code.
*/
#include "vim.h"
#ifdef AMIGA
# include <time.h> /* for time() */
#endif
/*
* Vim originated from Stevie version 3.6 (Fish disk 217) by GRWalter (Fred)
* It has been changed beyond recognition since then.
*
* Differences between version 7.4 and 8.x can be found with ":help version8".
* Differences between version 6.4 and 7.x can be found with ":help version7".
* Differences between version 5.8 and 6.x can be found with ":help version6".
* Differences between version 4.x and 5.x can be found with ":help version5".
* Differences between version 3.0 and 4.x can be found with ":help version4".
* All the remarks about older versions have been removed, they are not very
* interesting.
*/
#include "version.h"
char *Version = VIM_VERSION_SHORT;
static char *mediumVersion = VIM_VERSION_MEDIUM;
#if defined(HAVE_DATE_TIME) || defined(PROTO)
# if (defined(VMS) && defined(VAXC)) || defined(PROTO)
char longVersion[sizeof(VIM_VERSION_LONG_DATE) + sizeof(__DATE__)
+ sizeof(__TIME__) + 3];
void
init_longVersion(void)
{
/*
* Construct the long version string. Necessary because
* VAX C can't concatenate strings in the preprocessor.
*/
strcpy(longVersion, VIM_VERSION_LONG_DATE);
strcat(longVersion, __DATE__);
strcat(longVersion, " ");
strcat(longVersion, __TIME__);
strcat(longVersion, ")");
}
# else
void
init_longVersion(void)
{
char *date_time = __DATE__ " " __TIME__;
char *msg = _("%s (%s, compiled %s)");
size_t len = strlen(msg)
+ strlen(VIM_VERSION_LONG_ONLY)
+ strlen(VIM_VERSION_DATE_ONLY)
+ strlen(date_time);
longVersion = alloc(len);
if (longVersion == NULL)
longVersion = VIM_VERSION_LONG;
else
vim_snprintf(longVersion, len, msg,
VIM_VERSION_LONG_ONLY, VIM_VERSION_DATE_ONLY, date_time);
}
# endif
#else
char *longVersion = VIM_VERSION_LONG;
void
init_longVersion(void)
{
// nothing to do
}
#endif
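/*
* Optional features in this build, as listed by ":version": a "+" prefix
* means the feature was compiled in, "-" means it was left out.
*/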
static char *(features[]) =
{
#ifdef HAVE_ACL
"+acl",
#else
"-acl",
#endif
#ifdef AMIGA /* only for Amiga systems */
# ifdef FEAT_ARP
"+ARP",
# else
"-ARP",
# endif
#endif
#ifdef FEAT_ARABIC
"+arabic",
#else
"-arabic",
#endif
"+autocmd",
#ifdef FEAT_AUTOCHDIR
"+autochdir",
#else
"-autochdir",
#endif
#ifdef FEAT_AUTOSERVERNAME
"+autoservername",
#else
"-autoservername",
#endif
#ifdef FEAT_BEVAL_GUI
"+balloon_eval",
#else
"-balloon_eval",
#endif
#ifdef FEAT_BEVAL_TERM
"+balloon_eval_term",
#else
"-balloon_eval_term",
#endif
#ifdef FEAT_BROWSE
"+browse",
#else
"-browse",
#endif
#ifdef NO_BUILTIN_TCAPS
"-builtin_terms",
#endif
#ifdef SOME_BUILTIN_TCAPS
"+builtin_terms",
#endif
#ifdef ALL_BUILTIN_TCAPS
"++builtin_terms",
#endif
#ifdef FEAT_BYTEOFF
"+byte_offset",
#else
"-byte_offset",
#endif
#ifdef FEAT_JOB_CHANNEL
"+channel",
#else
"-channel",
#endif
#ifdef FEAT_CINDENT
"+cindent",
#else
"-cindent",
#endif
#ifdef FEAT_CLIENTSERVER
"+clientserver",
#else
"-clientserver",
#endif
#ifdef FEAT_CLIPBOARD
"+clipboard",
#else
"-clipboard",
#endif
"+cmdline_compl",
"+cmdline_hist",
#ifdef FEAT_CMDL_INFO
"+cmdline_info",
#else
"-cmdline_info",
#endif
"+comments",
#ifdef FEAT_CONCEAL
"+conceal",
#else
"-conceal",
#endif
#ifdef FEAT_CRYPT
"+cryptv",
#else
"-cryptv",
#endif
#ifdef FEAT_CSCOPE
"+cscope",
#else
"-cscope",
#endif
"+cursorbind",
#ifdef CURSOR_SHAPE
"+cursorshape",
#else
"-cursorshape",
#endif
#if defined(FEAT_CON_DIALOG) && defined(FEAT_GUI_DIALOG)
"+dialog_con_gui",
#else
# if defined(FEAT_CON_DIALOG)
"+dialog_con",
# else
# if defined(FEAT_GUI_DIALOG)
"+dialog_gui",
# else
"-dialog",
# endif
# endif
#endif
#ifdef FEAT_DIFF
"+diff",
#else
"-diff",
#endif
#ifdef FEAT_DIGRAPHS
"+digraphs",
#else
"-digraphs",
#endif
#ifdef FEAT_GUI_MSWIN
# ifdef FEAT_DIRECTX
"+directx",
# else
"-directx",
# endif
#endif
#ifdef FEAT_DND
"+dnd",
#else
"-dnd",
#endif
#ifdef EBCDIC
"+ebcdic",
#else
"-ebcdic",
#endif
#ifdef FEAT_EMACS_TAGS
"+emacs_tags",
#else
"-emacs_tags",
#endif
#ifdef FEAT_EVAL
"+eval",
#else
"-eval",
#endif
"+ex_extra",
#ifdef FEAT_SEARCH_EXTRA
"+extra_search",
#else
"-extra_search",
#endif
"-farsi",
#ifdef FEAT_SEARCHPATH
"+file_in_path",
#else
"-file_in_path",
#endif
#ifdef FEAT_FIND_ID
"+find_in_path",
#else
"-find_in_path",
#endif
#ifdef FEAT_FLOAT
"+float",
#else
"-float",
#endif
#ifdef FEAT_FOLDING
"+folding",
#else
"-folding",
#endif
#ifdef FEAT_FOOTER
"+footer",
#else
"-footer",
#endif
/* only interesting on Unix systems */
#if !defined(USE_SYSTEM) && defined(UNIX)
"+fork()",
#endif
#ifdef FEAT_GETTEXT
# ifdef DYNAMIC_GETTEXT
"+gettext/dyn",
# else
"+gettext",
# endif
#else
"-gettext",
#endif
#ifdef FEAT_HANGULIN
"+hangul_input",
#else
"-hangul_input",
#endif
#if (defined(HAVE_ICONV_H) && defined(USE_ICONV)) || defined(DYNAMIC_ICONV)
# ifdef DYNAMIC_ICONV
"+iconv/dyn",
# else
"+iconv",
# endif
#else
"-iconv",
#endif
"+insert_expand",
#ifdef FEAT_JOB_CHANNEL
"+job",
#else
"-job",
#endif
#ifdef FEAT_JUMPLIST
"+jumplist",
#else
"-jumplist",
#endif
#ifdef FEAT_KEYMAP
"+keymap",
#else
"-keymap",
#endif
#ifdef FEAT_EVAL
"+lambda",
#else
"-lambda",
#endif
#ifdef FEAT_LANGMAP
"+langmap",
#else
"-langmap",
#endif
#ifdef FEAT_LIBCALL
"+libcall",
#else
"-libcall",
#endif
#ifdef FEAT_LINEBREAK
"+linebreak",
#else
"-linebreak",
#endif
#ifdef FEAT_LISP
"+lispindent",
#else
"-lispindent",
#endif
"+listcmds",
"+localmap",
#ifdef FEAT_LUA
# ifdef DYNAMIC_LUA
"+lua/dyn",
# else
"+lua",
# endif
#else
"-lua",
#endif
#ifdef FEAT_MENU
"+menu",
#else
"-menu",
#endif
#ifdef FEAT_SESSION
"+mksession",
#else
"-mksession",
#endif
"+modify_fname",
#ifdef FEAT_MOUSE
"+mouse",
# ifdef FEAT_MOUSESHAPE
"+mouseshape",
# else
"-mouseshape",
# endif
# else
"-mouse",
#endif
#if defined(UNIX) || defined(VMS)
# ifdef FEAT_MOUSE_DEC
"+mouse_dec",
# else
"-mouse_dec",
# endif
# ifdef FEAT_MOUSE_GPM
"+mouse_gpm",
# else
"-mouse_gpm",
# endif
# ifdef FEAT_MOUSE_JSB
"+mouse_jsbterm",
# else
"-mouse_jsbterm",
# endif
# ifdef FEAT_MOUSE_NET
"+mouse_netterm",
# else
"-mouse_netterm",
# endif
#endif
#ifdef __QNX__
# ifdef FEAT_MOUSE_PTERM
"+mouse_pterm",
# else
"-mouse_pterm",
# endif
#endif
#if defined(UNIX) || defined(VMS)
# ifdef FEAT_MOUSE_XTERM
"+mouse_sgr",
# else
"-mouse_sgr",
# endif
# ifdef FEAT_SYSMOUSE
"+mouse_sysmouse",
# else
"-mouse_sysmouse",
# endif
# ifdef FEAT_MOUSE_URXVT
"+mouse_urxvt",
# else
"-mouse_urxvt",
# endif
# ifdef FEAT_MOUSE_XTERM
"+mouse_xterm",
# else
"-mouse_xterm",
# endif
#endif
#ifdef FEAT_MBYTE_IME
# ifdef DYNAMIC_IME
"+multi_byte_ime/dyn",
# else
"+multi_byte_ime",
# endif
#else
"+multi_byte",
#endif
#ifdef FEAT_MULTI_LANG
"+multi_lang",
#else
"-multi_lang",
#endif
#ifdef FEAT_MZSCHEME
# ifdef DYNAMIC_MZSCHEME
"+mzscheme/dyn",
# else
"+mzscheme",
# endif
#else
"-mzscheme",
#endif
#ifdef FEAT_NETBEANS_INTG
"+netbeans_intg",
#else
"-netbeans_intg",
#endif
#ifdef FEAT_NUM64
"+num64",
#else
"-num64",
#endif
#ifdef FEAT_GUI_MSWIN
# ifdef FEAT_OLE
"+ole",
# else
"-ole",
# endif
#endif
#ifdef FEAT_EVAL
"+packages",
#else
"-packages",
#endif
#ifdef FEAT_PATH_EXTRA
"+path_extra",
#else
"-path_extra",
#endif
#ifdef FEAT_PERL
# ifdef DYNAMIC_PERL
"+perl/dyn",
# else
"+perl",
# endif
#else
"-perl",
#endif
#ifdef FEAT_PERSISTENT_UNDO
"+persistent_undo",
#else
"-persistent_undo",
#endif
#ifdef FEAT_PRINTER
# ifdef FEAT_POSTSCRIPT
"+postscript",
# else
"-postscript",
# endif
"+printer",
#else
"-printer",
#endif
#ifdef FEAT_PROFILE
"+profile",
#else
"-profile",
#endif
#ifdef FEAT_PYTHON
# ifdef DYNAMIC_PYTHON
"+python/dyn",
# else
"+python",
# endif
#else
"-python",
#endif
#ifdef FEAT_PYTHON3
# ifdef DYNAMIC_PYTHON3
"+python3/dyn",
# else
"+python3",
# endif
#else
"-python3",
#endif
#ifdef FEAT_QUICKFIX
"+quickfix",
#else
"-quickfix",
#endif
#ifdef FEAT_RELTIME
"+reltime",
#else
"-reltime",
#endif
#ifdef FEAT_RIGHTLEFT
"+rightleft",
#else
"-rightleft",
#endif
#ifdef FEAT_RUBY
# ifdef DYNAMIC_RUBY
"+ruby/dyn",
# else
"+ruby",
# endif
#else
"-ruby",
#endif
"+scrollbind",
#ifdef FEAT_SIGNS
"+signs",
#else
"-signs",
#endif
#ifdef FEAT_SMARTINDENT
"+smartindent",
#else
"-smartindent",
#endif
#ifdef FEAT_SOUND
"+sound",
#else
"-sound",
#endif
#ifdef FEAT_SPELL
"+spell",
#else
"-spell",
#endif
#ifdef STARTUPTIME
"+startuptime",
#else
"-startuptime",
#endif
#ifdef FEAT_STL_OPT
"+statusline",
#else
"-statusline",
#endif
"-sun_workshop",
#ifdef FEAT_SYN_HL
"+syntax",
#else
"-syntax",
#endif
/* only interesting on Unix systems */
#if defined(USE_SYSTEM) && defined(UNIX)
"+system()",
#endif
#ifdef FEAT_TAG_BINS
"+tag_binary",
#else
"-tag_binary",
#endif
"-tag_old_static",
"-tag_any_white",
#ifdef FEAT_TCL
# ifdef DYNAMIC_TCL
"+tcl/dyn",
# else
"+tcl",
# endif
#else
"-tcl",
#endif
#ifdef FEAT_TERMGUICOLORS
"+termguicolors",
#else
"-termguicolors",
#endif
#ifdef FEAT_TERMINAL
"+terminal",
#else
"-terminal",
#endif
#if defined(UNIX)
/* only Unix can have terminfo instead of termcap */
# ifdef TERMINFO
"+terminfo",
# else
"-terminfo",
# endif
#endif
#ifdef FEAT_TERMRESPONSE
"+termresponse",
#else
"-termresponse",
#endif
#ifdef FEAT_TEXTOBJ
"+textobjects",
#else
"-textobjects",
#endif
#ifdef FEAT_TEXT_PROP
"+textprop",
#else
"-textprop",
#endif
#if !defined(UNIX)
/* unix always includes termcap support */
# ifdef HAVE_TGETENT
"+tgetent",
# else
"-tgetent",
# endif
#endif
#ifdef FEAT_TIMERS
"+timers",
#else
"-timers",
#endif
#ifdef FEAT_TITLE
"+title",
#else
"-title",
#endif
#ifdef FEAT_TOOLBAR
"+toolbar",
#else
"-toolbar",
#endif
"+user_commands",
#ifdef FEAT_VARTABS
"+vartabs",
#else
"-vartabs",
#endif
"+vertsplit",
"+virtualedit",
"+visual",
"+visualextra",
#ifdef FEAT_VIMINFO
"+viminfo",
#else
"-viminfo",
#endif
"+vreplace",
#ifdef MSWIN
# ifdef FEAT_VTP
"+vtp",
# else
"-vtp",
# endif
#endif
#ifdef FEAT_WILDIGN
"+wildignore",
#else
"-wildignore",
#endif
#ifdef FEAT_WILDMENU
"+wildmenu",
#else
"-wildmenu",
#endif
"+windows",
#ifdef FEAT_WRITEBACKUP
"+writebackup",
#else
"-writebackup",
#endif
#if defined(UNIX) || defined(VMS)
# ifdef FEAT_X11
"+X11",
# else
"-X11",
# endif
#endif
#ifdef FEAT_XFONTSET
"+xfontset",
#else
"-xfontset",
#endif
#ifdef FEAT_XIM
"+xim",
#else
"-xim",
#endif
#ifdef MSWIN
# ifdef FEAT_XPM_W32
"+xpm_w32",
# else
"-xpm_w32",
# endif
#else
# ifdef HAVE_XPM
"+xpm",
# else
"-xpm",
# endif
#endif
#if defined(UNIX) || defined(VMS)
# ifdef USE_XSMP_INTERACT
"+xsmp_interact",
# else
# ifdef USE_XSMP
"+xsmp",
# else
"-xsmp",
# endif
# endif
# ifdef FEAT_XCLIPBOARD
"+xterm_clipboard",
# else
"-xterm_clipboard",
# endif
#endif
#ifdef FEAT_XTERM_SAVE
"+xterm_save",
#else
"-xterm_save",
#endif
NULL
};
static int included_patches[] =
{ /* Add new patch number below this line */
/**/
2135,
/**/
2134,
/**/
2133,
/**/
2132,
/**/
2131,
/**/
2130,
/**/
2129,
/**/
2128,
/**/
2127,
/**/
2126,
/**/
2125,
/**/
2124,
/**/
2123,
/**/
2122,
/**/
2121,
/**/
2120,
/**/
2119,
/**/
2118,
/**/
2117,
/**/
2116,
/**/
2115,
/**/
2114,
/**/
2113,
/**/
2112,
/**/
2111,
/**/
2110,
/**/
2109,
/**/
2108,
/**/
2107,
/**/
2106,
/**/
2105,
/**/
2104,
/**/
2103,
/**/
2102,
/**/
2101,
/**/
2100,
/**/
2099,
/**/
2098,
/**/
2097,
/**/
2096,
/**/
2095,
/**/
2094,
/**/
2093,
/**/
2092,
/**/
2091,
/**/
2090,
/**/
2089,
/**/
2088,
/**/
2087,
/**/
2086,
/**/
2085,
/**/
2084,
/**/
2083,
/**/
2082,
/**/
2081,
/**/
2080,
/**/
2079,
/**/
2078,
/**/
2077,
/**/
2076,
/**/
2075,
/**/
2074,
/**/
2073,
/**/
2072,
/**/
2071,
/**/
2070,
/**/
2069,
/**/
2068,
/**/
2067,
/**/
2066,
/**/
2065,
/**/
2064,
/**/
2063,
/**/
2062,
/**/
2061,
/**/
2060,
/**/
2059,
/**/
2058,
/**/
2057,
/**/
2056,
/**/
2055,
/**/
2054,
/**/
2053,
/**/
2052,
/**/
2051,
/**/
2050,
/**/
2049,
/**/
2048,
/**/
2047,
/**/
2046,
/**/
2045,
/**/
2044,
/**/
2043,
/**/
2042,
/**/
2041,
/**/
2040,
/**/
2039,
/**/
2038,
/**/
2037,
/**/
2036,
/**/
2035,
/**/
2034,
/**/
2033,
/**/
2032,
/**/
2031,
/**/
2030,
/**/
2029,
/**/
2028,
/**/
2027,
/**/
2026,
/**/
2025,
/**/
2024,
/**/
2023,
/**/
2022,
/**/
2021,
/**/
2020,
/**/
2019,
/**/
2018,
/**/
2017,
/**/
2016,
/**/
2015,
/**/
2014,
/**/
2013,
/**/
2012,
/**/
2011,
/**/
2010,
/**/
2009,
/**/
2008,
/**/
2007,
/**/
2006,
/**/
2005,
/**/
2004,
/**/
2003,
/**/
2002,
/**/
2001,
/**/
2000,
/**/
1999,
/**/
1998,
/**/
1997,
/**/
1996,
/**/
1995,
/**/
1994,
/**/
1993,
/**/
1992,
/**/
1991,
/**/
1990,
/**/
1989,
/**/
1988,
/**/
1987,
/**/
1986,
/**/
1985,
/**/
1984,
/**/
1983,
/**/
1982,
/**/
1981,
/**/
1980,
/**/
1979,
/**/
1978,
/**/
1977,
/**/
1976,
/**/
1975,
/**/
1974,
/**/
1973,
/**/
1972,
/**/
1971,
/**/
1970,
/**/
1969,
/**/
1968,
/**/
1967,
/**/
1966,
/**/
1965,
/**/
1964,
/**/
1963,
/**/
1962,
/**/
1961,
/**/
1960,
/**/
1959,
/**/
1958,
/**/
1957,
/**/
1956,
/**/
1955,
/**/
1954,
/**/
1953,
/**/
1952,
/**/
1951,
/**/
1950,
/**/
1949,
/**/
1948,
/**/
1947,
/**/
1946,
/**/
1945,
/**/
1944,
/**/
1943,
/**/
1942,
/**/
1941,
/**/
1940,
/**/
1939,
/**/
1938,
/**/
1937,
/**/
1936,
/**/
1935,
/**/
1934,
/**/
1933,
/**/
1932,
/**/
1931,
/**/
1930,
/**/
1929,
/**/
1928,
/**/
1927,
/**/
1926,
/**/
1925,
/**/
1924,
/**/
1923,
/**/
1922,
/**/
1921,
/**/
1920,
/**/
1919,
/**/
1918,
/**/
1917,
/**/
1916,
/**/
1915,
/**/
1914,
/**/
1913,
/**/
1912,
/**/
1911,
/**/
1910,
/**/
1909,
/**/
1908,
/**/
1907,
/**/
1906,
/**/
1905,
/**/
1904,
/**/
1903,
/**/
1902,
/**/
1901,
/**/
1900,
/**/
1899,
/**/
1898,
/**/
1897,
/**/
1896,
/**/
1895,
/**/
1894,
/**/
1893,
/**/
1892,
/**/
1891,
/**/
1890,
/**/
1889,
/**/
1888,
/**/
1887,
/**/
1886,
/**/
1885,
/**/
1884,
/**/
1883,
/**/
1882,
/**/
1881,
/**/
1880,
/**/
1879,
/**/
1878,
/**/
1877,
/**/
1876,
/**/
1875,
/**/
1874,
/**/
1873,
/**/
1872,
/**/
1871,
/**/
1870,
/**/
1869,
/**/
1868,
/**/
1867,
/**/
1866,
/**/
1865,
/**/
1864,
/**/
1863,
/**/
1862,
/**/
1861,
/**/
1860,
/**/
1859,
/**/
1858,
/**/
1857,
/**/
1856,
/**/
1855,
/**/
1854,
/**/
1853,
/**/
1852,
/**/
1851,
/**/
1850,
/**/
1849,
/**/
1848,
/**/
1847,
/**/
1846,
/**/
1845,
/**/
1844,
/**/
1843,
/**/
1842,
/**/
1841,
/**/
1840,
/**/
1839,
/**/
1838,
/**/
1837,
/**/
1836,
/**/
1835,
/**/
1834,
/**/
1833,
/**/
1832,
/**/
1831,
/**/
1830,
/**/
1829,
/**/
1828,
/**/
1827,
/**/
1826,
/**/
1825,
/**/
1824,
/**/
1823,
/**/
1822,
/**/
1821,
/**/
1820,
/**/
1819,
/**/
1818,
/**/
1817,
/**/
1816,
/**/
1815,
/**/
1814,
/**/
1813,
/**/
1812,
/**/
1811,
/**/
1810,
/**/
1809,
/**/
1808,
/**/
1807,
/**/
1806,
/**/
1805,
/**/
1804,
/**/
1803,
/**/
1802,
/**/
1801,
/**/
1800,
/**/
1799,
/**/
1798,
/**/
1797,
/**/
1796,
/**/
1795,
/**/
1794,
/**/
1793,
/**/
1792,
/**/
1791,
/**/
1790,
/**/
1789,
/**/
1788,
/**/
1787,
/**/
1786,
/**/
1785,
/**/
1784,
/**/
1783,
/**/
1782,
/**/
1781,
/**/
1780,
/**/
1779,
/**/
1778,
/**/
1777,
/**/
1776,
/**/
1775,
/**/
1774,
/**/
1773,
/**/
1772,
/**/
1771,
/**/
1770,
/**/
1769,
/**/
1768,
/**/
1767,
/**/
1766,
/**/
1765,
/**/
1764,
/**/
1763,
/**/
1762,
/**/
1761,
/**/
1760,
/**/
1759,
/**/
1758,
/**/
1757,
/**/
1756,
/**/
1755,
/**/
1754,
/**/
1753,
/**/
1752,
/**/
1751,
/**/
1750,
/**/
1749,
/**/
1748,
/**/
1747,
/**/
1746,
/**/
1745,
/**/
1744,
/**/
1743,
/**/
1742,
/**/
1741,
/**/
1740,
/**/
1739,
/**/
1738,
/**/
1737,
/**/
1736,
/**/
1735,
/**/
1734,
/**/
1733,
/**/
1732,
/**/
1731,
/**/
1730,
/**/
1729,
/**/
1728,
/**/
1727,
/**/
1726,
/**/
1725,
/**/
1724,
/**/
1723,
/**/
1722,
/**/
1721,
/**/
1720,
/**/
1719,
/**/
1718,
/**/
1717,
/**/
1716,
/**/
1715,
/**/
1714,
/**/
1713,
/**/
1712,
/**/
1711,
/**/
1710,
/**/
1709,
/**/
1708,
/**/
1707,
/**/
1706,
/**/
1705,
/**/
1704,
/**/
1703,
/**/
1702,
/**/
1701,
/**/
1700,
/**/
1699,
/**/
1698,
/**/
1697,
/**/
1696,
/**/
1695,
/**/
1694,
/**/
1693,
/**/
1692,
/**/
1691,
/**/
1690,
/**/
1689,
/**/
1688,
/**/
1687,
/**/
1686,
/**/
1685,
/**/
1684,
/**/
1683,
/**/
1682,
/**/
1681,
/**/
1680,
/**/
1679,
/**/
1678,
/**/
1677,
/**/
1676,
/**/
1675,
/**/
1674,
/**/
1673,
/**/
1672,
/**/
1671,
/**/
1670,
/**/
1669,
/**/
1668,
/**/
1667,
/**/
1666,
/**/
1665,
/**/
1664,
/**/
1663,
/**/
1662,
/**/
1661,
/**/
1660,
/**/
1659,
/**/
1658,
/**/
1657,
/**/
1656,
/**/
1655,
/**/
1654,
/**/
1653,
/**/
1652,
/**/
1651,
/**/
1650,
/**/
1649,
/**/
1648,
/**/
1647,
/**/
1646,
/**/
1645,
/**/
1644,
/**/
1643,
/**/
1642,
/**/
1641,
/**/
1640,
/**/
1639,
/**/
1638,
/**/
1637,
/**/
1636,
/**/
1635,
/**/
1634,
/**/
1633,
/**/
1632,
/**/
1631,
/**/
1630,
/**/
1629,
/**/
1628,
/**/
1627,
/**/
1626,
/**/
1625,
/**/
1624,
/**/
1623,
/**/
1622,
/**/
1621,
/**/
1620,
/**/
1619,
/**/
1618,
/**/
1617,
/**/
1616,
/**/
1615,
/**/
1614,
/**/
1613,
/**/
1612,
/**/
1611,
/**/
1610,
/**/
1609,
/**/
1608,
/**/
1607,
/**/
1606,
/**/
1605,
/**/
1604,
/**/
1603,
/**/
1602,
/**/
1601,
/**/
1600,
/**/
1599,
/**/
1598,
/**/
1597,
/**/
1596,
/**/
1595,
/**/
1594,
/**/
1593,
/**/
1592,
/**/
1591,
/**/
1590,
/**/
1589,
/**/
1588,
/**/
1587,
/**/
1586,
/**/
1585,
/**/
1584,
/**/
1583,
/**/
1582,
/**/
1581,
/**/
1580,
/**/
1579,
/**/
1578,
/**/
1577,
/**/
1576,
/**/
1575,
/**/
1574,
/**/
1573,
/**/
1572,
/**/
1571,
/**/
1570,
/**/
1569,
/**/
1568,
/**/
1567,
/**/
1566,
/**/
1565,
/**/
1564,
/**/
1563,
/**/
1562,
/**/
1561,
/**/
1560,
/**/
1559,
/**/
1558,
/**/
1557,
/**/
1556,
/**/
1555,
/**/
1554,
/**/
1553,
/**/
1552,
/**/
1551,
/**/
1550,
/**/
1549,
/**/
1548,
/**/
1547,
/**/
1546,
/**/
1545,
/**/
1544,
/**/
1543,
/**/
1542,
/**/
1541,
/**/
1540,
/**/
1539,
/**/
1538,
/**/
1537,
/**/
1536,
/**/
1535,
/**/
1534,
/**/
1533,
/**/
1532,
/**/
1531,
/**/
1530,
/**/
1529,
/**/
1528,
/**/
1527,
/**/
1526,
/**/
1525,
/**/
1524,
/**/
1523,
/**/
1522,
/**/
1521,
/**/
1520,
/**/
1519,
/**/
1518,
/**/
1517,
/**/
1516,
/**/
1515,
/**/
1514,
/**/
1513,
/**/
1512,
/**/
1511,
/**/
1510,
/**/
1509,
/**/
1508,
/**/
1507,
/**/
1506,
/**/
1505,
/**/
1504,
/**/
1503,
/**/
1502,
/**/
1501,
/**/
1500,
/**/
1499,
/**/
1498,
/**/
1497,
/**/
1496,
/**/
1495,
/**/
1494,
/**/
1493,
/**/
1492,
/**/
1491,
/**/
1490,
/**/
1489,
/**/
1488,
/**/
1487,
/**/
1486,
/**/
1485,
/**/
1484,
/**/
1483,
/**/
1482,
/**/
1481,
/**/
1480,
/**/
1479,
/**/
1478,
/**/
1477,
/**/
1476,
/**/
1475,
/**/
1474,
/**/
1473,
/**/
1472,
/**/
1471,
/**/
1470,
/**/
1469,
/**/
1468,
/**/
1467,
/**/
1466,
/**/
1465,
/**/
1464,
/**/
1463,
/**/
1462,
/**/
1461,
/**/
1460,
/**/
1459,
/**/
1458,
/**/
1457,
/**/
1456,
/**/
1455,
/**/
1454,
/**/
1453,
/**/
1452,
/**/
1451,
/**/
1450,
/**/
1449,
/**/
1448,
/**/
1447,
/**/
1446,
/**/
1445,
/**/
1444,
/**/
1443,
/**/
1442,
/**/
1441,
/**/
1440,
/**/
1439,
/**/
1438,
/**/
1437,
/**/
1436,
/**/
1435,
/**/
1434,
/**/
1433,
/**/
1432,
/**/
1431,
/**/
1430,
/**/
1429,
/**/
1428,
/**/
1427,
/**/
1426,
/**/
1425,
/**/
1424,
/**/
1423,
/**/
1422,
/**/
1421,
/**/
1420,
/**/
1419,
/**/
1418,
/**/
1417,
/**/
1416,
/**/
1415,
/**/
1414,
/**/
1413,
/**/
1412,
/**/
1411,
/**/
1410,
/**/
1409,
/**/
1408,
/**/
1407,
/**/
1406,
/**/
1405,
/**/
1404,
/**/
1403,
/**/
1402,
/**/
1401,
/**/
1400,
/**/
1399,
/**/
1398,
/**/
1397,
/**/
1396,
/**/
1395,
/**/
1394,
/**/
1393,
/**/
1392,
/**/
1391,
/**/
1390,
/**/
1389,
/**/
1388,
/**/
1387,
/**/
1386,
/**/
1385,
/**/
1384,
/**/
1383,
/**/
1382,
/**/
1381,
/**/
1380,
/**/
1379,
/**/
1378,
/**/
1377,
/**/
1376,
/**/
1375,
/**/
1374,
/**/
1373,
/**/
1372,
/**/
1371,
/**/
1370,
/**/
1369,
/**/
1368,
/**/
1367,
/**/
1366,
/**/
1365,
/**/
1364,
/**/
1363,
/**/
1362,
/**/
1361,
/**/
1360,
/**/
1359,
/**/
1358,
/**/
1357,
/**/
1356,
/**/
1355,
/**/
1354,
/**/
1353,
/**/
1352,
/**/
1351,
/**/
1350,
/**/
1349,
/**/
1348,
/**/
1347,
/**/
1346,
/**/
1345,
/**/
1344,
/**/
1343,
/**/
1342,
/**/
1341,
/**/
1340,
/**/
1339,
/**/
1338,
/**/
1337,
/**/
1336,
/**/
1335,
/**/
1334,
/**/
1333,
/**/
1332,
/**/
1331,
/**/
1330,
/**/
1329,
/**/
1328,
/**/
1327,
/**/
1326,
/**/
1325,
/**/
1324,
/**/
1323,
/**/
1322,
/**/
1321,
/**/
1320,
/**/
1319,
/**/
1318,
/**/
1317,
/**/
1316,
/**/
1315,
/**/
1314,
/**/
1313,
/**/
1312,
/**/
1311,
/**/
1310,
/**/
1309,
/**/
1308,
/**/
1307,
/**/
1306,
/**/
1305,
/**/
1304,
/**/
1303,
/**/
1302,
/**/
1301,
/**/
1300,
/**/
1299,
/**/
1298,
/**/
1297,
/**/
1296,
/**/
1295,
/**/
1294,
/**/
1293,
/**/
1292,
/**/
1291,
/**/
1290,
/**/
1289,
/**/
1288,
/**/
1287,
/**/
1286,
/**/
1285,
/**/
1284,
/**/
1283,
/**/
1282,
/**/
1281,
/**/
1280,
/**/
1279,
/**/
1278,
/**/
1277,
/**/
1276,
/**/
1275,
/**/
1274,
/**/
1273,
/**/
1272,
/**/
1271,
/**/
1270,
/**/
1269,
/**/
1268,
/**/
1267,
/**/
1266,
/**/
1265,
/**/
1264,
/**/
1263,
/**/
1262,
/**/
1261,
/**/
1260,
/**/
1259,
/**/
1258,
/**/
1257,
/**/
1256,
/**/
1255,
/**/
1254,
/**/
1253,
/**/
1252,
/**/
1251,
/**/
1250,
/**/
1249,
/**/
1248,
/**/
1247,
/**/
1246,
/**/
1245,
/**/
1244,
/**/
1243,
/**/
1242,
/**/
1241,
/**/
1240,
/**/
1239,
/**/
1238,
/**/
1237,
/**/
1236,
/**/
1235,
/**/
1234,
/**/
1233,
/**/
1232,
/**/
1231,
/**/
1230,
/**/
1229,
/**/
1228,
/**/
1227,
/**/
1226,
/**/
1225,
/**/
1224,
/**/
1223,
/**/
1222,
/**/
1221,
/**/
1220,
/**/
1219,
/**/
1218,
/**/
1217,
/**/
1216,
/**/
1215,
/**/
1214,
/**/
1213,
/**/
1212,
/**/
1211,
/**/
1210,
/**/
1209,
/**/
1208,
/**/
1207,
/**/
1206,
/**/
1205,
/**/
1204,
/**/
1203,
/**/
1202,
/**/
1201,
/**/
1200,
/**/
1199,
/**/
1198,
/**/
1197,
/**/
1196,
/**/
1195,
/**/
1194,
/**/
1193,
/**/
1192,
/**/
1191,
/**/
1190,
/**/
1189,
/**/
1188,
/**/
1187,
/**/
1186,
/**/
1185,
/**/
1184,
/**/
1183,
/**/
1182,
/**/
1181,
/**/
1180,
/**/
1179,
/**/
1178,
/**/
1177,
/**/
1176,
/**/
1175,
/**/
1174,
/**/
1173,
/**/
1172,
/**/
1171,
/**/
1170,
/**/
1169,
/**/
1168,
/**/
1167,
/**/
1166,
/**/
1165,
/**/
1164,
/**/
1163,
/**/
1162,
/**/
1161,
/**/
1160,
/**/
1159,
/**/
1158,
/**/
1157,
/**/
1156,
/**/
1155,
/**/
1154,
/**/
1153,
/**/
1152,
/**/
1151,
/**/
1150,
/**/
1149,
/**/
1148,
/**/
1147,
/**/
1146,
/**/
1145,
/**/
1144,
/**/
1143,
/**/
1142,
/**/
1141,
/**/
1140,
/**/
1139,
/**/
1138,
/**/
1137,
/**/
1136,
/**/
1135,
/**/
1134,
/**/
1133,
/**/
1132,
/**/
1131,
/**/
1130,
/**/
1129,
/**/
1128,
/**/
1127,
/**/
1126,
/**/
1125,
/**/
1124,
/**/
1123,
/**/
1122,
/**/
1121,
/**/
1120,
/**/
1119,
/**/
1118,
/**/
1117,
/**/
1116,
/**/
1115,
/**/
1114,
/**/
1113,
/**/
1112,
/**/
1111,
/**/
1110,
/**/
1109,
/**/
1108,
/**/
1107,
/**/
1106,
/**/
1105,
/**/
1104,
/**/
1103,
/**/
1102,
/**/
1101,
/**/
1100,
/**/
1099,
/**/
1098,
/**/
1097,
/**/
1096,
/**/
1095,
/**/
1094,
/**/
1093,
/**/
1092,
/**/
1091,
/**/
1090,
/**/
1089,
/**/
1088,
/**/
1087,
/**/
1086,
/**/
1085,
/**/
1084,
/**/
1083,
/**/
1082,
/**/
1081,
/**/
1080,
/**/
1079,
/**/
1078,
/**/
1077,
/**/
1076,
/**/
1075,
/**/
1074,
/**/
1073,
/**/
1072,
/**/
1071,
/**/
1070,
/**/
1069,
/**/
1068,
/**/
1067,
/**/
1066,
/**/
1065,
/**/
1064,
/**/
1063,
/**/
1062,
/**/
1061,
/**/
1060,
/**/
1059,
/**/
1058,
/**/
1057,
/**/
1056,
/**/
1055,
/**/
1054,
/**/
1053,
/**/
1052,
/**/
1051,
/**/
1050,
/**/
1049,
/**/
1048,
/**/
1047,
/**/
1046,
/**/
1045,
/**/
1044,
/**/
1043,
/**/
1042,
/**/
1041,
/**/
1040,
/**/
1039,
/**/
1038,
/**/
1037,
/**/
1036,
/**/
1035,
/**/
1034,
/**/
1033,
/**/
1032,
/**/
1031,
/**/
1030,
/**/
1029,
/**/
1028,
/**/
1027,
/**/
1026,
/**/
1025,
/**/
1024,
/**/
1023,
/**/
1022,
/**/
1021,
/**/
1020,
/**/
1019,
/**/
1018,
/**/
1017,
/**/
1016,
/**/
1015,
/**/
1014,
/**/
1013,
/**/
1012,
/**/
1011,
/**/
1010,
/**/
1009,
/**/
1008,
/**/
1007,
/**/
1006,
/**/
1005,
/**/
1004,
/**/
1003,
/**/
1002,
/**/
1001,
/**/
1000,
/**/
999,
/**/
998,
/**/
997,
/**/
996,
/**/
995,
/**/
994,
/**/
993,
/**/
992,
/**/
991,
/**/
990,
/**/
989,
/**/
988,
/**/
987,
/**/
986,
/**/
985,
/**/
984,
/**/
983,
/**/
982,
/**/
981,
/**/
980,
/**/
979,
/**/
978,
/**/
977,
/**/
976,
/**/
975,
/**/
974,
/**/
973,
/**/
972,
/**/
971,
/**/
970,
/**/
969,
/**/
968,
/**/
967,
/**/
966,
/**/
965,
/**/
964,
/**/
963,
/**/
962,
/**/
961,
/**/
960,
/**/
959,
/**/
958,
/**/
957,
/**/
956,
/**/
955,
/**/
954,
/**/
953,
/**/
952,
/**/
951,
/**/
950,
/**/
949,
/**/
948,
/**/
947,
/**/
946,
/**/
945,
/**/
944,
/**/
943,
/**/
942,
/**/
941,
/**/
940,
/**/
939,
/**/
938,
/**/
937,
/**/
936,
/**/
935,
/**/
934,
/**/
933,
/**/
932,
/**/
931,
/**/
930,
/**/
929,
/**/
928,
/**/
927,
/**/
926,
/**/
925,
/**/
924,
/**/
923,
/**/
922,
/**/
921,
/**/
920,
/**/
919,
/**/
918,
/**/
917,
/**/
916,
/**/
915,
/**/
914,
/**/
913,
/**/
912,
/**/
911,
/**/
910,
/**/
909,
/**/
908,
/**/
907,
/**/
906,
/**/
905,
/**/
904,
/**/
903,
/**/
902,
/**/
901,
/**/
900,
/**/
899,
/**/
898,
/**/
897,
/**/
896,
/**/
895,
/**/
894,
/**/
893,
/**/
892,
/**/
891,
/**/
890,
/**/
889,
/**/
888,
/**/
887,
/**/
886,
/**/
885,
/**/
884,
/**/
883,
/**/
882,
/**/
881,
/**/
880,
/**/
879,
/**/
878,
/**/
877,
/**/
876,
/**/
875,
/**/
874,
/**/
873,
/**/
872,
/**/
871,
/**/
870,
/**/
869,
/**/
868,
/**/
867,
/**/
866,
/**/
865,
/**/
864,
/**/
863,
/**/
862,
/**/
861,
/**/
860,
/**/
859,
/**/
858,
/**/
857,
/**/
856,
/**/
855,
/**/
854,
/**/
853,
/**/
852,
/**/
851,
/**/
850,
/**/
849,
/**/
848,
/**/
847,
/**/
846,
/**/
845,
/**/
844,
/**/
843,
/**/
842,
/**/
841,
/**/
840,
/**/
839,
/**/
838,
/**/
837,
/**/
836,
/**/
835,
/**/
834,
/**/
833,
/**/
832,
/**/
831,
/**/
830,
/**/
829,
/**/
828,
/**/
827,
/**/
826,
/**/
825,
/**/
824,
/**/
823,
/**/
822,
/**/
821,
/**/
820,
/**/
819,
/**/
818,
/**/
817,
/**/
816,
/**/
815,
/**/
814,
/**/
813,
/**/
812,
/**/
811,
/**/
810,
/**/
809,
/**/
808,
/**/
807,
/**/
806,
/**/
805,
/**/
804,
/**/
803,
/**/
802,
/**/
801,
/**/
800,
/**/
799,
/**/
798,
/**/
797,
/**/
796,
/**/
795,
/**/
794,
/**/
793,
/**/
792,
/**/
791,
/**/
790,
/**/
789,
/**/
788,
/**/
787,
/**/
786,
/**/
785,
/**/
784,
/**/
783,
/**/
782,
/**/
781,
/**/
780,
/**/
779,
/**/
778,
/**/
777,
/**/
776,
/**/
775,
/**/
774,
/**/
773,
/**/
772,
/**/
771,
/**/
770,
/**/
769,
/**/
768,
/**/
767,
/**/
766,
/**/
765,
/**/
764,
/**/
763,
/**/
762,
/**/
761,
/**/
760,
/**/
759,
/**/
758,
/**/
757,
/**/
756,
/**/
755,
/**/
754,
/**/
753,
/**/
752,
/**/
751,
/**/
750,
/**/
749,
/**/
748,
/**/
747,
/**/
746,
/**/
745,
/**/
744,
/**/
743,
/**/
742,
/**/
741,
/**/
740,
/**/
739,
/**/
738,
/**/
737,
/**/
736,
/**/
735,
/**/
734,
/**/
733,
/**/
732,
/**/
731,
/**/
730,
/**/
729,
/**/
728,
/**/
727,
/**/
726,
/**/
725,
/**/
724,
/**/
723,
/**/
722,
/**/
721,
/**/
720,
/**/
719,
/**/
718,
/**/
717,
/**/
716,
/**/
715,
/**/
714,
/**/
713,
/**/
712,
/**/
711,
/**/
710,
/**/
709,
/**/
708,
/**/
707,
/**/
706,
/**/
705,
/**/
704,
/**/
703,
/**/
702,
/**/
701,
/**/
700,
/**/
699,
/**/
698,
/**/
697,
/**/
696,
/**/
695,
/**/
694,
/**/
693,
/**/
692,
/**/
691,
/**/
690,
/**/
689,
/**/
688,
/**/
687,
/**/
686,
/**/
685,
/**/
684,
/**/
683,
/**/
682,
/**/
681,
/**/
680,
/**/
679,
/**/
678,
/**/
677,
/**/
676,
/**/
675,
/**/
674,
/**/
673,
/**/
672,
/**/
671,
/**/
670,
/**/
669,
/**/
668,
/**/
667,
/**/
666,
/**/
665,
/**/
664,
/**/
663,
/**/
662,
/**/
661,
/**/
660,
/**/
659,
/**/
658,
/**/
657,
/**/
656,
/**/
655,
/**/
654,
/**/
653,
/**/
652,
/**/
651,
/**/
650,
/**/
649,
/**/
648,
/**/
647,
/**/
646,
/**/
645,
/**/
644,
/**/
643,
/**/
642,
/**/
641,
/**/
640,
/**/
639,
/**/
638,
/**/
637,
/**/
636,
/**/
635,
/**/
634,
/**/
633,
/**/
632,
/**/
631,
/**/
630,
/**/
629,
/**/
628,
/**/
627,
/**/
626,
/**/
625,
/**/
624,
/**/
623,
/**/
622,
/**/
621,
/**/
620,
/**/
619,
/**/
618,
/**/
617,
/**/
616,
/**/
615,
/**/
614,
/**/
613,
/**/
612,
/**/
611,
/**/
610,
/**/
609,
/**/
608,
/**/
607,
/**/
606,
/**/
605,
/**/
604,
/**/
603,
/**/
602,
/**/
601,
/**/
600,
/**/
599,
/**/
598,
/**/
597,
/**/
596,
/**/
595,
/**/
594,
/**/
593,
/**/
592,
/**/
591,
/**/
590,
/**/
589,
/**/
588,
/**/
587,
/**/
586,
/**/
585,
/**/
584,
/**/
583,
/**/
582,
/**/
581,
/**/
580,
/**/
579,
/**/
578,
/**/
577,
/**/
576,
/**/
575,
/**/
574,
/**/
573,
/**/
572,
/**/
571,
/**/
570,
/**/
569,
/**/
568,
/**/
567,
/**/
566,
/**/
565,
/**/
564,
/**/
563,
/**/
562,
/**/
561,
/**/
560,
/**/
559,
/**/
558,
/**/
557,
/**/
556,
/**/
555,
/**/
554,
/**/
553,
/**/
552,
/**/
551,
/**/
550,
/**/
549,
/**/
548,
/**/
547,
/**/
546,
/**/
545,
/**/
544,
/**/
543,
/**/
542,
/**/
541,
/**/
540,
/**/
539,
/**/
538,
/**/
537,
/**/
536,
/**/
535,
/**/
534,
/**/
533,
/**/
532,
/**/
531,
/**/
530,
/**/
529,
/**/
528,
/**/
527,
/**/
526,
/**/
525,
/**/
524,
/**/
523,
/**/
522,
/**/
521,
/**/
520,
/**/
519,
/**/
518,
/**/
517,
/**/
516,
/**/
515,
/**/
514,
/**/
513,
/**/
512,
/**/
511,
/**/
510,
/**/
509,
/**/
508,
/**/
507,
/**/
506,
/**/
505,
/**/
504,
/**/
503,
/**/
502,
/**/
501,
/**/
500,
/**/
499,
/**/
498,
/**/
497,
/**/
496,
/**/
495,
/**/
494,
/**/
493,
/**/
492,
/**/
491,
/**/
490,
/**/
489,
/**/
488,
/**/
487,
/**/
486,
/**/
485,
/**/
484,
/**/
483,
/**/
482,
/**/
481,
/**/
480,
/**/
479,
/**/
478,
/**/
477,
/**/
476,
/**/
475,
/**/
474,
/**/
473,
/**/
472,
/**/
471,
/**/
470,
/**/
469,
/**/
468,
/**/
467,
/**/
466,
/**/
465,
/**/
464,
/**/
463,
/**/
462,
/**/
461,
/**/
460,
/**/
459,
/**/
458,
/**/
457,
/**/
456,
/**/
455,
/**/
454,
/**/
453,
/**/
452,
/**/
451,
/**/
450,
/**/
449,
/**/
448,
/**/
447,
/**/
446,
/**/
445,
/**/
444,
/**/
443,
/**/
442,
/**/
441,
/**/
440,
/**/
439,
/**/
438,
/**/
437,
/**/
436,
/**/
435,
/**/
434,
/**/
433,
/**/
432,
/**/
431,
/**/
430,
/**/
429,
/**/
428,
/**/
427,
/**/
426,
/**/
425,
/**/
424,
/**/
423,
/**/
422,
/**/
421,
/**/
420,
/**/
419,
/**/
418,
/**/
417,
/**/
416,
/**/
415,
/**/
414,
/**/
413,
/**/
412,
/**/
411,
/**/
410,
/**/
409,
/**/
408,
/**/
407,
/**/
406,
/**/
405,
/**/
404,
/**/
403,
/**/
402,
/**/
401,
/**/
400,
/**/
399,
/**/
398,
/**/
397,
/**/
396,
/**/
395,
/**/
394,
/**/
393,
/**/
392,
/**/
391,
/**/
390,
/**/
389,
/**/
388,
/**/
387,
/**/
386,
/**/
385,
/**/
384,
/**/
383,
/**/
382,
/**/
381,
/**/
380,
/**/
379,
/**/
378,
/**/
377,
/**/
376,
/**/
375,
/**/
374,
/**/
373,
/**/
372,
/**/
371,
/**/
370,
/**/
369,
/**/
368,
/**/
367,
/**/
366,
/**/
365,
/**/
364,
/**/
363,
/**/
362,
/**/
361,
/**/
360,
/**/
359,
/**/
358,
/**/
357,
/**/
356,
/**/
355,
/**/
354,
/**/
353,
/**/
352,
/**/
351,
/**/
350,
/**/
349,
/**/
348,
/**/
347,
/**/
346,
/**/
345,
/**/
344,
/**/
343,
/**/
342,
/**/
341,
/**/
340,
/**/
339,
/**/
338,
/**/
337,
/**/
336,
/**/
335,
/**/
334,
/**/
333,
/**/
332,
/**/
331,
/**/
330,
/**/
329,
/**/
328,
/**/
327,
/**/
326,
/**/
325,
/**/
324,
/**/
323,
/**/
322,
/**/
321,
/**/
320,
/**/
319,
/**/
318,
/**/
317,
/**/
316,
/**/
315,
/**/
314,
/**/
313,
/**/
312,
/**/
311,
/**/
310,
/**/
309,
/**/
308,
/**/
307,
/**/
306,
/**/
305,
/**/
304,
/**/
303,
/**/
302,
/**/
301,
/**/
300,
/**/
299,
/**/
298,
/**/
297,
/**/
296,
/**/
295,
/**/
294,
/**/
293,
/**/
292,
/**/
291,
/**/
290,
/**/
289,
/**/
288,
/**/
287,
/**/
286,
/**/
285,
/**/
284,
/**/
283,
/**/
282,
/**/
281,
/**/
280,
/**/
279,
/**/
278,
/**/
277,
/**/
276,
/**/
275,
/**/
274,
/**/
273,
/**/
272,
/**/
271,
/**/
270,
/**/
269,
/**/
268,
/**/
267,
/**/
266,
/**/
265,
/**/
264,
/**/
263,
/**/
262,
/**/
261,
/**/
260,
/**/
259,
/**/
258,
/**/
257,
/**/
256,
/**/
255,
/**/
254,
/**/
253,
/**/
252,
/**/
251,
/**/
250,
/**/
249,
/**/
248,
/**/
247,
/**/
246,
/**/
245,
/**/
244,
/**/
243,
/**/
242,
/**/
241,
/**/
240,
/**/
239,
/**/
238,
/**/
237,
/**/
236,
/**/
235,
/**/
234,
/**/
233,
/**/
232,
/**/
231,
/**/
230,
/**/
229,
/**/
228,
/**/
227,
/**/
226,
/**/
225,
/**/
224,
/**/
223,
/**/
222,
/**/
221,
/**/
220,
/**/
219,
/**/
218,
/**/
217,
/**/
216,
/**/
215,
/**/
214,
/**/
213,
/**/
212,
/**/
211,
/**/
210,
/**/
209,
/**/
208,
/**/
207,
/**/
206,
/**/
205,
/**/
204,
/**/
203,
/**/
202,
/**/
201,
/**/
200,
/**/
199,
/**/
198,
/**/
197,
/**/
196,
/**/
195,
/**/
194,
/**/
193,
/**/
192,
/**/
191,
/**/
190,
/**/
189,
/**/
188,
/**/
187,
/**/
186,
/**/
185,
/**/
184,
/**/
183,
/**/
182,
/**/
181,
/**/
180,
/**/
179,
/**/
178,
/**/
177,
/**/
176,
/**/
175,
/**/
174,
/**/
173,
/**/
172,
/**/
171,
/**/
170,
/**/
169,
/**/
168,
/**/
167,
/**/
166,
/**/
165,
/**/
164,
/**/
163,
/**/
162,
/**/
161,
/**/
160,
/**/
159,
/**/
158,
/**/
157,
/**/
156,
/**/
155,
/**/
154,
/**/
153,
/**/
152,
/**/
151,
/**/
150,
/**/
149,
/**/
148,
/**/
147,
/**/
146,
/**/
145,
/**/
144,
/**/
143,
/**/
142,
/**/
141,
/**/
140,
/**/
139,
/**/
138,
/**/
137,
/**/
136,
/**/
135,
/**/
134,
/**/
133,
/**/
132,
/**/
131,
/**/
130,
/**/
129,
/**/
128,
/**/
127,
/**/
126,
/**/
125,
/**/
124,
/**/
123,
/**/
122,
/**/
121,
/**/
120,
/**/
119,
/**/
118,
/**/
117,
/**/
116,
/**/
115,
/**/
114,
/**/
113,
/**/
112,
/**/
111,
/**/
110,
/**/
109,
/**/
108,
/**/
107,
/**/
106,
/**/
105,
/**/
104,
/**/
103,
/**/
102,
/**/
101,
/**/
100,
/**/
99,
/**/
98,
/**/
97,
/**/
96,
/**/
95,
/**/
94,
/**/
93,
/**/
92,
/**/
91,
/**/
90,
/**/
89,
/**/
88,
/**/
87,
/**/
86,
/**/
85,
/**/
84,
/**/
83,
/**/
82,
/**/
81,
/**/
80,
/**/
79,
/**/
78,
/**/
77,
/**/
76,
/**/
75,
/**/
74,
/**/
73,
/**/
72,
/**/
71,
/**/
70,
/**/
69,
/**/
68,
/**/
67,
/**/
66,
/**/
65,
/**/
64,
/**/
63,
/**/
62,
/**/
61,
/**/
60,
/**/
59,
/**/
58,
/**/
57,
/**/
56,
/**/
55,
/**/
54,
/**/
53,
/**/
52,
/**/
51,
/**/
50,
/**/
49,
/**/
48,
/**/
47,
/**/
46,
/**/
45,
/**/
44,
/**/
43,
/**/
42,
/**/
41,
/**/
40,
/**/
39,
/**/
38,
/**/
37,
/**/
36,
/**/
35,
/**/
34,
/**/
33,
/**/
32,
/**/
31,
/**/
30,
/**/
29,
/**/
28,
/**/
27,
/**/
26,
/**/
25,
/**/
24,
/**/
23,
/**/
22,
/**/
21,
/**/
20,
/**/
19,
/**/
18,
/**/
17,
/**/
16,
/**/
15,
/**/
14,
/**/
13,
/**/
12,
/**/
11,
/**/
10,
/**/
9,
/**/
8,
/**/
7,
/**/
6,
/**/
5,
/**/
4,
/**/
3,
/**/
2,
/**/
1,
/**/
0
};
/*
* Place to put a short description when adding a feature with a patch.
* Keep it short, e.g.: "relative numbers", "persistent undo".
* Also add a comment marker to separate the lines.
* See the official Vim patches for the diff format: It must use a context of
* one line only. Create it by hand or use "diff -C2" and edit the patch.
*/
static char *(extra_patches[]) =
{ /* Add your patch description below this line */
/**/
NULL
};
int
highest_patch(void)
{
// this relies on the highest patch number to be the first entry
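// E.g. with the table above this returns 2135, the first (and highest)
// entry before the terminating zero.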
return included_patches[0];
}
#if defined(FEAT_EVAL) || defined(PROTO)
/*
* Return TRUE if patch "n" has been included.
*/
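/*
 * The table is zero-terminated, hence the loop condition below; e.g. with
 * the current table has_patch(2135) returns TRUE while has_patch(2136)
 * returns FALSE (illustrative numbers based on the highest entry).
 */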
int
has_patch(int n)
{
int i;
for (i = 0; included_patches[i] != 0; ++i)
if (included_patches[i] == n)
return TRUE;
return FALSE;
}
#endif
void
ex_version(exarg_T *eap)
{
/*
* Ignore a ":version 9.99" command.
*/
if (*eap->arg == NUL)
{
msg_putchar('\n');
list_version();
}
}
/*
* Output a string for the version message. If it's going to wrap, output a
* newline, unless the message is too long to fit on the screen anyway.
* When "wrap" is TRUE wrap the string in [].
*/
static void
version_msg_wrap(char_u *s, int wrap)
{
int len = (int)vim_strsize(s) + (wrap ? 2 : 0);
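/* E.g. (illustrative) with Columns == 80 and msg_col == 75, a 10-cell
 * item would wrap mid-item, so a newline is output first; a 90-cell item
 * cannot fit on any line and gets no extra newline. */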
if (!got_int && len < (int)Columns && msg_col + len >= (int)Columns
&& *s != '\n')
msg_putchar('\n');
if (!got_int)
{
if (wrap)
msg_puts("[");
msg_puts((char *)s);
if (wrap)
msg_puts("]");
}
}
static void
version_msg(char *s)
{
version_msg_wrap((char_u *)s, FALSE);
}
/*
* List all features aligned in columns, dictionary style.
*/
static void
list_features(void)
{
list_in_columns((char_u **)features, -1, -1);
}
/*
* List string items nicely aligned in columns.
* When "size" is < 0 then the last entry is marked with NULL.
* The entry with index "current" is enclosed in [].
*/
void
list_in_columns(char_u **items, int size, int current)
{
int i;
int ncol;
int nrow;
int cur_row = 1;
int item_count = 0;
int width = 0;
#ifdef FEAT_SYN_HL
int use_highlight = (items == (char_u **)features);
#endif
/* Find the length of the longest item, use that + 1 as the column
* width. */
for (i = 0; size < 0 ? items[i] != NULL : i < size; ++i)
{
int l = (int)vim_strsize(items[i]) + (i == current ? 2 : 0);
if (l > width)
width = l;
++item_count;
}
width += 1;
if (Columns < width)
{
// Not enough screen columns - show one per line
for (i = 0; i < item_count; ++i)
{
version_msg_wrap(items[i], i == current);
if (msg_col > 0 && i < item_count - 1)
msg_putchar('\n');
}
return;
}
// The rightmost column doesn't need a separator.
// Sacrifice it to fit in one more column if possible.
ncol = (int) (Columns + 1) / width;
nrow = item_count / ncol + (item_count % ncol ? 1 : 0);
// "i" counts columns then rows. "idx" counts rows then columns.
for (i = 0; !got_int && i < nrow * ncol; ++i)
{
int idx = (i / ncol) + (i % ncol) * nrow;
if (idx < item_count)
{
int last_col = (i + 1) % ncol == 0;
if (idx == current)
msg_putchar('[');
#ifdef FEAT_SYN_HL
if (use_highlight && items[idx][0] == '-')
msg_puts_attr((char *)items[idx], HL_ATTR(HLF_W));
else
#endif
msg_puts((char *)items[idx]);
if (idx == current)
msg_putchar(']');
if (last_col)
{
if (msg_col > 0 && cur_row < nrow)
msg_putchar('\n');
++cur_row;
}
else
{
while (msg_col % width)
msg_putchar(' ');
}
}
else
{
// this position has no item, thus we are at the end of the row
if (msg_col > 0)
{
if (cur_row < nrow)
msg_putchar('\n');
++cur_row;
}
}
}
}
void
list_version(void)
{
int i;
int first;
char *s = "";
/*
* When adding features here, don't forget to update the list of
* internal variables in eval.c!
*/
init_longVersion();
msg(longVersion);
#ifdef MSWIN
# ifdef FEAT_GUI_MSWIN
# ifdef VIMDLL
# ifdef _WIN64
msg_puts(_("\nMS-Windows 64-bit GUI/console version"));
# else
msg_puts(_("\nMS-Windows 32-bit GUI/console version"));
# endif
# else
# ifdef _WIN64
msg_puts(_("\nMS-Windows 64-bit GUI version"));
# else
msg_puts(_("\nMS-Windows 32-bit GUI version"));
# endif
# endif
# ifdef FEAT_OLE
msg_puts(_(" with OLE support"));
# endif
# else
# ifdef _WIN64
msg_puts(_("\nMS-Windows 64-bit console version"));
# else
msg_puts(_("\nMS-Windows 32-bit console version"));
# endif
# endif
#endif
#if defined(MACOS_X)
# if defined(MACOS_X_DARWIN)
msg_puts(_("\nmacOS version"));
# else
msg_puts(_("\nmacOS version w/o darwin feat."));
# endif
#endif
#ifdef VMS
msg_puts(_("\nOpenVMS version"));
# ifdef HAVE_PATHDEF
if (*compiled_arch != NUL)
{
msg_puts(" - ");
msg_puts((char *)compiled_arch);
}
# endif
#endif
/* Print the list of patch numbers if there is at least one. */
/* Print a range when patches are consecutive: "1-10, 12, 15-40, 42-45" */
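/* The table above is sorted in descending order, so the loop below walks
 * it backwards (in ascending patch order); a range is flushed when the
 * entry before it in the table is not exactly the current number + 1. */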
if (included_patches[0] != 0)
{
msg_puts(_("\nIncluded patches: "));
first = -1;
/* find last one */
for (i = 0; included_patches[i] != 0; ++i)
;
while (--i >= 0)
{
if (first < 0)
first = included_patches[i];
if (i == 0 || included_patches[i - 1] != included_patches[i] + 1)
{
msg_puts(s);
s = ", ";
msg_outnum((long)first);
if (first != included_patches[i])
{
msg_puts("-");
msg_outnum((long)included_patches[i]);
}
first = -1;
}
}
}
/* Print the list of extra patch descriptions if there is at least one. */
if (extra_patches[0] != NULL)
{
msg_puts(_("\nExtra patches: "));
s = "";
for (i = 0; extra_patches[i] != NULL; ++i)
{
msg_puts(s);
s = ", ";
msg_puts(extra_patches[i]);
}
}
#ifdef MODIFIED_BY
msg_puts("\n");
msg_puts(_("Modified by "));
msg_puts(MODIFIED_BY);
#endif
#ifdef HAVE_PATHDEF
if (*compiled_user != NUL || *compiled_sys != NUL)
{
msg_puts(_("\nCompiled "));
if (*compiled_user != NUL)
{
msg_puts(_("by "));
msg_puts((char *)compiled_user);
}
if (*compiled_sys != NUL)
{
msg_puts("@");
msg_puts((char *)compiled_sys);
}
}
#endif
#ifdef FEAT_HUGE
msg_puts(_("\nHuge version "));
#else
# ifdef FEAT_BIG
msg_puts(_("\nBig version "));
# else
# ifdef FEAT_NORMAL
msg_puts(_("\nNormal version "));
# else
# ifdef FEAT_SMALL
msg_puts(_("\nSmall version "));
# else
msg_puts(_("\nTiny version "));
# endif
# endif
# endif
#endif
#ifndef FEAT_GUI
msg_puts(_("without GUI."));
#else
# ifdef FEAT_GUI_GTK
# ifdef USE_GTK3
msg_puts(_("with GTK3 GUI."));
# else
# ifdef FEAT_GUI_GNOME
msg_puts(_("with GTK2-GNOME GUI."));
# else
msg_puts(_("with GTK2 GUI."));
# endif
# endif
# else
# ifdef FEAT_GUI_MOTIF
msg_puts(_("with X11-Motif GUI."));
# else
# ifdef FEAT_GUI_ATHENA
# ifdef FEAT_GUI_NEXTAW
msg_puts(_("with X11-neXtaw GUI."));
# else
msg_puts(_("with X11-Athena GUI."));
# endif
# else
# ifdef FEAT_GUI_PHOTON
msg_puts(_("with Photon GUI."));
# else
# if defined(MSWIN)
msg_puts(_("with GUI."));
# else
# if defined(TARGET_API_MAC_CARBON) && TARGET_API_MAC_CARBON
msg_puts(_("with Carbon GUI."));
# else
# if defined(TARGET_API_MAC_OSX) && TARGET_API_MAC_OSX
msg_puts(_("with Cocoa GUI."));
# else
# endif
# endif
# endif
# endif
# endif
# endif
# endif
#endif
version_msg(_(" Features included (+) or not (-):\n"));
list_features();
if (msg_col > 0)
msg_putchar('\n');
#ifdef SYS_VIMRC_FILE
version_msg(_(" system vimrc file: \""));
version_msg(SYS_VIMRC_FILE);
version_msg("\"\n");
#endif
#ifdef USR_VIMRC_FILE
version_msg(_(" user vimrc file: \""));
version_msg(USR_VIMRC_FILE);
version_msg("\"\n");
#endif
#ifdef USR_VIMRC_FILE2
version_msg(_(" 2nd user vimrc file: \""));
version_msg(USR_VIMRC_FILE2);
version_msg("\"\n");
#endif
#ifdef USR_VIMRC_FILE3
version_msg(_(" 3rd user vimrc file: \""));
version_msg(USR_VIMRC_FILE3);
version_msg("\"\n");
#endif
#ifdef USR_EXRC_FILE
version_msg(_(" user exrc file: \""));
version_msg(USR_EXRC_FILE);
version_msg("\"\n");
#endif
#ifdef USR_EXRC_FILE2
version_msg(_(" 2nd user exrc file: \""));
version_msg(USR_EXRC_FILE2);
version_msg("\"\n");
#endif
#ifdef FEAT_GUI
# ifdef SYS_GVIMRC_FILE
version_msg(_(" system gvimrc file: \""));
version_msg(SYS_GVIMRC_FILE);
version_msg("\"\n");
# endif
version_msg(_(" user gvimrc file: \""));
version_msg(USR_GVIMRC_FILE);
version_msg("\"\n");
# ifdef USR_GVIMRC_FILE2
version_msg(_("2nd user gvimrc file: \""));
version_msg(USR_GVIMRC_FILE2);
version_msg("\"\n");
# endif
# ifdef USR_GVIMRC_FILE3
version_msg(_("3rd user gvimrc file: \""));
version_msg(USR_GVIMRC_FILE3);
version_msg("\"\n");
# endif
#endif
version_msg(_(" defaults file: \""));
version_msg(VIM_DEFAULTS_FILE);
version_msg("\"\n");
#ifdef FEAT_GUI
# ifdef SYS_MENU_FILE
version_msg(_(" system menu file: \""));
version_msg(SYS_MENU_FILE);
version_msg("\"\n");
# endif
#endif
#ifdef HAVE_PATHDEF
if (*default_vim_dir != NUL)
{
version_msg(_(" fall-back for $VIM: \""));
version_msg((char *)default_vim_dir);
version_msg("\"\n");
}
if (*default_vimruntime_dir != NUL)
{
version_msg(_(" f-b for $VIMRUNTIME: \""));
version_msg((char *)default_vimruntime_dir);
version_msg("\"\n");
}
version_msg(_("Compilation: "));
version_msg((char *)all_cflags);
version_msg("\n");
#ifdef VMS
if (*compiler_version != NUL)
{
version_msg(_("Compiler: "));
version_msg((char *)compiler_version);
version_msg("\n");
}
#endif
version_msg(_("Linking: "));
version_msg((char *)all_lflags);
#endif
#ifdef DEBUG
version_msg("\n");
version_msg(_(" DEBUG BUILD"));
#endif
}
static void do_intro_line(int row, char_u *mesg, int add_version, int attr);
/*
* Show the intro message when not editing a file.
*/
void
maybe_intro_message(void)
{
if (BUFEMPTY()
&& curbuf->b_fname == NULL
&& firstwin->w_next == NULL
&& vim_strchr(p_shm, SHM_INTRO) == NULL)
intro_message(FALSE);
}
/*
* Give an introductory message about Vim.
* Only used when starting Vim on an empty file, without a file name.
* Or with the ":intro" command (for Sven :-).
*/
void
intro_message(
int colon) /* TRUE for ":intro" */
{
int i;
int row;
int blanklines;
int sponsor;
char *p;
static char *(lines[]) =
{
N_("VIM - Vi IMproved"),
"",
N_("version "),
N_("by Bram Moolenaar et al."),
#ifdef MODIFIED_BY
" ",
#endif
N_("Vim is open source and freely distributable"),
"",
N_("Help poor children in Uganda!"),
N_("type :help iccf<Enter> for information "),
"",
N_("type :q<Enter> to exit "),
N_("type :help<Enter> or <F1> for on-line help"),
N_("type :help version8<Enter> for version info"),
NULL,
"",
N_("Running in Vi compatible mode"),
N_("type :set nocp<Enter> for Vim defaults"),
N_("type :help cp-default<Enter> for info on this"),
};
#ifdef FEAT_GUI
static char *(gui_lines[]) =
{
NULL,
NULL,
NULL,
NULL,
#ifdef MODIFIED_BY
NULL,
#endif
NULL,
NULL,
NULL,
N_("menu Help->Orphans for information "),
NULL,
N_("Running modeless, typed text is inserted"),
N_("menu Edit->Global Settings->Toggle Insert Mode "),
N_(" for two modes "),
NULL,
NULL,
NULL,
N_("menu Edit->Global Settings->Toggle Vi Compatible"),
N_(" for Vim defaults "),
};
#endif
/* blanklines = screen height - # message lines */
blanklines = (int)Rows - ((sizeof(lines) / sizeof(char *)) - 1);
if (!p_cp)
blanklines += 4; /* add 4 for not showing "Vi compatible" message */
/* Don't overwrite a statusline. Depends on 'cmdheight'. */
if (p_ls > 1)
blanklines -= Rows - topframe->fr_height;
if (blanklines < 0)
blanklines = 0;
/* Show the sponsor and register message one out of four times, the Uganda
* message two out of four times. */
sponsor = (int)time(NULL);
sponsor = ((sponsor & 2) == 0) - ((sponsor & 4) == 0);
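/* Worked out: when the bits with value 2 and 4 of the time are both clear
 * or both set the result is 0 (Uganda message, half the time); only the
 * value-4 bit set gives 1 (register message); only the value-2 bit set
 * gives -1 (sponsor message). */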
/* start displaying the message lines after half of the blank lines */
row = blanklines / 2;
if ((row >= 2 && Columns >= 50) || colon)
{
for (i = 0; i < (int)(sizeof(lines) / sizeof(char *)); ++i)
{
p = lines[i];
#ifdef FEAT_GUI
if (p_im && gui.in_use && gui_lines[i] != NULL)
p = gui_lines[i];
#endif
if (p == NULL)
{
if (!p_cp)
break;
continue;
}
if (sponsor != 0)
{
if (strstr(p, "children") != NULL)
p = sponsor < 0
? N_("Sponsor Vim development!")
: N_("Become a registered Vim user!");
else if (strstr(p, "iccf") != NULL)
p = sponsor < 0
? N_("type :help sponsor<Enter> for information ")
: N_("type :help register<Enter> for information ");
else if (strstr(p, "Orphans") != NULL)
p = N_("menu Help->Sponsor/Register for information ");
}
if (*p != NUL)
do_intro_line(row, (char_u *)_(p), i == 2, 0);
++row;
}
}
/* Make the wait-return message appear just below the text. */
if (colon)
msg_row = row;
}
static void
do_intro_line(
int row,
char_u *mesg,
int add_version,
int attr)
{
char_u vers[20];
int col;
char_u *p;
int l;
int clen;
#ifdef MODIFIED_BY
# define MODBY_LEN 150
char_u modby[MODBY_LEN];
if (*mesg == ' ')
{
vim_strncpy(modby, (char_u *)_("Modified by "), MODBY_LEN - 1);
l = (int)STRLEN(modby);
vim_strncpy(modby + l, (char_u *)MODIFIED_BY, MODBY_LEN - l - 1);
mesg = modby;
}
#endif
/* Center the message horizontally. */
col = vim_strsize(mesg);
if (add_version)
{
STRCPY(vers, mediumVersion);
if (highest_patch())
{
/* Check for 9.9x or 9.9xx, alpha/beta version */
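/* E.g. (hypothetical numbers) "8.1" with patch 2135 becomes "8.1.2135",
 * while an alpha version "8.0a" becomes "8.0a.2135". */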
if (isalpha((int)vers[3]))
{
int len = (isalpha((int)vers[4])) ? 5 : 4;
sprintf((char *)vers + len, ".%d%s", highest_patch(),
mediumVersion + len);
}
else
sprintf((char *)vers + 3, ".%d", highest_patch());
}
col += (int)STRLEN(vers);
}
col = (Columns - col) / 2;
if (col < 0)
col = 0;
/* Split up in parts to highlight <> items differently. */
for (p = mesg; *p != NUL; p += l)
{
clen = 0;
for (l = 0; p[l] != NUL
&& (l == 0 || (p[l] != '<' && p[l - 1] != '>')); ++l)
{
if (has_mbyte)
{
clen += ptr2cells(p + l);
l += (*mb_ptr2len)(p + l) - 1;
}
else
clen += byte2cells(p[l]);
}
screen_puts_len(p, l, row, col, *p == '<' ? HL_ATTR(HLF_8) : attr);
col += clen;
}
/* Add the version number to the version line. */
if (add_version)
screen_puts(vers, row, col, 0);
}
/*
* ":intro": clear screen, display intro screen and wait for return.
*/
void
ex_intro(exarg_T *eap UNUSED)
{
screenclear();
intro_message(TRUE);
wait_return(TRUE);
}
| ./CrossVul/dataset_final_sorted/CWE-416/c/bad_1342_1 |
crossvul-cpp_data_bad_2840_0 | /*
* fs/userfaultfd.c
*
* Copyright (C) 2007 Davide Libenzi <davidel@xmailserver.org>
* Copyright (C) 2008-2009 Red Hat, Inc.
* Copyright (C) 2015 Red Hat, Inc.
*
* This work is licensed under the terms of the GNU GPL, version 2. See
* the COPYING file in the top-level directory.
*
* Some part derived from fs/eventfd.c (anon inode setup) and
* mm/ksm.c (mm hashing).
*/
#include <linux/list.h>
#include <linux/hashtable.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/mm.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/file.h>
#include <linux/bug.h>
#include <linux/anon_inodes.h>
#include <linux/syscalls.h>
#include <linux/userfaultfd_k.h>
#include <linux/mempolicy.h>
#include <linux/ioctl.h>
#include <linux/security.h>
#include <linux/hugetlb.h>
static struct kmem_cache *userfaultfd_ctx_cachep __read_mostly;
enum userfaultfd_state {
UFFD_STATE_WAIT_API,
UFFD_STATE_RUNNING,
};
/*
* Start with fault_pending_wqh and fault_wqh so they're more likely
* to be in the same cacheline.
*/
struct userfaultfd_ctx {
/* waitqueue head for the pending (i.e. not read) userfaults */
wait_queue_head_t fault_pending_wqh;
/* waitqueue head for the userfaults */
wait_queue_head_t fault_wqh;
/* waitqueue head for the pseudo fd to wakeup poll/read */
wait_queue_head_t fd_wqh;
/* waitqueue head for events */
wait_queue_head_t event_wqh;
/* a refile sequence protected by fault_pending_wqh lock */
struct seqcount refile_seq;
/* pseudo fd refcounting */
atomic_t refcount;
/* userfaultfd syscall flags */
unsigned int flags;
/* features requested from the userspace */
unsigned int features;
/* state machine */
enum userfaultfd_state state;
/* released */
bool released;
/* mm with one or more vmas attached to this userfaultfd_ctx */
struct mm_struct *mm;
};
struct userfaultfd_fork_ctx {
struct userfaultfd_ctx *orig;
struct userfaultfd_ctx *new;
struct list_head list;
};
struct userfaultfd_unmap_ctx {
struct userfaultfd_ctx *ctx;
unsigned long start;
unsigned long end;
struct list_head list;
};
struct userfaultfd_wait_queue {
struct uffd_msg msg;
wait_queue_entry_t wq;
struct userfaultfd_ctx *ctx;
bool waken;
};
struct userfaultfd_wake_range {
unsigned long start;
unsigned long len;
};
static int userfaultfd_wake_function(wait_queue_entry_t *wq, unsigned mode,
int wake_flags, void *key)
{
struct userfaultfd_wake_range *range = key;
int ret;
struct userfaultfd_wait_queue *uwq;
unsigned long start, len;
uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
ret = 0;
/* len == 0 means wake all */
start = range->start;
len = range->len;
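/* Skip userfaults outside [start, start + len); e.g. (illustrative)
 * start == 0x1000 and len == 0x2000 only wakes faults whose address
 * lies in [0x1000, 0x3000). */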
if (len && (start > uwq->msg.arg.pagefault.address ||
start + len <= uwq->msg.arg.pagefault.address))
goto out;
WRITE_ONCE(uwq->waken, true);
/*
* The Program-Order guarantees provided by the scheduler
* ensure uwq->waken is visible before the task is woken.
*/
ret = wake_up_state(wq->private, mode);
if (ret) {
/*
* Wake only once, autoremove behavior.
*
* After the effect of list_del_init is visible to the other
* CPUs, the waitqueue may disappear from under us, see the
* !list_empty_careful() in handle_userfault().
*
* try_to_wake_up() has an implicit smp_mb(), and the
* wq->private is read before calling the extern function
* "wake_up_state" (which in turns calls try_to_wake_up).
*/
list_del_init(&wq->entry);
}
out:
return ret;
}
/**
* userfaultfd_ctx_get - Acquires a reference to the internal userfaultfd
* context.
* @ctx: [in] Pointer to the userfaultfd context.
*/
static void userfaultfd_ctx_get(struct userfaultfd_ctx *ctx)
{
if (!atomic_inc_not_zero(&ctx->refcount))
BUG();
}
/**
* userfaultfd_ctx_put - Releases a reference to the internal userfaultfd
* context.
* @ctx: [in] Pointer to userfaultfd context.
*
* The userfaultfd context reference must have been previously acquired either
* with userfaultfd_ctx_get() or userfaultfd_ctx_fdget().
*/
static void userfaultfd_ctx_put(struct userfaultfd_ctx *ctx)
{
if (atomic_dec_and_test(&ctx->refcount)) {
VM_BUG_ON(spin_is_locked(&ctx->fault_pending_wqh.lock));
VM_BUG_ON(waitqueue_active(&ctx->fault_pending_wqh));
VM_BUG_ON(spin_is_locked(&ctx->fault_wqh.lock));
VM_BUG_ON(waitqueue_active(&ctx->fault_wqh));
VM_BUG_ON(spin_is_locked(&ctx->event_wqh.lock));
VM_BUG_ON(waitqueue_active(&ctx->event_wqh));
VM_BUG_ON(spin_is_locked(&ctx->fd_wqh.lock));
VM_BUG_ON(waitqueue_active(&ctx->fd_wqh));
mmdrop(ctx->mm);
kmem_cache_free(userfaultfd_ctx_cachep, ctx);
}
}
static inline void msg_init(struct uffd_msg *msg)
{
BUILD_BUG_ON(sizeof(struct uffd_msg) != 32);
/*
* Must use memset to zero out the padding or kernel data is
* leaked to userland.
*/
memset(msg, 0, sizeof(struct uffd_msg));
}
static inline struct uffd_msg userfault_msg(unsigned long address,
unsigned int flags,
unsigned long reason,
unsigned int features)
{
struct uffd_msg msg;
msg_init(&msg);
msg.event = UFFD_EVENT_PAGEFAULT;
msg.arg.pagefault.address = address;
if (flags & FAULT_FLAG_WRITE)
/*
* If UFFD_FEATURE_PAGEFAULT_FLAG_WP was set in the
* uffdio_api.features and UFFD_PAGEFAULT_FLAG_WRITE
* was not set in a UFFD_EVENT_PAGEFAULT, it means it
* was a read fault, otherwise if set it means it's
* a write fault.
*/
msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_WRITE;
if (reason & VM_UFFD_WP)
/*
* If UFFD_FEATURE_PAGEFAULT_FLAG_WP was set in the
* uffdio_api.features and UFFD_PAGEFAULT_FLAG_WP was
* not set in a UFFD_EVENT_PAGEFAULT, it means it was
* a missing fault, otherwise if set it means it's a
* write protect fault.
*/
msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_WP;
if (features & UFFD_FEATURE_THREAD_ID)
msg.arg.pagefault.feat.ptid = task_pid_vnr(current);
return msg;
}
#ifdef CONFIG_HUGETLB_PAGE
/*
* Same functionality as userfaultfd_must_wait below with modifications for
* hugepmd ranges.
*/
static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
struct vm_area_struct *vma,
unsigned long address,
unsigned long flags,
unsigned long reason)
{
struct mm_struct *mm = ctx->mm;
pte_t *pte;
bool ret = true;
VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
pte = huge_pte_offset(mm, address, vma_mmu_pagesize(vma));
if (!pte)
goto out;
ret = false;
/*
* Lockless access: we're in a wait_event so it's ok if it
* changes under us.
*/
if (huge_pte_none(*pte))
ret = true;
if (!huge_pte_write(*pte) && (reason & VM_UFFD_WP))
ret = true;
out:
return ret;
}
#else
static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
struct vm_area_struct *vma,
unsigned long address,
unsigned long flags,
unsigned long reason)
{
return false; /* should never get here */
}
#endif /* CONFIG_HUGETLB_PAGE */
/*
* Verify the pagetables are still not ok after having registered into
* the fault_pending_wqh to avoid userland having to UFFDIO_WAKE any
* userfault that has already been resolved, if userfaultfd_read and
* UFFDIO_COPY|ZEROPAGE are being run simultaneously on two different
* threads.
*/
static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx,
unsigned long address,
unsigned long flags,
unsigned long reason)
{
struct mm_struct *mm = ctx->mm;
pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd, _pmd;
pte_t *pte;
bool ret = true;
VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
pgd = pgd_offset(mm, address);
if (!pgd_present(*pgd))
goto out;
p4d = p4d_offset(pgd, address);
if (!p4d_present(*p4d))
goto out;
pud = pud_offset(p4d, address);
if (!pud_present(*pud))
goto out;
pmd = pmd_offset(pud, address);
/*
* READ_ONCE must function as a barrier with narrower scope
* and it must be equivalent to:
* _pmd = *pmd; barrier();
*
* This is to deal with the instability (as in
* pmd_trans_unstable) of the pmd.
*/
_pmd = READ_ONCE(*pmd);
if (!pmd_present(_pmd))
goto out;
ret = false;
if (pmd_trans_huge(_pmd))
goto out;
/*
* the pmd is stable (as in !pmd_trans_unstable) so we can re-read it
* and use the standard pte_offset_map() instead of parsing _pmd.
*/
pte = pte_offset_map(pmd, address);
/*
* Lockless access: we're in a wait_event so it's ok if it
* changes under us.
*/
if (pte_none(*pte))
ret = true;
pte_unmap(pte);
out:
return ret;
}
/*
* The locking rules involved in returning VM_FAULT_RETRY depending on
* FAULT_FLAG_ALLOW_RETRY, FAULT_FLAG_RETRY_NOWAIT and
* FAULT_FLAG_KILLABLE are not straightforward. The "Caution"
* recommendation in __lock_page_or_retry is not an understatement.
*
* If FAULT_FLAG_ALLOW_RETRY is set, the mmap_sem must be released
* before returning VM_FAULT_RETRY only if FAULT_FLAG_RETRY_NOWAIT is
* not set.
*
* If FAULT_FLAG_ALLOW_RETRY is set but FAULT_FLAG_KILLABLE is not
* set, VM_FAULT_RETRY can still be returned if and only if there are
* fatal_signal_pending()s, and the mmap_sem must be released before
* returning it.
*/
int handle_userfault(struct vm_fault *vmf, unsigned long reason)
{
struct mm_struct *mm = vmf->vma->vm_mm;
struct userfaultfd_ctx *ctx;
struct userfaultfd_wait_queue uwq;
int ret;
bool must_wait, return_to_userland;
long blocking_state;
ret = VM_FAULT_SIGBUS;
/*
* We don't do userfault handling for the final child pid update.
*
* We also don't do userfault handling during
* coredumping. hugetlbfs has the special
* follow_hugetlb_page() to skip missing pages in the
* FOLL_DUMP case, anon memory also checks for FOLL_DUMP with
* the no_page_table() helper in follow_page_mask(), but the
* shmem_vm_ops->fault method is invoked even during
* coredumping without mmap_sem and it ends up here.
*/
if (current->flags & (PF_EXITING|PF_DUMPCORE))
goto out;
/*
* Coredumping runs without mmap_sem so we can only check that
* the mmap_sem is held, if PF_DUMPCORE was not set.
*/
WARN_ON_ONCE(!rwsem_is_locked(&mm->mmap_sem));
ctx = vmf->vma->vm_userfaultfd_ctx.ctx;
if (!ctx)
goto out;
BUG_ON(ctx->mm != mm);
VM_BUG_ON(reason & ~(VM_UFFD_MISSING|VM_UFFD_WP));
VM_BUG_ON(!(reason & VM_UFFD_MISSING) ^ !!(reason & VM_UFFD_WP));
if (ctx->features & UFFD_FEATURE_SIGBUS)
goto out;
/*
* If it's already released don't get it. This avoids looping
* in __get_user_pages if userfaultfd_release waits on the
* caller of handle_userfault to release the mmap_sem.
*/
if (unlikely(ACCESS_ONCE(ctx->released))) {
/*
* Don't return VM_FAULT_SIGBUS in this case, so a non
* cooperative manager can close the uffd after the
* last UFFDIO_COPY, without risking to trigger an
* involuntary SIGBUS if the process was starting the
* userfaultfd while the userfaultfd was still armed
* (but after the last UFFDIO_COPY). If the uffd
* wasn't already closed when the userfault reached
* this point, that would normally be solved by
* userfaultfd_must_wait returning 'false'.
*
* If we were to return VM_FAULT_SIGBUS here, the non
* cooperative manager would instead be forced to
* always call UFFDIO_UNREGISTER before it can safely
* close the uffd.
*/
ret = VM_FAULT_NOPAGE;
goto out;
}
/*
* Check that we can return VM_FAULT_RETRY.
*
* NOTE: it should become possible to return VM_FAULT_RETRY
* even if FAULT_FLAG_TRIED is set without leading to gup()
* -EBUSY failures, if the userfaultfd is to be extended for
* VM_UFFD_WP tracking and we intend to arm the userfault
* without first stopping userland access to the memory. For
* VM_UFFD_MISSING userfaults this is enough for now.
*/
if (unlikely(!(vmf->flags & FAULT_FLAG_ALLOW_RETRY))) {
/*
* Validate the invariant that nowait must allow retry
* to be sure not to return SIGBUS erroneously on
* nowait invocations.
*/
BUG_ON(vmf->flags & FAULT_FLAG_RETRY_NOWAIT);
#ifdef CONFIG_DEBUG_VM
if (printk_ratelimit()) {
printk(KERN_WARNING
"FAULT_FLAG_ALLOW_RETRY missing %x\n",
vmf->flags);
dump_stack();
}
#endif
goto out;
}
/*
* Handle nowait, not much to do other than tell it to retry
* and wait.
*/
ret = VM_FAULT_RETRY;
if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
goto out;
/* take the reference before dropping the mmap_sem */
userfaultfd_ctx_get(ctx);
init_waitqueue_func_entry(&uwq.wq, userfaultfd_wake_function);
uwq.wq.private = current;
uwq.msg = userfault_msg(vmf->address, vmf->flags, reason,
ctx->features);
uwq.ctx = ctx;
uwq.waken = false;
return_to_userland =
(vmf->flags & (FAULT_FLAG_USER|FAULT_FLAG_KILLABLE)) ==
(FAULT_FLAG_USER|FAULT_FLAG_KILLABLE);
blocking_state = return_to_userland ? TASK_INTERRUPTIBLE :
TASK_KILLABLE;
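/*
 * Faults taken directly from userland (both FAULT_FLAG_USER and
 * FAULT_FLAG_KILLABLE set) sleep interruptibly so ordinary signals
 * like SIGSTOP keep working, see the signal_pending() handling after
 * the schedule below; all other faults only wake for fatal signals.
 */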
spin_lock(&ctx->fault_pending_wqh.lock);
/*
* After the __add_wait_queue the uwq is visible to userland
* through poll/read().
*/
__add_wait_queue(&ctx->fault_pending_wqh, &uwq.wq);
/*
* The smp_mb() after __set_current_state prevents the reads
* following the spin_unlock from happening before the list_add in
* __add_wait_queue.
*/
set_current_state(blocking_state);
spin_unlock(&ctx->fault_pending_wqh.lock);
if (!is_vm_hugetlb_page(vmf->vma))
must_wait = userfaultfd_must_wait(ctx, vmf->address, vmf->flags,
reason);
else
must_wait = userfaultfd_huge_must_wait(ctx, vmf->vma,
vmf->address,
vmf->flags, reason);
up_read(&mm->mmap_sem);
if (likely(must_wait && !ACCESS_ONCE(ctx->released) &&
(return_to_userland ? !signal_pending(current) :
!fatal_signal_pending(current)))) {
wake_up_poll(&ctx->fd_wqh, POLLIN);
schedule();
ret |= VM_FAULT_MAJOR;
/*
* False wakeups can originate even from rwsem before
* up_read() however userfaults will wait either for a
* targeted wakeup on the specific uwq waitqueue from
* wake_userfault() or for signals or for uffd
* release.
*/
while (!READ_ONCE(uwq.waken)) {
/*
* This needs the full smp_store_mb()
* guarantee as the state write must be
* visible to other CPUs before reading
* uwq.waken from other CPUs.
*/
set_current_state(blocking_state);
if (READ_ONCE(uwq.waken) ||
READ_ONCE(ctx->released) ||
(return_to_userland ? signal_pending(current) :
fatal_signal_pending(current)))
break;
schedule();
}
}
__set_current_state(TASK_RUNNING);
if (return_to_userland) {
if (signal_pending(current) &&
!fatal_signal_pending(current)) {
/*
* If we got a SIGSTOP or SIGCONT and this is
* a normal userland page fault, just let
* userland return so the signal will be
* handled and gdb debugging works. The page
* fault code immediately after we return from
* this function is going to release the
* mmap_sem and it's not depending on it
* (unlike gup would if we were not to return
* VM_FAULT_RETRY).
*
* If a fatal signal is pending we still take
* the streamlined VM_FAULT_RETRY failure path
* and there's no need to retake the mmap_sem
* in such case.
*/
down_read(&mm->mmap_sem);
ret = VM_FAULT_NOPAGE;
}
}
/*
* Here we race with the list_del; list_add in
* userfaultfd_ctx_read(), however because we don't ever run
* list_del_init() to refile across the two lists, the prev
* and next pointers will never point to self. list_add also
* would never let any of the two pointers to point to
* self. So list_empty_careful won't risk to see both pointers
* pointing to self at any time during the list refile. The
* only case where list_del_init() is called is the full
* removal in the wake function and there we don't re-list_add
* and it's fine not to block on the spinlock. The uwq on this
* kernel stack can be released after the list_del_init.
*/
if (!list_empty_careful(&uwq.wq.entry)) {
spin_lock(&ctx->fault_pending_wqh.lock);
/*
* No need of list_del_init(), the uwq on the stack
* will be freed shortly anyway.
*/
list_del(&uwq.wq.entry);
spin_unlock(&ctx->fault_pending_wqh.lock);
}
/*
* ctx may go away after this if the userfault pseudo fd is
* already released.
*/
userfaultfd_ctx_put(ctx);
out:
return ret;
}
static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
struct userfaultfd_wait_queue *ewq)
{
if (WARN_ON_ONCE(current->flags & PF_EXITING))
goto out;
ewq->ctx = ctx;
init_waitqueue_entry(&ewq->wq, current);
spin_lock(&ctx->event_wqh.lock);
/*
* After the __add_wait_queue the uwq is visible to userland
* through poll/read().
*/
__add_wait_queue(&ctx->event_wqh, &ewq->wq);
for (;;) {
set_current_state(TASK_KILLABLE);
if (ewq->msg.event == 0)
break;
if (ACCESS_ONCE(ctx->released) ||
fatal_signal_pending(current)) {
__remove_wait_queue(&ctx->event_wqh, &ewq->wq);
if (ewq->msg.event == UFFD_EVENT_FORK) {
struct userfaultfd_ctx *new;
new = (struct userfaultfd_ctx *)
(unsigned long)
ewq->msg.arg.reserved.reserved1;
userfaultfd_ctx_put(new);
}
break;
}
spin_unlock(&ctx->event_wqh.lock);
wake_up_poll(&ctx->fd_wqh, POLLIN);
schedule();
spin_lock(&ctx->event_wqh.lock);
}
__set_current_state(TASK_RUNNING);
spin_unlock(&ctx->event_wqh.lock);
/*
* ctx may go away after this if the userfault pseudo fd is
* already released.
*/
out:
userfaultfd_ctx_put(ctx);
}
static void userfaultfd_event_complete(struct userfaultfd_ctx *ctx,
struct userfaultfd_wait_queue *ewq)
{
ewq->msg.event = 0;
wake_up_locked(&ctx->event_wqh);
__remove_wait_queue(&ctx->event_wqh, &ewq->wq);
}
int dup_userfaultfd(struct vm_area_struct *vma, struct list_head *fcs)
{
struct userfaultfd_ctx *ctx = NULL, *octx;
struct userfaultfd_fork_ctx *fctx;
octx = vma->vm_userfaultfd_ctx.ctx;
if (!octx || !(octx->features & UFFD_FEATURE_EVENT_FORK)) {
vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
vma->vm_flags &= ~(VM_UFFD_WP | VM_UFFD_MISSING);
return 0;
}
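/*
 * Reuse an already-duplicated context if one exists: all child vmas
 * that shared one parent ctx must end up sharing one new ctx.
 */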
list_for_each_entry(fctx, fcs, list)
if (fctx->orig == octx) {
ctx = fctx->new;
break;
}
if (!ctx) {
fctx = kmalloc(sizeof(*fctx), GFP_KERNEL);
if (!fctx)
return -ENOMEM;
ctx = kmem_cache_alloc(userfaultfd_ctx_cachep, GFP_KERNEL);
if (!ctx) {
kfree(fctx);
return -ENOMEM;
}
atomic_set(&ctx->refcount, 1);
ctx->flags = octx->flags;
ctx->state = UFFD_STATE_RUNNING;
ctx->features = octx->features;
ctx->released = false;
ctx->mm = vma->vm_mm;
atomic_inc(&ctx->mm->mm_count);
userfaultfd_ctx_get(octx);
fctx->orig = octx;
fctx->new = ctx;
list_add_tail(&fctx->list, fcs);
}
vma->vm_userfaultfd_ctx.ctx = ctx;
return 0;
}
static void dup_fctx(struct userfaultfd_fork_ctx *fctx)
{
struct userfaultfd_ctx *ctx = fctx->orig;
struct userfaultfd_wait_queue ewq;
msg_init(&ewq.msg);
ewq.msg.event = UFFD_EVENT_FORK;
ewq.msg.arg.reserved.reserved1 = (unsigned long)fctx->new;
userfaultfd_event_wait_completion(ctx, &ewq);
}
void dup_userfaultfd_complete(struct list_head *fcs)
{
struct userfaultfd_fork_ctx *fctx, *n;
list_for_each_entry_safe(fctx, n, fcs, list) {
dup_fctx(fctx);
list_del(&fctx->list);
kfree(fctx);
}
}
void mremap_userfaultfd_prep(struct vm_area_struct *vma,
struct vm_userfaultfd_ctx *vm_ctx)
{
struct userfaultfd_ctx *ctx;
ctx = vma->vm_userfaultfd_ctx.ctx;
if (ctx && (ctx->features & UFFD_FEATURE_EVENT_REMAP)) {
vm_ctx->ctx = ctx;
userfaultfd_ctx_get(ctx);
}
}
void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *vm_ctx,
unsigned long from, unsigned long to,
unsigned long len)
{
struct userfaultfd_ctx *ctx = vm_ctx->ctx;
struct userfaultfd_wait_queue ewq;
if (!ctx)
return;
if (to & ~PAGE_MASK) {
userfaultfd_ctx_put(ctx);
return;
}
msg_init(&ewq.msg);
ewq.msg.event = UFFD_EVENT_REMAP;
ewq.msg.arg.remap.from = from;
ewq.msg.arg.remap.to = to;
ewq.msg.arg.remap.len = len;
userfaultfd_event_wait_completion(ctx, &ewq);
}
bool userfaultfd_remove(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
struct mm_struct *mm = vma->vm_mm;
struct userfaultfd_ctx *ctx;
struct userfaultfd_wait_queue ewq;
ctx = vma->vm_userfaultfd_ctx.ctx;
if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_REMOVE))
return true;
userfaultfd_ctx_get(ctx);
up_read(&mm->mmap_sem);
msg_init(&ewq.msg);
ewq.msg.event = UFFD_EVENT_REMOVE;
ewq.msg.arg.remove.start = start;
ewq.msg.arg.remove.end = end;
userfaultfd_event_wait_completion(ctx, &ewq);
return false;
}
static bool has_unmap_ctx(struct userfaultfd_ctx *ctx, struct list_head *unmaps,
unsigned long start, unsigned long end)
{
struct userfaultfd_unmap_ctx *unmap_ctx;
list_for_each_entry(unmap_ctx, unmaps, list)
if (unmap_ctx->ctx == ctx && unmap_ctx->start == start &&
unmap_ctx->end == end)
return true;
return false;
}
int userfaultfd_unmap_prep(struct vm_area_struct *vma,
unsigned long start, unsigned long end,
struct list_head *unmaps)
{
for ( ; vma && vma->vm_start < end; vma = vma->vm_next) {
struct userfaultfd_unmap_ctx *unmap_ctx;
struct userfaultfd_ctx *ctx = vma->vm_userfaultfd_ctx.ctx;
if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_UNMAP) ||
has_unmap_ctx(ctx, unmaps, start, end))
continue;
unmap_ctx = kzalloc(sizeof(*unmap_ctx), GFP_KERNEL);
if (!unmap_ctx)
return -ENOMEM;
userfaultfd_ctx_get(ctx);
unmap_ctx->ctx = ctx;
unmap_ctx->start = start;
unmap_ctx->end = end;
list_add_tail(&unmap_ctx->list, unmaps);
}
return 0;
}
void userfaultfd_unmap_complete(struct mm_struct *mm, struct list_head *uf)
{
struct userfaultfd_unmap_ctx *ctx, *n;
struct userfaultfd_wait_queue ewq;
list_for_each_entry_safe(ctx, n, uf, list) {
msg_init(&ewq.msg);
ewq.msg.event = UFFD_EVENT_UNMAP;
ewq.msg.arg.remove.start = ctx->start;
ewq.msg.arg.remove.end = ctx->end;
userfaultfd_event_wait_completion(ctx->ctx, &ewq);
list_del(&ctx->list);
kfree(ctx);
}
}
static int userfaultfd_release(struct inode *inode, struct file *file)
{
struct userfaultfd_ctx *ctx = file->private_data;
struct mm_struct *mm = ctx->mm;
struct vm_area_struct *vma, *prev;
/* len == 0 means wake all */
struct userfaultfd_wake_range range = { .len = 0, };
unsigned long new_flags;
ACCESS_ONCE(ctx->released) = true;
if (!mmget_not_zero(mm))
goto wakeup;
/*
* Flush page faults out of all CPUs. NOTE: all page faults
* must be retried without returning VM_FAULT_SIGBUS if
* userfaultfd_ctx_get() succeeds but vma->vma_userfault_ctx
* changes while handle_userfault released the mmap_sem. So
* it's critical that released is set to true (above), before
* taking the mmap_sem for writing.
*/
down_write(&mm->mmap_sem);
prev = NULL;
for (vma = mm->mmap; vma; vma = vma->vm_next) {
cond_resched();
BUG_ON(!!vma->vm_userfaultfd_ctx.ctx ^
!!(vma->vm_flags & (VM_UFFD_MISSING | VM_UFFD_WP)));
if (vma->vm_userfaultfd_ctx.ctx != ctx) {
prev = vma;
continue;
}
new_flags = vma->vm_flags & ~(VM_UFFD_MISSING | VM_UFFD_WP);
prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end,
new_flags, vma->anon_vma,
vma->vm_file, vma->vm_pgoff,
vma_policy(vma),
NULL_VM_UFFD_CTX);
if (prev)
vma = prev;
else
prev = vma;
vma->vm_flags = new_flags;
vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
}
up_write(&mm->mmap_sem);
mmput(mm);
wakeup:
/*
* After no new page faults can wait on this fault_*wqh, flush
* the last page faults that may have been already waiting on
* the fault_*wqh.
*/
spin_lock(&ctx->fault_pending_wqh.lock);
__wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, &range);
__wake_up_locked_key(&ctx->fault_wqh, TASK_NORMAL, &range);
spin_unlock(&ctx->fault_pending_wqh.lock);
/* Flush pending events that may still wait on event_wqh */
wake_up_all(&ctx->event_wqh);
wake_up_poll(&ctx->fd_wqh, POLLHUP);
userfaultfd_ctx_put(ctx);
return 0;
}
/* fault_pending_wqh.lock must be hold by the caller */
static inline struct userfaultfd_wait_queue *find_userfault_in(
wait_queue_head_t *wqh)
{
wait_queue_entry_t *wq;
struct userfaultfd_wait_queue *uwq;
VM_BUG_ON(!spin_is_locked(&wqh->lock));
uwq = NULL;
if (!waitqueue_active(wqh))
goto out;
/* walk in reverse to provide FIFO behavior to read userfaults */
wq = list_last_entry(&wqh->head, typeof(*wq), entry);
uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
out:
return uwq;
}
static inline struct userfaultfd_wait_queue *find_userfault(
struct userfaultfd_ctx *ctx)
{
return find_userfault_in(&ctx->fault_pending_wqh);
}
static inline struct userfaultfd_wait_queue *find_userfault_evt(
struct userfaultfd_ctx *ctx)
{
return find_userfault_in(&ctx->event_wqh);
}
static unsigned int userfaultfd_poll(struct file *file, poll_table *wait)
{
struct userfaultfd_ctx *ctx = file->private_data;
unsigned int ret;
poll_wait(file, &ctx->fd_wqh, wait);
switch (ctx->state) {
case UFFD_STATE_WAIT_API:
return POLLERR;
case UFFD_STATE_RUNNING:
/*
* poll() never guarantees that read won't block.
* userfaults can be woken up before they are read().
*/
if (unlikely(!(file->f_flags & O_NONBLOCK)))
return POLLERR;
/*
* Lockless access to see if there are pending faults.
* __pollwait's last action is the add_wait_queue, but
* the spin_unlock would allow the waitqueue_active to
* pass above the actual list_add inside the
* add_wait_queue critical section. So use a full
* memory barrier to serialize the list_add write of
* add_wait_queue() with the waitqueue_active read
* below.
*/
ret = 0;
smp_mb();
if (waitqueue_active(&ctx->fault_pending_wqh))
ret = POLLIN;
else if (waitqueue_active(&ctx->event_wqh))
ret = POLLIN;
return ret;
default:
WARN_ON_ONCE(1);
return POLLERR;
}
}
static const struct file_operations userfaultfd_fops;
static int resolve_userfault_fork(struct userfaultfd_ctx *ctx,
struct userfaultfd_ctx *new,
struct uffd_msg *msg)
{
int fd;
struct file *file;
unsigned int flags = new->flags & UFFD_SHARED_FCNTL_FLAGS;
fd = get_unused_fd_flags(flags);
if (fd < 0)
return fd;
file = anon_inode_getfile("[userfaultfd]", &userfaultfd_fops, new,
O_RDWR | flags);
if (IS_ERR(file)) {
put_unused_fd(fd);
return PTR_ERR(file);
}
fd_install(fd, file);
msg->arg.reserved.reserved1 = 0;
msg->arg.fork.ufd = fd;
return 0;
}
static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait,
struct uffd_msg *msg)
{
ssize_t ret;
DECLARE_WAITQUEUE(wait, current);
struct userfaultfd_wait_queue *uwq;
/*
* Handling fork event requires sleeping operations, so
* we drop the event_wqh lock, then do these ops, then
* lock it back and wake up the waiter. While the lock is
* dropped the ewq may go away so we keep track of it
* carefully.
*/
LIST_HEAD(fork_event);
struct userfaultfd_ctx *fork_nctx = NULL;
/* always take the fd_wqh lock before the fault_pending_wqh lock */
spin_lock(&ctx->fd_wqh.lock);
__add_wait_queue(&ctx->fd_wqh, &wait);
for (;;) {
set_current_state(TASK_INTERRUPTIBLE);
spin_lock(&ctx->fault_pending_wqh.lock);
uwq = find_userfault(ctx);
if (uwq) {
/*
* Use a seqcount to repeat the lockless check
* in wake_userfault() to avoid missing
* wakeups, because during the refile both
* waitqueues could become empty if this is the
* only userfault.
*/
write_seqcount_begin(&ctx->refile_seq);
/*
* The fault_pending_wqh.lock prevents the uwq
* from disappearing from under us.
*
* Refile this userfault from
* fault_pending_wqh to fault_wqh, it's not
* pending anymore after we read it.
*
* Use list_del() by hand (as
* userfaultfd_wake_function also uses
* list_del_init() by hand) to be sure nobody
* changes __remove_wait_queue() to use
* list_del_init() in turn breaking the
* !list_empty_careful() check in
* handle_userfault(). The uwq->wq.head list
* must never be empty at any time during the
* refile, or the waitqueue could disappear
* from under us. The "wait_queue_head_t"
* parameter of __remove_wait_queue() is unused
* anyway.
*/
list_del(&uwq->wq.entry);
__add_wait_queue(&ctx->fault_wqh, &uwq->wq);
write_seqcount_end(&ctx->refile_seq);
/* careful to always initialize msg if ret == 0 */
*msg = uwq->msg;
spin_unlock(&ctx->fault_pending_wqh.lock);
ret = 0;
break;
}
spin_unlock(&ctx->fault_pending_wqh.lock);
spin_lock(&ctx->event_wqh.lock);
uwq = find_userfault_evt(ctx);
if (uwq) {
*msg = uwq->msg;
if (uwq->msg.event == UFFD_EVENT_FORK) {
fork_nctx = (struct userfaultfd_ctx *)
(unsigned long)
uwq->msg.arg.reserved.reserved1;
list_move(&uwq->wq.entry, &fork_event);
spin_unlock(&ctx->event_wqh.lock);
ret = 0;
break;
}
userfaultfd_event_complete(ctx, uwq);
spin_unlock(&ctx->event_wqh.lock);
ret = 0;
break;
}
spin_unlock(&ctx->event_wqh.lock);
if (signal_pending(current)) {
ret = -ERESTARTSYS;
break;
}
if (no_wait) {
ret = -EAGAIN;
break;
}
spin_unlock(&ctx->fd_wqh.lock);
schedule();
spin_lock(&ctx->fd_wqh.lock);
}
__remove_wait_queue(&ctx->fd_wqh, &wait);
__set_current_state(TASK_RUNNING);
spin_unlock(&ctx->fd_wqh.lock);
if (!ret && msg->event == UFFD_EVENT_FORK) {
ret = resolve_userfault_fork(ctx, fork_nctx, msg);
if (!ret) {
spin_lock(&ctx->event_wqh.lock);
if (!list_empty(&fork_event)) {
uwq = list_first_entry(&fork_event,
typeof(*uwq),
wq.entry);
list_del(&uwq->wq.entry);
__add_wait_queue(&ctx->event_wqh, &uwq->wq);
userfaultfd_event_complete(ctx, uwq);
}
spin_unlock(&ctx->event_wqh.lock);
}
}
return ret;
}
static ssize_t userfaultfd_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
struct userfaultfd_ctx *ctx = file->private_data;
ssize_t _ret, ret = 0;
struct uffd_msg msg;
int no_wait = file->f_flags & O_NONBLOCK;
if (ctx->state == UFFD_STATE_WAIT_API)
return -EINVAL;
for (;;) {
if (count < sizeof(msg))
return ret ? ret : -EINVAL;
_ret = userfaultfd_ctx_read(ctx, no_wait, &msg);
if (_ret < 0)
return ret ? ret : _ret;
if (copy_to_user((__u64 __user *) buf, &msg, sizeof(msg)))
return ret ? ret : -EFAULT;
ret += sizeof(msg);
buf += sizeof(msg);
count -= sizeof(msg);
/*
* Allow reading more than one fault at a time, but only
* block while waiting for the very first one.
*/
no_wait = O_NONBLOCK;
}
}
static void __wake_userfault(struct userfaultfd_ctx *ctx,
struct userfaultfd_wake_range *range)
{
spin_lock(&ctx->fault_pending_wqh.lock);
/* wake all in the range and autoremove */
if (waitqueue_active(&ctx->fault_pending_wqh))
__wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL,
range);
if (waitqueue_active(&ctx->fault_wqh))
__wake_up_locked_key(&ctx->fault_wqh, TASK_NORMAL, range);
spin_unlock(&ctx->fault_pending_wqh.lock);
}
static __always_inline void wake_userfault(struct userfaultfd_ctx *ctx,
struct userfaultfd_wake_range *range)
{
unsigned seq;
bool need_wakeup;
/*
* To be sure waitqueue_active() is not reordered by the CPU
* before the pagetable update, use an explicit SMP memory
* barrier here. PT lock release or up_read(mmap_sem) still
* have release semantics that can allow the
* waitqueue_active() to be reordered before the pte update.
*/
smp_mb();
/*
* Use waitqueue_active() because the address space is very
* frequently changed atomically even when there are no
* userfaults yet. So we take the spinlock only when we're
* sure there are userfaults to wake.
*/
do {
seq = read_seqcount_begin(&ctx->refile_seq);
need_wakeup = waitqueue_active(&ctx->fault_pending_wqh) ||
waitqueue_active(&ctx->fault_wqh);
cond_resched();
} while (read_seqcount_retry(&ctx->refile_seq, seq));
if (need_wakeup)
__wake_userfault(ctx, range);
}
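/*
 * Editor's note: the retry loop above is the standard seqcount read-side
 * pattern. A minimal generic sketch of both sides (guarded by #if 0, an
 * illustration only; the names are hypothetical, the seqcount is assumed
 * to be seqcount_init()ed elsewhere, and writers must be serialized by a
 * lock, as fault_pending_wqh.lock does for ctx->refile_seq here):
 */
#if 0
static seqcount_t sample_seq;
static bool sample_state;

static void sample_writer(void)
{
	/* callers must hold a lock serializing all writers */
	write_seqcount_begin(&sample_seq);	/* sequence goes odd */
	sample_state = !sample_state;		/* mutate protected state */
	write_seqcount_end(&sample_seq);	/* sequence goes even again */
}

static bool sample_reader(void)
{
	unsigned seq;
	bool snap;

	do {
		seq = read_seqcount_begin(&sample_seq);
		snap = sample_state;		/* lockless snapshot */
	} while (read_seqcount_retry(&sample_seq, seq));
	return snap;
}
#endif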
static __always_inline int validate_range(struct mm_struct *mm,
__u64 start, __u64 len)
{
__u64 task_size = mm->task_size;
if (start & ~PAGE_MASK)
return -EINVAL;
if (len & ~PAGE_MASK)
return -EINVAL;
if (!len)
return -EINVAL;
if (start < mmap_min_addr)
return -EINVAL;
if (start >= task_size)
return -EINVAL;
if (len > task_size - start)
return -EINVAL;
return 0;
}
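/*
 * Editor's note: a minimal userspace model of the validate_range() checks
 * above, assuming a 4 KiB page and a hypothetical 47-bit task size, and
 * ignoring the mmap_min_addr bound. Guarded by #if 0; it only makes the
 * alignment rules concrete and is not part of this file.
 */
#if 0
#include <assert.h>
#include <stdint.h>

#define MODEL_PAGE_MASK (~((uint64_t)4096 - 1))

static int model_validate_range(uint64_t task_size, uint64_t start,
				uint64_t len)
{
	if (start & ~MODEL_PAGE_MASK)	/* unaligned start */
		return -22;		/* -EINVAL */
	if (len & ~MODEL_PAGE_MASK)	/* unaligned length */
		return -22;
	if (!len)			/* empty range */
		return -22;
	if (start >= task_size)		/* starts past the address space */
		return -22;
	if (len > task_size - start)	/* ends past the address space */
		return -22;
	return 0;
}

int main(void)
{
	assert(model_validate_range(1ULL << 47, 0x1000, 0x2000) == 0);
	assert(model_validate_range(1ULL << 47, 0x1234, 0x2000) == -22);
	assert(model_validate_range(1ULL << 47, 0x1000, 0) == -22);
	return 0;
}
#endif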
static inline bool vma_can_userfault(struct vm_area_struct *vma)
{
return vma_is_anonymous(vma) || is_vm_hugetlb_page(vma) ||
vma_is_shmem(vma);
}
static int userfaultfd_register(struct userfaultfd_ctx *ctx,
unsigned long arg)
{
struct mm_struct *mm = ctx->mm;
struct vm_area_struct *vma, *prev, *cur;
int ret;
struct uffdio_register uffdio_register;
struct uffdio_register __user *user_uffdio_register;
unsigned long vm_flags, new_flags;
bool found;
bool basic_ioctls;
unsigned long start, end, vma_end;
user_uffdio_register = (struct uffdio_register __user *) arg;
ret = -EFAULT;
if (copy_from_user(&uffdio_register, user_uffdio_register,
sizeof(uffdio_register)-sizeof(__u64)))
goto out;
ret = -EINVAL;
if (!uffdio_register.mode)
goto out;
if (uffdio_register.mode & ~(UFFDIO_REGISTER_MODE_MISSING|
UFFDIO_REGISTER_MODE_WP))
goto out;
vm_flags = 0;
if (uffdio_register.mode & UFFDIO_REGISTER_MODE_MISSING)
vm_flags |= VM_UFFD_MISSING;
if (uffdio_register.mode & UFFDIO_REGISTER_MODE_WP) {
vm_flags |= VM_UFFD_WP;
/*
* FIXME: remove the below error constraint by
* implementing the wprotect tracking mode.
*/
ret = -EINVAL;
goto out;
}
ret = validate_range(mm, uffdio_register.range.start,
uffdio_register.range.len);
if (ret)
goto out;
start = uffdio_register.range.start;
end = start + uffdio_register.range.len;
ret = -ENOMEM;
if (!mmget_not_zero(mm))
goto out;
down_write(&mm->mmap_sem);
vma = find_vma_prev(mm, start, &prev);
if (!vma)
goto out_unlock;
/* check that there's at least one vma in the range */
ret = -EINVAL;
if (vma->vm_start >= end)
goto out_unlock;
/*
* If the first vma contains huge pages, make sure start address
* is aligned to huge page size.
*/
if (is_vm_hugetlb_page(vma)) {
unsigned long vma_hpagesize = vma_kernel_pagesize(vma);
if (start & (vma_hpagesize - 1))
goto out_unlock;
}
/*
* Search for incompatible vmas.
*/
found = false;
basic_ioctls = false;
for (cur = vma; cur && cur->vm_start < end; cur = cur->vm_next) {
cond_resched();
BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^
!!(cur->vm_flags & (VM_UFFD_MISSING | VM_UFFD_WP)));
/* check not compatible vmas */
ret = -EINVAL;
if (!vma_can_userfault(cur))
goto out_unlock;
/*
* If this vma contains the ending address and has huge
* pages, check the alignment.
*/
if (is_vm_hugetlb_page(cur) && end <= cur->vm_end &&
end > cur->vm_start) {
unsigned long vma_hpagesize = vma_kernel_pagesize(cur);
ret = -EINVAL;
if (end & (vma_hpagesize - 1))
goto out_unlock;
}
/*
* Check that this vma isn't already owned by a
* different userfaultfd. We can't allow more than one
* userfaultfd to own a single vma simultaneously or we
* wouldn't know which one to deliver the userfaults to.
*/
ret = -EBUSY;
if (cur->vm_userfaultfd_ctx.ctx &&
cur->vm_userfaultfd_ctx.ctx != ctx)
goto out_unlock;
/*
* Note vmas containing huge pages
*/
if (is_vm_hugetlb_page(cur))
basic_ioctls = true;
found = true;
}
BUG_ON(!found);
if (vma->vm_start < start)
prev = vma;
ret = 0;
do {
cond_resched();
BUG_ON(!vma_can_userfault(vma));
BUG_ON(vma->vm_userfaultfd_ctx.ctx &&
vma->vm_userfaultfd_ctx.ctx != ctx);
/*
* Nothing to do: this vma is already registered into this
* userfaultfd and with the right tracking mode too.
*/
if (vma->vm_userfaultfd_ctx.ctx == ctx &&
(vma->vm_flags & vm_flags) == vm_flags)
goto skip;
if (vma->vm_start > start)
start = vma->vm_start;
vma_end = min(end, vma->vm_end);
new_flags = (vma->vm_flags & ~vm_flags) | vm_flags;
prev = vma_merge(mm, prev, start, vma_end, new_flags,
vma->anon_vma, vma->vm_file, vma->vm_pgoff,
vma_policy(vma),
((struct vm_userfaultfd_ctx){ ctx }));
if (prev) {
vma = prev;
goto next;
}
if (vma->vm_start < start) {
ret = split_vma(mm, vma, start, 1);
if (ret)
break;
}
if (vma->vm_end > end) {
ret = split_vma(mm, vma, end, 0);
if (ret)
break;
}
next:
/*
* In the vma_merge() successful mprotect-like case 8:
* the next vma was merged into the current one and
* the current one has not been updated yet.
*/
vma->vm_flags = new_flags;
vma->vm_userfaultfd_ctx.ctx = ctx;
skip:
prev = vma;
start = vma->vm_end;
vma = vma->vm_next;
} while (vma && vma->vm_start < end);
out_unlock:
up_write(&mm->mmap_sem);
mmput(mm);
if (!ret) {
/*
* Now that we scanned all vmas we can already tell
* userland which ioctls methods are guaranteed to
* succeed on this range.
*/
if (put_user(basic_ioctls ? UFFD_API_RANGE_IOCTLS_BASIC :
UFFD_API_RANGE_IOCTLS,
&user_uffdio_register->ioctls))
ret = -EFAULT;
}
out:
return ret;
}
static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
unsigned long arg)
{
struct mm_struct *mm = ctx->mm;
struct vm_area_struct *vma, *prev, *cur;
int ret;
struct uffdio_range uffdio_unregister;
unsigned long new_flags;
bool found;
unsigned long start, end, vma_end;
const void __user *buf = (void __user *)arg;
ret = -EFAULT;
if (copy_from_user(&uffdio_unregister, buf, sizeof(uffdio_unregister)))
goto out;
ret = validate_range(mm, uffdio_unregister.start,
uffdio_unregister.len);
if (ret)
goto out;
start = uffdio_unregister.start;
end = start + uffdio_unregister.len;
ret = -ENOMEM;
if (!mmget_not_zero(mm))
goto out;
down_write(&mm->mmap_sem);
vma = find_vma_prev(mm, start, &prev);
if (!vma)
goto out_unlock;
/* check that there's at least one vma in the range */
ret = -EINVAL;
if (vma->vm_start >= end)
goto out_unlock;
/*
* If the first vma contains huge pages, make sure start address
* is aligned to huge page size.
*/
if (is_vm_hugetlb_page(vma)) {
unsigned long vma_hpagesize = vma_kernel_pagesize(vma);
if (start & (vma_hpagesize - 1))
goto out_unlock;
}
/*
* Search for incompatible vmas.
*/
found = false;
ret = -EINVAL;
for (cur = vma; cur && cur->vm_start < end; cur = cur->vm_next) {
cond_resched();
BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^
!!(cur->vm_flags & (VM_UFFD_MISSING | VM_UFFD_WP)));
/*
* Check for incompatible vmas. This is not strictly
* required here, as incompatible vmas cannot have a
* userfaultfd_ctx registered on them, but it provides
* stricter behavior to notice unregistration errors.
*/
if (!vma_can_userfault(cur))
goto out_unlock;
found = true;
}
BUG_ON(!found);
if (vma->vm_start < start)
prev = vma;
ret = 0;
do {
cond_resched();
BUG_ON(!vma_can_userfault(vma));
/*
* Nothing to do: this vma is not registered with any
* userfaultfd, so there is nothing to unregister.
*/
if (!vma->vm_userfaultfd_ctx.ctx)
goto skip;
if (vma->vm_start > start)
start = vma->vm_start;
vma_end = min(end, vma->vm_end);
if (userfaultfd_missing(vma)) {
/*
* Wake any concurrent pending userfault while
* we unregister, so they will not hang
* permanently; this also avoids requiring userland
* to call UFFDIO_WAKE explicitly.
*/
struct userfaultfd_wake_range range;
range.start = start;
range.len = vma_end - start;
wake_userfault(vma->vm_userfaultfd_ctx.ctx, &range);
}
new_flags = vma->vm_flags & ~(VM_UFFD_MISSING | VM_UFFD_WP);
prev = vma_merge(mm, prev, start, vma_end, new_flags,
vma->anon_vma, vma->vm_file, vma->vm_pgoff,
vma_policy(vma),
NULL_VM_UFFD_CTX);
if (prev) {
vma = prev;
goto next;
}
if (vma->vm_start < start) {
ret = split_vma(mm, vma, start, 1);
if (ret)
break;
}
if (vma->vm_end > end) {
ret = split_vma(mm, vma, end, 0);
if (ret)
break;
}
next:
/*
* In the vma_merge() successful mprotect-like case 8:
* the next vma was merged into the current one and
* the current one has not been updated yet.
*/
vma->vm_flags = new_flags;
vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
skip:
prev = vma;
start = vma->vm_end;
vma = vma->vm_next;
} while (vma && vma->vm_start < end);
out_unlock:
up_write(&mm->mmap_sem);
mmput(mm);
out:
return ret;
}
/*
* userfaultfd_wake may be used in combination with the
* UFFDIO_*_MODE_DONTWAKE to wakeup userfaults in batches.
*/
static int userfaultfd_wake(struct userfaultfd_ctx *ctx,
unsigned long arg)
{
int ret;
struct uffdio_range uffdio_wake;
struct userfaultfd_wake_range range;
const void __user *buf = (void __user *)arg;
ret = -EFAULT;
if (copy_from_user(&uffdio_wake, buf, sizeof(uffdio_wake)))
goto out;
ret = validate_range(ctx->mm, uffdio_wake.start, uffdio_wake.len);
if (ret)
goto out;
range.start = uffdio_wake.start;
range.len = uffdio_wake.len;
/*
* len == 0 means wake all and we don't want to wake all here,
* so check it again to be sure.
*/
VM_BUG_ON(!range.len);
wake_userfault(ctx, &range);
ret = 0;
out:
return ret;
}
static int userfaultfd_copy(struct userfaultfd_ctx *ctx,
unsigned long arg)
{
__s64 ret;
struct uffdio_copy uffdio_copy;
struct uffdio_copy __user *user_uffdio_copy;
struct userfaultfd_wake_range range;
user_uffdio_copy = (struct uffdio_copy __user *) arg;
ret = -EFAULT;
if (copy_from_user(&uffdio_copy, user_uffdio_copy,
/* don't copy "copy" last field */
sizeof(uffdio_copy)-sizeof(__s64)))
goto out;
ret = validate_range(ctx->mm, uffdio_copy.dst, uffdio_copy.len);
if (ret)
goto out;
/*
* Double check for wraparound just in case. copy_from_user()
* will later check that uffdio_copy.src + uffdio_copy.len fits
* in the userland range.
*/
ret = -EINVAL;
if (uffdio_copy.src + uffdio_copy.len <= uffdio_copy.src)
goto out;
if (uffdio_copy.mode & ~UFFDIO_COPY_MODE_DONTWAKE)
goto out;
if (mmget_not_zero(ctx->mm)) {
ret = mcopy_atomic(ctx->mm, uffdio_copy.dst, uffdio_copy.src,
uffdio_copy.len);
mmput(ctx->mm);
} else {
return -ESRCH;
}
if (unlikely(put_user(ret, &user_uffdio_copy->copy)))
return -EFAULT;
if (ret < 0)
goto out;
BUG_ON(!ret);
/* len == 0 would wake all */
range.len = ret;
if (!(uffdio_copy.mode & UFFDIO_COPY_MODE_DONTWAKE)) {
range.start = uffdio_copy.dst;
wake_userfault(ctx, &range);
}
ret = range.len == uffdio_copy.len ? 0 : -EAGAIN;
out:
return ret;
}
static int userfaultfd_zeropage(struct userfaultfd_ctx *ctx,
unsigned long arg)
{
__s64 ret;
struct uffdio_zeropage uffdio_zeropage;
struct uffdio_zeropage __user *user_uffdio_zeropage;
struct userfaultfd_wake_range range;
user_uffdio_zeropage = (struct uffdio_zeropage __user *) arg;
ret = -EFAULT;
if (copy_from_user(&uffdio_zeropage, user_uffdio_zeropage,
/* don't copy "zeropage" last field */
sizeof(uffdio_zeropage)-sizeof(__s64)))
goto out;
ret = validate_range(ctx->mm, uffdio_zeropage.range.start,
uffdio_zeropage.range.len);
if (ret)
goto out;
ret = -EINVAL;
if (uffdio_zeropage.mode & ~UFFDIO_ZEROPAGE_MODE_DONTWAKE)
goto out;
if (mmget_not_zero(ctx->mm)) {
ret = mfill_zeropage(ctx->mm, uffdio_zeropage.range.start,
uffdio_zeropage.range.len);
mmput(ctx->mm);
} else {
return -ESRCH;
}
if (unlikely(put_user(ret, &user_uffdio_zeropage->zeropage)))
return -EFAULT;
if (ret < 0)
goto out;
/* len == 0 would wake all */
BUG_ON(!ret);
range.len = ret;
if (!(uffdio_zeropage.mode & UFFDIO_ZEROPAGE_MODE_DONTWAKE)) {
range.start = uffdio_zeropage.range.start;
wake_userfault(ctx, &range);
}
ret = range.len == uffdio_zeropage.range.len ? 0 : -EAGAIN;
out:
return ret;
}
static inline unsigned int uffd_ctx_features(__u64 user_features)
{
/*
* For the current set of features the bits just coincide
*/
return (unsigned int)user_features;
}
/*
* Userland asks for a certain API version and we return which bits
* and ioctl commands are implemented in this kernel for that API
* version, or -EINVAL if it is unknown.
*/
static int userfaultfd_api(struct userfaultfd_ctx *ctx,
unsigned long arg)
{
struct uffdio_api uffdio_api;
void __user *buf = (void __user *)arg;
int ret;
__u64 features;
ret = -EINVAL;
if (ctx->state != UFFD_STATE_WAIT_API)
goto out;
ret = -EFAULT;
if (copy_from_user(&uffdio_api, buf, sizeof(uffdio_api)))
goto out;
features = uffdio_api.features;
if (uffdio_api.api != UFFD_API || (features & ~UFFD_API_FEATURES)) {
memset(&uffdio_api, 0, sizeof(uffdio_api));
if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api)))
goto out;
ret = -EINVAL;
goto out;
}
/* report all available features and ioctls to userland */
uffdio_api.features = UFFD_API_FEATURES;
uffdio_api.ioctls = UFFD_API_IOCTLS;
ret = -EFAULT;
if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api)))
goto out;
ctx->state = UFFD_STATE_RUNNING;
/* only enable the requested features for this uffd context */
ctx->features = uffd_ctx_features(features);
ret = 0;
out:
return ret;
}
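/*
 * Editor's note: a hedged userspace sketch of the handshake the function
 * above implements, assuming the uapi headers <linux/userfaultfd.h> and
 * <sys/syscall.h>. Guarded by #if 0 since userspace code does not belong
 * in this file; error handling is abbreviated.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/userfaultfd.h>

static int uffd_handshake(void)
{
	struct uffdio_api api;
	int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);

	if (uffd < 0)
		return -1;
	memset(&api, 0, sizeof(api));
	api.api = UFFD_API;	/* anything else gets -EINVAL back */
	api.features = 0;	/* request no optional features */
	if (ioctl(uffd, UFFDIO_API, &api) < 0) {
		close(uffd);
		return -1;
	}
	/* the kernel reported everything it supports */
	printf("features=%llx ioctls=%llx\n",
	       (unsigned long long)api.features,
	       (unsigned long long)api.ioctls);
	return uffd;
}
#endif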
static long userfaultfd_ioctl(struct file *file, unsigned cmd,
unsigned long arg)
{
int ret = -EINVAL;
struct userfaultfd_ctx *ctx = file->private_data;
if (cmd != UFFDIO_API && ctx->state == UFFD_STATE_WAIT_API)
return -EINVAL;
switch(cmd) {
case UFFDIO_API:
ret = userfaultfd_api(ctx, arg);
break;
case UFFDIO_REGISTER:
ret = userfaultfd_register(ctx, arg);
break;
case UFFDIO_UNREGISTER:
ret = userfaultfd_unregister(ctx, arg);
break;
case UFFDIO_WAKE:
ret = userfaultfd_wake(ctx, arg);
break;
case UFFDIO_COPY:
ret = userfaultfd_copy(ctx, arg);
break;
case UFFDIO_ZEROPAGE:
ret = userfaultfd_zeropage(ctx, arg);
break;
}
return ret;
}
#ifdef CONFIG_PROC_FS
static void userfaultfd_show_fdinfo(struct seq_file *m, struct file *f)
{
struct userfaultfd_ctx *ctx = f->private_data;
wait_queue_entry_t *wq;
struct userfaultfd_wait_queue *uwq;
unsigned long pending = 0, total = 0;
spin_lock(&ctx->fault_pending_wqh.lock);
list_for_each_entry(wq, &ctx->fault_pending_wqh.head, entry) {
uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
pending++;
total++;
}
list_for_each_entry(wq, &ctx->fault_wqh.head, entry) {
uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
total++;
}
spin_unlock(&ctx->fault_pending_wqh.lock);
/*
* If more protocols are added, they will all be shown
* separated by a space, like this:
* protocols: aa:... bb:...
*/
seq_printf(m, "pending:\t%lu\ntotal:\t%lu\nAPI:\t%Lx:%x:%Lx\n",
pending, total, UFFD_API, ctx->features,
UFFD_API_IOCTLS|UFFD_API_RANGE_IOCTLS);
}
#endif
static const struct file_operations userfaultfd_fops = {
#ifdef CONFIG_PROC_FS
.show_fdinfo = userfaultfd_show_fdinfo,
#endif
.release = userfaultfd_release,
.poll = userfaultfd_poll,
.read = userfaultfd_read,
.unlocked_ioctl = userfaultfd_ioctl,
.compat_ioctl = userfaultfd_ioctl,
.llseek = noop_llseek,
};
static void init_once_userfaultfd_ctx(void *mem)
{
struct userfaultfd_ctx *ctx = (struct userfaultfd_ctx *) mem;
init_waitqueue_head(&ctx->fault_pending_wqh);
init_waitqueue_head(&ctx->fault_wqh);
init_waitqueue_head(&ctx->event_wqh);
init_waitqueue_head(&ctx->fd_wqh);
seqcount_init(&ctx->refile_seq);
}
/**
* userfaultfd_file_create - Creates a userfaultfd file pointer.
* @flags: Flags for the userfaultfd file.
*
* This function creates a userfaultfd file pointer, w/out installing
* it into the fd table. This is useful when the userfaultfd file is
* used during the initialization of data structures that require
* extra setup after the userfaultfd creation. So the userfaultfd
* creation is split into the file pointer creation phase, and the
* file descriptor installation phase. In this way races with
* userspace closing the newly installed file descriptor can be
* avoided. Returns a userfaultfd file pointer, or a proper error
* pointer.
*/
static struct file *userfaultfd_file_create(int flags)
{
struct file *file;
struct userfaultfd_ctx *ctx;
BUG_ON(!current->mm);
/* Check the UFFD_* constants for consistency. */
BUILD_BUG_ON(UFFD_CLOEXEC != O_CLOEXEC);
BUILD_BUG_ON(UFFD_NONBLOCK != O_NONBLOCK);
file = ERR_PTR(-EINVAL);
if (flags & ~UFFD_SHARED_FCNTL_FLAGS)
goto out;
file = ERR_PTR(-ENOMEM);
ctx = kmem_cache_alloc(userfaultfd_ctx_cachep, GFP_KERNEL);
if (!ctx)
goto out;
atomic_set(&ctx->refcount, 1);
ctx->flags = flags;
ctx->features = 0;
ctx->state = UFFD_STATE_WAIT_API;
ctx->released = false;
ctx->mm = current->mm;
/* prevent the mm struct from being freed */
mmgrab(ctx->mm);
file = anon_inode_getfile("[userfaultfd]", &userfaultfd_fops, ctx,
O_RDWR | (flags & UFFD_SHARED_FCNTL_FLAGS));
if (IS_ERR(file)) {
mmdrop(ctx->mm);
kmem_cache_free(userfaultfd_ctx_cachep, ctx);
}
out:
return file;
}
SYSCALL_DEFINE1(userfaultfd, int, flags)
{
int fd, error;
struct file *file;
error = get_unused_fd_flags(flags & UFFD_SHARED_FCNTL_FLAGS);
if (error < 0)
return error;
fd = error;
file = userfaultfd_file_create(flags);
if (IS_ERR(file)) {
error = PTR_ERR(file);
goto err_put_unused_fd;
}
fd_install(fd, file);
return fd;
err_put_unused_fd:
put_unused_fd(fd);
return error;
}
static int __init userfaultfd_init(void)
{
userfaultfd_ctx_cachep = kmem_cache_create("userfaultfd_ctx_cache",
sizeof(struct userfaultfd_ctx),
0,
SLAB_HWCACHE_ALIGN|SLAB_PANIC,
init_once_userfaultfd_ctx);
return 0;
}
__initcall(userfaultfd_init);
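/*
 * Editor's note: a minimal end-to-end userspace sketch tying this file
 * together: userfaultfd(2), the UFFDIO_API handshake, UFFDIO_REGISTER on
 * an anonymous mapping, a blocking read() of one uffd_msg and its
 * resolution with UFFDIO_COPY. Guarded by #if 0 (illustration only, link
 * with -pthread); error handling and the msg.event check are omitted for
 * brevity.
 */
#if 0
#include <fcntl.h>
#include <pthread.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/userfaultfd.h>

static void *toucher(void *area)
{
	/* first read of the missing page raises the userfault */
	return (void *)(long)((volatile char *)area)[0];
}

int main(void)
{
	struct uffdio_api api = { .api = UFFD_API };
	struct uffdio_register reg;
	struct uffdio_copy copy;
	struct uffd_msg msg;
	long psz = sysconf(_SC_PAGESIZE);
	pthread_t thr;
	char *area, *src;
	int uffd;

	uffd = syscall(__NR_userfaultfd, O_CLOEXEC);
	ioctl(uffd, UFFDIO_API, &api);

	area = mmap(NULL, psz, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	reg.range.start = (unsigned long)area;
	reg.range.len = psz;
	reg.mode = UFFDIO_REGISTER_MODE_MISSING;
	ioctl(uffd, UFFDIO_REGISTER, &reg);

	pthread_create(&thr, NULL, toucher, area);

	/* blocks in userfaultfd_ctx_read() until the toucher faults */
	read(uffd, &msg, sizeof(msg));

	src = mmap(NULL, psz, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	memset(src, 0x5a, psz);

	copy.dst = msg.arg.pagefault.address & ~((__u64)psz - 1);
	copy.src = (unsigned long)src;
	copy.len = psz;
	copy.mode = 0;	/* no DONTWAKE: wake the faulting thread */
	ioctl(uffd, UFFDIO_COPY, &copy);

	pthread_join(thr, NULL);
	return 0;
}
#endif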
| ./CrossVul/dataset_final_sorted/CWE-416/c/bad_2840_0 |
crossvul-cpp_data_bad_1857_0 | /*
* Interface between ext4 and JBD
*/
#include "ext4_jbd2.h"
#include <trace/events/ext4.h>
/* Just increment the non-pointer handle value */
static handle_t *ext4_get_nojournal(void)
{
handle_t *handle = current->journal_info;
unsigned long ref_cnt = (unsigned long)handle;
BUG_ON(ref_cnt >= EXT4_NOJOURNAL_MAX_REF_COUNT);
ref_cnt++;
handle = (handle_t *)ref_cnt;
current->journal_info = handle;
return handle;
}
/* Decrement the non-pointer handle value */
static void ext4_put_nojournal(handle_t *handle)
{
unsigned long ref_cnt = (unsigned long)handle;
BUG_ON(ref_cnt == 0);
ref_cnt--;
handle = (handle_t *)ref_cnt;
current->journal_info = handle;
}
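/*
 * Editor's note: the two helpers above reuse the handle_t pointer slot in
 * current->journal_info as a bare reference counter when ext4 runs without
 * a journal: the "handle" is a small integer cast to a pointer and never
 * dereferenced. A standalone model of the trick (guarded by #if 0,
 * illustration only; handle_t is left opaque as in jbd2):
 */
#if 0
#include <assert.h>

typedef struct handle_s handle_t;	/* opaque, never dereferenced */

static handle_t *journal_info;		/* models current->journal_info */

static handle_t *get_nojournal(void)
{
	unsigned long ref_cnt = (unsigned long)journal_info;

	journal_info = (handle_t *)(ref_cnt + 1);
	return journal_info;
}

static void put_nojournal(void)
{
	unsigned long ref_cnt = (unsigned long)journal_info;

	assert(ref_cnt != 0);		/* mirrors the BUG_ON above */
	journal_info = (handle_t *)(ref_cnt - 1);
}

int main(void)
{
	assert(get_nojournal() == (handle_t *)1);
	assert(get_nojournal() == (handle_t *)2);	/* nesting */
	put_nojournal();
	put_nojournal();
	assert(journal_info == NULL);	/* fully released */
	return 0;
}
#endif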
/*
* Wrappers for jbd2_journal_start/end.
*/
static int ext4_journal_check_start(struct super_block *sb)
{
journal_t *journal;
might_sleep();
if (sb->s_flags & MS_RDONLY)
return -EROFS;
WARN_ON(sb->s_writers.frozen == SB_FREEZE_COMPLETE);
journal = EXT4_SB(sb)->s_journal;
/*
* Special case here: if the journal has aborted behind our
* backs (e.g. EIO in the commit thread), then we still need to
* take the FS itself readonly cleanly.
*/
if (journal && is_journal_aborted(journal)) {
ext4_abort(sb, "Detected aborted journal");
return -EROFS;
}
return 0;
}
handle_t *__ext4_journal_start_sb(struct super_block *sb, unsigned int line,
int type, int blocks, int rsv_blocks)
{
journal_t *journal;
int err;
trace_ext4_journal_start(sb, blocks, rsv_blocks, _RET_IP_);
err = ext4_journal_check_start(sb);
if (err < 0)
return ERR_PTR(err);
journal = EXT4_SB(sb)->s_journal;
if (!journal)
return ext4_get_nojournal();
return jbd2__journal_start(journal, blocks, rsv_blocks, GFP_NOFS,
type, line);
}
int __ext4_journal_stop(const char *where, unsigned int line, handle_t *handle)
{
struct super_block *sb;
int err;
int rc;
if (!ext4_handle_valid(handle)) {
ext4_put_nojournal(handle);
return 0;
}
if (!handle->h_transaction) {
err = jbd2_journal_stop(handle);
return handle->h_err ? handle->h_err : err;
}
sb = handle->h_transaction->t_journal->j_private;
err = handle->h_err;
rc = jbd2_journal_stop(handle);
if (!err)
err = rc;
if (err)
__ext4_std_error(sb, where, line, err);
return err;
}
handle_t *__ext4_journal_start_reserved(handle_t *handle, unsigned int line,
int type)
{
struct super_block *sb;
int err;
if (!ext4_handle_valid(handle))
return ext4_get_nojournal();
sb = handle->h_journal->j_private;
trace_ext4_journal_start_reserved(sb, handle->h_buffer_credits,
_RET_IP_);
err = ext4_journal_check_start(sb);
if (err < 0) {
jbd2_journal_free_reserved(handle);
return ERR_PTR(err);
}
err = jbd2_journal_start_reserved(handle, type, line);
if (err < 0)
return ERR_PTR(err);
return handle;
}
static void ext4_journal_abort_handle(const char *caller, unsigned int line,
const char *err_fn,
struct buffer_head *bh,
handle_t *handle, int err)
{
char nbuf[16];
const char *errstr = ext4_decode_error(NULL, err, nbuf);
BUG_ON(!ext4_handle_valid(handle));
if (bh)
BUFFER_TRACE(bh, "abort");
if (!handle->h_err)
handle->h_err = err;
if (is_handle_aborted(handle))
return;
printk(KERN_ERR "EXT4-fs: %s:%d: aborting transaction: %s in %s\n",
caller, line, errstr, err_fn);
jbd2_journal_abort_handle(handle);
}
int __ext4_journal_get_write_access(const char *where, unsigned int line,
handle_t *handle, struct buffer_head *bh)
{
int err = 0;
might_sleep();
if (ext4_handle_valid(handle)) {
err = jbd2_journal_get_write_access(handle, bh);
if (err)
ext4_journal_abort_handle(where, line, __func__, bh,
handle, err);
}
return err;
}
/*
* The ext4 forget function must perform a revoke if we are freeing data
* which has been journaled. Metadata (e.g. indirect blocks) must be
* revoked in all cases.
*
* "bh" may be NULL: a metadata block may have been freed from memory
* but there may still be a record of it in the journal, and that record
* still needs to be revoked.
*
* If the handle isn't valid we're not journaling, but we still need to
* call into ext4_journal_revoke() to put the buffer head.
*/
int __ext4_forget(const char *where, unsigned int line, handle_t *handle,
int is_metadata, struct inode *inode,
struct buffer_head *bh, ext4_fsblk_t blocknr)
{
int err;
might_sleep();
trace_ext4_forget(inode, is_metadata, blocknr);
BUFFER_TRACE(bh, "enter");
jbd_debug(4, "forgetting bh %p: is_metadata = %d, mode %o, "
"data mode %x\n",
bh, is_metadata, inode->i_mode,
test_opt(inode->i_sb, DATA_FLAGS));
/* In the no journal case, we can just do a bforget and return */
if (!ext4_handle_valid(handle)) {
bforget(bh);
return 0;
}
/* Never use the revoke function if we are doing full data
* journaling: there is no need to, and a V1 superblock won't
* support it. Otherwise, only skip the revoke on un-journaled
* data blocks. */
if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA ||
(!is_metadata && !ext4_should_journal_data(inode))) {
if (bh) {
BUFFER_TRACE(bh, "call jbd2_journal_forget");
err = jbd2_journal_forget(handle, bh);
if (err)
ext4_journal_abort_handle(where, line, __func__,
bh, handle, err);
return err;
}
return 0;
}
/*
* data!=journal && (is_metadata || should_journal_data(inode))
*/
BUFFER_TRACE(bh, "call jbd2_journal_revoke");
err = jbd2_journal_revoke(handle, blocknr, bh);
if (err) {
ext4_journal_abort_handle(where, line, __func__,
bh, handle, err);
__ext4_abort(inode->i_sb, where, line,
"error %d when attempting revoke", err);
}
BUFFER_TRACE(bh, "exit");
return err;
}
int __ext4_journal_get_create_access(const char *where, unsigned int line,
handle_t *handle, struct buffer_head *bh)
{
int err = 0;
if (ext4_handle_valid(handle)) {
err = jbd2_journal_get_create_access(handle, bh);
if (err)
ext4_journal_abort_handle(where, line, __func__,
bh, handle, err);
}
return err;
}
int __ext4_handle_dirty_metadata(const char *where, unsigned int line,
handle_t *handle, struct inode *inode,
struct buffer_head *bh)
{
int err = 0;
might_sleep();
set_buffer_meta(bh);
set_buffer_prio(bh);
if (ext4_handle_valid(handle)) {
err = jbd2_journal_dirty_metadata(handle, bh);
/* Errors can only happen due to aborted journal or a nasty bug */
if (!is_handle_aborted(handle) && WARN_ON_ONCE(err)) {
ext4_journal_abort_handle(where, line, __func__, bh,
handle, err);
if (inode == NULL) {
pr_err("EXT4: jbd2_journal_dirty_metadata "
"failed: handle type %u started at "
"line %u, credits %u/%u, errcode %d",
handle->h_type,
handle->h_line_no,
handle->h_requested_credits,
handle->h_buffer_credits, err);
return err;
}
ext4_error_inode(inode, where, line,
bh->b_blocknr,
"journal_dirty_metadata failed: "
"handle type %u started at line %u, "
"credits %u/%u, errcode %d",
handle->h_type,
handle->h_line_no,
handle->h_requested_credits,
handle->h_buffer_credits, err);
}
} else {
if (inode)
mark_buffer_dirty_inode(bh, inode);
else
mark_buffer_dirty(bh);
if (inode && inode_needs_sync(inode)) {
sync_dirty_buffer(bh);
if (buffer_req(bh) && !buffer_uptodate(bh)) {
struct ext4_super_block *es;
es = EXT4_SB(inode->i_sb)->s_es;
es->s_last_error_block =
cpu_to_le64(bh->b_blocknr);
ext4_error_inode(inode, where, line,
bh->b_blocknr,
"IO error syncing itable block");
err = -EIO;
}
}
}
return err;
}
int __ext4_handle_dirty_super(const char *where, unsigned int line,
handle_t *handle, struct super_block *sb)
{
struct buffer_head *bh = EXT4_SB(sb)->s_sbh;
int err = 0;
ext4_superblock_csum_set(sb);
if (ext4_handle_valid(handle)) {
err = jbd2_journal_dirty_metadata(handle, bh);
if (err)
ext4_journal_abort_handle(where, line, __func__,
bh, handle, err);
} else
mark_buffer_dirty(bh);
return err;
}
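/*
 * Editor's note: a hedged sketch of the usual calling pattern for the
 * wrappers in this file, via the ext4_journal_start()/ext4_journal_stop()
 * and ext4_handle_dirty_metadata() macros from ext4_jbd2.h. Guarded by
 * #if 0 (illustration only); the credit count and how bh was obtained are
 * assumptions.
 */
#if 0
static int example_modify_metadata(struct inode *inode,
				   struct buffer_head *bh)
{
	handle_t *handle;
	int err;

	/* reserve journal credits for one metadata block */
	handle = ext4_journal_start(inode, EXT4_HT_MISC, 1);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	err = ext4_journal_get_write_access(handle, bh);
	if (!err) {
		/* ... modify bh->b_data under the handle ... */
		err = ext4_handle_dirty_metadata(handle, inode, bh);
	}

	/* always stop the handle, even on error */
	ext4_journal_stop(handle);
	return err;
}
#endif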
| ./CrossVul/dataset_final_sorted/CWE-416/c/bad_1857_0 |
crossvul-cpp_data_good_819_2 | /*
* "splice": joining two ropes together by interweaving their strands.
*
* This is the "extended pipe" functionality, where a pipe is used as
* an arbitrary in-memory buffer. Think of a pipe as a small kernel
* buffer that you can use to transfer data from one end to the other.
*
* The traditional unix read/write is extended with a "splice()" operation
* that transfers data buffers to or from a pipe buffer.
*
* Named by Larry McVoy, original implementation from Linus, extended by
* Jens to support splicing to files, network, direct splicing, etc and
* fixing lots of bugs.
*
* Copyright (C) 2005-2006 Jens Axboe <axboe@kernel.dk>
* Copyright (C) 2005-2006 Linus Torvalds <torvalds@osdl.org>
* Copyright (C) 2006 Ingo Molnar <mingo@elte.hu>
*
*/
#include <linux/bvec.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/splice.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/export.h>
#include <linux/syscalls.h>
#include <linux/uio.h>
#include <linux/security.h>
#include <linux/gfp.h>
#include <linux/socket.h>
#include <linux/compat.h>
#include <linux/sched/signal.h>
#include "internal.h"
/*
* Attempt to steal a page from a pipe buffer. This should perhaps go into
* a vm helper function, it's already simplified quite a bit by the
* addition of remove_mapping(). If success is returned, the caller may
* attempt to reuse this page for another destination.
*/
static int page_cache_pipe_buf_steal(struct pipe_inode_info *pipe,
struct pipe_buffer *buf)
{
struct page *page = buf->page;
struct address_space *mapping;
lock_page(page);
mapping = page_mapping(page);
if (mapping) {
WARN_ON(!PageUptodate(page));
/*
* At least for ext2 with nobh option, we need to wait on
* writeback completing on this page, since we'll remove it
* from the pagecache. Otherwise truncate won't wait on the
* page, allowing the disk blocks to be reused by someone else
* before we actually wrote our data to them. fs corruption
* ensues.
*/
wait_on_page_writeback(page);
if (page_has_private(page) &&
!try_to_release_page(page, GFP_KERNEL))
goto out_unlock;
/*
* If we succeeded in removing the mapping, set LRU flag
* and return good.
*/
if (remove_mapping(mapping, page)) {
buf->flags |= PIPE_BUF_FLAG_LRU;
return 0;
}
}
/*
* Raced with truncate or failed to remove page from current
* address space, unlock and return failure.
*/
out_unlock:
unlock_page(page);
return 1;
}
static void page_cache_pipe_buf_release(struct pipe_inode_info *pipe,
struct pipe_buffer *buf)
{
put_page(buf->page);
buf->flags &= ~PIPE_BUF_FLAG_LRU;
}
/*
* Check whether the contents of buf is OK to access. Since the content
* is a page cache page, IO may be in flight.
*/
static int page_cache_pipe_buf_confirm(struct pipe_inode_info *pipe,
struct pipe_buffer *buf)
{
struct page *page = buf->page;
int err;
if (!PageUptodate(page)) {
lock_page(page);
/*
* Page got truncated/unhashed. This will cause a 0-byte
* splice, if this is the first page.
*/
if (!page->mapping) {
err = -ENODATA;
goto error;
}
/*
* Uh oh, read-error from disk.
*/
if (!PageUptodate(page)) {
err = -EIO;
goto error;
}
/*
* Page is ok after all, we are done.
*/
unlock_page(page);
}
return 0;
error:
unlock_page(page);
return err;
}
const struct pipe_buf_operations page_cache_pipe_buf_ops = {
.can_merge = 0,
.confirm = page_cache_pipe_buf_confirm,
.release = page_cache_pipe_buf_release,
.steal = page_cache_pipe_buf_steal,
.get = generic_pipe_buf_get,
};
static int user_page_pipe_buf_steal(struct pipe_inode_info *pipe,
struct pipe_buffer *buf)
{
if (!(buf->flags & PIPE_BUF_FLAG_GIFT))
return 1;
buf->flags |= PIPE_BUF_FLAG_LRU;
return generic_pipe_buf_steal(pipe, buf);
}
static const struct pipe_buf_operations user_page_pipe_buf_ops = {
.can_merge = 0,
.confirm = generic_pipe_buf_confirm,
.release = page_cache_pipe_buf_release,
.steal = user_page_pipe_buf_steal,
.get = generic_pipe_buf_get,
};
static void wakeup_pipe_readers(struct pipe_inode_info *pipe)
{
smp_mb();
if (waitqueue_active(&pipe->wait))
wake_up_interruptible(&pipe->wait);
kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
}
/**
* splice_to_pipe - fill passed data into a pipe
* @pipe: pipe to fill
* @spd: data to fill
*
* Description:
* @spd contains a map of pages and len/offset tuples, along with
* the struct pipe_buf_operations associated with these pages. This
* function will link that data to the pipe.
*
*/
ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
struct splice_pipe_desc *spd)
{
unsigned int spd_pages = spd->nr_pages;
int ret = 0, page_nr = 0;
if (!spd_pages)
return 0;
if (unlikely(!pipe->readers)) {
send_sig(SIGPIPE, current, 0);
ret = -EPIPE;
goto out;
}
while (pipe->nrbufs < pipe->buffers) {
int newbuf = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
struct pipe_buffer *buf = pipe->bufs + newbuf;
buf->page = spd->pages[page_nr];
buf->offset = spd->partial[page_nr].offset;
buf->len = spd->partial[page_nr].len;
buf->private = spd->partial[page_nr].private;
buf->ops = spd->ops;
buf->flags = 0;
pipe->nrbufs++;
page_nr++;
ret += buf->len;
if (!--spd->nr_pages)
break;
}
if (!ret)
ret = -EAGAIN;
out:
while (page_nr < spd_pages)
spd->spd_release(spd, page_nr++);
return ret;
}
EXPORT_SYMBOL_GPL(splice_to_pipe);
ssize_t add_to_pipe(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
{
int ret;
if (unlikely(!pipe->readers)) {
send_sig(SIGPIPE, current, 0);
ret = -EPIPE;
} else if (pipe->nrbufs == pipe->buffers) {
ret = -EAGAIN;
} else {
int newbuf = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
pipe->bufs[newbuf] = *buf;
pipe->nrbufs++;
return buf->len;
}
pipe_buf_release(pipe, buf);
return ret;
}
EXPORT_SYMBOL(add_to_pipe);
/*
* Check if we need to grow the arrays holding pages and partial page
* descriptions.
*/
int splice_grow_spd(const struct pipe_inode_info *pipe, struct splice_pipe_desc *spd)
{
unsigned int buffers = READ_ONCE(pipe->buffers);
spd->nr_pages_max = buffers;
if (buffers <= PIPE_DEF_BUFFERS)
return 0;
spd->pages = kmalloc_array(buffers, sizeof(struct page *), GFP_KERNEL);
spd->partial = kmalloc_array(buffers, sizeof(struct partial_page),
GFP_KERNEL);
if (spd->pages && spd->partial)
return 0;
kfree(spd->pages);
kfree(spd->partial);
return -ENOMEM;
}
void splice_shrink_spd(struct splice_pipe_desc *spd)
{
if (spd->nr_pages_max <= PIPE_DEF_BUFFERS)
return;
kfree(spd->pages);
kfree(spd->partial);
}
/**
* generic_file_splice_read - splice data from file to a pipe
* @in: file to splice from
* @ppos: position in @in
* @pipe: pipe to splice to
* @len: number of bytes to splice
* @flags: splice modifier flags
*
* Description:
* Will read pages from given file and fill them into a pipe. Can be
* used as long as it has more or less sane ->read_iter().
*
*/
ssize_t generic_file_splice_read(struct file *in, loff_t *ppos,
struct pipe_inode_info *pipe, size_t len,
unsigned int flags)
{
struct iov_iter to;
struct kiocb kiocb;
int idx, ret;
iov_iter_pipe(&to, READ, pipe, len);
idx = to.idx;
init_sync_kiocb(&kiocb, in);
kiocb.ki_pos = *ppos;
ret = call_read_iter(in, &kiocb, &to);
if (ret > 0) {
*ppos = kiocb.ki_pos;
file_accessed(in);
} else if (ret < 0) {
to.idx = idx;
to.iov_offset = 0;
iov_iter_advance(&to, 0); /* to free what was emitted */
/*
* callers of ->splice_read() expect -EAGAIN on
* "can't put anything in there", rather than -EFAULT.
*/
if (ret == -EFAULT)
ret = -EAGAIN;
}
return ret;
}
EXPORT_SYMBOL(generic_file_splice_read);
const struct pipe_buf_operations default_pipe_buf_ops = {
.can_merge = 0,
.confirm = generic_pipe_buf_confirm,
.release = generic_pipe_buf_release,
.steal = generic_pipe_buf_steal,
.get = generic_pipe_buf_get,
};
static int generic_pipe_buf_nosteal(struct pipe_inode_info *pipe,
struct pipe_buffer *buf)
{
return 1;
}
/* Pipe buffer operations for a socket and similar. */
const struct pipe_buf_operations nosteal_pipe_buf_ops = {
.can_merge = 0,
.confirm = generic_pipe_buf_confirm,
.release = generic_pipe_buf_release,
.steal = generic_pipe_buf_nosteal,
.get = generic_pipe_buf_get,
};
EXPORT_SYMBOL(nosteal_pipe_buf_ops);
static ssize_t kernel_readv(struct file *file, const struct kvec *vec,
unsigned long vlen, loff_t offset)
{
mm_segment_t old_fs;
loff_t pos = offset;
ssize_t res;
old_fs = get_fs();
set_fs(get_ds());
/* The cast to a user pointer is valid due to the set_fs() */
res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos, 0);
set_fs(old_fs);
return res;
}
static ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
struct pipe_inode_info *pipe, size_t len,
unsigned int flags)
{
struct kvec *vec, __vec[PIPE_DEF_BUFFERS];
struct iov_iter to;
struct page **pages;
unsigned int nr_pages;
size_t offset, base, copied = 0;
ssize_t res;
int i;
if (pipe->nrbufs == pipe->buffers)
return -EAGAIN;
/*
* Try to keep page boundaries matching the source pagecache ones -
* it probably won't be much help, but...
*/
offset = *ppos & ~PAGE_MASK;
iov_iter_pipe(&to, READ, pipe, len + offset);
res = iov_iter_get_pages_alloc(&to, &pages, len + offset, &base);
if (res <= 0)
return -ENOMEM;
nr_pages = DIV_ROUND_UP(res + base, PAGE_SIZE);
vec = __vec;
if (nr_pages > PIPE_DEF_BUFFERS) {
vec = kmalloc_array(nr_pages, sizeof(struct kvec), GFP_KERNEL);
if (unlikely(!vec)) {
res = -ENOMEM;
goto out;
}
}
pipe->bufs[to.idx].offset = offset;
pipe->bufs[to.idx].len -= offset;
for (i = 0; i < nr_pages; i++) {
size_t this_len = min_t(size_t, len, PAGE_SIZE - offset);
vec[i].iov_base = page_address(pages[i]) + offset;
vec[i].iov_len = this_len;
len -= this_len;
offset = 0;
}
res = kernel_readv(in, vec, nr_pages, *ppos);
if (res > 0) {
copied = res;
*ppos += res;
}
if (vec != __vec)
kfree(vec);
out:
for (i = 0; i < nr_pages; i++)
put_page(pages[i]);
kvfree(pages);
iov_iter_advance(&to, copied); /* truncates and discards */
return res;
}
/*
* Send 'sd->len' bytes to socket from 'sd->file' at position 'sd->pos'
* using sendpage(). Return the number of bytes sent.
*/
static int pipe_to_sendpage(struct pipe_inode_info *pipe,
struct pipe_buffer *buf, struct splice_desc *sd)
{
struct file *file = sd->u.file;
loff_t pos = sd->pos;
int more;
if (!likely(file->f_op->sendpage))
return -EINVAL;
more = (sd->flags & SPLICE_F_MORE) ? MSG_MORE : 0;
if (sd->len < sd->total_len && pipe->nrbufs > 1)
more |= MSG_SENDPAGE_NOTLAST;
return file->f_op->sendpage(file, buf->page, buf->offset,
sd->len, &pos, more);
}
static void wakeup_pipe_writers(struct pipe_inode_info *pipe)
{
smp_mb();
if (waitqueue_active(&pipe->wait))
wake_up_interruptible(&pipe->wait);
kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
}
/**
* splice_from_pipe_feed - feed available data from a pipe to a file
* @pipe: pipe to splice from
* @sd: information to @actor
* @actor: handler that splices the data
*
* Description:
* This function loops over the pipe and calls @actor to do the
* actual moving of a single struct pipe_buffer to the desired
* destination. It returns when there's no more buffers left in
* the pipe or if the requested number of bytes (@sd->total_len)
* have been copied. It returns a positive number (one) if the
* pipe needs to be filled with more data, zero if the required
* number of bytes have been copied and -errno on error.
*
* This, together with splice_from_pipe_{begin,end,next}, may be
* used to implement the functionality of __splice_from_pipe() when
* locking is required around copying the pipe buffers to the
* destination.
*/
static int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_desc *sd,
splice_actor *actor)
{
int ret;
while (pipe->nrbufs) {
struct pipe_buffer *buf = pipe->bufs + pipe->curbuf;
sd->len = buf->len;
if (sd->len > sd->total_len)
sd->len = sd->total_len;
ret = pipe_buf_confirm(pipe, buf);
if (unlikely(ret)) {
if (ret == -ENODATA)
ret = 0;
return ret;
}
ret = actor(pipe, buf, sd);
if (ret <= 0)
return ret;
buf->offset += ret;
buf->len -= ret;
sd->num_spliced += ret;
sd->len -= ret;
sd->pos += ret;
sd->total_len -= ret;
if (!buf->len) {
pipe_buf_release(pipe, buf);
pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
pipe->nrbufs--;
if (pipe->files)
sd->need_wakeup = true;
}
if (!sd->total_len)
return 0;
}
return 1;
}
/**
* splice_from_pipe_next - wait for some data to splice from
* @pipe: pipe to splice from
* @sd: information about the splice operation
*
* Description:
* This function will wait for some data and return a positive
* value (one) if pipe buffers are available. It will return zero
* or -errno if no more data needs to be spliced.
*/
static int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
{
/*
* Check for a signal early, to keep the process killable even when
* buffers are always available.
*/
if (signal_pending(current))
return -ERESTARTSYS;
while (!pipe->nrbufs) {
if (!pipe->writers)
return 0;
if (!pipe->waiting_writers && sd->num_spliced)
return 0;
if (sd->flags & SPLICE_F_NONBLOCK)
return -EAGAIN;
if (signal_pending(current))
return -ERESTARTSYS;
if (sd->need_wakeup) {
wakeup_pipe_writers(pipe);
sd->need_wakeup = false;
}
pipe_wait(pipe);
}
return 1;
}
/**
* splice_from_pipe_begin - start splicing from pipe
* @sd: information about the splice operation
*
* Description:
* This function should be called before a loop containing
* splice_from_pipe_next() and splice_from_pipe_feed() to
* initialize the necessary fields of @sd.
*/
static void splice_from_pipe_begin(struct splice_desc *sd)
{
sd->num_spliced = 0;
sd->need_wakeup = false;
}
/**
* splice_from_pipe_end - finish splicing from pipe
* @pipe: pipe to splice from
* @sd: information about the splice operation
*
* Description:
* This function will wake up pipe writers if necessary. It should
* be called after a loop containing splice_from_pipe_next() and
* splice_from_pipe_feed().
*/
static void splice_from_pipe_end(struct pipe_inode_info *pipe, struct splice_desc *sd)
{
if (sd->need_wakeup)
wakeup_pipe_writers(pipe);
}
/**
* __splice_from_pipe - splice data from a pipe to given actor
* @pipe: pipe to splice from
* @sd: information to @actor
* @actor: handler that splices the data
*
* Description:
* This function does little more than loop over the pipe and call
* @actor to do the actual moving of a single struct pipe_buffer to
* the desired destination. See pipe_to_file, pipe_to_sendpage, or
* pipe_to_user.
*
*/
ssize_t __splice_from_pipe(struct pipe_inode_info *pipe, struct splice_desc *sd,
splice_actor *actor)
{
int ret;
splice_from_pipe_begin(sd);
do {
cond_resched();
ret = splice_from_pipe_next(pipe, sd);
if (ret > 0)
ret = splice_from_pipe_feed(pipe, sd, actor);
} while (ret > 0);
splice_from_pipe_end(pipe, sd);
return sd->num_spliced ? sd->num_spliced : ret;
}
EXPORT_SYMBOL(__splice_from_pipe);
/**
* splice_from_pipe - splice data from a pipe to a file
* @pipe: pipe to splice from
* @out: file to splice to
* @ppos: position in @out
* @len: how many bytes to splice
* @flags: splice modifier flags
* @actor: handler that splices the data
*
* Description:
* See __splice_from_pipe. This function locks the pipe inode,
* otherwise it's identical to __splice_from_pipe().
*
*/
ssize_t splice_from_pipe(struct pipe_inode_info *pipe, struct file *out,
loff_t *ppos, size_t len, unsigned int flags,
splice_actor *actor)
{
ssize_t ret;
struct splice_desc sd = {
.total_len = len,
.flags = flags,
.pos = *ppos,
.u.file = out,
};
pipe_lock(pipe);
ret = __splice_from_pipe(pipe, &sd, actor);
pipe_unlock(pipe);
return ret;
}
/**
* iter_file_splice_write - splice data from a pipe to a file
* @pipe: pipe info
* @out: file to write to
* @ppos: position in @out
* @len: number of bytes to splice
* @flags: splice modifier flags
*
* Description:
* Will either move or copy pages (determined by @flags options) from
* the given pipe inode to the given file.
* This one is ->write_iter-based.
*
*/
ssize_t
iter_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
loff_t *ppos, size_t len, unsigned int flags)
{
struct splice_desc sd = {
.total_len = len,
.flags = flags,
.pos = *ppos,
.u.file = out,
};
int nbufs = pipe->buffers;
struct bio_vec *array = kcalloc(nbufs, sizeof(struct bio_vec),
GFP_KERNEL);
ssize_t ret;
if (unlikely(!array))
return -ENOMEM;
pipe_lock(pipe);
splice_from_pipe_begin(&sd);
while (sd.total_len) {
struct iov_iter from;
size_t left;
int n, idx;
ret = splice_from_pipe_next(pipe, &sd);
if (ret <= 0)
break;
if (unlikely(nbufs < pipe->buffers)) {
kfree(array);
nbufs = pipe->buffers;
array = kcalloc(nbufs, sizeof(struct bio_vec),
GFP_KERNEL);
if (!array) {
ret = -ENOMEM;
break;
}
}
/* build the vector */
left = sd.total_len;
for (n = 0, idx = pipe->curbuf; left && n < pipe->nrbufs; n++, idx++) {
struct pipe_buffer *buf = pipe->bufs + idx;
size_t this_len = buf->len;
if (this_len > left)
this_len = left;
if (idx == pipe->buffers - 1)
idx = -1;
ret = pipe_buf_confirm(pipe, buf);
if (unlikely(ret)) {
if (ret == -ENODATA)
ret = 0;
goto done;
}
array[n].bv_page = buf->page;
array[n].bv_len = this_len;
array[n].bv_offset = buf->offset;
left -= this_len;
}
iov_iter_bvec(&from, WRITE, array, n, sd.total_len - left);
ret = vfs_iter_write(out, &from, &sd.pos, 0);
if (ret <= 0)
break;
sd.num_spliced += ret;
sd.total_len -= ret;
*ppos = sd.pos;
/* dismiss the fully eaten buffers, adjust the partial one */
while (ret) {
struct pipe_buffer *buf = pipe->bufs + pipe->curbuf;
if (ret >= buf->len) {
ret -= buf->len;
buf->len = 0;
pipe_buf_release(pipe, buf);
pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
pipe->nrbufs--;
if (pipe->files)
sd.need_wakeup = true;
} else {
buf->offset += ret;
buf->len -= ret;
ret = 0;
}
}
}
done:
kfree(array);
splice_from_pipe_end(pipe, &sd);
pipe_unlock(pipe);
if (sd.num_spliced)
ret = sd.num_spliced;
return ret;
}
EXPORT_SYMBOL(iter_file_splice_write);
static int write_pipe_buf(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
struct splice_desc *sd)
{
int ret;
void *data;
loff_t tmp = sd->pos;
data = kmap(buf->page);
ret = __kernel_write(sd->u.file, data + buf->offset, sd->len, &tmp);
kunmap(buf->page);
return ret;
}
static ssize_t default_file_splice_write(struct pipe_inode_info *pipe,
struct file *out, loff_t *ppos,
size_t len, unsigned int flags)
{
ssize_t ret;
ret = splice_from_pipe(pipe, out, ppos, len, flags, write_pipe_buf);
if (ret > 0)
*ppos += ret;
return ret;
}
/**
* generic_splice_sendpage - splice data from a pipe to a socket
* @pipe: pipe to splice from
* @out: socket to write to
* @ppos: position in @out
* @len: number of bytes to splice
* @flags: splice modifier flags
*
* Description:
* Will send @len bytes from the pipe to a network socket. No data copying
* is involved.
*
*/
ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, struct file *out,
loff_t *ppos, size_t len, unsigned int flags)
{
return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_sendpage);
}
EXPORT_SYMBOL(generic_splice_sendpage);
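/*
 * Editor's note: a hedged userspace sketch of the classic use of this
 * path: streaming a file to a connected socket through an anonymous pipe,
 * so the socket send side ends up in generic_splice_sendpage() above.
 * Guarded by #if 0 (illustration only); error handling is abbreviated.
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

static int file_to_socket(int file_fd, int sock_fd)
{
	int p[2];
	ssize_t n, m;

	if (pipe(p) < 0)
		return -1;
	for (;;) {
		/* file -> pipe: the pipe is the in-kernel buffer */
		n = splice(file_fd, NULL, p[1], NULL, 65536, SPLICE_F_MORE);
		if (n <= 0)
			break;
		/* pipe -> socket: drain exactly what was spliced in */
		while (n > 0) {
			m = splice(p[0], NULL, sock_fd, NULL, n,
				   SPLICE_F_MORE);
			if (m <= 0)
				goto fail;
			n -= m;
		}
	}
	close(p[0]);
	close(p[1]);
	return n == 0 ? 0 : -1;
fail:
	close(p[0]);
	close(p[1]);
	return -1;
}
#endif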
/*
* Attempt to initiate a splice from pipe to file.
*/
static long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
loff_t *ppos, size_t len, unsigned int flags)
{
ssize_t (*splice_write)(struct pipe_inode_info *, struct file *,
loff_t *, size_t, unsigned int);
if (out->f_op->splice_write)
splice_write = out->f_op->splice_write;
else
splice_write = default_file_splice_write;
return splice_write(pipe, out, ppos, len, flags);
}
/*
* Attempt to initiate a splice from a file to a pipe.
*/
static long do_splice_to(struct file *in, loff_t *ppos,
struct pipe_inode_info *pipe, size_t len,
unsigned int flags)
{
ssize_t (*splice_read)(struct file *, loff_t *,
struct pipe_inode_info *, size_t, unsigned int);
int ret;
if (unlikely(!(in->f_mode & FMODE_READ)))
return -EBADF;
ret = rw_verify_area(READ, in, ppos, len);
if (unlikely(ret < 0))
return ret;
if (unlikely(len > MAX_RW_COUNT))
len = MAX_RW_COUNT;
if (in->f_op->splice_read)
splice_read = in->f_op->splice_read;
else
splice_read = default_file_splice_read;
return splice_read(in, ppos, pipe, len, flags);
}
/**
* splice_direct_to_actor - splices data directly between two non-pipes
* @in: file to splice from
* @sd: actor information on where to splice to
* @actor: handles the data splicing
*
* Description:
* This is a special case helper to splice directly between two
* points, without requiring an explicit pipe. Internally an allocated
* pipe is cached in the process, and reused during the lifetime of
* that process.
*
*/
ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
splice_direct_actor *actor)
{
struct pipe_inode_info *pipe;
long ret, bytes;
umode_t i_mode;
size_t len;
int i, flags, more;
/*
* We require the input to be a regular file, as we don't want to
* randomly drop data for e.g. socket -> socket splicing. Use the
* piped splicing for that!
*/
i_mode = file_inode(in)->i_mode;
if (unlikely(!S_ISREG(i_mode) && !S_ISBLK(i_mode)))
return -EINVAL;
/*
* neither in nor out is a pipe, setup an internal pipe attached to
* 'out' and transfer the wanted data from 'in' to 'out' through that
*/
pipe = current->splice_pipe;
if (unlikely(!pipe)) {
pipe = alloc_pipe_info();
if (!pipe)
return -ENOMEM;
/*
* We don't have an immediate reader, but we'll read the stuff
* out of the pipe right after the splice_to_pipe(). So set
* PIPE_READERS appropriately.
*/
pipe->readers = 1;
current->splice_pipe = pipe;
}
/*
* Do the splice.
*/
ret = 0;
bytes = 0;
len = sd->total_len;
flags = sd->flags;
/*
* Don't block on output, we have to drain the direct pipe.
*/
sd->flags &= ~SPLICE_F_NONBLOCK;
more = sd->flags & SPLICE_F_MORE;
WARN_ON_ONCE(pipe->nrbufs != 0);
while (len) {
size_t read_len;
loff_t pos = sd->pos, prev_pos = pos;
/* Don't try to read more than the pipe has space for. */
read_len = min_t(size_t, len,
(pipe->buffers - pipe->nrbufs) << PAGE_SHIFT);
ret = do_splice_to(in, &pos, pipe, read_len, flags);
if (unlikely(ret <= 0))
goto out_release;
read_len = ret;
sd->total_len = read_len;
/*
* If more data is pending, set SPLICE_F_MORE.
* If this is the last chunk and SPLICE_F_MORE was not set
* initially, clear it.
*/
if (read_len < len)
sd->flags |= SPLICE_F_MORE;
else if (!more)
sd->flags &= ~SPLICE_F_MORE;
/*
* NOTE: nonblocking mode only applies to the input. We
* must not do the output in nonblocking mode as then we
* could get stuck data in the internal pipe:
*/
ret = actor(pipe, sd);
if (unlikely(ret <= 0)) {
sd->pos = prev_pos;
goto out_release;
}
bytes += ret;
len -= ret;
sd->pos = pos;
if (ret < read_len) {
sd->pos = prev_pos + ret;
goto out_release;
}
}
done:
pipe->nrbufs = pipe->curbuf = 0;
file_accessed(in);
return bytes;
out_release:
/*
* If we did an incomplete transfer we must release
* the pipe buffers in question:
*/
for (i = 0; i < pipe->buffers; i++) {
struct pipe_buffer *buf = pipe->bufs + i;
if (buf->ops)
pipe_buf_release(pipe, buf);
}
if (!bytes)
bytes = ret;
goto done;
}
EXPORT_SYMBOL(splice_direct_to_actor);
static int direct_splice_actor(struct pipe_inode_info *pipe,
struct splice_desc *sd)
{
struct file *file = sd->u.file;
return do_splice_from(pipe, file, sd->opos, sd->total_len,
sd->flags);
}
/**
* do_splice_direct - splices data directly between two files
* @in: file to splice from
* @ppos: input file offset
* @out: file to splice to
* @opos: output file offset
* @len: number of bytes to splice
* @flags: splice modifier flags
*
* Description:
* For use by do_sendfile(). splice can easily emulate sendfile, but
* doing it in the application would incur an extra system call
* (splice in + splice out, as compared to just sendfile()). So this helper
* can splice directly through a process-private pipe.
*
*/
long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
loff_t *opos, size_t len, unsigned int flags)
{
struct splice_desc sd = {
.len = len,
.total_len = len,
.flags = flags,
.pos = *ppos,
.u.file = out,
.opos = opos,
};
long ret;
if (unlikely(!(out->f_mode & FMODE_WRITE)))
return -EBADF;
if (unlikely(out->f_flags & O_APPEND))
return -EINVAL;
ret = rw_verify_area(WRITE, out, opos, len);
if (unlikely(ret < 0))
return ret;
ret = splice_direct_to_actor(in, &sd, direct_splice_actor);
if (ret > 0)
*ppos = sd.pos;
return ret;
}
EXPORT_SYMBOL(do_splice_direct);
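/*
 * Wait until the pipe has a free buffer slot, honouring SPLICE_F_NONBLOCK
 * and pending signals, and raising SIGPIPE once all readers are gone.
 */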
static int wait_for_space(struct pipe_inode_info *pipe, unsigned flags)
{
for (;;) {
if (unlikely(!pipe->readers)) {
send_sig(SIGPIPE, current, 0);
return -EPIPE;
}
if (pipe->nrbufs != pipe->buffers)
return 0;
if (flags & SPLICE_F_NONBLOCK)
return -EAGAIN;
if (signal_pending(current))
return -ERESTARTSYS;
pipe->waiting_writers++;
pipe_wait(pipe);
pipe->waiting_writers--;
}
}
static int splice_pipe_to_pipe(struct pipe_inode_info *ipipe,
struct pipe_inode_info *opipe,
size_t len, unsigned int flags);
/*
* Determine where to splice to/from.
*/
static long do_splice(struct file *in, loff_t __user *off_in,
struct file *out, loff_t __user *off_out,
size_t len, unsigned int flags)
{
struct pipe_inode_info *ipipe;
struct pipe_inode_info *opipe;
loff_t offset;
long ret;
ipipe = get_pipe_info(in);
opipe = get_pipe_info(out);
if (ipipe && opipe) {
if (off_in || off_out)
return -ESPIPE;
if (!(in->f_mode & FMODE_READ))
return -EBADF;
if (!(out->f_mode & FMODE_WRITE))
return -EBADF;
/* Splicing to self would be fun, but... */
if (ipipe == opipe)
return -EINVAL;
return splice_pipe_to_pipe(ipipe, opipe, len, flags);
}
if (ipipe) {
if (off_in)
return -ESPIPE;
if (off_out) {
if (!(out->f_mode & FMODE_PWRITE))
return -EINVAL;
if (copy_from_user(&offset, off_out, sizeof(loff_t)))
return -EFAULT;
} else {
offset = out->f_pos;
}
if (unlikely(!(out->f_mode & FMODE_WRITE)))
return -EBADF;
if (unlikely(out->f_flags & O_APPEND))
return -EINVAL;
ret = rw_verify_area(WRITE, out, &offset, len);
if (unlikely(ret < 0))
return ret;
file_start_write(out);
ret = do_splice_from(ipipe, out, &offset, len, flags);
file_end_write(out);
if (!off_out)
out->f_pos = offset;
else if (copy_to_user(off_out, &offset, sizeof(loff_t)))
ret = -EFAULT;
return ret;
}
if (opipe) {
if (off_out)
return -ESPIPE;
if (off_in) {
if (!(in->f_mode & FMODE_PREAD))
return -EINVAL;
if (copy_from_user(&offset, off_in, sizeof(loff_t)))
return -EFAULT;
} else {
offset = in->f_pos;
}
pipe_lock(opipe);
ret = wait_for_space(opipe, flags);
if (!ret)
ret = do_splice_to(in, &offset, opipe, len, flags);
pipe_unlock(opipe);
if (ret > 0)
wakeup_pipe_readers(opipe);
if (!off_in)
in->f_pos = offset;
else if (copy_to_user(off_in, &offset, sizeof(loff_t)))
ret = -EFAULT;
return ret;
}
return -EINVAL;
}
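/*
 * Pin the user pages referenced by @from and queue them on @pipe as
 * zero-copy buffers; pages that could not be queued are released again.
 */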
static int iter_to_pipe(struct iov_iter *from,
struct pipe_inode_info *pipe,
unsigned flags)
{
struct pipe_buffer buf = {
.ops = &user_page_pipe_buf_ops,
.flags = flags
};
size_t total = 0;
int ret = 0;
bool failed = false;
while (iov_iter_count(from) && !failed) {
struct page *pages[16];
ssize_t copied;
size_t start;
int n;
copied = iov_iter_get_pages(from, pages, ~0UL, 16, &start);
if (copied <= 0) {
ret = copied;
break;
}
for (n = 0; copied; n++, start = 0) {
int size = min_t(int, copied, PAGE_SIZE - start);
if (!failed) {
buf.page = pages[n];
buf.offset = start;
buf.len = size;
ret = add_to_pipe(pipe, &buf);
if (unlikely(ret < 0)) {
failed = true;
} else {
iov_iter_advance(from, ret);
total += ret;
}
} else {
put_page(pages[n]);
}
copied -= size;
}
}
return total ? total : ret;
}
static int pipe_to_user(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
struct splice_desc *sd)
{
int n = copy_page_to_iter(buf->page, buf->offset, sd->len, sd->u.data);
return n == sd->len ? n : -EFAULT;
}
/*
* For lack of a better implementation, implement vmsplice() to userspace
* as a simple copy of the pipes pages to the user iov.
*/
static long vmsplice_to_user(struct file *file, struct iov_iter *iter,
unsigned int flags)
{
struct pipe_inode_info *pipe = get_pipe_info(file);
struct splice_desc sd = {
.total_len = iov_iter_count(iter),
.flags = flags,
.u.data = iter
};
long ret = 0;
if (!pipe)
return -EBADF;
if (sd.total_len) {
pipe_lock(pipe);
ret = __splice_from_pipe(pipe, &sd, pipe_to_user);
pipe_unlock(pipe);
}
return ret;
}
/*
* vmsplice splices a user address range into a pipe. It can be thought of
* as splice-from-memory, where the regular splice is splice-from-file (or
* to file). In both cases the output is a pipe, naturally.
*/
static long vmsplice_to_pipe(struct file *file, struct iov_iter *iter,
unsigned int flags)
{
struct pipe_inode_info *pipe;
long ret = 0;
unsigned buf_flag = 0;
if (flags & SPLICE_F_GIFT)
buf_flag = PIPE_BUF_FLAG_GIFT;
pipe = get_pipe_info(file);
if (!pipe)
return -EBADF;
pipe_lock(pipe);
ret = wait_for_space(pipe, flags);
if (!ret)
ret = iter_to_pipe(iter, pipe, buf_flag);
pipe_unlock(pipe);
if (ret > 0)
wakeup_pipe_readers(pipe);
return ret;
}
static int vmsplice_type(struct fd f, int *type)
{
if (!f.file)
return -EBADF;
if (f.file->f_mode & FMODE_WRITE) {
*type = WRITE;
} else if (f.file->f_mode & FMODE_READ) {
*type = READ;
} else {
fdput(f);
return -EBADF;
}
return 0;
}
/*
* Note that vmsplice only really supports true splicing _from_ user memory
* to a pipe, not the other way around. Splicing from user memory is a simple
* operation that can be supported without any funky alignment restrictions
* or nasty vm tricks. We simply map in the user memory and fill it into
* a pipe. The reverse isn't quite as easy, though. There are two possible
* solutions for that:
*
* - memcpy() the data internally, at which point we might as well just
* do a regular read() on the buffer anyway.
* - Lots of nasty vm tricks that are neither fast nor flexible (they
* impose restrictions on both ends of the pipe).
*
* Currently we punt and implement it as a normal copy, see pipe_to_user().
*
*/
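/*
 * Illustrative (hypothetical, not part of this file) userspace call
 * pattern for the write direction, assuming <fcntl.h> and <sys/uio.h>:
 *
 *	struct iovec iov = { .iov_base = buf, .iov_len = len };
 *	ssize_t n = vmsplice(pipefd[1], &iov, 1, SPLICE_F_GIFT);
 *
 * With SPLICE_F_GIFT the pages may later be stolen by the consumer
 * rather than copied.
 */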
static long do_vmsplice(struct file *f, struct iov_iter *iter, unsigned int flags)
{
if (unlikely(flags & ~SPLICE_F_ALL))
return -EINVAL;
if (!iov_iter_count(iter))
return 0;
if (iov_iter_rw(iter) == WRITE)
return vmsplice_to_pipe(f, iter, flags);
else
return vmsplice_to_user(f, iter, flags);
}
SYSCALL_DEFINE4(vmsplice, int, fd, const struct iovec __user *, uiov,
unsigned long, nr_segs, unsigned int, flags)
{
struct iovec iovstack[UIO_FASTIOV];
struct iovec *iov = iovstack;
struct iov_iter iter;
long error;
struct fd f;
int type;
f = fdget(fd);
error = vmsplice_type(f, &type);
if (error)
return error;
error = import_iovec(type, uiov, nr_segs,
ARRAY_SIZE(iovstack), &iov, &iter);
if (!error) {
error = do_vmsplice(f.file, &iter, flags);
kfree(iov);
}
fdput(f);
return error;
}
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(vmsplice, int, fd, const struct compat_iovec __user *, iov32,
unsigned int, nr_segs, unsigned int, flags)
{
struct iovec iovstack[UIO_FASTIOV];
struct iovec *iov = iovstack;
struct iov_iter iter;
long error;
struct fd f;
int type;
f = fdget(fd);
error = vmsplice_type(f, &type);
if (error)
return error;
error = compat_import_iovec(type, iov32, nr_segs,
ARRAY_SIZE(iovstack), &iov, &iter);
if (!error) {
error = do_vmsplice(f.file, &iter, flags);
kfree(iov);
}
fdput(f);
return error;
}
#endif
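/*
 * splice(2) entry point: at least one of fd_in/fd_out must be a pipe.
 * Illustrative (hypothetical) userspace use, moving file data into a pipe:
 *
 *	ssize_t n = splice(file_fd, &off, pipefd[1], NULL, 4096, SPLICE_F_MOVE);
 */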
SYSCALL_DEFINE6(splice, int, fd_in, loff_t __user *, off_in,
int, fd_out, loff_t __user *, off_out,
size_t, len, unsigned int, flags)
{
struct fd in, out;
long error;
if (unlikely(!len))
return 0;
if (unlikely(flags & ~SPLICE_F_ALL))
return -EINVAL;
error = -EBADF;
in = fdget(fd_in);
if (in.file) {
if (in.file->f_mode & FMODE_READ) {
out = fdget(fd_out);
if (out.file) {
if (out.file->f_mode & FMODE_WRITE)
error = do_splice(in.file, off_in,
out.file, off_out,
len, flags);
fdput(out);
}
}
fdput(in);
}
return error;
}
/*
* Make sure there's data to read. Wait for input if we can, otherwise
* return an appropriate error.
*/
static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
{
int ret;
/*
* Check ->nrbufs without the inode lock first. This function
* is speculative anyway, so missing one is OK.
*/
if (pipe->nrbufs)
return 0;
ret = 0;
pipe_lock(pipe);
while (!pipe->nrbufs) {
if (signal_pending(current)) {
ret = -ERESTARTSYS;
break;
}
if (!pipe->writers)
break;
if (!pipe->waiting_writers) {
if (flags & SPLICE_F_NONBLOCK) {
ret = -EAGAIN;
break;
}
}
pipe_wait(pipe);
}
pipe_unlock(pipe);
return ret;
}
/*
* Make sure there's writable room. Wait for room if we can, otherwise
* return an appropriate error.
*/
static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
{
int ret;
/*
* Check ->nrbufs without the inode lock first. This function
* is speculative anyway, so missing one is OK.
*/
if (pipe->nrbufs < pipe->buffers)
return 0;
ret = 0;
pipe_lock(pipe);
while (pipe->nrbufs >= pipe->buffers) {
if (!pipe->readers) {
send_sig(SIGPIPE, current, 0);
ret = -EPIPE;
break;
}
if (flags & SPLICE_F_NONBLOCK) {
ret = -EAGAIN;
break;
}
if (signal_pending(current)) {
ret = -ERESTARTSYS;
break;
}
pipe->waiting_writers++;
pipe_wait(pipe);
pipe->waiting_writers--;
}
pipe_unlock(pipe);
return ret;
}
/*
* Splice contents of ipipe to opipe.
*/
static int splice_pipe_to_pipe(struct pipe_inode_info *ipipe,
struct pipe_inode_info *opipe,
size_t len, unsigned int flags)
{
struct pipe_buffer *ibuf, *obuf;
int ret = 0, nbuf;
bool input_wakeup = false;
retry:
ret = ipipe_prep(ipipe, flags);
if (ret)
return ret;
ret = opipe_prep(opipe, flags);
if (ret)
return ret;
/*
* Potential ABBA deadlock, work around it by ordering lock
* grabbing by pipe info address. Otherwise two different processes
* could deadlock (one doing tee from A -> B, the other from B -> A).
*/
pipe_double_lock(ipipe, opipe);
do {
if (!opipe->readers) {
send_sig(SIGPIPE, current, 0);
if (!ret)
ret = -EPIPE;
break;
}
if (!ipipe->nrbufs && !ipipe->writers)
break;
/*
* Cannot make any progress, because either the input
* pipe is empty or the output pipe is full.
*/
if (!ipipe->nrbufs || opipe->nrbufs >= opipe->buffers) {
/* Already processed some buffers, break */
if (ret)
break;
if (flags & SPLICE_F_NONBLOCK) {
ret = -EAGAIN;
break;
}
/*
* We raced with another reader/writer and haven't
* managed to process any buffers. A zero return
* value means EOF, so retry instead.
*/
pipe_unlock(ipipe);
pipe_unlock(opipe);
goto retry;
}
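/* curbuf is the oldest filled slot; the next free output slot is curbuf+nrbufs, wrapped by the power-of-two mask */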
ibuf = ipipe->bufs + ipipe->curbuf;
nbuf = (opipe->curbuf + opipe->nrbufs) & (opipe->buffers - 1);
obuf = opipe->bufs + nbuf;
if (len >= ibuf->len) {
/*
* Simply move the whole buffer from ipipe to opipe
*/
*obuf = *ibuf;
ibuf->ops = NULL;
opipe->nrbufs++;
ipipe->curbuf = (ipipe->curbuf + 1) & (ipipe->buffers - 1);
ipipe->nrbufs--;
input_wakeup = true;
} else {
/*
* Get a reference to this pipe buffer,
* so we can copy the contents over.
*/
if (!pipe_buf_get(ipipe, ibuf)) {
if (ret == 0)
ret = -EFAULT;
break;
}
*obuf = *ibuf;
/*
* Don't inherit the gift flag, we need to
* prevent multiple steals of this page.
*/
obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
obuf->len = len;
opipe->nrbufs++;
ibuf->offset += obuf->len;
ibuf->len -= obuf->len;
}
ret += obuf->len;
len -= obuf->len;
} while (len);
pipe_unlock(ipipe);
pipe_unlock(opipe);
/*
* If we put data in the output pipe, wakeup any potential readers.
*/
if (ret > 0)
wakeup_pipe_readers(opipe);
if (input_wakeup)
wakeup_pipe_writers(ipipe);
return ret;
}
/*
* Link contents of ipipe to opipe.
*/
static int link_pipe(struct pipe_inode_info *ipipe,
struct pipe_inode_info *opipe,
size_t len, unsigned int flags)
{
struct pipe_buffer *ibuf, *obuf;
int ret = 0, i = 0, nbuf;
/*
* Potential ABBA deadlock, work around it by ordering lock
* grabbing by pipe info address. Otherwise two different processes
* could deadlock (one doing tee from A -> B, the other from B -> A).
*/
pipe_double_lock(ipipe, opipe);
do {
if (!opipe->readers) {
send_sig(SIGPIPE, current, 0);
if (!ret)
ret = -EPIPE;
break;
}
/*
* If we have iterated all input buffers or ran out of
* output room, break.
*/
if (i >= ipipe->nrbufs || opipe->nrbufs >= opipe->buffers)
break;
ibuf = ipipe->bufs + ((ipipe->curbuf + i) & (ipipe->buffers-1));
nbuf = (opipe->curbuf + opipe->nrbufs) & (opipe->buffers - 1);
/*
* Get a reference to this pipe buffer,
* so we can copy the contents over.
*/
if (!pipe_buf_get(ipipe, ibuf)) {
if (ret == 0)
ret = -EFAULT;
break;
}
obuf = opipe->bufs + nbuf;
*obuf = *ibuf;
/*
* Don't inherit the gift flag, we need to
* prevent multiple steals of this page.
*/
obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
if (obuf->len > len)
obuf->len = len;
opipe->nrbufs++;
ret += obuf->len;
len -= obuf->len;
i++;
} while (len);
/*
* Return -EAGAIN if there is potential for more data in the
* future, otherwise just return 0.
*/
if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
ret = -EAGAIN;
pipe_unlock(ipipe);
pipe_unlock(opipe);
/*
* If we put data in the output pipe, wakeup any potential readers.
*/
if (ret > 0)
wakeup_pipe_readers(opipe);
return ret;
}
/*
* This is a tee(1) implementation that works on pipes. It doesn't copy
* any data, it simply references the 'in' pages on the 'out' pipe.
* The 'flags' used are the SPLICE_F_* variants, currently the only
* applicable one is SPLICE_F_NONBLOCK.
*/
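/*
 * Illustrative (hypothetical) userspace use, duplicating up to 64KiB
 * from one pipe to another without consuming the input:
 *
 *	ssize_t n = tee(pipe_a[0], pipe_b[1], 65536, SPLICE_F_NONBLOCK);
 */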
static long do_tee(struct file *in, struct file *out, size_t len,
unsigned int flags)
{
struct pipe_inode_info *ipipe = get_pipe_info(in);
struct pipe_inode_info *opipe = get_pipe_info(out);
int ret = -EINVAL;
/*
* Duplicate the contents of ipipe to opipe without actually
* copying the data.
*/
if (ipipe && opipe && ipipe != opipe) {
/*
* Keep going, unless we encounter an error. The ipipe/opipe
* ordering doesn't really matter.
*/
ret = ipipe_prep(ipipe, flags);
if (!ret) {
ret = opipe_prep(opipe, flags);
if (!ret)
ret = link_pipe(ipipe, opipe, len, flags);
}
}
return ret;
}
SYSCALL_DEFINE4(tee, int, fdin, int, fdout, size_t, len, unsigned int, flags)
{
struct fd in;
int error;
if (unlikely(flags & ~SPLICE_F_ALL))
return -EINVAL;
if (unlikely(!len))
return 0;
error = -EBADF;
in = fdget(fdin);
if (in.file) {
if (in.file->f_mode & FMODE_READ) {
struct fd out = fdget(fdout);
if (out.file) {
if (out.file->f_mode & FMODE_WRITE)
error = do_tee(in.file, out.file,
len, flags);
fdput(out);
}
}
fdput(in);
}
return error;
}
| ./CrossVul/dataset_final_sorted/CWE-416/c/good_819_2 |
crossvul-cpp_data_good_1373_0 | /*
* GPAC - Multimedia Framework C SDK
*
* Authors: Jean Le Feuvre
* Copyright (c) Telecom ParisTech 2005-2012
*
* This file is part of GPAC / MPEG2-TS sub-project
*
* GPAC is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* GPAC is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
#include <gpac/mpegts.h>
#ifndef GPAC_DISABLE_MPEG2TS
#include <string.h>
#include <gpac/constants.h>
#include <gpac/internal/media_dev.h>
#include <gpac/download.h>
#ifndef GPAC_DISABLE_STREAMING
#include <gpac/internal/ietf_dev.h>
#endif
#ifdef GPAC_CONFIG_LINUX
#include <unistd.h>
#endif
#ifdef GPAC_ENABLE_MPE
#include <gpac/dvb_mpe.h>
#endif
#ifdef GPAC_ENABLE_DSMCC
#include <gpac/ait.h>
#endif
#define DEBUG_TS_PACKET 0
GF_EXPORT
const char *gf_m2ts_get_stream_name(u32 streamType)
{
switch (streamType) {
case GF_M2TS_VIDEO_MPEG1:
return "MPEG-1 Video";
case GF_M2TS_VIDEO_MPEG2:
return "MPEG-2 Video";
case GF_M2TS_AUDIO_MPEG1:
return "MPEG-1 Audio";
case GF_M2TS_AUDIO_MPEG2:
return "MPEG-2 Audio";
case GF_M2TS_PRIVATE_SECTION:
return "Private Section";
case GF_M2TS_PRIVATE_DATA:
return "Private Data";
case GF_M2TS_AUDIO_AAC:
return "AAC Audio";
case GF_M2TS_VIDEO_MPEG4:
return "MPEG-4 Video";
case GF_M2TS_VIDEO_H264:
return "MPEG-4/H264 Video";
case GF_M2TS_VIDEO_SVC:
return "H264-SVC Video";
case GF_M2TS_VIDEO_HEVC:
return "HEVC Video";
case GF_M2TS_VIDEO_SHVC:
return "SHVC Video";
case GF_M2TS_VIDEO_SHVC_TEMPORAL:
return "SHVC Video Temporal Sublayer";
case GF_M2TS_VIDEO_MHVC:
return "MHVC Video";
case GF_M2TS_VIDEO_MHVC_TEMPORAL:
return "MHVC Video Temporal Sublayer";
case GF_M2TS_AUDIO_AC3:
return "Dolby AC3 Audio";
case GF_M2TS_AUDIO_DTS:
return "Dolby DTS Audio";
case GF_M2TS_SUBTITLE_DVB:
return "DVB Subtitle";
case GF_M2TS_SYSTEMS_MPEG4_PES:
return "MPEG-4 SL (PES)";
case GF_M2TS_SYSTEMS_MPEG4_SECTIONS:
return "MPEG-4 SL (Section)";
case GF_M2TS_MPE_SECTIONS:
return "MPE (Section)";
case GF_M2TS_METADATA_PES:
return "Metadata (PES)";
case GF_M2TS_METADATA_ID3_HLS:
return "ID3/HLS Metadata (PES)";
default:
return "Unknown";
}
}
static u32 gf_m2ts_reframe_default(GF_M2TS_Demuxer *ts, GF_M2TS_PES *pes, Bool same_pts, unsigned char *data, u32 data_len, GF_M2TS_PESHeader *pes_hdr)
{
GF_M2TS_PES_PCK pck;
pck.flags = 0;
if (pes->rap) pck.flags |= GF_M2TS_PES_PCK_RAP;
if (!same_pts) pck.flags |= GF_M2TS_PES_PCK_AU_START;
pck.DTS = pes->DTS;
pck.PTS = pes->PTS;
pck.data = (char *)data;
pck.data_len = data_len;
pck.stream = pes;
ts->on_event(ts, GF_M2TS_EVT_PES_PCK, &pck);
/*we consumed all data*/
return 0;
}
static u32 gf_m2ts_reframe_reset(GF_M2TS_Demuxer *ts, GF_M2TS_PES *pes, Bool same_pts, unsigned char *data, u32 data_len, GF_M2TS_PESHeader *pes_hdr)
{
if (pes->pck_data) {
gf_free(pes->pck_data);
pes->pck_data = NULL;
}
pes->pck_data_len = pes->pck_alloc_len = 0;
if (pes->prev_data) {
gf_free(pes->prev_data);
pes->prev_data = NULL;
}
pes->prev_data_len = 0;
pes->pes_len = 0;
pes->prev_PTS = 0;
pes->reframe = NULL;
pes->cc = -1;
pes->temi_tc_desc_len = 0;
return 0;
}
static void add_text(char **buffer, u32 *size, u32 *pos, char *msg, u32 msg_len)
{
if (!msg || !buffer) return;
if (*pos+msg_len>*size) {
/* grow to at least the required size; the previous formula could shrink the buffer below *pos+msg_len */
*size = *pos + msg_len + 256;
*buffer = (char *)gf_realloc(*buffer, *size);
}
strncpy((*buffer)+(*pos), msg, msg_len);
*pos += msg_len;
}
static GF_Err id3_parse_tag(char *data, u32 length, char **output, u32 *output_size, u32 *output_pos)
{
GF_BitStream *bs;
u32 pos;
/* an ID3v2 tag starts with a 10-byte header: "ID3", version, flags and a synchsafe size */
if (length < 10)
return GF_NOT_SUPPORTED;
if ((data[0] != 'I') || (data[1] != 'D') || (data[2] != '3'))
return GF_NOT_SUPPORTED;
bs = gf_bs_new(data, length, GF_BITSTREAM_READ);
gf_bs_skip_bytes(bs, 3);
/*u8 major = */gf_bs_read_u8(bs);
/*u8 minor = */gf_bs_read_u8(bs);
/*u8 unsync = */gf_bs_read_int(bs, 1);
/*u8 ext_hdr = */ gf_bs_read_int(bs, 1);
gf_bs_read_int(bs, 6);
u32 size = gf_id3_read_size(bs);
pos = (u32) gf_bs_get_position(bs);
if (size != length-pos)
size = length-pos;
while (size && (gf_bs_available(bs)>=10) ) {
u32 ftag = gf_bs_read_u32(bs);
u32 fsize = gf_id3_read_size(bs);
/*u16 fflags = */gf_bs_read_u16(bs);
size -= 10;
//TODO, handle more ID3 tags ?
if (ftag==ID3V2_FRAME_TXXX) {
u32 text_pos = (u32) gf_bs_get_position(bs);
char *text = data+text_pos;
/* never copy past the end of the tag buffer */
if (fsize > gf_bs_available(bs)) fsize = (u32) gf_bs_available(bs);
add_text(output, output_size, output_pos, text, fsize);
} else {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] ID3 tag %s not handled, patch welcome\n", gf_4cc_to_str(ftag) ) );
}
gf_bs_skip_bytes(bs, fsize);
}
gf_bs_del(bs);
return GF_OK;
}
static u32 gf_m2ts_reframe_id3_pes(GF_M2TS_Demuxer *ts, GF_M2TS_PES *pes, Bool same_pts, unsigned char *data, u32 data_len, GF_M2TS_PESHeader *pes_hdr)
{
char frame_header[256];
char *output_text = NULL;
u32 output_len = 0;
u32 pos = 0;
GF_M2TS_PES_PCK pck;
pck.flags = 0;
if (pes->rap) pck.flags |= GF_M2TS_PES_PCK_RAP;
if (!same_pts) pck.flags |= GF_M2TS_PES_PCK_AU_START;
pck.DTS = pes->DTS;
pck.PTS = pes->PTS;
sprintf(frame_header, LLU" --> NEXT\n", pes->PTS);
add_text(&output_text, &output_len, &pos, frame_header, (u32)strlen(frame_header));
id3_parse_tag((char *)data, data_len, &output_text, &output_len, &pos);
add_text(&output_text, &output_len, &pos, "\n\n", 2);
pck.data = (char *)output_text;
pck.data_len = pos;
pck.stream = pes;
ts->on_event(ts, GF_M2TS_EVT_PES_PCK, &pck);
gf_free(output_text);
/*we consumed all data*/
return 0;
}
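/*
 * Scan for a 0x47 sync byte confirmed by a second sync byte one packet
 * later, probing both 188-byte and 192-byte (4-byte prefixed) packets.
 */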
static u32 gf_m2ts_sync(GF_M2TS_Demuxer *ts, char *data, u32 size, Bool simple_check)
{
u32 i=0;
/*if first byte is sync assume we're sync*/
if (simple_check && (data[i]==0x47)) return 0;
while (i < size) {
if (i+188 >= size) return size;
if ((data[i]==0x47) && (data[i+188]==0x47))
break;
if (i+192 >= size) return size;
if ((data[i]==0x47) && (data[i+192]==0x47)) {
ts->prefix_present = 1;
break;
}
i++;
}
if (i) {
GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] re-sync skipped %d bytes\n", i) );
}
return i;
}
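/* the 32-bit CRC of a section is carried in the 4 bytes that immediately follow the covered payload */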
GF_EXPORT
Bool gf_m2ts_crc32_check(u8 *data, u32 len)
{
u32 crc = gf_crc_32(data, len);
u32 crc_val = GF_4CC((u8) data[len], (u8) data[len+1], (u8) data[len+2], (u8) data[len+3]);
return (crc==crc_val) ? GF_TRUE : GF_FALSE;
}
static GF_M2TS_SectionFilter *gf_m2ts_section_filter_new(gf_m2ts_section_callback process_section_callback, Bool process_individual)
{
GF_M2TS_SectionFilter *sec;
GF_SAFEALLOC(sec, GF_M2TS_SectionFilter);
if (!sec) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] gf_m2ts_section_filter_new : OUT OF MEMORY\n"));
return NULL;
}
sec->cc = -1;
sec->process_section = process_section_callback;
sec->process_individual = process_individual;
return sec;
}
static void gf_m2ts_reset_sections(GF_List *sections)
{
u32 count;
GF_M2TS_Section *section;
//GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] Deleting sections\n"));
count = gf_list_count(sections);
while (count) {
section = gf_list_get(sections, 0);
gf_list_rem(sections, 0);
if (section->data) gf_free(section->data);
gf_free(section);
count--;
}
}
static void gf_m2ts_section_filter_reset(GF_M2TS_SectionFilter *sf)
{
if (sf->section) {
gf_free(sf->section);
sf->section = NULL;
}
while (sf->table) {
GF_M2TS_Table *t = sf->table;
sf->table = t->next;
gf_m2ts_reset_sections(t->sections);
gf_list_del(t->sections);
gf_free(t);
}
sf->cc = -1;
sf->length = sf->received = 0;
sf->demux_restarted = 1;
}
static void gf_m2ts_section_filter_del(GF_M2TS_SectionFilter *sf)
{
gf_m2ts_section_filter_reset(sf);
gf_free(sf);
}
static void gf_m2ts_metadata_descriptor_del(GF_M2TS_MetadataDescriptor *metad)
{
if (metad) {
if (metad->service_id_record) gf_free(metad->service_id_record);
if (metad->decoder_config) gf_free(metad->decoder_config);
if (metad->decoder_config_id) gf_free(metad->decoder_config_id);
gf_free(metad);
}
}
GF_EXPORT
void gf_m2ts_es_del(GF_M2TS_ES *es, GF_M2TS_Demuxer *ts)
{
gf_list_del_item(es->program->streams, es);
if (es->flags & GF_M2TS_ES_IS_SECTION) {
GF_M2TS_SECTION_ES *ses = (GF_M2TS_SECTION_ES *)es;
if (ses->sec) gf_m2ts_section_filter_del(ses->sec);
#ifdef GPAC_ENABLE_MPE
if (es->flags & GF_M2TS_ES_IS_MPE)
gf_dvb_mpe_section_del(es);
#endif
} else if (es->pid!=es->program->pmt_pid) {
GF_M2TS_PES *pes = (GF_M2TS_PES *)es;
if ((pes->flags & GF_M2TS_INHERIT_PCR) && ts->ess[es->program->pcr_pid]==es)
ts->ess[es->program->pcr_pid] = NULL;
if (pes->pck_data) gf_free(pes->pck_data);
if (pes->prev_data) gf_free(pes->prev_data);
if (pes->buf) gf_free(pes->buf);
if (pes->reassemble_buf) gf_free(pes->reassemble_buf);
if (pes->temi_tc_desc) gf_free(pes->temi_tc_desc);
if (pes->metadata_descriptor) gf_m2ts_metadata_descriptor_del(pes->metadata_descriptor);
}
if (es->slcfg) gf_free(es->slcfg);
gf_free(es);
}
static void gf_m2ts_reset_sdt(GF_M2TS_Demuxer *ts)
{
while (gf_list_count(ts->SDTs)) {
GF_M2TS_SDT *sdt = (GF_M2TS_SDT *)gf_list_last(ts->SDTs);
gf_list_rem_last(ts->SDTs);
if (sdt->provider) gf_free(sdt->provider);
if (sdt->service) gf_free(sdt->service);
gf_free(sdt);
}
}
GF_EXPORT
GF_M2TS_SDT *gf_m2ts_get_sdt_info(GF_M2TS_Demuxer *ts, u32 program_id)
{
u32 i;
for (i=0; i<gf_list_count(ts->SDTs); i++) {
GF_M2TS_SDT *sdt = (GF_M2TS_SDT *)gf_list_get(ts->SDTs, i);
if (sdt->service_id==program_id) return sdt;
}
return NULL;
}
static void gf_m2ts_section_complete(GF_M2TS_Demuxer *ts, GF_M2TS_SectionFilter *sec, GF_M2TS_SECTION_ES *ses)
{
//seek mode, only process PAT and PMT
if (ts->seek_mode && (sec->section[0] != GF_M2TS_TABLE_ID_PAT) && (sec->section[0] != GF_M2TS_TABLE_ID_PMT)) {
/*clean-up (including broken sections)*/
if (sec->section) gf_free(sec->section);
sec->section = NULL;
sec->length = sec->received = 0;
return;
}
if (!sec->process_section) {
if ((ts->on_event && (sec->section[0]==GF_M2TS_TABLE_ID_AIT)) ) {
#ifdef GPAC_ENABLE_DSMCC
GF_M2TS_SL_PCK pck;
pck.data_len = sec->length;
pck.data = sec->section;
pck.stream = (GF_M2TS_ES *)ses;
//ts->on_event(ts, GF_M2TS_EVT_AIT_FOUND, &pck);
on_ait_section(ts, GF_M2TS_EVT_AIT_FOUND, &pck);
#endif
} else if ((ts->on_event && (sec->section[0]==GF_M2TS_TABLE_ID_DSM_CC_ENCAPSULATED_DATA || sec->section[0]==GF_M2TS_TABLE_ID_DSM_CC_UN_MESSAGE ||
sec->section[0]==GF_M2TS_TABLE_ID_DSM_CC_DOWNLOAD_DATA_MESSAGE || sec->section[0]==GF_M2TS_TABLE_ID_DSM_CC_STREAM_DESCRIPTION || sec->section[0]==GF_M2TS_TABLE_ID_DSM_CC_PRIVATE)) ) {
#ifdef GPAC_ENABLE_DSMCC
GF_M2TS_SL_PCK pck;
pck.data_len = sec->length;
pck.data = sec->section;
pck.stream = (GF_M2TS_ES *)ses;
on_dsmcc_section(ts,GF_M2TS_EVT_DSMCC_FOUND,&pck);
//ts->on_event(ts, GF_M2TS_EVT_DSMCC_FOUND, &pck);
#endif
}
#ifdef GPAC_ENABLE_MPE
else if (ts->on_mpe_event && ((ses && (ses->flags & GF_M2TS_EVT_DVB_MPE)) || (sec->section[0]==GF_M2TS_TABLE_ID_INT)) ) {
GF_M2TS_SL_PCK pck;
pck.data_len = sec->length;
pck.data = sec->section;
pck.stream = (GF_M2TS_ES *)ses;
ts->on_mpe_event(ts, GF_M2TS_EVT_DVB_MPE, &pck);
}
#endif
else if (ts->on_event) {
GF_M2TS_SL_PCK pck;
pck.data_len = sec->length;
pck.data = sec->section;
pck.stream = (GF_M2TS_ES *)ses;
ts->on_event(ts, GF_M2TS_EVT_DVB_GENERAL, &pck);
}
} else {
Bool has_syntax_indicator;
u8 table_id;
u16 extended_table_id;
u32 status, section_start, i;
GF_M2TS_Table *t, *prev_t;
unsigned char *data;
Bool section_valid = 0;
status = 0;
/*parse header*/
data = (u8 *)sec->section;
/*look for proper table*/
table_id = data[0];
if (ts->on_event) {
switch (table_id) {
case GF_M2TS_TABLE_ID_PAT:
case GF_M2TS_TABLE_ID_SDT_ACTUAL:
case GF_M2TS_TABLE_ID_PMT:
case GF_M2TS_TABLE_ID_NIT_ACTUAL:
case GF_M2TS_TABLE_ID_TDT:
case GF_M2TS_TABLE_ID_TOT:
{
GF_M2TS_SL_PCK pck;
pck.data_len = sec->length;
pck.data = sec->section;
pck.stream = (GF_M2TS_ES *)ses;
ts->on_event(ts, GF_M2TS_EVT_DVB_GENERAL, &pck);
}
}
}
has_syntax_indicator = (data[1] & 0x80) ? 1 : 0;
if (has_syntax_indicator) {
extended_table_id = (data[3]<<8) | data[4];
} else {
extended_table_id = 0;
}
prev_t = NULL;
t = sec->table;
while (t) {
if ((t->table_id==table_id) && (t->ex_table_id == extended_table_id)) break;
prev_t = t;
t = t->next;
}
/*create table*/
if (!t) {
GF_SAFEALLOC(t, GF_M2TS_Table);
if (!t) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] Fail to alloc table %d %d\n", table_id, extended_table_id));
return;
}
GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] Creating table %d %d\n", table_id, extended_table_id));
t->table_id = table_id;
t->ex_table_id = extended_table_id;
t->last_version_number = 0xFF;
t->sections = gf_list_new();
if (prev_t) prev_t->next = t;
else sec->table = t;
}
if (has_syntax_indicator) {
if (sec->length < 4) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] corrupted section length %d less than CRC \n", sec->length));
} else {
/*remove crc32*/
sec->length -= 4;
if (gf_m2ts_crc32_check((char *)data, sec->length)) {
s32 cur_sec_num;
t->version_number = (data[5] >> 1) & 0x1f;
if (t->last_section_number && t->section_number && (t->version_number != t->last_version_number)) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] table transmission interrupted: previous table (v=%d) %d/%d sections - new table (v=%d) %d/%d sections\n", t->last_version_number, t->section_number, t->last_section_number, t->version_number, data[6] + 1, data[7] + 1) );
gf_m2ts_reset_sections(t->sections);
t->section_number = 0;
}
t->current_next_indicator = (data[5] & 0x1) ? 1 : 0;
/*add one to section numbers to detect if we missed or not the first section in the table*/
cur_sec_num = data[6] + 1;
t->last_section_number = data[7] + 1;
section_start = 8;
/*we missed something*/
if (!sec->process_individual && t->section_number + 1 != cur_sec_num) {
/* TODO - Check how to handle sections when the first complete section does
not have its sec num 0 */
section_valid = 0;
if (t->is_init) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] corrupted table (lost section %d)\n", cur_sec_num ? cur_sec_num-1 : 31) );
}
} else {
section_valid = 1;
t->section_number = cur_sec_num;
}
} else {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] corrupted section (CRC32 failed)\n"));
}
}
} else {
section_valid = 1;
section_start = 3;
}
/*process section*/
if (section_valid) {
GF_M2TS_Section *section;
GF_SAFEALLOC(section, GF_M2TS_Section);
if (!section) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] Fail to create section\n"));
return;
}
section->data_size = sec->length - section_start;
section->data = (unsigned char*)gf_malloc(sizeof(unsigned char)*section->data_size);
memcpy(section->data, sec->section + section_start, sizeof(unsigned char)*section->data_size);
gf_list_add(t->sections, section);
if (t->section_number == 1) {
status |= GF_M2TS_TABLE_START;
if (t->last_version_number == t->version_number) {
t->is_repeat = 1;
} else {
t->is_repeat = 0;
}
/*only update version number in the first section of the table*/
t->last_version_number = t->version_number;
}
if (t->is_init) {
if (t->is_repeat) {
status |= GF_M2TS_TABLE_REPEAT;
} else {
status |= GF_M2TS_TABLE_UPDATE;
}
} else {
status |= GF_M2TS_TABLE_FOUND;
}
if (t->last_section_number == t->section_number) {
u32 table_size;
status |= GF_M2TS_TABLE_END;
table_size = 0;
for (i=0; i<gf_list_count(t->sections); i++) {
GF_M2TS_Section *section = gf_list_get(t->sections, i);
table_size += section->data_size;
}
if (t->is_repeat) {
if (t->table_size != table_size) {
status |= GF_M2TS_TABLE_UPDATE;
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] Repeated section found with different sizes (old table %d bytes, new table %d bytes)\n", t->table_size, table_size) );
t->table_size = table_size;
}
} else {
t->table_size = table_size;
}
t->is_init = 1;
/*reset section number*/
t->section_number = 0;
t->is_repeat = 0;
}
if (sec->process_individual) {
/*send each section of the table and not the aggregated table*/
if (sec->process_section)
sec->process_section(ts, ses, t->sections, t->table_id, t->ex_table_id, t->version_number, (u8) (t->last_section_number - 1), status);
gf_m2ts_reset_sections(t->sections);
} else {
if (status&GF_M2TS_TABLE_END) {
if (sec->process_section)
sec->process_section(ts, ses, t->sections, t->table_id, t->ex_table_id, t->version_number, (u8) (t->last_section_number - 1), status);
gf_m2ts_reset_sections(t->sections);
}
}
} else {
sec->cc = -1;
t->section_number = 0;
}
}
/*clean-up (including broken sections)*/
if (sec->section) gf_free(sec->section);
sec->section = NULL;
sec->length = sec->received = 0;
}
static Bool gf_m2ts_is_long_section(u8 table_id)
{
switch (table_id) {
case GF_M2TS_TABLE_ID_MPEG4_BIFS:
case GF_M2TS_TABLE_ID_MPEG4_OD:
case GF_M2TS_TABLE_ID_INT:
case GF_M2TS_TABLE_ID_EIT_ACTUAL_PF:
case GF_M2TS_TABLE_ID_EIT_OTHER_PF:
case GF_M2TS_TABLE_ID_ST:
case GF_M2TS_TABLE_ID_SIT:
case GF_M2TS_TABLE_ID_DSM_CC_PRIVATE:
case GF_M2TS_TABLE_ID_MPE_FEC:
case GF_M2TS_TABLE_ID_DSM_CC_DOWNLOAD_DATA_MESSAGE:
case GF_M2TS_TABLE_ID_DSM_CC_UN_MESSAGE:
return 1;
default:
if (table_id >= GF_M2TS_TABLE_ID_EIT_SCHEDULE_MIN && table_id <= GF_M2TS_TABLE_ID_EIT_SCHEDULE_MAX)
return 1;
else
return 0;
}
}
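/* section_length as parsed here is a 12-bit (long-form) or 10-bit (short-form) field in bytes 1-2; the 3 header bytes are added to get the total size */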
static u32 gf_m2ts_get_section_length(char byte0, char byte1, char byte2)
{
u32 length;
if (gf_m2ts_is_long_section(byte0)) {
length = 3 + ( ((((u32)byte1)<<8) | (byte2&0xff)) & 0xfff );
} else {
length = 3 + ( ((((u32)byte1)<<8) | (byte2&0xff)) & 0x3ff );
}
return length;
}
static void gf_m2ts_gather_section(GF_M2TS_Demuxer *ts, GF_M2TS_SectionFilter *sec, GF_M2TS_SECTION_ES *ses, GF_M2TS_Header *hdr, unsigned char *data, u32 data_size)
{
u32 payload_size = data_size;
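/* the 4-bit continuity counter must increment by one per packet on this PID; any gap signals a discontinuity */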
u8 expect_cc = (sec->cc<0) ? hdr->continuity_counter : (sec->cc + 1) & 0xf;
Bool disc = (expect_cc == hdr->continuity_counter) ? 0 : 1;
sec->cc = expect_cc;
/*may happen if hdr->adaptation_field=2 no payload in TS packet*/
if (!data_size) return;
if (hdr->payload_start) {
u32 ptr_field;
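/* when payload_unit_start_indicator is set, the first payload byte is the pointer_field: the offset from the next byte to the start of the new section */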
ptr_field = data[0];
if (ptr_field+1>data_size) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] Invalid section start (@ptr_field=%d, @data_size=%d)\n", ptr_field, data_size) );
return;
}
/*end of previous section*/
if (!sec->length && sec->received) {
/* the length of the section could not be determined from the previous TS packet because we had only 1 or 2 bytes */
if (sec->received == 1)
sec->length = gf_m2ts_get_section_length(sec->section[0], data[1], data[2]);
else /* (sec->received == 2) */
sec->length = gf_m2ts_get_section_length(sec->section[0], sec->section[1], data[1]);
sec->section = (char*)gf_realloc(sec->section, sizeof(char)*sec->length);
}
if (sec->length && sec->received + ptr_field >= sec->length) {
u32 len = sec->length - sec->received;
memcpy(sec->section + sec->received, data+1, sizeof(char)*len);
sec->received += len;
if (ptr_field > len)
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] Invalid pointer field (@ptr_field=%d, @remaining=%d)\n", ptr_field, len) );
gf_m2ts_section_complete(ts, sec, ses);
}
data += ptr_field+1;
data_size -= ptr_field+1;
payload_size -= ptr_field+1;
aggregated_section:
if (sec->section) gf_free(sec->section);
sec->length = sec->received = 0;
sec->section = (char*)gf_malloc(sizeof(char)*data_size);
memcpy(sec->section, data, sizeof(char)*data_size);
sec->received = data_size;
} else if (disc) {
if (sec->section) gf_free(sec->section);
sec->section = NULL;
sec->received = sec->length = 0;
return;
} else if (!sec->section) {
return;
} else {
if (sec->length && sec->received+data_size > sec->length)
data_size = sec->length - sec->received;
if (sec->length) {
memcpy(sec->section + sec->received, data, sizeof(char)*data_size);
} else {
sec->section = (char*)gf_realloc(sec->section, sizeof(char)*(sec->received+data_size));
memcpy(sec->section + sec->received, data, sizeof(char)*data_size);
}
sec->received += data_size;
}
/*alloc final buffer*/
if (!sec->length && (sec->received >= 3)) {
sec->length = gf_m2ts_get_section_length(sec->section[0], sec->section[1], sec->section[2]);
sec->section = (char*)gf_realloc(sec->section, sizeof(char)*sec->length);
if (sec->received > sec->length) {
data_size -= sec->received - sec->length;
sec->received = sec->length;
}
}
if (!sec->length || sec->received < sec->length) return;
/*OK done*/
gf_m2ts_section_complete(ts, sec, ses);
if (payload_size > data_size) {
data += data_size;
/* detect padding after previous section */
if (data[0] != 0xFF) {
data_size = payload_size - data_size;
payload_size = data_size;
goto aggregated_section;
}
}
}
static void gf_m2ts_process_sdt(GF_M2TS_Demuxer *ts, GF_M2TS_SECTION_ES *ses, GF_List *sections, u8 table_id, u16 ex_table_id, u8 version_number, u8 last_section_number, u32 status)
{
u32 pos, evt_type;
u32 nb_sections;
u32 data_size;
unsigned char *data;
GF_M2TS_Section *section;
/*wait for the last section */
if (!(status&GF_M2TS_TABLE_END)) return;
/*skip if already received*/
if (status&GF_M2TS_TABLE_REPEAT) {
if (ts->on_event) ts->on_event(ts, GF_M2TS_EVT_SDT_REPEAT, NULL);
return;
}
if (table_id != GF_M2TS_TABLE_ID_SDT_ACTUAL) {
return;
}
gf_m2ts_reset_sdt(ts);
nb_sections = gf_list_count(sections);
if (nb_sections > 1) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] SDT on multiple sections not supported\n"));
}
section = (GF_M2TS_Section *)gf_list_get(sections, 0);
data = section->data;
data_size = section->data_size;
//orig_net_id = (data[0] << 8) | data[1];
pos = 3;
while (pos < data_size) {
GF_M2TS_SDT *sdt;
u32 descs_size, d_pos, ulen;
GF_SAFEALLOC(sdt, GF_M2TS_SDT);
if (!sdt) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] Fail to create SDT\n"));
return;
}
gf_list_add(ts->SDTs, sdt);
sdt->service_id = (data[pos]<<8) + data[pos+1];
sdt->EIT_schedule = (data[pos+2] & 0x2) ? 1 : 0;
sdt->EIT_present_following = (data[pos+2] & 0x1);
sdt->running_status = (data[pos+3]>>5) & 0x7;
sdt->free_CA_mode = (data[pos+3]>>4) & 0x1;
descs_size = ((data[pos+3]&0xf)<<8) | data[pos+4];
pos += 5;
d_pos = 0;
while (d_pos < descs_size) {
u8 d_tag = data[pos+d_pos];
u8 d_len = data[pos+d_pos+1];
switch (d_tag) {
case GF_M2TS_DVB_SERVICE_DESCRIPTOR:
if (sdt->provider) gf_free(sdt->provider);
sdt->provider = NULL;
if (sdt->service) gf_free(sdt->service);
sdt->service = NULL;
d_pos+=2;
sdt->service_type = data[pos+d_pos];
ulen = data[pos+d_pos+1];
d_pos += 2;
sdt->provider = (char*)gf_malloc(sizeof(char)*(ulen+1));
memcpy(sdt->provider, data+pos+d_pos, sizeof(char)*ulen);
sdt->provider[ulen] = 0;
d_pos += ulen;
ulen = data[pos+d_pos];
d_pos += 1;
sdt->service = (char*)gf_malloc(sizeof(char)*(ulen+1));
memcpy(sdt->service, data+pos+d_pos, sizeof(char)*ulen);
sdt->service[ulen] = 0;
d_pos += ulen;
break;
default:
GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] Skipping descriptor (0x%x) not supported\n", d_tag));
d_pos += d_len;
if (d_len == 0) d_pos = descs_size;
break;
}
}
pos += descs_size;
}
evt_type = GF_M2TS_EVT_SDT_FOUND;
if (ts->on_event) ts->on_event(ts, evt_type, NULL);
}
static void gf_m2ts_process_mpeg4section(GF_M2TS_Demuxer *ts, GF_M2TS_SECTION_ES *es, GF_List *sections, u8 table_id, u16 ex_table_id, u8 version_number, u8 last_section_number, u32 status)
{
GF_M2TS_SL_PCK sl_pck;
u32 nb_sections, i;
GF_M2TS_Section *section;
/*skip if already received*/
if (status & GF_M2TS_TABLE_REPEAT)
if (!(es->flags & GF_M2TS_ES_SEND_REPEATED_SECTIONS))
return;
GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] Sections for PID %d\n", es->pid) );
/*send all sections (eg SL-packets)*/
nb_sections = gf_list_count(sections);
for (i=0; i<nb_sections; i++) {
section = (GF_M2TS_Section *)gf_list_get(sections, i);
sl_pck.data = (char *)section->data;
sl_pck.data_len = section->data_size;
sl_pck.stream = (GF_M2TS_ES *)es;
sl_pck.version_number = version_number;
if (ts->on_event) ts->on_event(ts, GF_M2TS_EVT_SL_PCK, &sl_pck);
}
}
static void gf_m2ts_process_nit(GF_M2TS_Demuxer *ts, GF_M2TS_SECTION_ES *nit_es, GF_List *sections, u8 table_id, u16 ex_table_id, u8 version_number, u8 last_section_number, u32 status)
{
GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] NIT table processing (not yet implemented)"));
}
static void gf_m2ts_process_tdt_tot(GF_M2TS_Demuxer *ts, GF_M2TS_SECTION_ES *tdt_tot_es, GF_List *sections, u8 table_id, u16 ex_table_id, u8 version_number, u8 last_section_number, u32 status)
{
unsigned char *data;
u32 data_size, nb_sections;
u32 date, yp, mp, k;
GF_M2TS_Section *section;
GF_M2TS_TDT_TOT *time_table;
const char *table_name;
/*wait for the last section */
if ( !(status & GF_M2TS_TABLE_END) )
return;
switch (table_id) {
case GF_M2TS_TABLE_ID_TDT:
table_name = "TDT";
break;
case GF_M2TS_TABLE_ID_TOT:
table_name = "TOT";
break;
default:
GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] Unimplemented table_id %u for PID %u\n", table_id, GF_M2TS_PID_TDT_TOT_ST));
return;
}
nb_sections = gf_list_count(sections);
if (nb_sections > 1) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] %s on multiple sections not supported\n", table_name));
}
section = (GF_M2TS_Section *)gf_list_get(sections, 0);
data = section->data;
data_size = section->data_size;
/*TDT only contains 40 bits of UTC_time; TOT adds descriptors and a CRC*/
if ((table_id==GF_M2TS_TABLE_ID_TDT) && (data_size != 5)) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] Corrupted %s size\n", table_name));
}
GF_SAFEALLOC(time_table, GF_M2TS_TDT_TOT);
if (!time_table) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] Fail to alloc DVB time table\n"));
return;
}
/* decode the 16-bit Modified Julian Date (MJD) and BCD-coded time into UTC,
   see annex C of DVB-SI ETSI EN 300468 */
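/* worked example (cf EN 300468 annex C): MJD 45218 gives yp=82, mp=10, day=6, k=0, i.e. 6 September 1982 */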
date = data[0]*256 + data[1];
yp = (u32)((date - 15078.2)/365.25);
mp = (u32)((date - 14956.1 - (u32)(yp * 365.25))/30.6001);
time_table->day = (u32)(date - 14956 - (u32)(yp * 365.25) - (u32)(mp * 30.6001));
if (mp == 14 || mp == 15) k = 1;
else k = 0;
time_table->year = yp + k + 1900;
time_table->month = mp - 1 - k*12;
time_table->hour = 10*((data[2]&0xf0)>>4) + (data[2]&0x0f);
time_table->minute = 10*((data[3]&0xf0)>>4) + (data[3]&0x0f);
time_table->second = 10*((data[4]&0xf0)>>4) + (data[4]&0x0f);
if ((time_table->hour > 23) || (time_table->minute > 59) || (time_table->second > 59)) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] Invalid UTC time in %s\n", table_name));
goto error_exit;
}
GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] Stream UTC time is %u/%02u/%02u %02u:%02u:%02u\n", time_table->year, time_table->month, time_table->day, time_table->hour, time_table->minute, time_table->second));
switch (table_id) {
case GF_M2TS_TABLE_ID_TDT:
if (ts->TDT_time) gf_free(ts->TDT_time);
ts->TDT_time = time_table;
if (ts->on_event) ts->on_event(ts, GF_M2TS_EVT_TDT, time_table);
break;
case GF_M2TS_TABLE_ID_TOT:
#if 0
{
u32 pos, loop_len;
loop_len = ((data[5]&0x0f) << 8) | (data[6] & 0xff);
data += 7;
pos = 0;
while (pos < loop_len) {
u8 tag = data[pos];
pos += 2;
if (tag == GF_M2TS_DVB_LOCAL_TIME_OFFSET_DESCRIPTOR) {
char tmp_time[10];
u16 offset_hours, offset_minutes;
now->country_code[0] = data[pos];
now->country_code[1] = data[pos+1];
now->country_code[2] = data[pos+2];
now->country_region_id = data[pos+3]>>2;
sprintf(tmp_time, "%02x", data[pos+4]);
offset_hours = atoi(tmp_time);
sprintf(tmp_time, "%02x", data[pos+5]);
offset_minutes = atoi(tmp_time);
now->local_time_offset_seconds = (offset_hours * 60 + offset_minutes) * 60;
if (data[pos+3] & 1) now->local_time_offset_seconds *= -1;
dvb_decode_mjd_to_unix_time(data+pos+6, &now->unix_next_toc);
sprintf(tmp_time, "%02x", data[pos+11]);
offset_hours = atoi(tmp_time);
sprintf(tmp_time, "%02x", data[pos+12]);
offset_minutes = atoi(tmp_time);
now->next_time_offset_seconds = (offset_hours * 60 + offset_minutes) * 60;
if (data[pos+3] & 1) now->next_time_offset_seconds *= -1;
pos+= 13;
}
}
/*TODO: check lengths are ok*/
if (ts->on_event) ts->on_event(ts, GF_M2TS_EVT_TOT, time_table);
}
#endif
/*check CRC32*/
if (ts->tdt_tot->length<4) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] corrupted %s table (less than 4 bytes but CRC32 should be present\n", table_name));
goto error_exit;
}
if (!gf_m2ts_crc32_check(ts->tdt_tot->section, ts->tdt_tot->length-4)) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] corrupted %s table (CRC32 failed)\n", table_name));
goto error_exit;
}
if (ts->TDT_time) gf_free(ts->TDT_time);
ts->TDT_time = time_table;
if (ts->on_event) ts->on_event(ts, GF_M2TS_EVT_TOT, time_table);
break;
default:
assert(0);
goto error_exit;
}
return; /*success*/
error_exit:
gf_free(time_table);
return;
}
static GF_M2TS_MetadataPointerDescriptor *gf_m2ts_read_metadata_pointer_descriptor(GF_BitStream *bs, u32 length)
{
u32 size;
GF_M2TS_MetadataPointerDescriptor *d;
GF_SAFEALLOC(d, GF_M2TS_MetadataPointerDescriptor);
if (!d) return NULL;
d->application_format = gf_bs_read_u16(bs);
size = 2;
if (d->application_format == 0xFFFF) {
d->application_format_identifier = gf_bs_read_u32(bs);
size += 4;
}
d->format = gf_bs_read_u8(bs);
size += 1;
if (d->format == 0xFF) {
d->format_identifier = gf_bs_read_u32(bs);
size += 4;
}
d->service_id = gf_bs_read_u8(bs);
d->locator_record_flag = (gf_bs_read_int(bs, 1) ? GF_TRUE : GF_FALSE);
d->carriage_flag = (enum metadata_carriage)gf_bs_read_int(bs, 2);
gf_bs_read_int(bs, 5); /*reserved */
size += 2;
if (d->locator_record_flag) {
d->locator_length = gf_bs_read_u8(bs);
d->locator_data = (char *)gf_malloc(d->locator_length);
size += 1 + d->locator_length;
gf_bs_read_data(bs, d->locator_data, d->locator_length);
}
if (d->carriage_flag != 3) {
d->program_number = gf_bs_read_u16(bs);
size += 2;
}
if (d->carriage_flag == 1) {
d->ts_location = gf_bs_read_u16(bs);
d->ts_id = gf_bs_read_u16(bs);
size += 4;
}
if (length > size) {
d->data_size = length-size;
d->data = (char *)gf_malloc(d->data_size);
gf_bs_read_data(bs, d->data, d->data_size);
}
return d;
}
static void gf_m2ts_metadata_pointer_descriptor_del(GF_M2TS_MetadataPointerDescriptor *metapd)
{
if (metapd) {
if (metapd->locator_data) gf_free(metapd->locator_data);
if (metapd->data) gf_free(metapd->data);
gf_free(metapd);
}
}
static GF_M2TS_MetadataDescriptor *gf_m2ts_read_metadata_descriptor(GF_BitStream *bs, u32 length)
{
u32 size;
GF_M2TS_MetadataDescriptor *d;
GF_SAFEALLOC(d, GF_M2TS_MetadataDescriptor);
if (!d) return NULL;
d->application_format = gf_bs_read_u16(bs);
size = 2;
if (d->application_format == 0xFFFF) {
d->application_format_identifier = gf_bs_read_u32(bs);
size += 4;
}
d->format = gf_bs_read_u8(bs);
size += 1;
if (d->format == 0xFF) {
d->format_identifier = gf_bs_read_u32(bs);
size += 4;
}
d->service_id = gf_bs_read_u8(bs);
d->decoder_config_flags = gf_bs_read_int(bs, 3);
d->dsmcc_flag = (gf_bs_read_int(bs, 1) ? GF_TRUE : GF_FALSE);
gf_bs_read_int(bs, 4); /* reserved */
size += 2;
if (d->dsmcc_flag) {
d->service_id_record_length = gf_bs_read_u8(bs);
d->service_id_record = (char *)gf_malloc(d->service_id_record_length);
size += 1 + d->service_id_record_length;
gf_bs_read_data(bs, d->service_id_record, d->service_id_record_length);
}
if (d->decoder_config_flags == 1) {
d->decoder_config_length = gf_bs_read_u8(bs);
d->decoder_config = (char *)gf_malloc(d->decoder_config_length);
size += 1 + d->decoder_config_length;
gf_bs_read_data(bs, d->decoder_config, d->decoder_config_length);
}
if (d->decoder_config_flags == 3) {
d->decoder_config_id_length = gf_bs_read_u8(bs);
d->decoder_config_id = (char *)gf_malloc(d->decoder_config_id_length);
size += 1 + d->decoder_config_id_length;
gf_bs_read_data(bs, d->decoder_config_id, d->decoder_config_id_length);
}
if (d->decoder_config_flags == 4) {
d->decoder_config_service_id = gf_bs_read_u8(bs);
size++;
}
return d;
}
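/*
 * PMT layout: PCR PID (13 bits), program_info_length with its descriptor
 * loop, then per-stream entries of 5 bytes (stream_type, PID,
 * ES_info_length), each followed by an ES-level descriptor loop.
 */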
static void gf_m2ts_process_pmt(GF_M2TS_Demuxer *ts, GF_M2TS_SECTION_ES *pmt, GF_List *sections, u8 table_id, u16 ex_table_id, u8 version_number, u8 last_section_number, u32 status)
{
u32 info_length, pos, desc_len, evt_type, nb_es,i;
u32 nb_sections;
u32 data_size;
u32 nb_hevc, nb_hevc_temp, nb_shvc, nb_shvc_temp, nb_mhvc, nb_mhvc_temp;
unsigned char *data;
GF_M2TS_Section *section;
GF_Err e = GF_OK;
/*wait for the last section */
if (!(status&GF_M2TS_TABLE_END)) return;
nb_es = 0;
/*skip if already received but no update detected (eg same data) */
if ((status&GF_M2TS_TABLE_REPEAT) && !(status&GF_M2TS_TABLE_UPDATE)) {
if (ts->on_event) ts->on_event(ts, GF_M2TS_EVT_PMT_REPEAT, pmt->program);
return;
}
if (pmt->sec->demux_restarted) {
pmt->sec->demux_restarted = 0;
return;
}
GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] PMT Found or updated\n"));
nb_sections = gf_list_count(sections);
if (nb_sections > 1) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("PMT on multiple sections not supported\n"));
}
section = (GF_M2TS_Section *)gf_list_get(sections, 0);
data = section->data;
data_size = section->data_size;
pmt->program->pcr_pid = ((data[0] & 0x1f) << 8) | data[1];
info_length = ((data[2]&0xf)<<8) | data[3];
if (info_length + 4 > data_size) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("Broken PMT first loop, %d bytes avail but first loop size %d\n", data_size, info_length));
return;
} else if (info_length != 0) {
/* ...Read Descriptors ... */
u32 first_loop_len = 0;
/* re-read tag and length for every descriptor: the loop walks the whole first descriptor loop instead of re-processing the first entry */
while (info_length > first_loop_len + 1) {
u8 tag = data[4 + first_loop_len];
u8 len = data[5 + first_loop_len];
unsigned char *desc_data = data + 6 + first_loop_len;
if (first_loop_len + 2 + len > info_length) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("Broken PMT first loop, descriptor len %d overflows loop size %d\n", len, info_length));
break;
}
if (tag == GF_M2TS_MPEG4_IOD_DESCRIPTOR) {
if (len > 2) {
u32 size;
GF_BitStream *iod_bs;
/* skip the 2-byte scope/label prefix preceding the IOD itself */
iod_bs = gf_bs_new((char *)desc_data+2, len-2, GF_BITSTREAM_READ);
if (pmt->program->pmt_iod) gf_odf_desc_del((GF_Descriptor *)pmt->program->pmt_iod);
e = gf_odf_parse_descriptor(iod_bs, (GF_Descriptor **) &pmt->program->pmt_iod, &size);
gf_bs_del(iod_bs);
if (e==GF_OK) {
/*remember program number for service/program selection*/
if (pmt->program->pmt_iod) pmt->program->pmt_iod->ServiceID = pmt->program->number;
/*if empty IOD (freebox case), discard it and use dynamic declaration of object*/
if (pmt->program->pmt_iod && !gf_list_count(pmt->program->pmt_iod->ESDescriptors)) {
gf_odf_desc_del((GF_Descriptor *)pmt->program->pmt_iod);
pmt->program->pmt_iod = NULL;
}
}
} else {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("Broken IOD! len %d, need more than 2 bytes to declare the IOD\n", len));
}
} else if (tag == GF_M2TS_METADATA_POINTER_DESCRIPTOR) {
GF_BitStream *metadatapd_bs;
GF_M2TS_MetadataPointerDescriptor *metapd;
metadatapd_bs = gf_bs_new((char *)desc_data, len, GF_BITSTREAM_READ);
metapd = gf_m2ts_read_metadata_pointer_descriptor(metadatapd_bs, len);
gf_bs_del(metadatapd_bs);
if (metapd && (metapd->application_format_identifier == GF_M2TS_META_ID3)
&& (metapd->format_identifier == GF_M2TS_META_ID3)
&& (metapd->carriage_flag == METADATA_CARRIAGE_SAME_TS)) {
/*HLS ID3 Metadata */
pmt->program->metadata_pointer_descriptor = metapd;
} else {
/* don't know what to do with it for now, delete */
gf_m2ts_metadata_pointer_descriptor_del(metapd);
}
} else {
GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] Skipping unsupported descriptor (0x%x)\n", tag));
}
first_loop_len += 2 + len;
}
}
if (data_size <= 4 + info_length) return;
data += 4 + info_length;
data_size -= 4 + info_length;
pos = 0;
/* locate the program entry whose PMT PID matches this PMT */
for(i=0; i<gf_list_count(ts->programs); i++) {
GF_M2TS_Program *prog = (GF_M2TS_Program *)gf_list_get(ts->programs,i);
if(prog->pmt_pid == pmt->pid) {
break;
}
}
nb_hevc = nb_hevc_temp = nb_shvc = nb_shvc_temp = nb_mhvc = nb_mhvc_temp = 0;
while (pos<data_size) {
GF_M2TS_PES *pes = NULL;
GF_M2TS_SECTION_ES *ses = NULL;
GF_M2TS_ES *es = NULL;
Bool inherit_pcr = 0;
u32 pid, stream_type, reg_desc_format;
if (pos + 5 > data_size) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("Broken PMT! size %d but position %d and need at least 5 bytes to declare es\n", data_size, pos));
break;
}
stream_type = data[0];
pid = ((data[1] & 0x1f) << 8) | data[2];
desc_len = ((data[3] & 0xf) << 8) | data[4];
GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("stream_type :%d \n",stream_type));
switch (stream_type) {
/* PES */
case GF_M2TS_VIDEO_MPEG1:
case GF_M2TS_VIDEO_MPEG2:
case GF_M2TS_VIDEO_DCII:
case GF_M2TS_VIDEO_MPEG4:
case GF_M2TS_SYSTEMS_MPEG4_PES:
case GF_M2TS_VIDEO_H264:
case GF_M2TS_VIDEO_SVC:
case GF_M2TS_VIDEO_MVCD:
case GF_M2TS_VIDEO_HEVC:
case GF_M2TS_VIDEO_HEVC_MCTS:
case GF_M2TS_VIDEO_HEVC_TEMPORAL:
case GF_M2TS_VIDEO_SHVC:
case GF_M2TS_VIDEO_SHVC_TEMPORAL:
case GF_M2TS_VIDEO_MHVC:
case GF_M2TS_VIDEO_MHVC_TEMPORAL:
inherit_pcr = 1;
case GF_M2TS_AUDIO_MPEG1:
case GF_M2TS_AUDIO_MPEG2:
case GF_M2TS_AUDIO_AAC:
case GF_M2TS_AUDIO_LATM_AAC:
case GF_M2TS_AUDIO_AC3:
case GF_M2TS_AUDIO_DTS:
case GF_M2TS_MHAS_MAIN:
case GF_M2TS_MHAS_AUX:
case GF_M2TS_SUBTITLE_DVB:
case GF_M2TS_METADATA_PES:
GF_SAFEALLOC(pes, GF_M2TS_PES);
if (!pes) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG2TS] Failed to allocate ES for pid %d\n", pid));
return;
}
pes->cc = -1;
pes->flags = GF_M2TS_ES_IS_PES;
if (inherit_pcr)
pes->flags |= GF_M2TS_INHERIT_PCR;
es = (GF_M2TS_ES *)pes;
break;
case GF_M2TS_PRIVATE_DATA:
GF_SAFEALLOC(pes, GF_M2TS_PES);
if (!pes) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG2TS] Failed to allocate ES for pid %d\n", pid));
return;
}
pes->cc = -1;
pes->flags = GF_M2TS_ES_IS_PES;
es = (GF_M2TS_ES *)pes;
break;
/* Sections */
case GF_M2TS_SYSTEMS_MPEG4_SECTIONS:
GF_SAFEALLOC(ses, GF_M2TS_SECTION_ES);
if (!ses) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG2TS] Failed to allocate ES for pid %d\n", pid));
return;
}
es = (GF_M2TS_ES *)ses;
es->flags |= GF_M2TS_ES_IS_SECTION;
/* carriage of ISO_IEC_14496 data in sections */
if (stream_type == GF_M2TS_SYSTEMS_MPEG4_SECTIONS) {
/*MPEG-4 sections need to be fully checked: if one section is lost, this means we lost
one SL packet in the AU so we must wait for the complete section again*/
ses->sec = gf_m2ts_section_filter_new(gf_m2ts_process_mpeg4section, 0);
/*create OD container*/
if (!pmt->program->additional_ods) {
pmt->program->additional_ods = gf_list_new();
ts->has_4on2 = 1;
}
}
break;
case GF_M2TS_13818_6_ANNEX_A:
case GF_M2TS_13818_6_ANNEX_B:
case GF_M2TS_13818_6_ANNEX_C:
case GF_M2TS_13818_6_ANNEX_D:
case GF_M2TS_PRIVATE_SECTION:
case GF_M2TS_QUALITY_SEC:
case GF_M2TS_MORE_SEC:
GF_SAFEALLOC(ses, GF_M2TS_SECTION_ES);
if (!ses) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG2TS] Failed to allocate ES for pid %d\n", pid));
return;
}
es = (GF_M2TS_ES *)ses;
es->flags |= GF_M2TS_ES_IS_SECTION;
es->pid = pid;
es->service_id = pmt->program->number;
if (stream_type == GF_M2TS_PRIVATE_SECTION) {
GF_LOG(GF_LOG_INFO, GF_LOG_CONTAINER, ("AIT sections on pid %d\n", pid));
} else if (stream_type == GF_M2TS_QUALITY_SEC) {
GF_LOG(GF_LOG_INFO, GF_LOG_CONTAINER, ("Quality metadata sections on pid %d\n", pid));
} else if (stream_type == GF_M2TS_MORE_SEC) {
GF_LOG(GF_LOG_INFO, GF_LOG_CONTAINER, ("MORE sections on pid %d\n", pid));
} else {
GF_LOG(GF_LOG_INFO, GF_LOG_CONTAINER, ("stream type DSM CC user private sections on pid %d \n", pid));
}
/* NULL means: trigger the call to on_event with DVB_GENERAL type and the raw section as payload */
ses->sec = gf_m2ts_section_filter_new(NULL, 1);
//ses->sec->service_id = pmt->program->number;
break;
case GF_M2TS_MPE_SECTIONS:
if (! ts->prefix_present) {
GF_LOG(GF_LOG_INFO, GF_LOG_CONTAINER, ("stream type MPE found : pid = %d \n", pid));
#ifdef GPAC_ENABLE_MPE
es = gf_dvb_mpe_section_new();
if (es->flags & GF_M2TS_ES_IS_SECTION) {
/* NULL means: trigger the call to on_event with DVB_GENERAL type and the raw section as payload */
((GF_M2TS_SECTION_ES*)es)->sec = gf_m2ts_section_filter_new(NULL, 1);
}
#endif
break;
}
default:
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] Stream type (0x%x) for PID %d not supported\n", stream_type, pid ) );
//GF_LOG(/*GF_LOG_WARNING*/GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] Stream type (0x%x) for PID %d not supported\n", stream_type, pid ) );
break;
}
if (es) {
es->stream_type = (stream_type==GF_M2TS_PRIVATE_DATA) ? 0 : stream_type;
es->program = pmt->program;
es->pid = pid;
es->component_tag = -1;
}
pos += 5;
data += 5;
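/*ES descriptor loop: each descriptor is tag(8) + length(8) followed by 'length' bytes of payload*/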
while (desc_len) {
if (pos + 2 > data_size) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("Broken PMT descriptor! size %d but position %d and need at least 2 bytes to parse descritpor\n", data_size, pos));
break;
}
u8 tag = data[0];
u32 len = data[1];
if (pos + 2 + len > data_size) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("Broken PMT descriptor! size %d, desc size %d but position %d\n", data_size, len, pos));
break;
}
if (es) {
switch (tag) {
case GF_M2TS_ISO_639_LANGUAGE_DESCRIPTOR:
if (pes && (len>=3) )
pes->lang = GF_4CC(' ', data[2], data[3], data[4]);
break;
case GF_M2TS_MPEG4_SL_DESCRIPTOR:
if (len>=2) {
es->mpeg4_es_id = ( (u32) data[2] & 0x1f) << 8 | data[3];
es->flags |= GF_M2TS_ES_IS_SL;
}
break;
case GF_M2TS_REGISTRATION_DESCRIPTOR:
if (len>=4) {
reg_desc_format = GF_4CC(data[2], data[3], data[4], data[5]);
/*cf http://www.smpte-ra.org/mpegreg/mpegreg.html*/
switch (reg_desc_format) {
case GF_M2TS_RA_STREAM_AC3:
es->stream_type = GF_M2TS_AUDIO_AC3;
break;
case GF_M2TS_RA_STREAM_VC1:
es->stream_type = GF_M2TS_VIDEO_VC1;
break;
case GF_M2TS_RA_STREAM_GPAC:
if (len==8) {
es->stream_type = GF_4CC(data[6], data[7], data[8], data[9]);
es->flags |= GF_M2TS_GPAC_CODEC_ID;
break;
}
default:
GF_LOG(GF_LOG_INFO, GF_LOG_CONTAINER, ("Unknown registration descriptor %s\n", gf_4cc_to_str(reg_desc_format) ));
break;
}
}
break;
case GF_M2TS_DVB_EAC3_DESCRIPTOR:
es->stream_type = GF_M2TS_AUDIO_EC3;
break;
case GF_M2TS_DVB_DATA_BROADCAST_ID_DESCRIPTOR:
if (len>=2) {
u32 id = data[2]<<8 | data[3];
if ((id == 0xB) && ses && !ses->sec) {
ses->sec = gf_m2ts_section_filter_new(NULL, 1);
}
}
break;
case GF_M2TS_DVB_SUBTITLING_DESCRIPTOR:
if (pes && (len>=8)) {
pes->sub.language[0] = data[2];
pes->sub.language[1] = data[3];
pes->sub.language[2] = data[4];
pes->sub.type = data[5];
pes->sub.composition_page_id = (data[6]<<8) | data[7];
pes->sub.ancillary_page_id = (data[8]<<8) | data[9];
}
es->stream_type = GF_M2TS_DVB_SUBTITLE;
break;
case GF_M2TS_DVB_STREAM_IDENTIFIER_DESCRIPTOR:
if (len>=1) {
es->component_tag = data[2];
GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("Component Tag: %d on Program %d\n", es->component_tag, es->program->number));
}
break;
case GF_M2TS_DVB_TELETEXT_DESCRIPTOR:
es->stream_type = GF_M2TS_DVB_TELETEXT;
break;
case GF_M2TS_DVB_VBI_DATA_DESCRIPTOR:
es->stream_type = GF_M2TS_DVB_VBI;
break;
case GF_M2TS_HIERARCHY_DESCRIPTOR:
if (pes && (len>=4)) {
u8 hierarchy_embedded_layer_index;
GF_BitStream *hbs = gf_bs_new((const char *)data, data_size, GF_BITSTREAM_READ);
/*u32 skip = */gf_bs_read_int(hbs, 16);
/*u8 res1 = */gf_bs_read_int(hbs, 1);
/*u8 temp_scal = */gf_bs_read_int(hbs, 1);
/*u8 spatial_scal = */gf_bs_read_int(hbs, 1);
/*u8 quality_scal = */gf_bs_read_int(hbs, 1);
/*u8 hierarchy_type = */gf_bs_read_int(hbs, 4);
/*u8 res2 = */gf_bs_read_int(hbs, 2);
/*u8 hierarchy_layer_index = */gf_bs_read_int(hbs, 6);
/*u8 tref_not_present = */gf_bs_read_int(hbs, 1);
/*u8 res3 = */gf_bs_read_int(hbs, 1);
hierarchy_embedded_layer_index = gf_bs_read_int(hbs, 6);
/*u8 res4 = */gf_bs_read_int(hbs, 2);
/*u8 hierarchy_channel = */gf_bs_read_int(hbs, 6);
gf_bs_del(hbs);
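/*store the dependency 1-based for now; it is translated into the PID of the base layer once the whole ES loop has been parsed (see below)*/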
pes->depends_on_pid = 1+hierarchy_embedded_layer_index;
}
break;
case GF_M2TS_METADATA_DESCRIPTOR:
{
GF_BitStream *metadatad_bs;
GF_M2TS_MetadataDescriptor *metad;
metadatad_bs = gf_bs_new((char *)data+2, len, GF_BITSTREAM_READ);
metad = gf_m2ts_read_metadata_descriptor(metadatad_bs, len);
gf_bs_del(metadatad_bs);
if (metad->application_format_identifier == GF_M2TS_META_ID3 &&
metad->format_identifier == GF_M2TS_META_ID3) {
/*HLS ID3 Metadata */
if (pes) {
pes->metadata_descriptor = metad;
pes->stream_type = GF_M2TS_METADATA_ID3_HLS;
}
} else {
/* don't know what to do with it for now, delete */
gf_m2ts_metadata_descriptor_del(metad);
}
}
break;
default:
GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] skipping descriptor (0x%x) not supported\n", tag));
break;
}
}
data += len+2;
pos += len+2;
if (desc_len < len+2) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] Invalid PMT es descriptor size for PID %d\n", pid ) );
break;
}
desc_len-=len+2;
}
if (es && !es->stream_type) {
gf_free(es);
es = NULL;
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] Private Stream type (0x%x) for PID %d not supported\n", stream_type, pid ) );
}
if (!es) continue;
if (ts->ess[pid]) {
//this is component reuse across programs, overwrite the previously declared stream ...
if (status & GF_M2TS_TABLE_FOUND) {
GF_LOG(GF_LOG_INFO, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d reused across programs %d and %d, not completely supported\n", pid, ts->ess[pid]->program->number, es->program->number ) );
//add stream to program but don't reassign the pid table until the stream is playing (>GF_M2TS_PES_FRAMING_SKIP)
gf_list_add(pmt->program->streams, es);
if (!(es->flags & GF_M2TS_ES_IS_SECTION) ) gf_m2ts_set_pes_framing(pes, GF_M2TS_PES_FRAMING_SKIP);
nb_es++;
//skip assignment below
es = NULL;
}
/*watchout for pmt update - FIXME this likely won't work in most cases*/
else {
GF_M2TS_ES *o_es = ts->ess[es->pid];
if ((o_es->stream_type == es->stream_type)
&& ((o_es->flags & GF_M2TS_ES_STATIC_FLAGS_MASK) == (es->flags & GF_M2TS_ES_STATIC_FLAGS_MASK))
&& (o_es->mpeg4_es_id == es->mpeg4_es_id)
&& ((o_es->flags & GF_M2TS_ES_IS_SECTION) || ((GF_M2TS_PES *)o_es)->lang == ((GF_M2TS_PES *)es)->lang)
) {
gf_free(es);
es = NULL;
} else {
gf_m2ts_es_del(o_es, ts);
ts->ess[es->pid] = NULL;
}
}
}
if (es) {
ts->ess[es->pid] = es;
gf_list_add(pmt->program->streams, es);
if (!(es->flags & GF_M2TS_ES_IS_SECTION) ) gf_m2ts_set_pes_framing(pes, GF_M2TS_PES_FRAMING_SKIP);
nb_es++;
if (es->stream_type == GF_M2TS_VIDEO_HEVC) nb_hevc++;
else if (es->stream_type == GF_M2TS_VIDEO_HEVC_TEMPORAL) nb_hevc_temp++;
else if (es->stream_type == GF_M2TS_VIDEO_SHVC) nb_shvc++;
else if (es->stream_type == GF_M2TS_VIDEO_SHVC_TEMPORAL) nb_shvc_temp++;
else if (es->stream_type == GF_M2TS_VIDEO_MHVC) nb_mhvc++;
else if (es->stream_type == GF_M2TS_VIDEO_MHVC_TEMPORAL) nb_mhvc_temp++;
}
}
//Table 2-139, implied hierarchy indexes
if (nb_hevc_temp + nb_shvc + nb_shvc_temp + nb_mhvc+ nb_mhvc_temp) {
for (i=0; i<gf_list_count(pmt->program->streams); i++) {
GF_M2TS_PES *es = (GF_M2TS_PES *)gf_list_get(pmt->program->streams, i);
if ( !(es->flags & GF_M2TS_ES_IS_PES)) continue;
if (es->depends_on_pid) continue;
switch (es->stream_type) {
case GF_M2TS_VIDEO_HEVC_TEMPORAL:
es->depends_on_pid = 1;
break;
case GF_M2TS_VIDEO_SHVC:
if (!nb_hevc_temp) es->depends_on_pid = 1;
else es->depends_on_pid = 2;
break;
case GF_M2TS_VIDEO_SHVC_TEMPORAL:
es->depends_on_pid = 3;
break;
case GF_M2TS_VIDEO_MHVC:
if (!nb_hevc_temp) es->depends_on_pid = 1;
else es->depends_on_pid = 2;
break;
case GF_M2TS_VIDEO_MHVC_TEMPORAL:
if (!nb_hevc_temp) es->depends_on_pid = 2;
else es->depends_on_pid = 3;
break;
}
}
}
if (nb_es) {
u32 i;
//translate hierarchy descriptors indexes into PIDs - check whether the PMT-index rules are the same for HEVC
for (i=0; i<gf_list_count(pmt->program->streams); i++) {
GF_M2TS_PES *an_es = NULL;
GF_M2TS_PES *es = (GF_M2TS_PES *)gf_list_get(pmt->program->streams, i);
if ( !(es->flags & GF_M2TS_ES_IS_PES)) continue;
if (!es->depends_on_pid) continue;
//fixme: we are not always assured that hierarchy_layer_index matches the stream index...
//+1 is because our first stream is the PMT
an_es = (GF_M2TS_PES *)gf_list_get(pmt->program->streams, es->depends_on_pid);
if (an_es) {
es->depends_on_pid = an_es->pid;
} else {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[M2TS] Wrong dependency index in hierarchy descriptor, assuming non-scalable stream\n"));
es->depends_on_pid = 0;
}
}
evt_type = (status&GF_M2TS_TABLE_FOUND) ? GF_M2TS_EVT_PMT_FOUND : GF_M2TS_EVT_PMT_UPDATE;
if (ts->on_event) ts->on_event(ts, evt_type, pmt->program);
} else {
/* if we found no new ES it's simply a repeat of the PMT */
if (ts->on_event) ts->on_event(ts, GF_M2TS_EVT_PMT_REPEAT, pmt->program);
}
}
static void gf_m2ts_process_pat(GF_M2TS_Demuxer *ts, GF_M2TS_SECTION_ES *ses, GF_List *sections, u8 table_id, u16 ex_table_id, u8 version_number, u8 last_section_number, u32 status)
{
GF_M2TS_Program *prog;
GF_M2TS_SECTION_ES *pmt;
u32 i, nb_progs, evt_type;
u32 nb_sections;
u32 data_size;
unsigned char *data;
GF_M2TS_Section *section;
/*wait for the last section */
if (!(status&GF_M2TS_TABLE_END)) return;
/*skip if already received*/
if (status&GF_M2TS_TABLE_REPEAT) {
if (ts->on_event) ts->on_event(ts, GF_M2TS_EVT_PAT_REPEAT, NULL);
return;
}
nb_sections = gf_list_count(sections);
if (nb_sections > 1) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("PAT on multiple sections not supported\n"));
}
section = (GF_M2TS_Section *)gf_list_get(sections, 0);
data = section->data;
data_size = section->data_size;
if (!(status&GF_M2TS_TABLE_UPDATE) && gf_list_count(ts->programs)) {
if (ts->pat->demux_restarted) {
ts->pat->demux_restarted = 0;
} else {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("Multiple different PAT on single TS found, ignoring new PAT declaration (table id %d - extended table id %d)\n", table_id, ex_table_id));
}
return;
}
nb_progs = data_size / 4;
for (i=0; i<nb_progs; i++) {
u16 number, pid;
number = (data[0]<<8) | data[1];
pid = (data[2]&0x1f)<<8 | data[3];
data += 4;
if (number==0) {
if (!ts->nit) {
ts->nit = gf_m2ts_section_filter_new(gf_m2ts_process_nit, 0);
}
} else if (!pid) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("Broken PAT found reserved PID 0, ignoring\n", pid));
} else if (! ts->ess[pid]) {
GF_SAFEALLOC(prog, GF_M2TS_Program);
if (!prog) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("Fail to allocate program for pid %d\n", pid));
return;
}
prog->streams = gf_list_new();
prog->pmt_pid = pid;
prog->number = number;
prog->ts = ts;
gf_list_add(ts->programs, prog);
GF_SAFEALLOC(pmt, GF_M2TS_SECTION_ES);
if (!pmt) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("Fail to allocate pmt filter for pid %d\n", pid));
return;
}
pmt->flags = GF_M2TS_ES_IS_SECTION;
gf_list_add(prog->streams, pmt);
pmt->pid = prog->pmt_pid;
pmt->program = prog;
ts->ess[pmt->pid] = (GF_M2TS_ES *)pmt;
pmt->sec = gf_m2ts_section_filter_new(gf_m2ts_process_pmt, 0);
}
}
evt_type = (status&GF_M2TS_TABLE_UPDATE) ? GF_M2TS_EVT_PAT_UPDATE : GF_M2TS_EVT_PAT_FOUND;
if (ts->on_event) ts->on_event(ts, evt_type, NULL);
}
static void gf_m2ts_process_cat(GF_M2TS_Demuxer *ts, GF_M2TS_SECTION_ES *ses, GF_List *sections, u8 table_id, u16 ex_table_id, u8 version_number, u8 last_section_number, u32 status)
{
u32 evt_type;
/*
GF_M2TS_Program *prog;
GF_M2TS_SECTION_ES *pmt;
u32 i, nb_progs;
u32 nb_sections;
u32 data_size;
unsigned char *data;
GF_M2TS_Section *section;
*/
/*wait for the last section */
if (!(status&GF_M2TS_TABLE_END)) return;
/*skip if already received*/
if (status&GF_M2TS_TABLE_REPEAT) {
if (ts->on_event) ts->on_event(ts, GF_M2TS_EVT_CAT_REPEAT, NULL);
return;
}
/*
nb_sections = gf_list_count(sections);
if (nb_sections > 1) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("CAT on multiple sections not supported\n"));
}
section = (GF_M2TS_Section *)gf_list_get(sections, 0);
data = section->data;
data_size = section->data_size;
nb_progs = data_size / 4;
for (i=0; i<nb_progs; i++) {
u16 number, pid;
number = (data[0]<<8) | data[1];
pid = (data[2]&0x1f)<<8 | data[3];
data += 4;
if (number==0) {
if (!ts->nit) {
ts->nit = gf_m2ts_section_filter_new(gf_m2ts_process_nit, 0);
}
} else {
GF_SAFEALLOC(prog, GF_M2TS_Program);
prog->streams = gf_list_new();
prog->pmt_pid = pid;
prog->number = number;
gf_list_add(ts->programs, prog);
GF_SAFEALLOC(pmt, GF_M2TS_SECTION_ES);
pmt->flags = GF_M2TS_ES_IS_SECTION;
gf_list_add(prog->streams, pmt);
pmt->pid = prog->pmt_pid;
pmt->program = prog;
ts->ess[pmt->pid] = (GF_M2TS_ES *)pmt;
pmt->sec = gf_m2ts_section_filter_new(gf_m2ts_process_pmt, 0);
}
}
*/
evt_type = (status&GF_M2TS_TABLE_UPDATE) ? GF_M2TS_EVT_CAT_UPDATE : GF_M2TS_EVT_CAT_FOUND;
if (ts->on_event) ts->on_event(ts, evt_type, NULL);
}
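/*rebuild the 33-bit PTS/DTS from its 5-byte encoding: bits 32..30 in byte 0, bits 29..15 in bytes 1-2, bits 14..0 in bytes 3-4, skipping the marker bit that follows each group (hence the >>1 shifts)*/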
u64 gf_m2ts_get_pts(unsigned char *data)
{
u64 pts;
u32 val;
pts = (u64)((data[0] >> 1) & 0x07) << 30;
val = (data[1] << 8) | data[2];
pts |= (u64)(val >> 1) << 15;
val = (data[3] << 8) | data[4];
pts |= (u64)(val >> 1);
return pts;
}
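/*parse a PES header (data points right after the 00 00 01 start code): stream_id(8), PES_packet_length(16), two flag bytes, PES_header_data_length(8), then optional PTS/DTS fields - only the fields used by the demuxer are decoded, the rest is skipped*/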
void gf_m2ts_pes_header(GF_M2TS_PES *pes, unsigned char *data, u32 data_size, GF_M2TS_PESHeader *pesh)
{
u32 has_pts, has_dts;
u32 len_check;
memset(pesh, 0, sizeof(GF_M2TS_PESHeader));
len_check = 0;
pesh->id = data[0];
pesh->pck_len = (data[1]<<8) | data[2];
/*
2bits
scrambling_control = gf_bs_read_int(bs,2);
priority = gf_bs_read_int(bs,1);
*/
pesh->data_alignment = (data[3] & 0x4) ? 1 : 0;
/*
copyright = gf_bs_read_int(bs,1);
original = gf_bs_read_int(bs,1);
*/
has_pts = (data[4]&0x80);
has_dts = has_pts ? (data[4]&0x40) : 0;
/*
ESCR_flag = gf_bs_read_int(bs,1);
ES_rate_flag = gf_bs_read_int(bs,1);
DSM_flag = gf_bs_read_int(bs,1);
additional_copy_flag = gf_bs_read_int(bs,1);
prev_crc_flag = gf_bs_read_int(bs,1);
extension_flag = gf_bs_read_int(bs,1);
*/
pesh->hdr_data_len = data[5];
data += 6;
if (has_pts) {
pesh->PTS = gf_m2ts_get_pts(data);
data+=5;
len_check += 5;
}
if (has_dts) {
pesh->DTS = gf_m2ts_get_pts(data);
//data+=5;
len_check += 5;
} else {
pesh->DTS = pesh->PTS;
}
if (len_check < pesh->hdr_data_len) {
GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d Skipping %d bytes in pes header\n", pes->pid, pesh->hdr_data_len - len_check));
} else if (len_check > pesh->hdr_data_len) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d Wrong pes_header_data_length field %d bytes - read %d\n", pes->pid, pesh->hdr_data_len, len_check));
}
if ((pesh->PTS<90000) && ((s32)pesh->DTS<0)) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d Wrong DTS %d negative for PTS %d - forcing to 0\n", pes->pid, pesh->DTS, pesh->PTS));
pesh->DTS=0;
}
}
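/*decode the buffered TEMI timeline descriptor into pes->temi_tc; the result is delivered to the app when the PES packet carrying it is flushed*/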
static void gf_m2ts_store_temi(GF_M2TS_Demuxer *ts, GF_M2TS_PES *pes)
{
GF_BitStream *bs = gf_bs_new(pes->temi_tc_desc, pes->temi_tc_desc_len, GF_BITSTREAM_READ);
u32 has_timestamp = gf_bs_read_int(bs, 2);
Bool has_ntp = (Bool) gf_bs_read_int(bs, 1);
/*u32 has_ptp = */gf_bs_read_int(bs, 1);
/*u32 has_timecode = */gf_bs_read_int(bs, 2);
memset(&pes->temi_tc, 0, sizeof(GF_M2TS_TemiTimecodeDescriptor));
pes->temi_tc.force_reload = gf_bs_read_int(bs, 1);
pes->temi_tc.is_paused = gf_bs_read_int(bs, 1);
pes->temi_tc.is_discontinuity = gf_bs_read_int(bs, 1);
gf_bs_read_int(bs, 7);
pes->temi_tc.timeline_id = gf_bs_read_int(bs, 8);
if (has_timestamp) {
pes->temi_tc.media_timescale = gf_bs_read_u32(bs);
if (has_timestamp==2)
pes->temi_tc.media_timestamp = gf_bs_read_u64(bs);
else
pes->temi_tc.media_timestamp = gf_bs_read_u32(bs);
}
if (has_ntp) {
pes->temi_tc.ntp = gf_bs_read_u64(bs);
}
gf_bs_del(bs);
pes->temi_tc_desc_len = 0;
pes->temi_pending = 1;
}
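/*called once a complete PES packet has been reassembled: checks the 00 00 01 start code, parses the PES header, dispatches timing/TEMI events and hands the payload to the reframer*/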
void gf_m2ts_flush_pes(GF_M2TS_Demuxer *ts, GF_M2TS_PES *pes)
{
GF_M2TS_PESHeader pesh;
if (!ts) return;
/*we need at least a full, valid start code and PES header !!*/
if ((pes->pck_data_len >= 4) && !pes->pck_data[0] && !pes->pck_data[1] && (pes->pck_data[2] == 0x1)) {
u32 len;
Bool has_pes_header = GF_TRUE;
u32 stream_id = pes->pck_data[3];
Bool same_pts = GF_FALSE;
switch (stream_id) {
case GF_M2_STREAMID_PROGRAM_STREAM_MAP:
case GF_M2_STREAMID_PADDING:
case GF_M2_STREAMID_PRIVATE_2:
case GF_M2_STREAMID_ECM:
case GF_M2_STREAMID_EMM:
case GF_M2_STREAMID_PROGRAM_STREAM_DIRECTORY:
case GF_M2_STREAMID_DSMCC:
case GF_M2_STREAMID_H222_TYPE_E:
has_pes_header = GF_FALSE;
break;
}
if (has_pes_header) {
/*OK read header*/
gf_m2ts_pes_header(pes, pes->pck_data + 3, pes->pck_data_len - 3, &pesh);
/*send PES timing*/
if (ts->notify_pes_timing) {
GF_M2TS_PES_PCK pck;
memset(&pck, 0, sizeof(GF_M2TS_PES_PCK));
pck.PTS = pesh.PTS;
pck.DTS = pesh.DTS;
pck.stream = pes;
if (pes->rap) pck.flags |= GF_M2TS_PES_PCK_RAP;
pes->pes_end_packet_number = ts->pck_number;
if (ts->on_event) ts->on_event(ts, GF_M2TS_EVT_PES_TIMING, &pck);
}
GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d Got PES header DTS %d PTS %d\n", pes->pid, pesh.DTS, pesh.PTS));
if (pesh.PTS) {
if (pesh.PTS == pes->PTS) {
same_pts = GF_TRUE;
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d - same PTS "LLU" for two consecutive PES packets \n", pes->pid, pes->PTS));
}
#ifndef GPAC_DISABLE_LOG
/*FIXME - this test should only be done for non bi-directionally coded media
else if (pesh.PTS < pes->PTS) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d - PTS "LLU" less than previous packet PTS "LLU"\n", pes->pid, pesh.PTS, pes->PTS) );
}
*/
#endif
pes->PTS = pesh.PTS;
#ifndef GPAC_DISABLE_LOG
{
if (pes->DTS && (pesh.DTS == pes->DTS)) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d - same DTS "LLU" for two consecutive PES packets \n", pes->pid, pes->DTS));
}
if (pesh.DTS < pes->DTS) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d - DTS "LLU" less than previous DTS "LLU"\n", pes->pid, pesh.DTS, pes->DTS));
}
}
#endif
pes->DTS = pesh.DTS;
}
/*no PTSs were coded, same time*/
else if (!pesh.hdr_data_len) {
same_pts = GF_TRUE;
}
/*3-byte start-code + 6 bytes header + hdr extensions*/
len = 9 + pesh.hdr_data_len;
} else {
/*3-byte start-code + 1 byte streamid*/
len = 4;
memset(&pesh, 0, sizeof(pesh));
}
if ((u8) pes->pck_data[3]==0xfa) {
GF_M2TS_SL_PCK sl_pck;
GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] SL Packet in PES for %d - ES ID %d\n", pes->pid, pes->mpeg4_es_id));
if (pes->pck_data_len > len) {
sl_pck.data = (char *)pes->pck_data + len;
sl_pck.data_len = pes->pck_data_len - len;
sl_pck.stream = (GF_M2TS_ES *)pes;
if (ts->on_event) ts->on_event(ts, GF_M2TS_EVT_SL_PCK, &sl_pck);
} else {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] Bad SL Packet size: (%d indicated < %d header)\n", pes->pid, pes->pck_data_len, len));
}
} else if (pes->reframe) {
u32 remain = 0;
u32 offset = len;
if (pesh.pck_len && (pesh.pck_len-3-pesh.hdr_data_len != pes->pck_data_len-len)) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d PES payload size %d but received %d bytes\n", pes->pid, (u32) ( pesh.pck_len-3-pesh.hdr_data_len), pes->pck_data_len-len));
}
//copy over the remaining of previous PES payload before start of this PES payload
if (pes->prev_data_len) {
if (pes->prev_data_len < len) {
offset = len - pes->prev_data_len;
memcpy(pes->pck_data + offset, pes->prev_data, pes->prev_data_len);
} else {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d PES reassembly buffer overflow (%d bytes not processed from previous PES) - discarding prev data\n", pes->pid, pes->prev_data_len ));
}
}
if (!pes->temi_pending && pes->temi_tc_desc_len) {
gf_m2ts_store_temi(ts, pes);
}
if (pes->temi_pending) {
pes->temi_pending = 0;
pes->temi_tc.pes_pts = pes->PTS;
if (ts->on_event)
ts->on_event(ts, GF_M2TS_EVT_TEMI_TIMECODE, &pes->temi_tc);
}
if (! ts->seek_mode)
remain = pes->reframe(ts, pes, same_pts, pes->pck_data+offset, pes->pck_data_len-offset, &pesh);
//CLEANUP alloc stuff
if (pes->prev_data) gf_free(pes->prev_data);
pes->prev_data = NULL;
pes->prev_data_len = 0;
if (remain) {
pes->prev_data = gf_malloc(sizeof(char)*remain);
assert(pes->pck_data_len >= remain);
memcpy(pes->prev_data, pes->pck_data + pes->pck_data_len - remain, remain);
pes->prev_data_len = remain;
}
}
} else if (pes->pck_data_len) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] PES %d: Bad PES Header, discarding packet (maybe stream is encrypted ?)\n", pes->pid));
}
pes->pck_data_len = 0;
pes->pes_len = 0;
pes->rap = 0;
}
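/*push one TS payload into the PES reassembly buffer, handling duplicate packets and continuity-counter discontinuities, and flushing when a payload_start is seen or the announced PES length is reached*/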
static void gf_m2ts_process_pes(GF_M2TS_Demuxer *ts, GF_M2TS_PES *pes, GF_M2TS_Header *hdr, unsigned char *data, u32 data_size, GF_M2TS_AdaptationField *paf)
{
u8 expect_cc;
Bool disc=0;
Bool flush_pes = 0;
/*duplicated packet, NOT A DISCONTINUITY, we should discard the packet - however we may encounter this configuration in DASH at segment boundaries.
If payload start is set, ignore duplication*/
if (hdr->continuity_counter==pes->cc) {
if (!hdr->payload_start || (hdr->adaptation_field!=3) ) {
GF_LOG(GF_LOG_INFO, GF_LOG_CONTAINER, ("[MPEG-2 TS] PES %d: Duplicated Packet found (CC %d) - skipping\n", pes->pid, pes->cc));
return;
}
} else {
expect_cc = (pes->cc<0) ? hdr->continuity_counter : (pes->cc + 1) & 0xf;
if (expect_cc != hdr->continuity_counter)
disc = 1;
}
pes->cc = hdr->continuity_counter;
if (disc) {
if (pes->flags & GF_M2TS_ES_IGNORE_NEXT_DISCONTINUITY) {
pes->flags &= ~GF_M2TS_ES_IGNORE_NEXT_DISCONTINUITY;
disc = 0;
}
if (disc) {
if (hdr->payload_start) {
if (pes->pck_data_len) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] PES %d: Packet discontinuity (%d expected - got %d) - may have lost end of previous PES\n", pes->pid, expect_cc, hdr->continuity_counter));
}
} else {
if (pes->pck_data_len) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] PES %d: Packet discontinuity (%d expected - got %d) - trashing PES packet\n", pes->pid, expect_cc, hdr->continuity_counter));
}
pes->pck_data_len = 0;
pes->pes_len = 0;
pes->cc = -1;
return;
}
}
}
if (!pes->reframe) return;
if (hdr->payload_start) {
flush_pes = 1;
pes->pes_start_packet_number = ts->pck_number;
pes->before_last_pcr_value = pes->program->before_last_pcr_value;
pes->before_last_pcr_value_pck_number = pes->program->before_last_pcr_value_pck_number;
pes->last_pcr_value = pes->program->last_pcr_value;
pes->last_pcr_value_pck_number = pes->program->last_pcr_value_pck_number;
} else if (pes->pes_len && (pes->pck_data_len + data_size == pes->pes_len + 6)) {
/* 6 = startcode+stream_id+length*/
/*reassemble pes*/
if (pes->pck_data_len + data_size > pes->pck_alloc_len) {
pes->pck_alloc_len = pes->pck_data_len + data_size;
pes->pck_data = (u8*)gf_realloc(pes->pck_data, pes->pck_alloc_len);
}
memcpy(pes->pck_data+pes->pck_data_len, data, data_size);
pes->pck_data_len += data_size;
/*force discard*/
data_size = 0;
flush_pes = 1;
}
/*PES first fragment: flush previous packet*/
if (flush_pes && pes->pck_data_len) {
gf_m2ts_flush_pes(ts, pes);
if (!data_size) return;
}
/*we need to wait for first packet of PES*/
if (!pes->pck_data_len && !hdr->payload_start) {
GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d: Waiting for PES header, trashing data\n", hdr->pid));
return;
}
/*reassemble*/
if (pes->pck_data_len + data_size > pes->pck_alloc_len ) {
pes->pck_alloc_len = pes->pck_data_len + data_size;
pes->pck_data = (u8*)gf_realloc(pes->pck_data, pes->pck_alloc_len);
}
memcpy(pes->pck_data + pes->pck_data_len, data, data_size);
pes->pck_data_len += data_size;
if (paf && paf->random_access_indicator) pes->rap = 1;
if (hdr->payload_start && !pes->pes_len && (pes->pck_data_len>=6)) {
pes->pes_len = (pes->pck_data[4]<<8) | pes->pck_data[5];
GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d: Got PES packet len %d\n", pes->pid, pes->pes_len));
if (pes->pes_len + 6 == pes->pck_data_len) {
gf_m2ts_flush_pes(ts, pes);
}
}
}
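/*parse the adaptation field; when present, PCR = PCR_base*300 + PCR_ext with PCR_base a 33-bit counter at 90 kHz and PCR_ext a 9-bit extension at 27 MHz*/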
static void gf_m2ts_get_adaptation_field(GF_M2TS_Demuxer *ts, GF_M2TS_AdaptationField *paf, unsigned char *data, u32 size, u32 pid)
{
unsigned char *af_extension;
paf->discontinuity_indicator = (data[0] & 0x80) ? 1 : 0;
paf->random_access_indicator = (data[0] & 0x40) ? 1 : 0;
paf->priority_indicator = (data[0] & 0x20) ? 1 : 0;
paf->PCR_flag = (data[0] & 0x10) ? 1 : 0;
paf->OPCR_flag = (data[0] & 0x8) ? 1 : 0;
paf->splicing_point_flag = (data[0] & 0x4) ? 1 : 0;
paf->transport_private_data_flag = (data[0] & 0x2) ? 1 : 0;
paf->adaptation_field_extension_flag = (data[0] & 0x1) ? 1 : 0;
af_extension = data + 1;
if (paf->PCR_flag == 1) {
u32 base = ((u32)data[1] << 24) | ((u32)data[2] << 16) | ((u32)data[3] << 8) | (u32)data[4];
u64 PCR = (u64) base;
paf->PCR_base = (PCR << 1) | (data[5] >> 7);
paf->PCR_ext = ((data[5] & 1) << 8) | data[6];
af_extension += 6;
}
if (paf->adaptation_field_extension_flag) {
u32 afext_bytes;
Bool ltw_flag, pwr_flag, seamless_flag, af_desc_not_present;
if (paf->OPCR_flag) {
af_extension += 6;
}
if (paf->splicing_point_flag) {
af_extension += 1;
}
if (paf->transport_private_data_flag) {
u32 priv_bytes = af_extension[0];
af_extension += 1 + priv_bytes;
}
afext_bytes = af_extension[0];
ltw_flag = af_extension[1] & 0x80 ? 1 : 0;
pwr_flag = af_extension[1] & 0x40 ? 1 : 0;
seamless_flag = af_extension[1] & 0x20 ? 1 : 0;
af_desc_not_present = af_extension[1] & 0x10 ? 1 : 0;
af_extension += 2;
if (!afext_bytes) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d: Bad Adaptation Extension found\n", pid));
return;
}
afext_bytes-=1;
if (ltw_flag) {
af_extension += 2;
if (afext_bytes<2) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d: Bad Adaptation Extension found\n", pid));
return;
}
afext_bytes-=2;
}
if (pwr_flag) {
af_extension += 3;
if (afext_bytes<3) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d: Bad Adaptation Extension found\n", pid));
return;
}
afext_bytes-=3;
}
if (seamless_flag) {
af_extension += 3;
if (afext_bytes<3) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d: Bad Adaptation Extension found\n", pid));
return;
}
afext_bytes-=3;
}
if (! af_desc_not_present) {
while (afext_bytes) {
GF_BitStream *bs;
char *desc;
u8 desc_tag = af_extension[0];
u8 desc_len = af_extension[1];
if (!desc_len || (u32) desc_len+2 > afext_bytes) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d: Bad Adaptation Descriptor found (tag %d) size is %d but only %d bytes available\n", pid, desc_tag, desc_len, afext_bytes));
break;
}
desc = (char *) af_extension+2;
bs = gf_bs_new(desc, desc_len, GF_BITSTREAM_READ);
switch (desc_tag) {
case GF_M2TS_AFDESC_LOCATION_DESCRIPTOR:
{
Bool use_base_temi_url;
char URL[255];
GF_M2TS_TemiLocationDescriptor temi_loc;
memset(&temi_loc, 0, sizeof(GF_M2TS_TemiLocationDescriptor) );
temi_loc.reload_external = gf_bs_read_int(bs, 1);
temi_loc.is_announce = gf_bs_read_int(bs, 1);
temi_loc.is_splicing = gf_bs_read_int(bs, 1);
use_base_temi_url = gf_bs_read_int(bs, 1);
gf_bs_read_int(bs, 5); //reserved
temi_loc.timeline_id = gf_bs_read_int(bs, 7);
if (!use_base_temi_url) {
char *_url = URL;
u8 scheme = gf_bs_read_int(bs, 8);
u8 url_len = gf_bs_read_int(bs, 8);
switch (scheme) {
case 1:
strcpy(URL, "http://");
_url = URL+7;
break;
case 2:
strcpy(URL, "https://");
_url = URL+8;
break;
}
gf_bs_read_data(bs, _url, url_len);
_url[url_len] = 0;
}
temi_loc.external_URL = URL;
GF_LOG(GF_LOG_INFO, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d AF Location descriptor found - URL %s\n", pid, URL));
if (ts->on_event) ts->on_event(ts, GF_M2TS_EVT_TEMI_LOCATION, &temi_loc);
}
break;
case GF_M2TS_AFDESC_TIMELINE_DESCRIPTOR:
if (ts->ess[pid] && (ts->ess[pid]->flags & GF_M2TS_ES_IS_PES)) {
GF_M2TS_PES *pes = (GF_M2TS_PES *) ts->ess[pid];
if (pes->temi_tc_desc_len)
gf_m2ts_store_temi(ts, pes);
if (pes->temi_tc_desc_alloc_size < desc_len) {
pes->temi_tc_desc = gf_realloc(pes->temi_tc_desc, desc_len);
pes->temi_tc_desc_alloc_size = desc_len;
}
memcpy(pes->temi_tc_desc, desc, desc_len);
pes->temi_tc_desc_len = desc_len;
GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d AF Timeline descriptor found\n", pid));
}
break;
}
gf_bs_del(bs);
af_extension += 2+desc_len;
afext_bytes -= 2+desc_len;
}
}
}
GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d: Adaptation Field found: Discontinuity %d - RAP %d - PCR: "LLD"\n", pid, paf->discontinuity_indicator, paf->random_access_indicator, paf->PCR_flag ? paf->PCR_base * 300 + paf->PCR_ext : 0));
}
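/*process one 188-byte TS packet: 4-byte header (sync 0x47, error/payload_start/priority flags, 13-bit PID, scrambling and adaptation field controls, 4-bit continuity counter), optional adaptation field, then payload routed to the section or PES handlers*/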
static GF_Err gf_m2ts_process_packet(GF_M2TS_Demuxer *ts, unsigned char *data)
{
GF_M2TS_ES *es;
GF_M2TS_Header hdr;
GF_M2TS_AdaptationField af, *paf;
u32 payload_size, af_size;
u32 pos = 0;
ts->pck_number++;
/* read TS packet header*/
hdr.sync = data[0];
if (hdr.sync != 0x47) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] TS Packet %d does not start with sync marker\n", ts->pck_number));
return GF_CORRUPTED_DATA;
}
hdr.error = (data[1] & 0x80) ? 1 : 0;
hdr.payload_start = (data[1] & 0x40) ? 1 : 0;
hdr.priority = (data[1] & 0x20) ? 1 : 0;
hdr.pid = ( (data[1]&0x1f) << 8) | data[2];
hdr.scrambling_ctrl = (data[3] >> 6) & 0x3;
hdr.adaptation_field = (data[3] >> 4) & 0x3;
hdr.continuity_counter = data[3] & 0xf;
if (hdr.error) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] TS Packet %d has error (PID could be %d)\n", ts->pck_number, hdr.pid));
return GF_CORRUPTED_DATA;
}
//#if DEBUG_TS_PACKET
GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] TS Packet %d PID %d CC %d Encrypted %d\n", ts->pck_number, hdr.pid, hdr.continuity_counter, hdr.scrambling_ctrl));
//#endif
if (hdr.scrambling_ctrl) {
//TODO add decyphering
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] TS Packet %d is scrambled - not supported\n", ts->pck_number, hdr.pid));
return GF_NOT_SUPPORTED;
}
paf = NULL;
payload_size = 184;
pos = 4;
switch (hdr.adaptation_field) {
/*adaptation+data*/
case 3:
af_size = data[4];
if (af_size>183) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] TS Packet %d AF field larger than 183 !\n", ts->pck_number));
//error
return GF_CORRUPTED_DATA;
}
paf = ⁡
memset(paf, 0, sizeof(GF_M2TS_AdaptationField));
//this will stop you when processing invalid (yet existing) mpeg2ts streams in debug
assert( af_size<=183);
if (af_size>183)
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] TS Packet %d Detected wrong adaption field size %u when control value is 3\n", ts->pck_number, af_size));
if (af_size) gf_m2ts_get_adaptation_field(ts, paf, data+5, af_size, hdr.pid);
pos += 1+af_size;
payload_size = 183 - af_size;
break;
/*adaptation only - still process in case of PCR*/
case 2:
af_size = data[4];
if (af_size != 183) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] TS Packet %d AF size is %d when it must be 183 for AF type 2\n", ts->pck_number, af_size));
return GF_CORRUPTED_DATA;
}
paf = ⁡
memset(paf, 0, sizeof(GF_M2TS_AdaptationField));
gf_m2ts_get_adaptation_field(ts, paf, data+5, af_size, hdr.pid);
payload_size = 0;
/*no payload and no PCR, return*/
if (!paf->PCR_flag)
return GF_OK;
break;
/*reserved*/
case 0:
return GF_OK;
default:
break;
}
data += pos;
/*PAT*/
if (hdr.pid == GF_M2TS_PID_PAT) {
gf_m2ts_gather_section(ts, ts->pat, NULL, &hdr, data, payload_size);
return GF_OK;
} else if (hdr.pid == GF_M2TS_PID_CAT) {
gf_m2ts_gather_section(ts, ts->cat, NULL, &hdr, data, payload_size);
return GF_OK;
}
es = ts->ess[hdr.pid];
if (paf && paf->PCR_flag) {
if (!es) {
u32 i, j;
for(i=0; i<gf_list_count(ts->programs); i++) {
GF_M2TS_PES *first_pes = NULL;
GF_M2TS_Program *program = (GF_M2TS_Program *)gf_list_get(ts->programs,i);
if(program->pcr_pid != hdr.pid) continue;
for (j=0; j<gf_list_count(program->streams); j++) {
GF_M2TS_PES *pes = (GF_M2TS_PES *) gf_list_get(program->streams, j);
if (pes->flags & GF_M2TS_INHERIT_PCR) {
ts->ess[hdr.pid] = (GF_M2TS_ES *) pes;
pes->flags |= GF_M2TS_FAKE_PCR;
break;
}
if (pes->flags & GF_M2TS_ES_IS_PES) {
first_pes = pes;
}
}
//none found, use the first media stream as a PCR destination - Q: is it legal to have PCR-only streams not declared in the PMT ?
if (!es && first_pes) {
es = (GF_M2TS_ES *) first_pes;
first_pes->flags |= GF_M2TS_FAKE_PCR;
}
break;
}
if (!es)
es = ts->ess[hdr.pid];
}
if (es) {
GF_M2TS_PES_PCK pck;
s64 prev_diff_in_us;
Bool discontinuity;
s32 cc = -1;
if (es->flags & GF_M2TS_FAKE_PCR) {
cc = es->program->pcr_cc;
es->program->pcr_cc = hdr.continuity_counter;
}
else if (es->flags & GF_M2TS_ES_IS_PES) cc = ((GF_M2TS_PES*)es)->cc;
else if (((GF_M2TS_SECTION_ES*)es)->sec) cc = ((GF_M2TS_SECTION_ES*)es)->sec->cc;
discontinuity = paf->discontinuity_indicator;
if ((cc>=0) && es->program->before_last_pcr_value) {
//no increment of CC if AF only packet
if (hdr.adaptation_field == 2) {
if (hdr.continuity_counter != cc) {
discontinuity = GF_TRUE;
}
} else if (hdr.continuity_counter != ((cc + 1) & 0xF)) {
discontinuity = GF_TRUE;
}
}
memset(&pck, 0, sizeof(GF_M2TS_PES_PCK));
prev_diff_in_us = (s64) (es->program->last_pcr_value /27- es->program->before_last_pcr_value/27);
es->program->before_last_pcr_value = es->program->last_pcr_value;
es->program->before_last_pcr_value_pck_number = es->program->last_pcr_value_pck_number;
es->program->last_pcr_value_pck_number = ts->pck_number;
es->program->last_pcr_value = paf->PCR_base * 300 + paf->PCR_ext;
if (!es->program->last_pcr_value) es->program->last_pcr_value = 1;
GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d PCR found "LLU" ("LLU" at 90kHz) - PCR diff is %d us\n", hdr.pid, es->program->last_pcr_value, es->program->last_pcr_value/300, (s32) (es->program->last_pcr_value - es->program->before_last_pcr_value)/27 ));
pck.PTS = es->program->last_pcr_value;
pck.stream = (GF_M2TS_PES *)es;
//try to ignore all discontinuities that are less than 200 ms (seen in some HLS setup ...)
if (discontinuity) {
s64 diff_in_us = (s64) (es->program->last_pcr_value - es->program->before_last_pcr_value) / 27;
u64 diff = ABS(diff_in_us - prev_diff_in_us);
if ((diff_in_us<0) && (diff_in_us >= -200000)) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d new PCR, with discontinuity signaled, is less than previously received PCR (diff %d us) but not too large, trying to ignore discontinuity\n", hdr.pid, diff_in_us));
}
//ignore the PCR discontinuity indicator if the new PCR is larger than the previously received PCR and the PCR increment differs from the previous increment by less than 200 ms
else if ((diff_in_us > 0) && (diff < 200000)) {
GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d PCR discontinuity signaled but diff is small (diff %d us - PCR diff %d vs prev PCR diff %d) - ignore it\n", hdr.pid, diff, diff_in_us, prev_diff_in_us));
} else if (paf->discontinuity_indicator) {
GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d PCR discontinuity signaled (diff %d us - PCR diff %d vs prev PCR diff %d)\n", hdr.pid, diff, diff_in_us, prev_diff_in_us));
pck.flags = GF_M2TS_PES_PCK_DISCONTINUITY;
} else {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d PCR discontinuity not signaled (diff %d us - PCR diff %d vs prev PCR diff %d)\n", hdr.pid, diff, diff_in_us, prev_diff_in_us));
pck.flags = GF_M2TS_PES_PCK_DISCONTINUITY;
}
}
else if ( (es->program->last_pcr_value < es->program->before_last_pcr_value) ) {
s64 diff_in_us = (s64) (es->program->last_pcr_value - es->program->before_last_pcr_value) / 27;
//if less than 200 ms before PCR loop at the last PCR, this is a PCR loop
if (GF_M2TS_MAX_PCR - es->program->before_last_pcr_value < 5400000 /*2*2700000*/) {
GF_LOG(GF_LOG_INFO, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d PCR loop found from "LLU" to "LLU" \n", hdr.pid, es->program->before_last_pcr_value, es->program->last_pcr_value));
} else if ((diff_in_us<0) && (diff_in_us >= -200000)) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d new PCR, without discontinuity signaled, is less than previously received PCR (diff %d us) but not too large, trying to ignore discontinuity\n", hdr.pid, diff_in_us));
} else {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d PCR found "LLU" is less than previously received PCR "LLU" (PCR diff %g sec) but no discontinuity signaled\n", hdr.pid, es->program->last_pcr_value, es->program->before_last_pcr_value, (GF_M2TS_MAX_PCR - es->program->before_last_pcr_value + es->program->last_pcr_value) / 27000000.0));
pck.flags = GF_M2TS_PES_PCK_DISCONTINUITY;
}
}
if (pck.flags & GF_M2TS_PES_PCK_DISCONTINUITY) {
gf_m2ts_reset_parsers_for_program(ts, es->program);
}
if (ts->on_event) {
ts->on_event(ts, GF_M2TS_EVT_PES_PCR, &pck);
}
}
}
/*check for DVB reserved PIDs*/
if (!es) {
if (hdr.pid == GF_M2TS_PID_SDT_BAT_ST) {
gf_m2ts_gather_section(ts, ts->sdt, NULL, &hdr, data, payload_size);
return GF_OK;
} else if (hdr.pid == GF_M2TS_PID_NIT_ST) {
/*ignore them, unused at application level*/
gf_m2ts_gather_section(ts, ts->nit, NULL, &hdr, data, payload_size);
return GF_OK;
} else if (hdr.pid == GF_M2TS_PID_EIT_ST_CIT) {
/* ignore EIT messages for the moment */
gf_m2ts_gather_section(ts, ts->eit, NULL, &hdr, data, payload_size);
return GF_OK;
} else if (hdr.pid == GF_M2TS_PID_TDT_TOT_ST) {
gf_m2ts_gather_section(ts, ts->tdt_tot, NULL, &hdr, data, payload_size);
} else {
/* ignore packet */
}
} else if (es->flags & GF_M2TS_ES_IS_SECTION) { /* The stream uses sections to carry its payload */
GF_M2TS_SECTION_ES *ses = (GF_M2TS_SECTION_ES *)es;
if (ses->sec) gf_m2ts_gather_section(ts, ses->sec, ses, &hdr, data, payload_size);
} else {
GF_M2TS_PES *pes = (GF_M2TS_PES *)es;
/* regular stream using PES packets */
if (pes->reframe && payload_size) gf_m2ts_process_pes(ts, pes, &hdr, data, payload_size, paf);
}
return GF_OK;
}
GF_EXPORT
GF_Err gf_m2ts_process_data(GF_M2TS_Demuxer *ts, u8 *data, u32 data_size)
{
GF_Err e=GF_OK;
u32 pos, pck_size;
Bool is_align = 1;
if (ts->buffer_size) {
//we are sync, copy remaining bytes
if ( (ts->buffer[0]==0x47) && (ts->buffer_size<200)) {
u32 pck_size = ts->prefix_present ? 192 : 188;
if (ts->alloc_size < 200) {
ts->alloc_size = 200;
ts->buffer = (char*)gf_realloc(ts->buffer, sizeof(char)*ts->alloc_size);
}
memcpy(ts->buffer + ts->buffer_size, data, pck_size - ts->buffer_size);
e |= gf_m2ts_process_packet(ts, (unsigned char *)ts->buffer);
data += (pck_size - ts->buffer_size);
data_size = data_size - (pck_size - ts->buffer_size);
}
//not sync, copy over the complete buffer
else {
if (ts->alloc_size < ts->buffer_size+data_size) {
ts->alloc_size = ts->buffer_size+data_size;
ts->buffer = (char*)gf_realloc(ts->buffer, sizeof(char)*ts->alloc_size);
}
memcpy(ts->buffer + ts->buffer_size, data, sizeof(char)*data_size);
ts->buffer_size += data_size;
is_align = 0;
data = ts->buffer;
data_size = ts->buffer_size;
}
}
/*sync input data*/
pos = gf_m2ts_sync(ts, data, data_size, is_align);
if (pos==data_size) {
if (is_align) {
if (ts->alloc_size<data_size) {
ts->buffer = (char*)gf_realloc(ts->buffer, sizeof(char)*data_size);
ts->alloc_size = data_size;
}
memcpy(ts->buffer, data, sizeof(char)*data_size);
ts->buffer_size = data_size;
}
return GF_OK;
}
pck_size = ts->prefix_present ? 192 : 188;
for (;;) {
/*wait for a complete packet*/
if (data_size < pos + pck_size) {
ts->buffer_size = data_size - pos;
data += pos;
if (!ts->buffer_size) {
return e;
}
assert(ts->buffer_size<pck_size);
if (is_align) {
u32 s = ts->buffer_size;
if (s<200) s = 200;
if (ts->alloc_size < s) {
ts->alloc_size = s;
ts->buffer = (char*)gf_realloc(ts->buffer, sizeof(char)*ts->alloc_size);
}
memcpy(ts->buffer, data, sizeof(char)*ts->buffer_size);
} else {
memmove(ts->buffer, data, sizeof(char)*ts->buffer_size);
}
return e;
}
/*process*/
e |= gf_m2ts_process_packet(ts, (unsigned char *)data + pos);
pos += pck_size;
}
return e;
}
//unused
#if 0
GF_ESD *gf_m2ts_get_esd(GF_M2TS_ES *es)
{
GF_ESD *esd;
u32 k, esd_count;
esd = NULL;
if (es->program->pmt_iod && es->program->pmt_iod->ESDescriptors) {
esd_count = gf_list_count(es->program->pmt_iod->ESDescriptors);
for (k = 0; k < esd_count; k++) {
GF_ESD *esd_tmp = (GF_ESD *)gf_list_get(es->program->pmt_iod->ESDescriptors, k);
if (esd_tmp->ESID != es->mpeg4_es_id) continue;
esd = esd_tmp;
break;
}
}
if (!esd && es->program->additional_ods) {
u32 od_count, od_index;
od_count = gf_list_count(es->program->additional_ods);
for (od_index = 0; od_index < od_count; od_index++) {
GF_ObjectDescriptor *od = (GF_ObjectDescriptor *)gf_list_get(es->program->additional_ods, od_index);
esd_count = gf_list_count(od->ESDescriptors);
for (k = 0; k < esd_count; k++) {
GF_ESD *esd_tmp = (GF_ESD *)gf_list_get(od->ESDescriptors, k);
if (esd_tmp->ESID != es->mpeg4_es_id) continue;
esd = esd_tmp;
break;
}
}
}
return esd;
}
void gf_m2ts_set_segment_switch(GF_M2TS_Demuxer *ts)
{
u32 i;
for (i=0; i<GF_M2TS_MAX_STREAMS; i++) {
GF_M2TS_ES *es = (GF_M2TS_ES *) ts->ess[i];
if (!es) continue;
es->flags |= GF_M2TS_ES_IGNORE_NEXT_DISCONTINUITY;
}
}
#endif
GF_EXPORT
void gf_m2ts_reset_parsers_for_program(GF_M2TS_Demuxer *ts, GF_M2TS_Program *prog)
{
u32 i;
for (i=0; i<GF_M2TS_MAX_STREAMS; i++) {
GF_M2TS_ES *es = (GF_M2TS_ES *) ts->ess[i];
if (!es) continue;
if (prog && (es->program != prog) ) continue;
if (es->flags & GF_M2TS_ES_IS_SECTION) {
GF_M2TS_SECTION_ES *ses = (GF_M2TS_SECTION_ES *)es;
gf_m2ts_section_filter_reset(ses->sec);
} else {
GF_M2TS_PES *pes = (GF_M2TS_PES *)es;
if (!pes || (pes->pid==pes->program->pmt_pid)) continue;
pes->cc = -1;
pes->frame_state = 0;
pes->pck_data_len = 0;
if (pes->prev_data) gf_free(pes->prev_data);
pes->prev_data = NULL;
pes->prev_data_len = 0;
pes->PTS = pes->DTS = 0;
// pes->prev_PTS = 0;
// pes->first_dts = 0;
pes->pes_len = pes->pes_end_packet_number = pes->pes_start_packet_number = 0;
if (pes->buf) gf_free(pes->buf);
pes->buf = NULL;
if (pes->temi_tc_desc) gf_free(pes->temi_tc_desc);
pes->temi_tc_desc = NULL;
pes->temi_tc_desc_len = pes->temi_tc_desc_alloc_size = 0;
pes->before_last_pcr_value = pes->before_last_pcr_value_pck_number = 0;
pes->last_pcr_value = pes->last_pcr_value_pck_number = 0;
if (pes->program->pcr_pid==pes->pid) {
pes->program->last_pcr_value = pes->program->last_pcr_value_pck_number = 0;
pes->program->before_last_pcr_value = pes->program->before_last_pcr_value_pck_number = 0;
}
}
}
}
GF_EXPORT
void gf_m2ts_reset_parsers(GF_M2TS_Demuxer *ts)
{
gf_m2ts_reset_parsers_for_program(ts, NULL);
ts->pck_number = 0;
gf_m2ts_section_filter_reset(ts->cat);
gf_m2ts_section_filter_reset(ts->pat);
gf_m2ts_section_filter_reset(ts->sdt);
gf_m2ts_section_filter_reset(ts->nit);
gf_m2ts_section_filter_reset(ts->eit);
gf_m2ts_section_filter_reset(ts->tdt_tot);
}
#if 0 //unused
u32 gf_m2ts_pes_get_framing_mode(GF_M2TS_PES *pes)
{
if (pes->flags & GF_M2TS_ES_IS_SECTION) {
if (pes->flags & GF_M2TS_ES_IS_SL) {
if ( ((GF_M2TS_SECTION_ES *)pes)->sec->process_section == NULL)
return GF_M2TS_PES_FRAMING_DEFAULT;
}
return GF_M2TS_PES_FRAMING_SKIP_NO_RESET;
}
if (!pes->reframe ) return GF_M2TS_PES_FRAMING_SKIP_NO_RESET;
if (pes->reframe == gf_m2ts_reframe_default) return GF_M2TS_PES_FRAMING_RAW;
if (pes->reframe == gf_m2ts_reframe_reset) return GF_M2TS_PES_FRAMING_SKIP;
return GF_M2TS_PES_FRAMING_DEFAULT;
}
#endif
GF_EXPORT
GF_Err gf_m2ts_set_pes_framing(GF_M2TS_PES *pes, u32 mode)
{
if (!pes) return GF_BAD_PARAM;
GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] Setting pes framing mode of PID %d to %d\n", pes->pid, mode) );
/*ignore request for section PIDs*/
if (pes->flags & GF_M2TS_ES_IS_SECTION) {
if (pes->flags & GF_M2TS_ES_IS_SL) {
if (mode==GF_M2TS_PES_FRAMING_DEFAULT) {
((GF_M2TS_SECTION_ES *)pes)->sec->process_section = gf_m2ts_process_mpeg4section;
} else {
((GF_M2TS_SECTION_ES *)pes)->sec->process_section = NULL;
}
}
return GF_OK;
}
if (pes->pid==pes->program->pmt_pid) return GF_BAD_PARAM;
//if component reuse, disable previous pes
if ((mode > GF_M2TS_PES_FRAMING_SKIP) && (pes->program->ts->ess[pes->pid] != (GF_M2TS_ES *) pes)) {
GF_M2TS_PES *o_pes = (GF_M2TS_PES *) pes->program->ts->ess[pes->pid];
if (o_pes->flags & GF_M2TS_ES_IS_PES)
gf_m2ts_set_pes_framing(o_pes, GF_M2TS_PES_FRAMING_SKIP);
GF_LOG(GF_LOG_INFO, GF_LOG_CONTAINER, ("[MPEG-2 TS] Reassinging PID %d from program %d to program %d\n", pes->pid, o_pes->program->number, pes->program->number) );
pes->program->ts->ess[pes->pid] = (GF_M2TS_ES *) pes;
}
switch (mode) {
case GF_M2TS_PES_FRAMING_RAW:
pes->reframe = gf_m2ts_reframe_default;
break;
case GF_M2TS_PES_FRAMING_SKIP:
pes->reframe = gf_m2ts_reframe_reset;
break;
case GF_M2TS_PES_FRAMING_SKIP_NO_RESET:
pes->reframe = NULL;
break;
case GF_M2TS_PES_FRAMING_DEFAULT:
default:
switch (pes->stream_type) {
case GF_M2TS_VIDEO_MPEG1:
case GF_M2TS_VIDEO_MPEG2:
case GF_M2TS_VIDEO_H264:
case GF_M2TS_VIDEO_SVC:
case GF_M2TS_VIDEO_HEVC:
case GF_M2TS_VIDEO_HEVC_TEMPORAL:
case GF_M2TS_VIDEO_HEVC_MCTS:
case GF_M2TS_VIDEO_SHVC:
case GF_M2TS_VIDEO_SHVC_TEMPORAL:
case GF_M2TS_VIDEO_MHVC:
case GF_M2TS_VIDEO_MHVC_TEMPORAL:
case GF_M2TS_AUDIO_MPEG1:
case GF_M2TS_AUDIO_MPEG2:
case GF_M2TS_AUDIO_AAC:
case GF_M2TS_AUDIO_LATM_AAC:
case GF_M2TS_AUDIO_AC3:
case GF_M2TS_AUDIO_EC3:
//for all our supported codec types, use a reframer filter
pes->reframe = gf_m2ts_reframe_default;
break;
case GF_M2TS_PRIVATE_DATA:
/* TODO: handle DVB subtitle streams */
break;
case GF_M2TS_METADATA_ID3_HLS:
//TODO
pes->reframe = gf_m2ts_reframe_id3_pes;
break;
default:
pes->reframe = gf_m2ts_reframe_default;
break;
}
break;
}
return GF_OK;
}
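/*typical usage sketch - my_on_event and read_chunk are placeholders, not part of this API:
GF_M2TS_Demuxer *ts = gf_m2ts_demux_new();
ts->on_event = my_on_event; //receives GF_M2TS_EVT_* notifications
while (read_chunk(buf, &len))
gf_m2ts_process_data(ts, buf, len);
gf_m2ts_demux_del(ts);
*/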
GF_EXPORT
GF_M2TS_Demuxer *gf_m2ts_demux_new()
{
GF_M2TS_Demuxer *ts;
GF_SAFEALLOC(ts, GF_M2TS_Demuxer);
if (!ts) return NULL;
ts->programs = gf_list_new();
ts->SDTs = gf_list_new();
ts->pat = gf_m2ts_section_filter_new(gf_m2ts_process_pat, 0);
ts->cat = gf_m2ts_section_filter_new(gf_m2ts_process_cat, 0);
ts->sdt = gf_m2ts_section_filter_new(gf_m2ts_process_sdt, 1);
ts->nit = gf_m2ts_section_filter_new(gf_m2ts_process_nit, 0);
ts->eit = gf_m2ts_section_filter_new(NULL/*gf_m2ts_process_eit*/, 1);
ts->tdt_tot = gf_m2ts_section_filter_new(gf_m2ts_process_tdt_tot, 1);
#ifdef GPAC_ENABLE_MPE
gf_dvb_mpe_init(ts);
#endif
ts->nb_prog_pmt_received = 0;
ts->ChannelAppList = gf_list_new();
return ts;
}
GF_EXPORT
void gf_m2ts_demux_dmscc_init(GF_M2TS_Demuxer *ts) {
char temp_dir[GF_MAX_PATH];
u32 length;
GF_Err e;
ts->dsmcc_controler = gf_list_new();
ts->process_dmscc = 1;
strcpy(temp_dir, gf_get_default_cache_directory() );
length = (u32) strlen(temp_dir);
if(temp_dir[length-1] == GF_PATH_SEPARATOR) {
temp_dir[length-1] = 0;
}
ts->dsmcc_root_dir = (char*)gf_calloc(strlen(temp_dir)+strlen("CarouselData")+2,sizeof(char));
sprintf(ts->dsmcc_root_dir,"%s%cCarouselData",temp_dir,GF_PATH_SEPARATOR);
e = gf_mkdir(ts->dsmcc_root_dir);
if(e) {
GF_LOG(GF_LOG_INFO, GF_LOG_CONTAINER, ("[Process DSMCC] Error during the creation of the directory %s \n",ts->dsmcc_root_dir));
}
}
GF_EXPORT
void gf_m2ts_demux_del(GF_M2TS_Demuxer *ts)
{
u32 i;
if (ts->pat) gf_m2ts_section_filter_del(ts->pat);
if (ts->cat) gf_m2ts_section_filter_del(ts->cat);
if (ts->sdt) gf_m2ts_section_filter_del(ts->sdt);
if (ts->nit) gf_m2ts_section_filter_del(ts->nit);
if (ts->eit) gf_m2ts_section_filter_del(ts->eit);
if (ts->tdt_tot) gf_m2ts_section_filter_del(ts->tdt_tot);
for (i=0; i<GF_M2TS_MAX_STREAMS; i++) {
//because of pure PCR streams, an ES might be assigned to 2 PIDs, one for the ES and one for the PCR
if (ts->ess[i] && (ts->ess[i]->pid==i)) gf_m2ts_es_del(ts->ess[i], ts);
}
if (ts->buffer) gf_free(ts->buffer);
while (gf_list_count(ts->programs)) {
GF_M2TS_Program *p = (GF_M2TS_Program *)gf_list_last(ts->programs);
gf_list_rem_last(ts->programs);
gf_list_del(p->streams);
/*reset OD list*/
if (p->additional_ods) {
gf_odf_desc_list_del(p->additional_ods);
gf_list_del(p->additional_ods);
}
if (p->pmt_iod) gf_odf_desc_del((GF_Descriptor *)p->pmt_iod);
if (p->metadata_pointer_descriptor) gf_m2ts_metadata_pointer_descriptor_del(p->metadata_pointer_descriptor);
gf_free(p);
}
gf_list_del(ts->programs);
if (ts->TDT_time) gf_free(ts->TDT_time);
gf_m2ts_reset_sdt(ts);
if (ts->tdt_tot)
gf_list_del(ts->SDTs);
#ifdef GPAC_ENABLE_MPE
gf_dvb_mpe_shutdown(ts);
#endif
if (ts->dsmcc_controler) {
if (gf_list_count(ts->dsmcc_controler)) {
#ifdef GPAC_ENABLE_DSMCC
GF_M2TS_DSMCC_OVERLORD* dsmcc_overlord = (GF_M2TS_DSMCC_OVERLORD*)gf_list_get(ts->dsmcc_controler,0);
gf_cleanup_dir(dsmcc_overlord->root_dir);
gf_rmdir(dsmcc_overlord->root_dir);
gf_m2ts_delete_dsmcc_overlord(dsmcc_overlord);
if(ts->dsmcc_root_dir) {
gf_free(ts->dsmcc_root_dir);
}
#endif
}
gf_list_del(ts->dsmcc_controler);
}
while(gf_list_count(ts->ChannelAppList)) {
#ifdef GPAC_ENABLE_DSMCC
GF_M2TS_CHANNEL_APPLICATION_INFO* ChanAppInfo = (GF_M2TS_CHANNEL_APPLICATION_INFO*)gf_list_get(ts->ChannelAppList,0);
gf_m2ts_delete_channel_application_info(ChanAppInfo);
gf_list_rem(ts->ChannelAppList,0);
#endif
}
gf_list_del(ts->ChannelAppList);
if (ts->dsmcc_root_dir) gf_free(ts->dsmcc_root_dir);
gf_free(ts);
}
#if 0//unused
void gf_m2ts_print_info(GF_M2TS_Demuxer *ts)
{
#ifdef GPAC_ENABLE_MPE
gf_m2ts_print_mpe_info(ts);
#endif
}
#endif
#define M2TS_PROBE_SIZE 188000
static Bool gf_m2ts_probe_buffer(char *buf, u32 size)
{
GF_Err e;
GF_M2TS_Demuxer *ts;
u32 lt;
lt = gf_log_get_tool_level(GF_LOG_CONTAINER);
gf_log_set_tool_level(GF_LOG_CONTAINER, GF_LOG_QUIET);
ts = gf_m2ts_demux_new();
e = gf_m2ts_process_data(ts, buf, size);
if (!ts->pck_number) e = GF_BAD_PARAM;
gf_m2ts_demux_del(ts);
gf_log_set_tool_level(GF_LOG_CONTAINER, lt);
if (e) return GF_FALSE;
return GF_TRUE;
}
GF_EXPORT
Bool gf_m2ts_probe_file(const char *fileName)
{
char buf[M2TS_PROBE_SIZE];
u32 size;
FILE *t;
if (!strncmp(fileName, "gmem://", 7)) {
u8 *mem_address;
if (gf_blob_get_data(fileName, &mem_address, &size) != GF_OK) {
return GF_FALSE;
}
if (size>M2TS_PROBE_SIZE) size = M2TS_PROBE_SIZE;
memcpy(buf, mem_address, size);
} else {
t = gf_fopen(fileName, "rb");
if (!t) return 0;
size = (u32) fread(buf, 1, M2TS_PROBE_SIZE, t);
gf_fclose(t);
if ((s32) size <= 0) return 0;
}
return gf_m2ts_probe_buffer(buf, size);
}
GF_EXPORT
Bool gf_m2ts_probe_data(const u8 *data, u32 size)
{
size /= 188;
size *= 188;
return gf_m2ts_probe_buffer((char *) data, size);
}
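/*write a 33-bit timestamp back into its 5-byte PES encoding; the &= masks preserve the '001x' prefix and the marker bits, and the assert checks the round-trip against gf_m2ts_get_pts*/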
static void rewrite_pts_dts(unsigned char *ptr, u64 TS)
{
ptr[0] &= 0xf1;
ptr[0] |= (unsigned char)((TS&0x1c0000000ULL)>>29);
ptr[1] = (unsigned char)((TS&0x03fc00000ULL)>>22);
ptr[2] &= 0x1;
ptr[2] |= (unsigned char)((TS&0x0003f8000ULL)>>14);
ptr[3] = (unsigned char)((TS&0x000007f80ULL)>>7);
ptr[4] &= 0x1;
ptr[4] |= (unsigned char)((TS&0x00000007fULL)<<1);
assert(((u64)(ptr[0]&0xe)<<29) + ((u64)ptr[1]<<22) + ((u64)(ptr[2]&0xfe)<<14) + ((u64)ptr[3]<<7) + ((ptr[4]&0xfe)>>1) == TS);
}
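/*shift a timestamp by ts_shift, wrapping modulo pcr_mod (2^33 ticks at 90 kHz); the first test handles negative shifts that would otherwise underflow*/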
#define ADJUST_TIMESTAMP(_TS) \
if (_TS < (u64) -ts_shift) _TS = pcr_mod + _TS + ts_shift; \
else _TS = _TS + ts_shift; \
while (_TS > pcr_mod) _TS -= pcr_mod;
GF_EXPORT
GF_Err gf_m2ts_restamp(u8 *buffer, u32 size, s64 ts_shift, u8 *is_pes)
{
u32 done = 0;
u64 pcr_mod;
// if (!ts_shift) return GF_OK;
pcr_mod = 0x80000000;
pcr_mod*=4;
while (done + 188 <= size) {
u8 *pesh;
u8 *pck;
u64 pcr_base=0, pcr_ext=0;
u16 pid;
u8 adaptation_field, adaptation_field_length;
pck = (u8*) buffer+done;
if (pck[0]!=0x47) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[M2TS Restamp] Invalid sync byte %X\n", pck[0]));
return GF_NON_COMPLIANT_BITSTREAM;
}
pid = ((pck[1] & 0x1f) <<8 ) + pck[2];
adaptation_field_length = 0;
adaptation_field = (pck[3] >> 4) & 0x3;
if ((adaptation_field==2) || (adaptation_field==3)) {
adaptation_field_length = pck[4];
if ( pck[5]&0x10 /*PCR_flag*/) {
pcr_base = (((u64)pck[6])<<25) + (pck[7]<<17) + (pck[8]<<9) + (pck[9]<<1) + (pck[10]>>7);
pcr_ext = ((pck[10]&1)<<8) + pck[11];
ADJUST_TIMESTAMP(pcr_base);
pck[6] = (unsigned char)(0xff&(pcr_base>>25));
pck[7] = (unsigned char)(0xff&(pcr_base>>17));
pck[8] = (unsigned char)(0xff&(pcr_base>>9));
pck[9] = (unsigned char)(0xff&(pcr_base>>1));
pck[10] = (unsigned char)(((0x1&pcr_base)<<7) | 0x7e | ((0x100&pcr_ext)>>8));
if (pcr_ext != ((pck[10]&1)<<8) + pck[11]) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[M2TS Restamp] Sanity check failed for PCR restamping\n"));
return GF_IO_ERR;
}
pck[11] = (unsigned char)(0xff&pcr_ext);
}
/*add adaptation_field_length field*/
adaptation_field_length++;
}
if (!is_pes[pid] || !(pck[1]&0x40)) {
done+=188;
continue;
}
pesh = &pck[4+adaptation_field_length];
if ((pesh[0]==0x00) && (pesh[1]==0x00) && (pesh[2]==0x01)) {
Bool has_pts, has_dts;
if ((pesh[6]&0xc0)!=0x80) {
done+=188;
continue;
}
has_pts = (pesh[7]&0x80);
has_dts = has_pts ? (pesh[7]&0x40) : 0;
if (has_pts) {
u64 PTS;
if (((pesh[9]&0xe0)>>4)!=0x2) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[M2TS Restamp] PID %4d: Wrong PES header, PTS decoding: '0010' expected\n", pid));
done+=188;
continue;
}
PTS = gf_m2ts_get_pts(pesh + 9);
ADJUST_TIMESTAMP(PTS);
rewrite_pts_dts(pesh+9, PTS);
}
if (has_dts) {
u64 DTS = gf_m2ts_get_pts(pesh + 14);
ADJUST_TIMESTAMP(DTS);
rewrite_pts_dts(pesh+14, DTS);
}
} else {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[M2TS Restamp] PID %4d: Wrong PES not beginning with start code\n", pid));
}
done+=188;
}
return GF_OK;
}
#endif /*GPAC_DISABLE_MPEG2TS*/
| ./CrossVul/dataset_final_sorted/CWE-416/c/good_1373_0 |
crossvul-cpp_data_good_1007_0 | 404: Not Found | ./CrossVul/dataset_final_sorted/CWE-416/c/good_1007_0 |
crossvul-cpp_data_bad_104_0 | /*
* Simple NUMA memory policy for the Linux kernel.
*
* Copyright 2003,2004 Andi Kleen, SuSE Labs.
* (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
* Subject to the GNU Public License, version 2.
*
* NUMA policy allows the user to give hints in which node(s) memory should
* be allocated.
*
* Support four policies per VMA and per process:
*
* The VMA policy has priority over the process policy for a page fault.
*
* interleave Allocate memory interleaved over a set of nodes,
* with normal fallback if it fails.
* For VMA based allocations this interleaves based on the
* offset into the backing object or offset into the mapping
* for anonymous memory. For process policy a process counter
* is used.
*
* bind Only allocate memory on a specific set of nodes,
* no fallback.
* FIXME: memory is allocated starting with the first node
* to the last. It would be better if bind would truly restrict
* the allocation to memory nodes instead
*
* preferred Try a specific node first before normal fallback.
* As a special case NUMA_NO_NODE here means do the allocation
* on the local CPU. This is normally identical to default,
* but useful to set in a VMA when you have a non default
* process policy.
*
* default Allocate on the local node first, or when on a VMA
* use the process policy. This is what Linux always did
* in a NUMA aware kernel and still does by, ahem, default.
*
* The process policy is applied for most non interrupt memory allocations
* in that process' context. Interrupts ignore the policies and always
* try to allocate on the local CPU. The VMA policy is only applied for memory
* allocations for a VMA in the VM.
*
* Currently there are a few corner cases in swapping where the policy
* is not applied, but the majority should be handled. When process policy
* is used it is not remembered over swap outs/swap ins.
*
* Only the highest zone in the zone hierarchy gets policied. Allocations
* requesting a lower zone just use default policy. This implies that
* on systems with highmem, kernel lowmem allocations don't get policied.
* Same with GFP_DMA allocations.
*
* For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
* all users and remembered even when nobody has memory mapped.
*/
/* Notebook:
fix mmap readahead to honour policy and enable policy for any page cache
object
statistics for bigpages
global policy for page cache? currently it uses process policy. Requires
first item above.
handle mremap for shared memory (currently ignored for the policy)
grows down?
make bind policy root only? It can trigger oom much faster and the
kernel is not always grateful with that.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/task.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/printk.h>
#include <asm/tlbflush.h>
#include <linux/uaccess.h>
#include "internal.h"
/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0) /* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1) /* Invert check for nodemask */
static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;
/* Highest zone. A specific allocation for a zone below that is not
policied. */
enum zone_type policy_zone = 0;
/*
* run-time system-wide default policy => local allocation
*/
static struct mempolicy default_policy = {
.refcnt = ATOMIC_INIT(1), /* never free it */
.mode = MPOL_PREFERRED,
.flags = MPOL_F_LOCAL,
};
static struct mempolicy preferred_node_policy[MAX_NUMNODES];
struct mempolicy *get_task_policy(struct task_struct *p)
{
struct mempolicy *pol = p->mempolicy;
int node;
if (pol)
return pol;
node = numa_node_id();
if (node != NUMA_NO_NODE) {
pol = &preferred_node_policy[node];
/* preferred_node_policy is not initialised early in boot */
if (pol->mode)
return pol;
}
return &default_policy;
}
static const struct mempolicy_operations {
int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
} mpol_ops[MPOL_MAX];
static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
return pol->flags & MPOL_MODE_FLAGS;
}
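/*
 * Worked example (a sketch) for the relative remap below: a user nodemask
 * of {0,2} relative to an allowed set {4,5,6} is first folded onto
 * nodes_weight(rel) == 3 bits, leaving {0,2}, and then mapped onto the
 * set bits of @rel, yielding {4,6}.
 */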
static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
const nodemask_t *rel)
{
nodemask_t tmp;
nodes_fold(tmp, *orig, nodes_weight(*rel));
nodes_onto(*ret, tmp, *rel);
}
static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
{
if (nodes_empty(*nodes))
return -EINVAL;
pol->v.nodes = *nodes;
return 0;
}
static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
{
if (!nodes)
pol->flags |= MPOL_F_LOCAL; /* local allocation */
else if (nodes_empty(*nodes))
return -EINVAL; /* no allowed nodes */
else
pol->v.preferred_node = first_node(*nodes);
return 0;
}
static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
{
if (nodes_empty(*nodes))
return -EINVAL;
pol->v.nodes = *nodes;
return 0;
}
/*
* mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
* any, for the new policy. mpol_new() has already validated the nodes
* parameter with respect to the policy mode and flags. But, we need to
* handle an empty nodemask with MPOL_PREFERRED here.
*
* Must be called holding task's alloc_lock to protect task's mems_allowed
* and mempolicy. May also be called holding the mmap_semaphore for write.
*/
static int mpol_set_nodemask(struct mempolicy *pol,
const nodemask_t *nodes, struct nodemask_scratch *nsc)
{
int ret;
/* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
if (pol == NULL)
return 0;
/* Check N_MEMORY */
nodes_and(nsc->mask1,
cpuset_current_mems_allowed, node_states[N_MEMORY]);
VM_BUG_ON(!nodes);
if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
nodes = NULL; /* explicit local allocation */
else {
if (pol->flags & MPOL_F_RELATIVE_NODES)
mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
else
nodes_and(nsc->mask2, *nodes, nsc->mask1);
if (mpol_store_user_nodemask(pol))
pol->w.user_nodemask = *nodes;
else
pol->w.cpuset_mems_allowed =
cpuset_current_mems_allowed;
}
if (nodes)
ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
else
ret = mpol_ops[pol->mode].create(pol, NULL);
return ret;
}
/*
 * This function just creates a new policy, does some checks and simple
* initialization. You must invoke mpol_set_nodemask() to set nodes.
*/
static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
nodemask_t *nodes)
{
struct mempolicy *policy;
pr_debug("setting mode %d flags %d nodes[0] %lx\n",
mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);
if (mode == MPOL_DEFAULT) {
if (nodes && !nodes_empty(*nodes))
return ERR_PTR(-EINVAL);
return NULL;
}
VM_BUG_ON(!nodes);
/*
* MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
* MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
* All other modes require a valid pointer to a non-empty nodemask.
*/
if (mode == MPOL_PREFERRED) {
if (nodes_empty(*nodes)) {
if (((flags & MPOL_F_STATIC_NODES) ||
(flags & MPOL_F_RELATIVE_NODES)))
return ERR_PTR(-EINVAL);
}
} else if (mode == MPOL_LOCAL) {
if (!nodes_empty(*nodes) ||
(flags & MPOL_F_STATIC_NODES) ||
(flags & MPOL_F_RELATIVE_NODES))
return ERR_PTR(-EINVAL);
mode = MPOL_PREFERRED;
} else if (nodes_empty(*nodes))
return ERR_PTR(-EINVAL);
policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
if (!policy)
return ERR_PTR(-ENOMEM);
atomic_set(&policy->refcnt, 1);
policy->mode = mode;
policy->flags = flags;
return policy;
}
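/*
 * Typical in-kernel pairing of the two-step construction above (a sketch
 * of what do_set_mempolicy() below does):
 *
 *	new = mpol_new(mode, flags, nodes);
 *	task_lock(current);
 *	ret = mpol_set_nodemask(new, nodes, scratch);
 *	task_unlock(current);
 */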
/* Slow path of a mpol destructor. */
void __mpol_put(struct mempolicy *p)
{
if (!atomic_dec_and_test(&p->refcnt))
return;
kmem_cache_free(policy_cache, p);
}
static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
{
}
static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
{
nodemask_t tmp;
if (pol->flags & MPOL_F_STATIC_NODES)
nodes_and(tmp, pol->w.user_nodemask, *nodes);
else if (pol->flags & MPOL_F_RELATIVE_NODES)
mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
else {
nodes_remap(tmp, pol->v.nodes,pol->w.cpuset_mems_allowed,
*nodes);
pol->w.cpuset_mems_allowed = tmp;
}
if (nodes_empty(tmp))
tmp = *nodes;
pol->v.nodes = tmp;
}
static void mpol_rebind_preferred(struct mempolicy *pol,
const nodemask_t *nodes)
{
nodemask_t tmp;
if (pol->flags & MPOL_F_STATIC_NODES) {
int node = first_node(pol->w.user_nodemask);
if (node_isset(node, *nodes)) {
pol->v.preferred_node = node;
pol->flags &= ~MPOL_F_LOCAL;
} else
pol->flags |= MPOL_F_LOCAL;
} else if (pol->flags & MPOL_F_RELATIVE_NODES) {
mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
pol->v.preferred_node = first_node(tmp);
} else if (!(pol->flags & MPOL_F_LOCAL)) {
pol->v.preferred_node = node_remap(pol->v.preferred_node,
pol->w.cpuset_mems_allowed,
*nodes);
pol->w.cpuset_mems_allowed = *nodes;
}
}
/*
* mpol_rebind_policy - Migrate a policy to a different set of nodes
*
* Per-vma policies are protected by mmap_sem. Allocations using per-task
* policies are protected by task->mems_allowed_seq to prevent a premature
* OOM/allocation failure due to parallel nodemask modification.
*/
static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
{
if (!pol)
return;
if (!mpol_store_user_nodemask(pol) &&
nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
return;
mpol_ops[pol->mode].rebind(pol, newmask);
}
/*
* Wrapper for mpol_rebind_policy() that just requires task
* pointer, and updates task mempolicy.
*
* Called with task's alloc_lock held.
*/
void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
{
mpol_rebind_policy(tsk->mempolicy, new);
}
/*
* Rebind each vma in mm to new nodemask.
*
* Call holding a reference to mm. Takes mm->mmap_sem during call.
*/
void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
struct vm_area_struct *vma;
down_write(&mm->mmap_sem);
for (vma = mm->mmap; vma; vma = vma->vm_next)
mpol_rebind_policy(vma->vm_policy, new);
up_write(&mm->mmap_sem);
}
static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
[MPOL_DEFAULT] = {
.rebind = mpol_rebind_default,
},
[MPOL_INTERLEAVE] = {
.create = mpol_new_interleave,
.rebind = mpol_rebind_nodemask,
},
[MPOL_PREFERRED] = {
.create = mpol_new_preferred,
.rebind = mpol_rebind_preferred,
},
[MPOL_BIND] = {
.create = mpol_new_bind,
.rebind = mpol_rebind_nodemask,
},
};
static void migrate_page_add(struct page *page, struct list_head *pagelist,
unsigned long flags);
struct queue_pages {
struct list_head *pagelist;
unsigned long flags;
nodemask_t *nmask;
struct vm_area_struct *prev;
};
/*
* Scan through pages checking if pages follow certain conditions,
* and move them to the pagelist if they do.
*/
static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
unsigned long end, struct mm_walk *walk)
{
struct vm_area_struct *vma = walk->vma;
struct page *page;
struct queue_pages *qp = walk->private;
unsigned long flags = qp->flags;
int nid, ret;
pte_t *pte;
spinlock_t *ptl;
if (pmd_trans_huge(*pmd)) {
ptl = pmd_lock(walk->mm, pmd);
if (pmd_trans_huge(*pmd)) {
page = pmd_page(*pmd);
if (is_huge_zero_page(page)) {
spin_unlock(ptl);
__split_huge_pmd(vma, pmd, addr, false, NULL);
} else {
get_page(page);
spin_unlock(ptl);
lock_page(page);
ret = split_huge_page(page);
unlock_page(page);
put_page(page);
if (ret)
return 0;
}
} else {
spin_unlock(ptl);
}
}
if (pmd_trans_unstable(pmd))
return 0;
retry:
pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
for (; addr != end; pte++, addr += PAGE_SIZE) {
if (!pte_present(*pte))
continue;
page = vm_normal_page(vma, addr, *pte);
if (!page)
continue;
/*
* vm_normal_page() filters out zero pages, but there might
* still be PageReserved pages to skip, perhaps in a VDSO.
*/
if (PageReserved(page))
continue;
nid = page_to_nid(page);
if (node_isset(nid, *qp->nmask) == !!(flags & MPOL_MF_INVERT))
continue;
if (PageTransCompound(page)) {
get_page(page);
pte_unmap_unlock(pte, ptl);
lock_page(page);
ret = split_huge_page(page);
unlock_page(page);
put_page(page);
/* Failed to split -- skip. */
if (ret) {
pte = pte_offset_map_lock(walk->mm, pmd,
addr, &ptl);
continue;
}
goto retry;
}
migrate_page_add(page, qp->pagelist, flags);
}
pte_unmap_unlock(pte - 1, ptl);
cond_resched();
return 0;
}
static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
unsigned long addr, unsigned long end,
struct mm_walk *walk)
{
#ifdef CONFIG_HUGETLB_PAGE
struct queue_pages *qp = walk->private;
unsigned long flags = qp->flags;
int nid;
struct page *page;
spinlock_t *ptl;
pte_t entry;
ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
entry = huge_ptep_get(pte);
if (!pte_present(entry))
goto unlock;
page = pte_page(entry);
nid = page_to_nid(page);
if (node_isset(nid, *qp->nmask) == !!(flags & MPOL_MF_INVERT))
goto unlock;
/* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
if (flags & (MPOL_MF_MOVE_ALL) ||
(flags & MPOL_MF_MOVE && page_mapcount(page) == 1))
isolate_huge_page(page, qp->pagelist);
unlock:
spin_unlock(ptl);
#else
BUG();
#endif
return 0;
}
#ifdef CONFIG_NUMA_BALANCING
/*
* This is used to mark a range of virtual addresses to be inaccessible.
* These are later cleared by a NUMA hinting fault. Depending on these
* faults, pages may be migrated for better NUMA placement.
*
* This is assuming that NUMA faults are handled using PROT_NONE. If
* an architecture makes a different choice, it will need further
* changes to the core.
*/
unsigned long change_prot_numa(struct vm_area_struct *vma,
unsigned long addr, unsigned long end)
{
int nr_updated;
nr_updated = change_protection(vma, addr, end, PAGE_NONE, 0, 1);
if (nr_updated)
count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
return nr_updated;
}
#else
static unsigned long change_prot_numa(struct vm_area_struct *vma,
unsigned long addr, unsigned long end)
{
return 0;
}
#endif /* CONFIG_NUMA_BALANCING */
static int queue_pages_test_walk(unsigned long start, unsigned long end,
struct mm_walk *walk)
{
struct vm_area_struct *vma = walk->vma;
struct queue_pages *qp = walk->private;
unsigned long endvma = vma->vm_end;
unsigned long flags = qp->flags;
if (!vma_migratable(vma))
return 1;
if (endvma > end)
endvma = end;
if (vma->vm_start > start)
start = vma->vm_start;
if (!(flags & MPOL_MF_DISCONTIG_OK)) {
if (!vma->vm_next && vma->vm_end < end)
return -EFAULT;
if (qp->prev && qp->prev->vm_end < vma->vm_start)
return -EFAULT;
}
qp->prev = vma;
if (flags & MPOL_MF_LAZY) {
/* Similar to task_numa_work, skip inaccessible VMAs */
if (!is_vm_hugetlb_page(vma) &&
(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)) &&
!(vma->vm_flags & VM_MIXEDMAP))
change_prot_numa(vma, start, endvma);
return 1;
}
/* queue pages from current vma */
if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
return 0;
return 1;
}
/*
* Walk through page tables and collect pages to be migrated.
*
 * If pages found in a given range are on a set of nodes (determined by
 * @nodes and @flags), they are isolated and queued to the pagelist,
 * which is passed via @private.
*/
static int
queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
nodemask_t *nodes, unsigned long flags,
struct list_head *pagelist)
{
struct queue_pages qp = {
.pagelist = pagelist,
.flags = flags,
.nmask = nodes,
.prev = NULL,
};
struct mm_walk queue_pages_walk = {
.hugetlb_entry = queue_pages_hugetlb,
.pmd_entry = queue_pages_pte_range,
.test_walk = queue_pages_test_walk,
.mm = mm,
.private = &qp,
};
return walk_page_range(start, end, &queue_pages_walk);
}
/*
* Apply policy to a single VMA
* This must be called with the mmap_sem held for writing.
*/
static int vma_replace_policy(struct vm_area_struct *vma,
struct mempolicy *pol)
{
int err;
struct mempolicy *old;
struct mempolicy *new;
pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
vma->vm_start, vma->vm_end, vma->vm_pgoff,
vma->vm_ops, vma->vm_file,
vma->vm_ops ? vma->vm_ops->set_policy : NULL);
new = mpol_dup(pol);
if (IS_ERR(new))
return PTR_ERR(new);
if (vma->vm_ops && vma->vm_ops->set_policy) {
err = vma->vm_ops->set_policy(vma, new);
if (err)
goto err_out;
}
old = vma->vm_policy;
vma->vm_policy = new; /* protected by mmap_sem */
mpol_put(old);
return 0;
err_out:
mpol_put(new);
return err;
}
/* Step 2: apply policy to a range and do splits. */
static int mbind_range(struct mm_struct *mm, unsigned long start,
unsigned long end, struct mempolicy *new_pol)
{
struct vm_area_struct *next;
struct vm_area_struct *prev;
struct vm_area_struct *vma;
int err = 0;
pgoff_t pgoff;
unsigned long vmstart;
unsigned long vmend;
vma = find_vma(mm, start);
if (!vma || vma->vm_start > start)
return -EFAULT;
prev = vma->vm_prev;
if (start > vma->vm_start)
prev = vma;
for (; vma && vma->vm_start < end; prev = vma, vma = next) {
next = vma->vm_next;
vmstart = max(start, vma->vm_start);
vmend = min(end, vma->vm_end);
if (mpol_equal(vma_policy(vma), new_pol))
continue;
pgoff = vma->vm_pgoff +
((vmstart - vma->vm_start) >> PAGE_SHIFT);
prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
vma->anon_vma, vma->vm_file, pgoff,
new_pol, vma->vm_userfaultfd_ctx);
if (prev) {
vma = prev;
next = vma->vm_next;
if (mpol_equal(vma_policy(vma), new_pol))
continue;
/* vma_merge() joined vma && vma->next, case 8 */
goto replace;
}
if (vma->vm_start != vmstart) {
err = split_vma(vma->vm_mm, vma, vmstart, 1);
if (err)
goto out;
}
if (vma->vm_end != vmend) {
err = split_vma(vma->vm_mm, vma, vmend, 0);
if (err)
goto out;
}
replace:
err = vma_replace_policy(vma, new_pol);
if (err)
goto out;
}
out:
return err;
}
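/*
 * Worked example (a sketch) for mbind_range() above: applying a new policy
 * to the middle of one VMA spanning [A, D), with start == B and end == C,
 * splits at B and then at C, producing [A,B), [B,C) and [C,D); the new
 * policy is installed on [B,C) only.  vma_merge() re-joins neighbours
 * whenever the resulting policies and flags turn out to be equal.
 */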
/* Set the process memory policy */
static long do_set_mempolicy(unsigned short mode, unsigned short flags,
nodemask_t *nodes)
{
struct mempolicy *new, *old;
NODEMASK_SCRATCH(scratch);
int ret;
if (!scratch)
return -ENOMEM;
new = mpol_new(mode, flags, nodes);
if (IS_ERR(new)) {
ret = PTR_ERR(new);
goto out;
}
task_lock(current);
ret = mpol_set_nodemask(new, nodes, scratch);
if (ret) {
task_unlock(current);
mpol_put(new);
goto out;
}
old = current->mempolicy;
current->mempolicy = new;
if (new && new->mode == MPOL_INTERLEAVE)
current->il_prev = MAX_NUMNODES-1;
task_unlock(current);
mpol_put(old);
ret = 0;
out:
NODEMASK_SCRATCH_FREE(scratch);
return ret;
}
/*
* Return nodemask for policy for get_mempolicy() query
*
* Called with task's alloc_lock held
*/
static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
{
nodes_clear(*nodes);
if (p == &default_policy)
return;
switch (p->mode) {
case MPOL_BIND:
/* Fall through */
case MPOL_INTERLEAVE:
*nodes = p->v.nodes;
break;
case MPOL_PREFERRED:
if (!(p->flags & MPOL_F_LOCAL))
node_set(p->v.preferred_node, *nodes);
/* else return empty node mask for local allocation */
break;
default:
BUG();
}
}
static int lookup_node(unsigned long addr)
{
struct page *p;
int err;
err = get_user_pages(addr & PAGE_MASK, 1, 0, &p, NULL);
if (err >= 0) {
err = page_to_nid(p);
put_page(p);
}
return err;
}
/* Retrieve NUMA policy */
static long do_get_mempolicy(int *policy, nodemask_t *nmask,
unsigned long addr, unsigned long flags)
{
int err;
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma = NULL;
struct mempolicy *pol = current->mempolicy;
if (flags &
~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
return -EINVAL;
if (flags & MPOL_F_MEMS_ALLOWED) {
if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
return -EINVAL;
*policy = 0; /* just so it's initialized */
task_lock(current);
*nmask = cpuset_current_mems_allowed;
task_unlock(current);
return 0;
}
if (flags & MPOL_F_ADDR) {
/*
* Do NOT fall back to task policy if the
* vma/shared policy at addr is NULL. We
* want to return MPOL_DEFAULT in this case.
*/
down_read(&mm->mmap_sem);
vma = find_vma_intersection(mm, addr, addr+1);
if (!vma) {
up_read(&mm->mmap_sem);
return -EFAULT;
}
if (vma->vm_ops && vma->vm_ops->get_policy)
pol = vma->vm_ops->get_policy(vma, addr);
else
pol = vma->vm_policy;
} else if (addr)
return -EINVAL;
if (!pol)
pol = &default_policy; /* indicates default behavior */
if (flags & MPOL_F_NODE) {
if (flags & MPOL_F_ADDR) {
err = lookup_node(addr);
if (err < 0)
goto out;
*policy = err;
} else if (pol == current->mempolicy &&
pol->mode == MPOL_INTERLEAVE) {
*policy = next_node_in(current->il_prev, pol->v.nodes);
} else {
err = -EINVAL;
goto out;
}
} else {
*policy = pol == &default_policy ? MPOL_DEFAULT :
pol->mode;
/*
* Internal mempolicy flags must be masked off before exposing
* the policy to userspace.
*/
*policy |= (pol->flags & MPOL_MODE_FLAGS);
}
if (vma) {
		up_read(&current->mm->mmap_sem);
vma = NULL;
}
err = 0;
if (nmask) {
if (mpol_store_user_nodemask(pol)) {
*nmask = pol->w.user_nodemask;
} else {
task_lock(current);
get_policy_nodemask(pol, nmask);
task_unlock(current);
}
}
out:
mpol_cond_put(pol);
if (vma)
		up_read(&current->mm->mmap_sem);
return err;
}
#ifdef CONFIG_MIGRATION
/*
* page migration
*/
static void migrate_page_add(struct page *page, struct list_head *pagelist,
unsigned long flags)
{
/*
* Avoid migrating a page that is shared with others.
*/
if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
if (!isolate_lru_page(page)) {
list_add_tail(&page->lru, pagelist);
inc_node_page_state(page, NR_ISOLATED_ANON +
page_is_file_cache(page));
}
}
}
static struct page *new_node_page(struct page *page, unsigned long node, int **x)
{
if (PageHuge(page))
return alloc_huge_page_node(page_hstate(compound_head(page)),
node);
else
return __alloc_pages_node(node, GFP_HIGHUSER_MOVABLE |
__GFP_THISNODE, 0);
}
/*
* Migrate pages from one node to a target node.
* Returns error or the number of pages not migrated.
*/
static int migrate_to_node(struct mm_struct *mm, int source, int dest,
int flags)
{
nodemask_t nmask;
LIST_HEAD(pagelist);
int err = 0;
nodes_clear(nmask);
node_set(source, nmask);
/*
* This does not "check" the range but isolates all pages that
* need migration. Between passing in the full user address
* space range and MPOL_MF_DISCONTIG_OK, this call can not fail.
*/
VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
flags | MPOL_MF_DISCONTIG_OK, &pagelist);
if (!list_empty(&pagelist)) {
err = migrate_pages(&pagelist, new_node_page, NULL, dest,
MIGRATE_SYNC, MR_SYSCALL);
if (err)
putback_movable_pages(&pagelist);
}
return err;
}
/*
* Move pages between the two nodesets so as to preserve the physical
* layout as much as possible.
*
 * Returns the number of pages that could not be moved.
*/
int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
const nodemask_t *to, int flags)
{
int busy = 0;
int err;
nodemask_t tmp;
err = migrate_prep();
if (err)
return err;
down_read(&mm->mmap_sem);
/*
* Find a 'source' bit set in 'tmp' whose corresponding 'dest'
* bit in 'to' is not also set in 'tmp'. Clear the found 'source'
* bit in 'tmp', and return that <source, dest> pair for migration.
* The pair of nodemasks 'to' and 'from' define the map.
*
* If no pair of bits is found that way, fallback to picking some
* pair of 'source' and 'dest' bits that are not the same. If the
* 'source' and 'dest' bits are the same, this represents a node
* that will be migrating to itself, so no pages need move.
*
* If no bits are left in 'tmp', or if all remaining bits left
* in 'tmp' correspond to the same bit in 'to', return false
* (nothing left to migrate).
*
* This lets us pick a pair of nodes to migrate between, such that
* if possible the dest node is not already occupied by some other
* source node, minimizing the risk of overloading the memory on a
* node that would happen if we migrated incoming memory to a node
* before migrating outgoing memory source that same node.
*
* A single scan of tmp is sufficient. As we go, we remember the
* most recent <s, d> pair that moved (s != d). If we find a pair
* that not only moved, but what's better, moved to an empty slot
* (d is not set in tmp), then we break out then, with that pair.
 * Otherwise when we finish scanning tmp, we at least have the
* most recent <s, d> pair that moved. If we get all the way through
* the scan of tmp without finding any node that moved, much less
* moved to an empty node, then there is nothing left worth migrating.
*/
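	/*
	 * Sketch of node_remap() as used below: it maps the k'th set bit
	 * of 'from' to the k'th set bit of 'to'.  E.g. with from == {2,3,4}
	 * and to == {3,4,5}, s == 2 remaps to d == 3.
	 */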
tmp = *from;
while (!nodes_empty(tmp)) {
int s,d;
int source = NUMA_NO_NODE;
int dest = 0;
for_each_node_mask(s, tmp) {
/*
* do_migrate_pages() tries to maintain the relative
* node relationship of the pages established between
* threads and memory areas.
*
* However if the number of source nodes is not equal to
* the number of destination nodes we can not preserve
* this node relative relationship. In that case, skip
* copying memory from a node that is in the destination
* mask.
*
* Example: [2,3,4] -> [3,4,5] moves everything.
* [0-7] - > [3,4,5] moves only 0,1,2,6,7.
*/
if ((nodes_weight(*from) != nodes_weight(*to)) &&
(node_isset(s, *to)))
continue;
d = node_remap(s, *from, *to);
if (s == d)
continue;
source = s; /* Node moved. Memorize */
dest = d;
/* dest not in remaining from nodes? */
if (!node_isset(dest, tmp))
break;
}
if (source == NUMA_NO_NODE)
break;
node_clear(source, tmp);
err = migrate_to_node(mm, source, dest, flags);
if (err > 0)
busy += err;
if (err < 0)
break;
}
up_read(&mm->mmap_sem);
if (err < 0)
return err;
return busy;
}
/*
* Allocate a new page for page migration based on vma policy.
 * Start by assuming the page is mapped by the same vma that contains
 * @start.  Search forward from there, if not. N.B., this assumes that the
* list of pages handed to migrate_pages()--which is how we get here--
* is in virtual address order.
*/
static struct page *new_page(struct page *page, unsigned long start, int **x)
{
struct vm_area_struct *vma;
unsigned long uninitialized_var(address);
vma = find_vma(current->mm, start);
while (vma) {
address = page_address_in_vma(page, vma);
if (address != -EFAULT)
break;
vma = vma->vm_next;
}
if (PageHuge(page)) {
BUG_ON(!vma);
return alloc_huge_page_noerr(vma, address, 1);
}
/*
* if !vma, alloc_page_vma() will use task or system default policy
*/
return alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL,
vma, address);
}
#else
static void migrate_page_add(struct page *page, struct list_head *pagelist,
unsigned long flags)
{
}
int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
const nodemask_t *to, int flags)
{
return -ENOSYS;
}
static struct page *new_page(struct page *page, unsigned long start, int **x)
{
return NULL;
}
#endif
static long do_mbind(unsigned long start, unsigned long len,
unsigned short mode, unsigned short mode_flags,
nodemask_t *nmask, unsigned long flags)
{
struct mm_struct *mm = current->mm;
struct mempolicy *new;
unsigned long end;
int err;
LIST_HEAD(pagelist);
if (flags & ~(unsigned long)MPOL_MF_VALID)
return -EINVAL;
if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
return -EPERM;
if (start & ~PAGE_MASK)
return -EINVAL;
if (mode == MPOL_DEFAULT)
flags &= ~MPOL_MF_STRICT;
len = (len + PAGE_SIZE - 1) & PAGE_MASK;
end = start + len;
if (end < start)
return -EINVAL;
if (end == start)
return 0;
new = mpol_new(mode, mode_flags, nmask);
if (IS_ERR(new))
return PTR_ERR(new);
if (flags & MPOL_MF_LAZY)
new->flags |= MPOL_F_MOF;
/*
* If we are using the default policy then operation
* on discontinuous address spaces is okay after all
*/
if (!new)
flags |= MPOL_MF_DISCONTIG_OK;
pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
start, start + len, mode, mode_flags,
nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);
if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
err = migrate_prep();
if (err)
goto mpol_out;
}
{
NODEMASK_SCRATCH(scratch);
if (scratch) {
down_write(&mm->mmap_sem);
task_lock(current);
err = mpol_set_nodemask(new, nmask, scratch);
task_unlock(current);
if (err)
up_write(&mm->mmap_sem);
} else
err = -ENOMEM;
NODEMASK_SCRATCH_FREE(scratch);
}
if (err)
goto mpol_out;
err = queue_pages_range(mm, start, end, nmask,
flags | MPOL_MF_INVERT, &pagelist);
if (!err)
err = mbind_range(mm, start, end, new);
if (!err) {
int nr_failed = 0;
if (!list_empty(&pagelist)) {
WARN_ON_ONCE(flags & MPOL_MF_LAZY);
nr_failed = migrate_pages(&pagelist, new_page, NULL,
start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
if (nr_failed)
putback_movable_pages(&pagelist);
}
if (nr_failed && (flags & MPOL_MF_STRICT))
err = -EIO;
} else
putback_movable_pages(&pagelist);
up_write(&mm->mmap_sem);
mpol_out:
mpol_put(new);
return err;
}
/*
* User space interface with variable sized bitmaps for nodelists.
*/
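/*
 * Worked example (a sketch) for get_nodes() below: with maxnode == 17 on a
 * 64-bit kernel, the decrement leaves 16 usable bits, so nlongs == 1 and
 * endmask == 0xffff; only node bits 0..15 survive the copy and any higher
 * bits in the user's first long are masked off.
 */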
/* Copy a node mask from user space. */
static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
unsigned long maxnode)
{
unsigned long k;
unsigned long nlongs;
unsigned long endmask;
--maxnode;
nodes_clear(*nodes);
if (maxnode == 0 || !nmask)
return 0;
if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
return -EINVAL;
nlongs = BITS_TO_LONGS(maxnode);
if ((maxnode % BITS_PER_LONG) == 0)
endmask = ~0UL;
else
endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
	/* When the user specified more nodes than supported, just check
	   that the unsupported part is all zero. */
if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
if (nlongs > PAGE_SIZE/sizeof(long))
return -EINVAL;
for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
unsigned long t;
if (get_user(t, nmask + k))
return -EFAULT;
if (k == nlongs - 1) {
if (t & endmask)
return -EINVAL;
} else if (t)
return -EINVAL;
}
nlongs = BITS_TO_LONGS(MAX_NUMNODES);
endmask = ~0UL;
}
if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
return -EFAULT;
nodes_addr(*nodes)[nlongs-1] &= endmask;
return 0;
}
/* Copy a kernel node mask to user space */
static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
nodemask_t *nodes)
{
unsigned long copy = ALIGN(maxnode-1, 64) / 8;
const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
if (copy > nbytes) {
if (copy > PAGE_SIZE)
return -EINVAL;
if (clear_user((char __user *)mask + nbytes, copy - nbytes))
return -EFAULT;
copy = nbytes;
}
return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
}
SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
unsigned long, mode, const unsigned long __user *, nmask,
unsigned long, maxnode, unsigned, flags)
{
nodemask_t nodes;
int err;
unsigned short mode_flags;
mode_flags = mode & MPOL_MODE_FLAGS;
mode &= ~MPOL_MODE_FLAGS;
if (mode >= MPOL_MAX)
return -EINVAL;
if ((mode_flags & MPOL_F_STATIC_NODES) &&
(mode_flags & MPOL_F_RELATIVE_NODES))
return -EINVAL;
err = get_nodes(&nodes, nmask, maxnode);
if (err)
return err;
return do_mbind(start, len, mode, mode_flags, &nodes, flags);
}
/* Set the process memory policy */
SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
unsigned long, maxnode)
{
int err;
nodemask_t nodes;
unsigned short flags;
flags = mode & MPOL_MODE_FLAGS;
mode &= ~MPOL_MODE_FLAGS;
if ((unsigned int)mode >= MPOL_MAX)
return -EINVAL;
if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
return -EINVAL;
err = get_nodes(&nodes, nmask, maxnode);
if (err)
return err;
return do_set_mempolicy(mode, flags, &nodes);
}
SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
const unsigned long __user *, old_nodes,
const unsigned long __user *, new_nodes)
{
const struct cred *cred = current_cred(), *tcred;
struct mm_struct *mm = NULL;
struct task_struct *task;
nodemask_t task_nodes;
int err;
nodemask_t *old;
nodemask_t *new;
NODEMASK_SCRATCH(scratch);
if (!scratch)
return -ENOMEM;
old = &scratch->mask1;
new = &scratch->mask2;
err = get_nodes(old, old_nodes, maxnode);
if (err)
goto out;
err = get_nodes(new, new_nodes, maxnode);
if (err)
goto out;
/* Find the mm_struct */
rcu_read_lock();
task = pid ? find_task_by_vpid(pid) : current;
if (!task) {
rcu_read_unlock();
err = -ESRCH;
goto out;
}
get_task_struct(task);
err = -EINVAL;
/*
* Check if this process has the right to modify the specified
* process. The right exists if the process has administrative
* capabilities, superuser privileges or the same
* userid as the target process.
*/
tcred = __task_cred(task);
if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
!uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
!capable(CAP_SYS_NICE)) {
rcu_read_unlock();
err = -EPERM;
goto out_put;
}
rcu_read_unlock();
task_nodes = cpuset_mems_allowed(task);
/* Is the user allowed to access the target nodes? */
if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
err = -EPERM;
goto out_put;
}
if (!nodes_subset(*new, node_states[N_MEMORY])) {
err = -EINVAL;
goto out_put;
}
err = security_task_movememory(task);
if (err)
goto out_put;
mm = get_task_mm(task);
put_task_struct(task);
if (!mm) {
err = -EINVAL;
goto out;
}
err = do_migrate_pages(mm, old, new,
capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
mmput(mm);
out:
NODEMASK_SCRATCH_FREE(scratch);
return err;
out_put:
put_task_struct(task);
goto out;
}
/* Retrieve NUMA policy */
SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
unsigned long __user *, nmask, unsigned long, maxnode,
unsigned long, addr, unsigned long, flags)
{
int err;
int uninitialized_var(pval);
nodemask_t nodes;
if (nmask != NULL && maxnode < MAX_NUMNODES)
return -EINVAL;
err = do_get_mempolicy(&pval, &nodes, addr, flags);
if (err)
return err;
if (policy && put_user(pval, policy))
return -EFAULT;
if (nmask)
err = copy_nodes_to_user(nmask, maxnode, &nodes);
return err;
}
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
compat_ulong_t __user *, nmask,
compat_ulong_t, maxnode,
compat_ulong_t, addr, compat_ulong_t, flags)
{
long err;
unsigned long __user *nm = NULL;
unsigned long nr_bits, alloc_size;
DECLARE_BITMAP(bm, MAX_NUMNODES);
nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
if (nmask)
nm = compat_alloc_user_space(alloc_size);
err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
if (!err && nmask) {
unsigned long copy_size;
copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
err = copy_from_user(bm, nm, copy_size);
/* ensure entire bitmap is zeroed */
err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
err |= compat_put_bitmap(nmask, bm, nr_bits);
}
return err;
}
COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
compat_ulong_t, maxnode)
{
unsigned long __user *nm = NULL;
unsigned long nr_bits, alloc_size;
DECLARE_BITMAP(bm, MAX_NUMNODES);
nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
if (nmask) {
if (compat_get_bitmap(bm, nmask, nr_bits))
return -EFAULT;
nm = compat_alloc_user_space(alloc_size);
if (copy_to_user(nm, bm, alloc_size))
return -EFAULT;
}
return sys_set_mempolicy(mode, nm, nr_bits+1);
}
COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
compat_ulong_t, mode, compat_ulong_t __user *, nmask,
compat_ulong_t, maxnode, compat_ulong_t, flags)
{
unsigned long __user *nm = NULL;
unsigned long nr_bits, alloc_size;
nodemask_t bm;
nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
if (nmask) {
if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits))
return -EFAULT;
nm = compat_alloc_user_space(alloc_size);
if (copy_to_user(nm, nodes_addr(bm), alloc_size))
return -EFAULT;
}
return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
}
#endif
struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
unsigned long addr)
{
struct mempolicy *pol = NULL;
if (vma) {
if (vma->vm_ops && vma->vm_ops->get_policy) {
pol = vma->vm_ops->get_policy(vma, addr);
} else if (vma->vm_policy) {
pol = vma->vm_policy;
/*
* shmem_alloc_page() passes MPOL_F_SHARED policy with
* a pseudo vma whose vma->vm_ops=NULL. Take a reference
* count on these policies which will be dropped by
* mpol_cond_put() later
*/
if (mpol_needs_cond_ref(pol))
mpol_get(pol);
}
}
return pol;
}
/*
* get_vma_policy(@vma, @addr)
* @vma: virtual memory area whose policy is sought
* @addr: address in @vma for shared policy lookup
*
* Returns effective policy for a VMA at specified address.
* Falls back to current->mempolicy or system default policy, as necessary.
* Shared policies [those marked as MPOL_F_SHARED] require an extra reference
* count--added by the get_policy() vm_op, as appropriate--to protect against
* freeing by another task. It is the caller's responsibility to free the
* extra reference for shared policies.
*/
static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
unsigned long addr)
{
struct mempolicy *pol = __get_vma_policy(vma, addr);
if (!pol)
pol = get_task_policy(current);
return pol;
}
bool vma_policy_mof(struct vm_area_struct *vma)
{
struct mempolicy *pol;
if (vma->vm_ops && vma->vm_ops->get_policy) {
bool ret = false;
pol = vma->vm_ops->get_policy(vma, vma->vm_start);
if (pol && (pol->flags & MPOL_F_MOF))
ret = true;
mpol_cond_put(pol);
return ret;
}
pol = vma->vm_policy;
if (!pol)
pol = get_task_policy(current);
return pol->flags & MPOL_F_MOF;
}
static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
{
enum zone_type dynamic_policy_zone = policy_zone;
BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
	/*
	 * If policy->v.nodes has movable memory only,
	 * we apply policy only when gfp_zone(gfp) == ZONE_MOVABLE.
	 *
	 * policy->v.nodes is intersected with node_states[N_MEMORY].
	 * So if the following test fails, it implies
	 * policy->v.nodes has movable memory only.
	 */
if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY]))
dynamic_policy_zone = ZONE_MOVABLE;
return zone >= dynamic_policy_zone;
}
/*
* Return a nodemask representing a mempolicy for filtering nodes for
* page allocation
*/
static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
{
/* Lower zones don't get a nodemask applied for MPOL_BIND */
if (unlikely(policy->mode == MPOL_BIND) &&
apply_policy_zone(policy, gfp_zone(gfp)) &&
cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
return &policy->v.nodes;
return NULL;
}
/* Return the node id preferred by the given mempolicy, or the given id */
static int policy_node(gfp_t gfp, struct mempolicy *policy,
int nd)
{
if (policy->mode == MPOL_PREFERRED && !(policy->flags & MPOL_F_LOCAL))
nd = policy->v.preferred_node;
else {
/*
* __GFP_THISNODE shouldn't even be used with the bind policy
* because we might easily break the expectation to stay on the
* requested node and not break the policy.
*/
WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE));
}
return nd;
}
/* Do dynamic interleaving for a process */
static unsigned interleave_nodes(struct mempolicy *policy)
{
unsigned next;
struct task_struct *me = current;
next = next_node_in(me->il_prev, policy->v.nodes);
if (next < MAX_NUMNODES)
me->il_prev = next;
return next;
}
/*
 * Depending on the memory policy, provide a node from which to allocate the
* next slab entry.
*/
unsigned int mempolicy_slab_node(void)
{
struct mempolicy *policy;
int node = numa_mem_id();
if (in_interrupt())
return node;
policy = current->mempolicy;
if (!policy || policy->flags & MPOL_F_LOCAL)
return node;
switch (policy->mode) {
case MPOL_PREFERRED:
/*
* handled MPOL_F_LOCAL above
*/
return policy->v.preferred_node;
case MPOL_INTERLEAVE:
return interleave_nodes(policy);
case MPOL_BIND: {
struct zoneref *z;
/*
* Follow bind policy behavior and start allocation at the
* first node.
*/
struct zonelist *zonelist;
enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
z = first_zones_zonelist(zonelist, highest_zoneidx,
&policy->v.nodes);
return z->zone ? z->zone->node : node;
}
default:
BUG();
}
}
/*
* Do static interleaving for a VMA with known offset @n. Returns the n'th
* node in pol->v.nodes (starting from n=0), wrapping around if n exceeds the
* number of present nodes.
*/
static unsigned offset_il_node(struct mempolicy *pol,
struct vm_area_struct *vma, unsigned long n)
{
unsigned nnodes = nodes_weight(pol->v.nodes);
unsigned target;
int i;
int nid;
if (!nnodes)
return numa_node_id();
target = (unsigned int)n % nnodes;
nid = first_node(pol->v.nodes);
for (i = 0; i < target; i++)
nid = next_node(nid, pol->v.nodes);
return nid;
}
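/*
 * Worked example (a sketch) for the static interleave above: with
 * pol->v.nodes == {1,3,5} (nnodes == 3) and n == 7, target == 7 % 3 == 1,
 * so the walk starts at node 1 and advances once, returning node 3.
 */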
/* Determine a node number for interleave */
static inline unsigned interleave_nid(struct mempolicy *pol,
struct vm_area_struct *vma, unsigned long addr, int shift)
{
if (vma) {
unsigned long off;
/*
* for small pages, there is no difference between
* shift and PAGE_SHIFT, so the bit-shift is safe.
* for huge pages, since vm_pgoff is in units of small
* pages, we need to shift off the always 0 bits to get
* a useful offset.
*/
BUG_ON(shift < PAGE_SHIFT);
off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
off += (addr - vma->vm_start) >> shift;
return offset_il_node(pol, vma, off);
} else
return interleave_nodes(pol);
}
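/*
 * Worked example (a sketch) for interleave_nid() above, assuming 4K base
 * pages and 2M huge pages (shift == 21): vm_pgoff counts 4K units, so it
 * is shifted right by 21 - 12 == 9 to count 2M units, and
 * (addr - vma->vm_start) >> 21 adds the huge-page index within the mapping.
 */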
#ifdef CONFIG_HUGETLBFS
/*
 * huge_node(@vma, @addr, @gfp_flags, @mpol, @nodemask)
* @vma: virtual memory area whose policy is sought
* @addr: address in @vma for shared policy lookup and interleave policy
* @gfp_flags: for requested zone
* @mpol: pointer to mempolicy pointer for reference counted mempolicy
* @nodemask: pointer to nodemask pointer for MPOL_BIND nodemask
*
* Returns a nid suitable for a huge page allocation and a pointer
* to the struct mempolicy for conditional unref after allocation.
 * If the effective policy is 'BIND', returns a pointer to the mempolicy's
* @nodemask for filtering the zonelist.
*
* Must be protected by read_mems_allowed_begin()
*/
int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags,
struct mempolicy **mpol, nodemask_t **nodemask)
{
int nid;
*mpol = get_vma_policy(vma, addr);
*nodemask = NULL; /* assume !MPOL_BIND */
if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
nid = interleave_nid(*mpol, vma, addr,
huge_page_shift(hstate_vma(vma)));
} else {
nid = policy_node(gfp_flags, *mpol, numa_node_id());
if ((*mpol)->mode == MPOL_BIND)
*nodemask = &(*mpol)->v.nodes;
}
return nid;
}
/*
* init_nodemask_of_mempolicy
*
* If the current task's mempolicy is "default" [NULL], return 'false'
* to indicate default policy. Otherwise, extract the policy nodemask
* for 'bind' or 'interleave' policy into the argument nodemask, or
* initialize the argument nodemask to contain the single node for
* 'preferred' or 'local' policy and return 'true' to indicate presence
* of non-default mempolicy.
*
* We don't bother with reference counting the mempolicy [mpol_get/put]
 * because the current task is examining its own mempolicy and a task's
* mempolicy is only ever changed by the task itself.
*
* N.B., it is the caller's responsibility to free a returned nodemask.
*/
bool init_nodemask_of_mempolicy(nodemask_t *mask)
{
struct mempolicy *mempolicy;
int nid;
if (!(mask && current->mempolicy))
return false;
task_lock(current);
mempolicy = current->mempolicy;
switch (mempolicy->mode) {
case MPOL_PREFERRED:
if (mempolicy->flags & MPOL_F_LOCAL)
nid = numa_node_id();
else
nid = mempolicy->v.preferred_node;
init_nodemask_of_node(mask, nid);
break;
case MPOL_BIND:
/* Fall through */
case MPOL_INTERLEAVE:
*mask = mempolicy->v.nodes;
break;
default:
BUG();
}
task_unlock(current);
return true;
}
#endif
/*
* mempolicy_nodemask_intersects
*
* If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
* policy. Otherwise, check for intersection between mask and the policy
 * nodemask for 'bind' or 'interleave' policy. For 'preferred' or 'local'
* policy, always return true since it may allocate elsewhere on fallback.
*
* Takes task_lock(tsk) to prevent freeing of its mempolicy.
*/
bool mempolicy_nodemask_intersects(struct task_struct *tsk,
const nodemask_t *mask)
{
struct mempolicy *mempolicy;
bool ret = true;
if (!mask)
return ret;
task_lock(tsk);
mempolicy = tsk->mempolicy;
if (!mempolicy)
goto out;
switch (mempolicy->mode) {
case MPOL_PREFERRED:
/*
* MPOL_PREFERRED and MPOL_F_LOCAL are only preferred nodes to
* allocate from, they may fallback to other nodes when oom.
* Thus, it's possible for tsk to have allocated memory from
* nodes in mask.
*/
break;
case MPOL_BIND:
case MPOL_INTERLEAVE:
ret = nodes_intersects(mempolicy->v.nodes, *mask);
break;
default:
BUG();
}
out:
task_unlock(tsk);
return ret;
}
/* Allocate a page in interleaved policy.
Own path because it needs to do special accounting. */
static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
unsigned nid)
{
struct page *page;
page = __alloc_pages(gfp, order, nid);
if (page && page_to_nid(page) == nid)
inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
return page;
}
/**
* alloc_pages_vma - Allocate a page for a VMA.
*
* @gfp:
* %GFP_USER user allocation.
* %GFP_KERNEL kernel allocations,
* %GFP_HIGHMEM highmem/user allocations,
* %GFP_FS allocation should not call back into a file system.
* %GFP_ATOMIC don't sleep.
*
 * @order: Order of the GFP allocation.
* @vma: Pointer to VMA or NULL if not available.
* @addr: Virtual Address of the allocation. Must be inside the VMA.
* @node: Which node to prefer for allocation (modulo policy).
* @hugepage: for hugepages try only the preferred node if possible
*
* This function allocates a page from the kernel page pool and applies
* a NUMA policy associated with the VMA or the current process.
* When VMA is not NULL caller must hold down_read on the mmap_sem of the
* mm_struct of the VMA to prevent it from going away. Should be used for
* all allocations for pages that will be mapped into user space. Returns
* NULL when no page can be allocated.
*/
struct page *
alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
unsigned long addr, int node, bool hugepage)
{
struct mempolicy *pol;
struct page *page;
int preferred_nid;
nodemask_t *nmask;
pol = get_vma_policy(vma, addr);
if (pol->mode == MPOL_INTERLEAVE) {
unsigned nid;
nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
mpol_cond_put(pol);
page = alloc_page_interleave(gfp, order, nid);
goto out;
}
if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
int hpage_node = node;
/*
* For hugepage allocation and non-interleave policy which
* allows the current node (or other explicitly preferred
* node) we only try to allocate from the current/preferred
* node and don't fall back to other nodes, as the cost of
* remote accesses would likely offset THP benefits.
*
* If the policy is interleave, or does not allow the current
* node in its nodemask, we allocate the standard way.
*/
if (pol->mode == MPOL_PREFERRED &&
!(pol->flags & MPOL_F_LOCAL))
hpage_node = pol->v.preferred_node;
nmask = policy_nodemask(gfp, pol);
if (!nmask || node_isset(hpage_node, *nmask)) {
mpol_cond_put(pol);
page = __alloc_pages_node(hpage_node,
gfp | __GFP_THISNODE, order);
goto out;
}
}
nmask = policy_nodemask(gfp, pol);
preferred_nid = policy_node(gfp, pol, node);
page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask);
mpol_cond_put(pol);
out:
return page;
}
/**
* alloc_pages_current - Allocate pages.
*
* @gfp:
* %GFP_USER user allocation,
* %GFP_KERNEL kernel allocation,
* %GFP_HIGHMEM highmem allocation,
* %GFP_FS don't call back into a file system.
* %GFP_ATOMIC don't sleep.
* @order: Power of two of allocation size in pages. 0 is a single page.
*
 *	Allocate a page from the kernel page pool. When not in
 *	interrupt context, apply the current process' NUMA policy.
* Returns NULL when no page can be allocated.
*/
struct page *alloc_pages_current(gfp_t gfp, unsigned order)
{
struct mempolicy *pol = &default_policy;
struct page *page;
if (!in_interrupt() && !(gfp & __GFP_THISNODE))
pol = get_task_policy(current);
/*
* No reference counting needed for current->mempolicy
* nor system default_policy
*/
if (pol->mode == MPOL_INTERLEAVE)
page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
else
page = __alloc_pages_nodemask(gfp, order,
policy_node(gfp, pol, numa_node_id()),
policy_nodemask(gfp, pol));
return page;
}
EXPORT_SYMBOL(alloc_pages_current);
int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
{
struct mempolicy *pol = mpol_dup(vma_policy(src));
if (IS_ERR(pol))
return PTR_ERR(pol);
dst->vm_policy = pol;
return 0;
}
/*
* If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
 * rebinds the mempolicy it's copying by calling mpol_rebind_policy()
* with the mems_allowed returned by cpuset_mems_allowed(). This
* keeps mempolicies cpuset relative after its cpuset moves. See
* further kernel/cpuset.c update_nodemask().
*
 * current's mempolicy may be rebound by another task (the task that changes
 * the cpuset's mems), so we needn't do the rebind work for the current task.
*/
/* Slow path of a mempolicy duplicate */
struct mempolicy *__mpol_dup(struct mempolicy *old)
{
struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
if (!new)
return ERR_PTR(-ENOMEM);
/* task's mempolicy is protected by alloc_lock */
if (old == current->mempolicy) {
task_lock(current);
*new = *old;
task_unlock(current);
} else
*new = *old;
if (current_cpuset_is_being_rebound()) {
nodemask_t mems = cpuset_mems_allowed(current);
mpol_rebind_policy(new, &mems);
}
atomic_set(&new->refcnt, 1);
return new;
}
/* Slow path of a mempolicy comparison */
bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
if (!a || !b)
return false;
if (a->mode != b->mode)
return false;
if (a->flags != b->flags)
return false;
if (mpol_store_user_nodemask(a))
if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
return false;
switch (a->mode) {
case MPOL_BIND:
/* Fall through */
case MPOL_INTERLEAVE:
return !!nodes_equal(a->v.nodes, b->v.nodes);
case MPOL_PREFERRED:
return a->v.preferred_node == b->v.preferred_node;
default:
BUG();
return false;
}
}
/*
* Shared memory backing store policy support.
*
* Remember policies even when nobody has shared memory mapped.
* The policies are kept in Red-Black tree linked from the inode.
* They are protected by the sp->lock rwlock, which should be held
* for any accesses to the tree.
*/
/*
* lookup first element intersecting start-end. Caller holds sp->lock for
* reading or for writing
*/
static struct sp_node *
sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
{
struct rb_node *n = sp->root.rb_node;
while (n) {
struct sp_node *p = rb_entry(n, struct sp_node, nd);
if (start >= p->end)
n = n->rb_right;
else if (end <= p->start)
n = n->rb_left;
else
break;
}
if (!n)
return NULL;
for (;;) {
struct sp_node *w = NULL;
struct rb_node *prev = rb_prev(n);
if (!prev)
break;
w = rb_entry(prev, struct sp_node, nd);
if (w->end <= start)
break;
n = prev;
}
return rb_entry(n, struct sp_node, nd);
}
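/*
 * Worked example (a sketch) for sp_lookup() above: with stored ranges
 * [0,5) and [5,10), a lookup for [3,7) may first land on [5,10); the
 * backward walk then returns the leftmost intersecting range, here [0,5).
 */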
/*
* Insert a new shared policy into the list. Caller holds sp->lock for
* writing.
*/
static void sp_insert(struct shared_policy *sp, struct sp_node *new)
{
struct rb_node **p = &sp->root.rb_node;
struct rb_node *parent = NULL;
struct sp_node *nd;
while (*p) {
parent = *p;
nd = rb_entry(parent, struct sp_node, nd);
if (new->start < nd->start)
p = &(*p)->rb_left;
else if (new->end > nd->end)
p = &(*p)->rb_right;
else
BUG();
}
rb_link_node(&new->nd, parent, p);
rb_insert_color(&new->nd, &sp->root);
pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
new->policy ? new->policy->mode : 0);
}
/* Find shared policy intersecting idx */
struct mempolicy *
mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
{
struct mempolicy *pol = NULL;
struct sp_node *sn;
if (!sp->root.rb_node)
return NULL;
read_lock(&sp->lock);
sn = sp_lookup(sp, idx, idx+1);
if (sn) {
mpol_get(sn->policy);
pol = sn->policy;
}
read_unlock(&sp->lock);
return pol;
}
static void sp_free(struct sp_node *n)
{
mpol_put(n->policy);
kmem_cache_free(sn_cache, n);
}
/**
* mpol_misplaced - check whether current page node is valid in policy
*
* @page: page to be checked
* @vma: vm area where page mapped
* @addr: virtual address where page mapped
*
 * Lookup current policy node id for vma, addr and "compare to" page's
* node id.
*
* Returns:
* -1 - not misplaced, page is in the right node
* node - node id where the page should be
*
* Policy determination "mimics" alloc_page_vma().
* Called from fault path where we know the vma and faulting address.
*/
int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
{
struct mempolicy *pol;
struct zoneref *z;
int curnid = page_to_nid(page);
unsigned long pgoff;
int thiscpu = raw_smp_processor_id();
int thisnid = cpu_to_node(thiscpu);
int polnid = -1;
int ret = -1;
BUG_ON(!vma);
pol = get_vma_policy(vma, addr);
if (!(pol->flags & MPOL_F_MOF))
goto out;
switch (pol->mode) {
case MPOL_INTERLEAVE:
BUG_ON(addr >= vma->vm_end);
BUG_ON(addr < vma->vm_start);
pgoff = vma->vm_pgoff;
pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
polnid = offset_il_node(pol, vma, pgoff);
break;
case MPOL_PREFERRED:
if (pol->flags & MPOL_F_LOCAL)
polnid = numa_node_id();
else
polnid = pol->v.preferred_node;
break;
case MPOL_BIND:
/*
* allows binding to multiple nodes.
* use current page if in policy nodemask,
* else select nearest allowed node, if any.
* If no allowed nodes, use current [!misplaced].
*/
if (node_isset(curnid, pol->v.nodes))
goto out;
z = first_zones_zonelist(
node_zonelist(numa_node_id(), GFP_HIGHUSER),
gfp_zone(GFP_HIGHUSER),
&pol->v.nodes);
polnid = z->zone->node;
break;
default:
BUG();
}
/* Migrate the page towards the node whose CPU is referencing it */
if (pol->flags & MPOL_F_MORON) {
polnid = thisnid;
if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
goto out;
}
if (curnid != polnid)
ret = polnid;
out:
mpol_cond_put(pol);
return ret;
}
/*
* Drop the (possibly final) reference to task->mempolicy. It needs to be
* dropped after task->mempolicy is set to NULL so that any allocation done as
* part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
* policy.
*/
void mpol_put_task_policy(struct task_struct *task)
{
struct mempolicy *pol;
task_lock(task);
pol = task->mempolicy;
task->mempolicy = NULL;
task_unlock(task);
mpol_put(pol);
}
static void sp_delete(struct shared_policy *sp, struct sp_node *n)
{
pr_debug("deleting %lx-l%lx\n", n->start, n->end);
rb_erase(&n->nd, &sp->root);
sp_free(n);
}
static void sp_node_init(struct sp_node *node, unsigned long start,
unsigned long end, struct mempolicy *pol)
{
node->start = start;
node->end = end;
node->policy = pol;
}
static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
struct mempolicy *pol)
{
struct sp_node *n;
struct mempolicy *newpol;
n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
if (!n)
return NULL;
newpol = mpol_dup(pol);
if (IS_ERR(newpol)) {
kmem_cache_free(sn_cache, n);
return NULL;
}
newpol->flags |= MPOL_F_SHARED;
sp_node_init(n, start, end, newpol);
return n;
}
/* Replace a policy range. */
static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
unsigned long end, struct sp_node *new)
{
struct sp_node *n;
struct sp_node *n_new = NULL;
struct mempolicy *mpol_new = NULL;
int ret = 0;
restart:
write_lock(&sp->lock);
n = sp_lookup(sp, start, end);
/* Take care of old policies in the same range. */
while (n && n->start < end) {
struct rb_node *next = rb_next(&n->nd);
if (n->start >= start) {
if (n->end <= end)
sp_delete(sp, n);
else
n->start = end;
} else {
/* Old policy spanning whole new range. */
if (n->end > end) {
if (!n_new)
goto alloc_new;
*mpol_new = *n->policy;
atomic_set(&mpol_new->refcnt, 1);
sp_node_init(n_new, end, n->end, mpol_new);
n->end = start;
sp_insert(sp, n_new);
n_new = NULL;
mpol_new = NULL;
break;
} else
n->end = start;
}
if (!next)
break;
n = rb_entry(next, struct sp_node, nd);
}
if (new)
sp_insert(sp, new);
write_unlock(&sp->lock);
ret = 0;
err_out:
if (mpol_new)
mpol_put(mpol_new);
if (n_new)
kmem_cache_free(sn_cache, n_new);
return ret;
alloc_new:
write_unlock(&sp->lock);
ret = -ENOMEM;
n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
if (!n_new)
goto err_out;
mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
if (!mpol_new)
goto err_out;
goto restart;
}
/**
* mpol_shared_policy_init - initialize shared policy for inode
* @sp: pointer to inode shared policy
* @mpol: struct mempolicy to install
*
* Install non-NULL @mpol in inode's shared policy rb-tree.
* On entry, the current task has a reference on a non-NULL @mpol.
* This must be released on exit.
 * This is called during get_inode() calls, so we can use GFP_KERNEL.
*/
void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
{
int ret;
sp->root = RB_ROOT; /* empty tree == default mempolicy */
rwlock_init(&sp->lock);
if (mpol) {
struct vm_area_struct pvma;
struct mempolicy *new;
NODEMASK_SCRATCH(scratch);
if (!scratch)
goto put_mpol;
/* contextualize the tmpfs mount point mempolicy */
new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
if (IS_ERR(new))
goto free_scratch; /* no valid nodemask intersection */
task_lock(current);
ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
task_unlock(current);
if (ret)
goto put_new;
/* Create pseudo-vma that contains just the policy */
memset(&pvma, 0, sizeof(struct vm_area_struct));
pvma.vm_end = TASK_SIZE; /* policy covers entire file */
mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
put_new:
mpol_put(new); /* drop initial ref */
free_scratch:
NODEMASK_SCRATCH_FREE(scratch);
put_mpol:
mpol_put(mpol); /* drop our incoming ref on sb mpol */
}
}
int mpol_set_shared_policy(struct shared_policy *info,
struct vm_area_struct *vma, struct mempolicy *npol)
{
int err;
struct sp_node *new = NULL;
unsigned long sz = vma_pages(vma);
pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
vma->vm_pgoff,
sz, npol ? npol->mode : -1,
npol ? npol->flags : -1,
npol ? nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE);
if (npol) {
new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
if (!new)
return -ENOMEM;
}
err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
if (err && new)
sp_free(new);
return err;
}
/* Free a backing policy store on inode delete. */
void mpol_free_shared_policy(struct shared_policy *p)
{
struct sp_node *n;
struct rb_node *next;
if (!p->root.rb_node)
return;
write_lock(&p->lock);
next = rb_first(&p->root);
while (next) {
n = rb_entry(next, struct sp_node, nd);
next = rb_next(&n->nd);
sp_delete(p, n);
}
write_unlock(&p->lock);
}
#ifdef CONFIG_NUMA_BALANCING
static int __initdata numabalancing_override;
static void __init check_numabalancing_enable(void)
{
bool numabalancing_default = false;
if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
numabalancing_default = true;
/* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
if (numabalancing_override)
set_numabalancing_state(numabalancing_override == 1);
if (num_online_nodes() > 1 && !numabalancing_override) {
pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n",
numabalancing_default ? "Enabling" : "Disabling");
set_numabalancing_state(numabalancing_default);
}
}
static int __init setup_numabalancing(char *str)
{
int ret = 0;
if (!str)
goto out;
if (!strcmp(str, "enable")) {
numabalancing_override = 1;
ret = 1;
} else if (!strcmp(str, "disable")) {
numabalancing_override = -1;
ret = 1;
}
out:
if (!ret)
pr_warn("Unable to parse numa_balancing=\n");
return ret;
}
__setup("numa_balancing=", setup_numabalancing);
#else
static inline void __init check_numabalancing_enable(void)
{
}
#endif /* CONFIG_NUMA_BALANCING */
/* assumes fs == KERNEL_DS */
void __init numa_policy_init(void)
{
nodemask_t interleave_nodes;
unsigned long largest = 0;
int nid, prefer = 0;
policy_cache = kmem_cache_create("numa_policy",
sizeof(struct mempolicy),
0, SLAB_PANIC, NULL);
sn_cache = kmem_cache_create("shared_policy_node",
sizeof(struct sp_node),
0, SLAB_PANIC, NULL);
for_each_node(nid) {
preferred_node_policy[nid] = (struct mempolicy) {
.refcnt = ATOMIC_INIT(1),
.mode = MPOL_PREFERRED,
.flags = MPOL_F_MOF | MPOL_F_MORON,
.v = { .preferred_node = nid, },
};
}
/*
 * Set interleaving policy for system init. Interleaving is only
 * enabled across suitably sized nodes (default is >= 16MB), with a
 * fallback to the largest node if they're all smaller.
*/
nodes_clear(interleave_nodes);
for_each_node_state(nid, N_MEMORY) {
unsigned long total_pages = node_present_pages(nid);
/* Preserve the largest node */
if (largest < total_pages) {
largest = total_pages;
prefer = nid;
}
/* Interleave this node? */
if ((total_pages << PAGE_SHIFT) >= (16 << 20))
node_set(nid, interleave_nodes);
}
/* All too small, use the largest */
if (unlikely(nodes_empty(interleave_nodes)))
node_set(prefer, interleave_nodes);
if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
pr_err("%s: interleaving failed\n", __func__);
check_numabalancing_enable();
}
/* Reset policy of current process to default */
void numa_default_policy(void)
{
do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
}
/*
* Parse and format mempolicy from/to strings
*/
/*
* "local" is implemented internally by MPOL_PREFERRED with MPOL_F_LOCAL flag.
*/
static const char * const policy_modes[] =
{
[MPOL_DEFAULT] = "default",
[MPOL_PREFERRED] = "prefer",
[MPOL_BIND] = "bind",
[MPOL_INTERLEAVE] = "interleave",
[MPOL_LOCAL] = "local",
};
#ifdef CONFIG_TMPFS
/**
* mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
* @str: string containing mempolicy to parse
* @mpol: pointer to struct mempolicy pointer, returned on success.
*
* Format of input:
* <mode>[=<flags>][:<nodelist>]
*
* Return: 0 on success, 1 on failure.
*/
int mpol_parse_str(char *str, struct mempolicy **mpol)
{
struct mempolicy *new = NULL;
unsigned short mode;
unsigned short mode_flags;
nodemask_t nodes;
char *nodelist = strchr(str, ':');
char *flags = strchr(str, '=');
int err = 1;
if (nodelist) {
/* NUL-terminate mode or flags string */
*nodelist++ = '\0';
if (nodelist_parse(nodelist, nodes))
goto out;
if (!nodes_subset(nodes, node_states[N_MEMORY]))
goto out;
} else
nodes_clear(nodes);
if (flags)
*flags++ = '\0'; /* terminate mode string */
for (mode = 0; mode < MPOL_MAX; mode++) {
if (!strcmp(str, policy_modes[mode])) {
break;
}
}
if (mode >= MPOL_MAX)
goto out;
switch (mode) {
case MPOL_PREFERRED:
/*
* Insist on a nodelist of one node only
*/
if (nodelist) {
char *rest = nodelist;
while (isdigit(*rest))
rest++;
if (*rest)
goto out;
}
break;
case MPOL_INTERLEAVE:
/*
* Default to online nodes with memory if no nodelist
*/
if (!nodelist)
nodes = node_states[N_MEMORY];
break;
case MPOL_LOCAL:
/*
* Don't allow a nodelist; mpol_new() checks flags
*/
if (nodelist)
goto out;
mode = MPOL_PREFERRED;
break;
case MPOL_DEFAULT:
/*
* Insist on an empty nodelist
*/
if (!nodelist)
err = 0;
goto out;
case MPOL_BIND:
/*
* Insist on a nodelist
*/
if (!nodelist)
goto out;
}
mode_flags = 0;
if (flags) {
/*
* Currently, we only support two mutually exclusive
* mode flags.
*/
if (!strcmp(flags, "static"))
mode_flags |= MPOL_F_STATIC_NODES;
else if (!strcmp(flags, "relative"))
mode_flags |= MPOL_F_RELATIVE_NODES;
else
goto out;
}
new = mpol_new(mode, mode_flags, &nodes);
if (IS_ERR(new))
goto out;
/*
* Save nodes for mpol_to_str() to show the tmpfs mount options
* for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
*/
if (mode != MPOL_PREFERRED)
new->v.nodes = nodes;
else if (nodelist)
new->v.preferred_node = first_node(nodes);
else
new->flags |= MPOL_F_LOCAL;
/*
* Save nodes for contextualization: this will be used to "clone"
* the mempolicy in a specific context [cpuset] at a later time.
*/
new->w.user_nodemask = nodes;
err = 0;
out:
/* Restore string for error message */
if (nodelist)
*--nodelist = ':';
if (flags)
*--flags = '=';
if (!err)
*mpol = new;
return err;
}
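/*
 * Worked example (illustrative sketch, not part of the original file):
 * parsing the tmpfs mount option "bind=static:0-3". The buffer must be
 * writable, since the parser NUL-splits it in place and restores the
 * separators before returning.
 */
static int __maybe_unused mpol_parse_str_example(void)
{
	struct mempolicy *pol = NULL;
	char str[] = "bind=static:0-3";

	if (mpol_parse_str(str, &pol))
		return -EINVAL;	/* unknown mode, bad flags or nodelist */
	/*
	 * Here pol->mode == MPOL_BIND, pol->flags includes
	 * MPOL_F_STATIC_NODES, and pol->w.user_nodemask holds nodes 0-3
	 * (assuming those nodes have memory online).
	 */
	mpol_put(pol);
	return 0;
}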
#endif /* CONFIG_TMPFS */
/**
* mpol_to_str - format a mempolicy structure for printing
* @buffer: to contain formatted mempolicy string
* @maxlen: length of @buffer
* @pol: pointer to mempolicy to be formatted
*
* Convert @pol into a string. If @buffer is too short, truncate the string.
* Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
* longest flag, "relative", and to display at least a few node ids.
*/
void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
{
char *p = buffer;
nodemask_t nodes = NODE_MASK_NONE;
unsigned short mode = MPOL_DEFAULT;
unsigned short flags = 0;
if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
mode = pol->mode;
flags = pol->flags;
}
switch (mode) {
case MPOL_DEFAULT:
break;
case MPOL_PREFERRED:
if (flags & MPOL_F_LOCAL)
mode = MPOL_LOCAL;
else
node_set(pol->v.preferred_node, nodes);
break;
case MPOL_BIND:
case MPOL_INTERLEAVE:
nodes = pol->v.nodes;
break;
default:
WARN_ON_ONCE(1);
snprintf(p, maxlen, "unknown");
return;
}
p += snprintf(p, maxlen, "%s", policy_modes[mode]);
if (flags & MPOL_MODE_FLAGS) {
p += snprintf(p, buffer + maxlen - p, "=");
/*
* Currently, the only defined flags are mutually exclusive
*/
if (flags & MPOL_F_STATIC_NODES)
p += snprintf(p, buffer + maxlen - p, "static");
else if (flags & MPOL_F_RELATIVE_NODES)
p += snprintf(p, buffer + maxlen - p, "relative");
}
if (!nodes_empty(nodes))
p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
nodemask_pr_args(&nodes));
}
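/*
 * Illustrative sketch (hypothetical helper): the formatter round-trips
 * with mpol_parse_str above; e.g. an MPOL_INTERLEAVE policy over nodes
 * 0-3 with MPOL_F_RELATIVE_NODES is rendered as "interleave=relative:0-3".
 */
static void __maybe_unused mpol_to_str_example(struct mempolicy *pol)
{
	char buf[64];	/* >= 32, as recommended in the kernel-doc above */

	mpol_to_str(buf, sizeof(buf), pol);
	pr_debug("mempolicy: %s\n", buf);
}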
| ./CrossVul/dataset_final_sorted/CWE-416/c/bad_104_0 |
crossvul-cpp_data_bad_832_1 | // SPDX-License-Identifier: GPL-2.0+
#include <linux/io.h>
#include "ipmi_si.h"
static unsigned char intf_mem_inb(const struct si_sm_io *io,
unsigned int offset)
{
return readb((io->addr)+(offset * io->regspacing));
}
static void intf_mem_outb(const struct si_sm_io *io, unsigned int offset,
unsigned char b)
{
writeb(b, (io->addr)+(offset * io->regspacing));
}
static unsigned char intf_mem_inw(const struct si_sm_io *io,
unsigned int offset)
{
return (readw((io->addr)+(offset * io->regspacing)) >> io->regshift)
& 0xff;
}
static void intf_mem_outw(const struct si_sm_io *io, unsigned int offset,
unsigned char b)
{
writeb(b << io->regshift, (io->addr)+(offset * io->regspacing));
}
static unsigned char intf_mem_inl(const struct si_sm_io *io,
unsigned int offset)
{
return (readl((io->addr)+(offset * io->regspacing)) >> io->regshift)
& 0xff;
}
static void intf_mem_outl(const struct si_sm_io *io, unsigned int offset,
unsigned char b)
{
writel(b << io->regshift, (io->addr)+(offset * io->regspacing));
}
#ifdef readq
static unsigned char mem_inq(const struct si_sm_io *io, unsigned int offset)
{
return (readq((io->addr)+(offset * io->regspacing)) >> io->regshift)
& 0xff;
}
static void mem_outq(const struct si_sm_io *io, unsigned int offset,
unsigned char b)
{
writeq((u64)b << io->regshift, (io->addr)+(offset * io->regspacing));
}
#endif
static void mem_region_cleanup(struct si_sm_io *io, int num)
{
unsigned long addr = io->addr_data;
int idx;
for (idx = 0; idx < num; idx++)
release_mem_region(addr + idx * io->regspacing,
io->regsize);
}
static void mem_cleanup(struct si_sm_io *io)
{
if (io->addr) {
iounmap(io->addr);
mem_region_cleanup(io, io->io_size);
}
}
int ipmi_si_mem_setup(struct si_sm_io *io)
{
unsigned long addr = io->addr_data;
int mapsize, idx;
if (!addr)
return -ENODEV;
io->io_cleanup = mem_cleanup;
/*
* Figure out the actual readb/readw/readl/etc routine to use based
* upon the register size.
*/
switch (io->regsize) {
case 1:
io->inputb = intf_mem_inb;
io->outputb = intf_mem_outb;
break;
case 2:
io->inputb = intf_mem_inw;
io->outputb = intf_mem_outw;
break;
case 4:
io->inputb = intf_mem_inl;
io->outputb = intf_mem_outl;
break;
#ifdef readq
case 8:
io->inputb = mem_inq;
io->outputb = mem_outq;
break;
#endif
default:
dev_warn(io->dev, "Invalid register size: %d\n",
io->regsize);
return -EINVAL;
}
/*
* Some BIOSes reserve disjoint memory regions in their ACPI
* tables. This causes problems when trying to request the
* entire region. Therefore we must request each register
* separately.
*/
for (idx = 0; idx < io->io_size; idx++) {
if (request_mem_region(addr + idx * io->regspacing,
io->regsize, DEVICE_NAME) == NULL) {
/* Undo allocations */
mem_region_cleanup(io, idx);
return -EIO;
}
}
/*
* Calculate the total amount of memory to claim. This is an
* unusual looking calculation, but it avoids claiming any
* more memory than it has to. It will claim everything
* between the first address to the end of the last full
* register.
*/
mapsize = ((io->io_size * io->regspacing)
- (io->regspacing - io->regsize));
io->addr = ioremap(addr, mapsize);
if (io->addr == NULL) {
mem_region_cleanup(io, io->io_size);
return -EIO;
}
return 0;
}
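/*
 * Worked example of the mapsize formula above (hypothetical values):
 * with io->io_size = 3 registers, io->regspacing = 4 and io->regsize = 1,
 * the registers sit at addr, addr + 4 and addr + 8, so
 *	mapsize = (3 * 4) - (4 - 1) = 9
 * maps addr .. addr + 8 inclusive -- up to the end of the last register
 * rather than the full 12-byte stride.
 */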
| ./CrossVul/dataset_final_sorted/CWE-416/c/bad_832_1 |
crossvul-cpp_data_bad_5503_0 | /*
* Digital Audio (PCM) abstract layer
* Copyright (c) by Jaroslav Kysela <perex@perex.cz>
* Abramo Bagnara <abramo@alsa-project.org>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/math64.h>
#include <linux/export.h>
#include <sound/core.h>
#include <sound/control.h>
#include <sound/tlv.h>
#include <sound/info.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/timer.h>
#ifdef CONFIG_SND_PCM_XRUN_DEBUG
#define CREATE_TRACE_POINTS
#include "pcm_trace.h"
#else
#define trace_hwptr(substream, pos, in_interrupt)
#define trace_xrun(substream)
#define trace_hw_ptr_error(substream, reason)
#endif
/*
* fill ring buffer with silence
* runtime->silence_start: starting pointer to silence area
* runtime->silence_filled: size filled with silence
* runtime->silence_threshold: threshold from application
* runtime->silence_size: maximal size from application
*
* when runtime->silence_size >= runtime->boundary, the processed area is filled with silence immediately
*/
void snd_pcm_playback_silence(struct snd_pcm_substream *substream, snd_pcm_uframes_t new_hw_ptr)
{
struct snd_pcm_runtime *runtime = substream->runtime;
snd_pcm_uframes_t frames, ofs, transfer;
if (runtime->silence_size < runtime->boundary) {
snd_pcm_sframes_t noise_dist, n;
if (runtime->silence_start != runtime->control->appl_ptr) {
n = runtime->control->appl_ptr - runtime->silence_start;
if (n < 0)
n += runtime->boundary;
if ((snd_pcm_uframes_t)n < runtime->silence_filled)
runtime->silence_filled -= n;
else
runtime->silence_filled = 0;
runtime->silence_start = runtime->control->appl_ptr;
}
if (runtime->silence_filled >= runtime->buffer_size)
return;
noise_dist = snd_pcm_playback_hw_avail(runtime) + runtime->silence_filled;
if (noise_dist >= (snd_pcm_sframes_t) runtime->silence_threshold)
return;
frames = runtime->silence_threshold - noise_dist;
if (frames > runtime->silence_size)
frames = runtime->silence_size;
} else {
if (new_hw_ptr == ULONG_MAX) { /* initialization */
snd_pcm_sframes_t avail = snd_pcm_playback_hw_avail(runtime);
if (avail > runtime->buffer_size)
avail = runtime->buffer_size;
runtime->silence_filled = avail > 0 ? avail : 0;
runtime->silence_start = (runtime->status->hw_ptr +
runtime->silence_filled) %
runtime->boundary;
} else {
ofs = runtime->status->hw_ptr;
frames = new_hw_ptr - ofs;
if ((snd_pcm_sframes_t)frames < 0)
frames += runtime->boundary;
runtime->silence_filled -= frames;
if ((snd_pcm_sframes_t)runtime->silence_filled < 0) {
runtime->silence_filled = 0;
runtime->silence_start = new_hw_ptr;
} else {
runtime->silence_start = ofs;
}
}
frames = runtime->buffer_size - runtime->silence_filled;
}
if (snd_BUG_ON(frames > runtime->buffer_size))
return;
if (frames == 0)
return;
ofs = runtime->silence_start % runtime->buffer_size;
while (frames > 0) {
transfer = ofs + frames > runtime->buffer_size ? runtime->buffer_size - ofs : frames;
if (runtime->access == SNDRV_PCM_ACCESS_RW_INTERLEAVED ||
runtime->access == SNDRV_PCM_ACCESS_MMAP_INTERLEAVED) {
if (substream->ops->silence) {
int err;
err = substream->ops->silence(substream, -1, ofs, transfer);
snd_BUG_ON(err < 0);
} else {
char *hwbuf = runtime->dma_area + frames_to_bytes(runtime, ofs);
snd_pcm_format_set_silence(runtime->format, hwbuf, transfer * runtime->channels);
}
} else {
unsigned int c;
unsigned int channels = runtime->channels;
if (substream->ops->silence) {
for (c = 0; c < channels; ++c) {
int err;
err = substream->ops->silence(substream, c, ofs, transfer);
snd_BUG_ON(err < 0);
}
} else {
size_t dma_csize = runtime->dma_bytes / channels;
for (c = 0; c < channels; ++c) {
char *hwbuf = runtime->dma_area + (c * dma_csize) + samples_to_bytes(runtime, ofs);
snd_pcm_format_set_silence(runtime->format, hwbuf, transfer);
}
}
}
runtime->silence_filled += transfer;
frames -= transfer;
ofs = 0;
}
}
#ifdef CONFIG_SND_DEBUG
void snd_pcm_debug_name(struct snd_pcm_substream *substream,
char *name, size_t len)
{
snprintf(name, len, "pcmC%dD%d%c:%d",
substream->pcm->card->number,
substream->pcm->device,
substream->stream ? 'c' : 'p',
substream->number);
}
EXPORT_SYMBOL(snd_pcm_debug_name);
#endif
#define XRUN_DEBUG_BASIC (1<<0)
#define XRUN_DEBUG_STACK (1<<1) /* dump also stack */
#define XRUN_DEBUG_JIFFIESCHECK (1<<2) /* do jiffies check */
#ifdef CONFIG_SND_PCM_XRUN_DEBUG
#define xrun_debug(substream, mask) \
((substream)->pstr->xrun_debug & (mask))
#else
#define xrun_debug(substream, mask) 0
#endif
#define dump_stack_on_xrun(substream) do { \
if (xrun_debug(substream, XRUN_DEBUG_STACK)) \
dump_stack(); \
} while (0)
static void xrun(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
trace_xrun(substream);
if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE)
snd_pcm_gettime(runtime, (struct timespec *)&runtime->status->tstamp);
snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
if (xrun_debug(substream, XRUN_DEBUG_BASIC)) {
char name[16];
snd_pcm_debug_name(substream, name, sizeof(name));
pcm_warn(substream->pcm, "XRUN: %s\n", name);
dump_stack_on_xrun(substream);
}
}
#ifdef CONFIG_SND_PCM_XRUN_DEBUG
#define hw_ptr_error(substream, in_interrupt, reason, fmt, args...) \
do { \
trace_hw_ptr_error(substream, reason); \
if (xrun_debug(substream, XRUN_DEBUG_BASIC)) { \
pr_err_ratelimited("ALSA: PCM: [%c] " reason ": " fmt, \
(in_interrupt) ? 'Q' : 'P', ##args); \
dump_stack_on_xrun(substream); \
} \
} while (0)
#else /* ! CONFIG_SND_PCM_XRUN_DEBUG */
#define hw_ptr_error(substream, in_interrupt, reason, fmt, args...) do { } while (0)
#endif
int snd_pcm_update_state(struct snd_pcm_substream *substream,
struct snd_pcm_runtime *runtime)
{
snd_pcm_uframes_t avail;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
avail = snd_pcm_playback_avail(runtime);
else
avail = snd_pcm_capture_avail(runtime);
if (avail > runtime->avail_max)
runtime->avail_max = avail;
if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
if (avail >= runtime->buffer_size) {
snd_pcm_drain_done(substream);
return -EPIPE;
}
} else {
if (avail >= runtime->stop_threshold) {
xrun(substream);
return -EPIPE;
}
}
if (runtime->twake) {
if (avail >= runtime->twake)
wake_up(&runtime->tsleep);
} else if (avail >= runtime->control->avail_min)
wake_up(&runtime->sleep);
return 0;
}
static void update_audio_tstamp(struct snd_pcm_substream *substream,
struct timespec *curr_tstamp,
struct timespec *audio_tstamp)
{
struct snd_pcm_runtime *runtime = substream->runtime;
u64 audio_frames, audio_nsecs;
struct timespec driver_tstamp;
if (runtime->tstamp_mode != SNDRV_PCM_TSTAMP_ENABLE)
return;
if (!(substream->ops->get_time_info) ||
(runtime->audio_tstamp_report.actual_type ==
SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)) {
/*
* provide audio timestamp derived from pointer position
* add delay only if requested
*/
audio_frames = runtime->hw_ptr_wrap + runtime->status->hw_ptr;
if (runtime->audio_tstamp_config.report_delay) {
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
audio_frames -= runtime->delay;
else
audio_frames += runtime->delay;
}
audio_nsecs = div_u64(audio_frames * 1000000000LL,
runtime->rate);
*audio_tstamp = ns_to_timespec(audio_nsecs);
}
runtime->status->audio_tstamp = *audio_tstamp;
runtime->status->tstamp = *curr_tstamp;
/*
* re-take a driver timestamp to let apps detect if the reference tstamp
* read by low-level hardware was provided with a delay
*/
snd_pcm_gettime(substream->runtime, (struct timespec *)&driver_tstamp);
runtime->driver_tstamp = driver_tstamp;
}
static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
unsigned int in_interrupt)
{
struct snd_pcm_runtime *runtime = substream->runtime;
snd_pcm_uframes_t pos;
snd_pcm_uframes_t old_hw_ptr, new_hw_ptr, hw_base;
snd_pcm_sframes_t hdelta, delta;
unsigned long jdelta;
unsigned long curr_jiffies;
struct timespec curr_tstamp;
struct timespec audio_tstamp;
int crossed_boundary = 0;
old_hw_ptr = runtime->status->hw_ptr;
/*
* group pointer, time and jiffies reads to allow for more
* accurate correlations/corrections.
* The values are stored at the end of this routine after
* corrections for hw_ptr position
*/
pos = substream->ops->pointer(substream);
curr_jiffies = jiffies;
if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) {
if ((substream->ops->get_time_info) &&
(runtime->audio_tstamp_config.type_requested != SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)) {
substream->ops->get_time_info(substream, &curr_tstamp,
&audio_tstamp,
&runtime->audio_tstamp_config,
&runtime->audio_tstamp_report);
/* re-test in case tstamp type is not supported in hardware and was demoted to DEFAULT */
if (runtime->audio_tstamp_report.actual_type == SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)
snd_pcm_gettime(runtime, (struct timespec *)&curr_tstamp);
} else
snd_pcm_gettime(runtime, (struct timespec *)&curr_tstamp);
}
if (pos == SNDRV_PCM_POS_XRUN) {
xrun(substream);
return -EPIPE;
}
if (pos >= runtime->buffer_size) {
if (printk_ratelimit()) {
char name[16];
snd_pcm_debug_name(substream, name, sizeof(name));
pcm_err(substream->pcm,
"invalid position: %s, pos = %ld, buffer size = %ld, period size = %ld\n",
name, pos, runtime->buffer_size,
runtime->period_size);
}
pos = 0;
}
pos -= pos % runtime->min_align;
trace_hwptr(substream, pos, in_interrupt);
hw_base = runtime->hw_ptr_base;
new_hw_ptr = hw_base + pos;
if (in_interrupt) {
/* we know that one period was processed */
/* delta = "expected next hw_ptr" for in_interrupt != 0 */
delta = runtime->hw_ptr_interrupt + runtime->period_size;
if (delta > new_hw_ptr) {
/* check for double acknowledged interrupts */
hdelta = curr_jiffies - runtime->hw_ptr_jiffies;
if (hdelta > runtime->hw_ptr_buffer_jiffies/2 + 1) {
hw_base += runtime->buffer_size;
if (hw_base >= runtime->boundary) {
hw_base = 0;
crossed_boundary++;
}
new_hw_ptr = hw_base + pos;
goto __delta;
}
}
}
/* new_hw_ptr might be lower than old_hw_ptr in case when */
/* pointer crosses the end of the ring buffer */
if (new_hw_ptr < old_hw_ptr) {
hw_base += runtime->buffer_size;
if (hw_base >= runtime->boundary) {
hw_base = 0;
crossed_boundary++;
}
new_hw_ptr = hw_base + pos;
}
__delta:
delta = new_hw_ptr - old_hw_ptr;
if (delta < 0)
delta += runtime->boundary;
if (runtime->no_period_wakeup) {
snd_pcm_sframes_t xrun_threshold;
/*
* Without regular period interrupts, we have to check
* the elapsed time to detect xruns.
*/
jdelta = curr_jiffies - runtime->hw_ptr_jiffies;
if (jdelta < runtime->hw_ptr_buffer_jiffies / 2)
goto no_delta_check;
hdelta = jdelta - delta * HZ / runtime->rate;
xrun_threshold = runtime->hw_ptr_buffer_jiffies / 2 + 1;
while (hdelta > xrun_threshold) {
delta += runtime->buffer_size;
hw_base += runtime->buffer_size;
if (hw_base >= runtime->boundary) {
hw_base = 0;
crossed_boundary++;
}
new_hw_ptr = hw_base + pos;
hdelta -= runtime->hw_ptr_buffer_jiffies;
}
goto no_delta_check;
}
/* something must be really wrong */
if (delta >= runtime->buffer_size + runtime->period_size) {
hw_ptr_error(substream, in_interrupt, "Unexpected hw_ptr",
"(stream=%i, pos=%ld, new_hw_ptr=%ld, old_hw_ptr=%ld)\n",
substream->stream, (long)pos,
(long)new_hw_ptr, (long)old_hw_ptr);
return 0;
}
/* Do jiffies check only in xrun_debug mode */
if (!xrun_debug(substream, XRUN_DEBUG_JIFFIESCHECK))
goto no_jiffies_check;
/* Skip the jiffies check for hardware with the BATCH flag.
* Such hardware usually just increases the position at each IRQ,
* thus it can't give any strange position.
*/
if (runtime->hw.info & SNDRV_PCM_INFO_BATCH)
goto no_jiffies_check;
hdelta = delta;
if (hdelta < runtime->delay)
goto no_jiffies_check;
hdelta -= runtime->delay;
jdelta = curr_jiffies - runtime->hw_ptr_jiffies;
if (((hdelta * HZ) / runtime->rate) > jdelta + HZ/100) {
delta = jdelta /
(((runtime->period_size * HZ) / runtime->rate)
+ HZ/100);
/* move new_hw_ptr according jiffies not pos variable */
new_hw_ptr = old_hw_ptr;
hw_base = delta;
/* use loop to avoid checks for delta overflows */
/* the delta value is small or zero in most cases */
while (delta > 0) {
new_hw_ptr += runtime->period_size;
if (new_hw_ptr >= runtime->boundary) {
new_hw_ptr -= runtime->boundary;
crossed_boundary--;
}
delta--;
}
/* align hw_base to buffer_size */
hw_ptr_error(substream, in_interrupt, "hw_ptr skipping",
"(pos=%ld, delta=%ld, period=%ld, jdelta=%lu/%lu/%lu, hw_ptr=%ld/%ld)\n",
(long)pos, (long)hdelta,
(long)runtime->period_size, jdelta,
((hdelta * HZ) / runtime->rate), hw_base,
(unsigned long)old_hw_ptr,
(unsigned long)new_hw_ptr);
/* reset values to proper state */
delta = 0;
hw_base = new_hw_ptr - (new_hw_ptr % runtime->buffer_size);
}
no_jiffies_check:
if (delta > runtime->period_size + runtime->period_size / 2) {
hw_ptr_error(substream, in_interrupt,
"Lost interrupts?",
"(stream=%i, delta=%ld, new_hw_ptr=%ld, old_hw_ptr=%ld)\n",
substream->stream, (long)delta,
(long)new_hw_ptr,
(long)old_hw_ptr);
}
no_delta_check:
if (runtime->status->hw_ptr == new_hw_ptr) {
update_audio_tstamp(substream, &curr_tstamp, &audio_tstamp);
return 0;
}
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
runtime->silence_size > 0)
snd_pcm_playback_silence(substream, new_hw_ptr);
if (in_interrupt) {
delta = new_hw_ptr - runtime->hw_ptr_interrupt;
if (delta < 0)
delta += runtime->boundary;
delta -= (snd_pcm_uframes_t)delta % runtime->period_size;
runtime->hw_ptr_interrupt += delta;
if (runtime->hw_ptr_interrupt >= runtime->boundary)
runtime->hw_ptr_interrupt -= runtime->boundary;
}
runtime->hw_ptr_base = hw_base;
runtime->status->hw_ptr = new_hw_ptr;
runtime->hw_ptr_jiffies = curr_jiffies;
if (crossed_boundary) {
snd_BUG_ON(crossed_boundary != 1);
runtime->hw_ptr_wrap += runtime->boundary;
}
update_audio_tstamp(substream, &curr_tstamp, &audio_tstamp);
return snd_pcm_update_state(substream, runtime);
}
/* CAUTION: call it with irq disabled */
int snd_pcm_update_hw_ptr(struct snd_pcm_substream *substream)
{
return snd_pcm_update_hw_ptr0(substream, 0);
}
/**
* snd_pcm_set_ops - set the PCM operators
* @pcm: the pcm instance
* @direction: stream direction, SNDRV_PCM_STREAM_XXX
* @ops: the operator table
*
* Sets the given PCM operators to the pcm instance.
*/
void snd_pcm_set_ops(struct snd_pcm *pcm, int direction,
const struct snd_pcm_ops *ops)
{
struct snd_pcm_str *stream = &pcm->streams[direction];
struct snd_pcm_substream *substream;
for (substream = stream->substream; substream != NULL; substream = substream->next)
substream->ops = ops;
}
EXPORT_SYMBOL(snd_pcm_set_ops);
/**
* snd_pcm_set_sync - set the PCM sync id
* @substream: the pcm substream
*
* Sets the PCM sync identifier for the card.
*/
void snd_pcm_set_sync(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
runtime->sync.id32[0] = substream->pcm->card->number;
runtime->sync.id32[1] = -1;
runtime->sync.id32[2] = -1;
runtime->sync.id32[3] = -1;
}
EXPORT_SYMBOL(snd_pcm_set_sync);
/*
* Standard ioctl routine
*/
static inline unsigned int div32(unsigned int a, unsigned int b,
unsigned int *r)
{
if (b == 0) {
*r = 0;
return UINT_MAX;
}
*r = a % b;
return a / b;
}
static inline unsigned int div_down(unsigned int a, unsigned int b)
{
if (b == 0)
return UINT_MAX;
return a / b;
}
static inline unsigned int div_up(unsigned int a, unsigned int b)
{
unsigned int r;
unsigned int q;
if (b == 0)
return UINT_MAX;
q = div32(a, b, &r);
if (r)
++q;
return q;
}
static inline unsigned int mul(unsigned int a, unsigned int b)
{
if (a == 0)
return 0;
if (div_down(UINT_MAX, a) < b)
return UINT_MAX;
return a * b;
}
static inline unsigned int muldiv32(unsigned int a, unsigned int b,
unsigned int c, unsigned int *r)
{
u_int64_t n = (u_int64_t) a * b;
if (c == 0) {
snd_BUG_ON(!n);
*r = 0;
return UINT_MAX;
}
n = div_u64_rem(n, c, r);
if (n >= UINT_MAX) {
*r = 0;
return UINT_MAX;
}
return n;
}
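/*
 * Worked example (illustrative): muldiv32(48000, 96000, 1000, &r) forms
 * 48000 * 96000 = 4608000000 in 64 bits first -- a product that would
 * already wrap a 32-bit unsigned -- then divides by 1000, returning
 * 4608000 with remainder r = 0. A naive 32-bit (a * b) / c would
 * silently truncate the product before the division.
 */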
/**
* snd_interval_refine - refine the interval value of configurator
* @i: the interval value to refine
* @v: the interval value to refer to
*
* Refines the interval value with the reference value.
* The interval is changed to the range satisfying both intervals.
* The interval status (min, max, integer, etc.) is re-evaluated.
*
* Return: Positive if the value is changed, zero if it's not changed, or a
* negative error code.
*/
int snd_interval_refine(struct snd_interval *i, const struct snd_interval *v)
{
int changed = 0;
if (snd_BUG_ON(snd_interval_empty(i)))
return -EINVAL;
if (i->min < v->min) {
i->min = v->min;
i->openmin = v->openmin;
changed = 1;
} else if (i->min == v->min && !i->openmin && v->openmin) {
i->openmin = 1;
changed = 1;
}
if (i->max > v->max) {
i->max = v->max;
i->openmax = v->openmax;
changed = 1;
} else if (i->max == v->max && !i->openmax && v->openmax) {
i->openmax = 1;
changed = 1;
}
if (!i->integer && v->integer) {
i->integer = 1;
changed = 1;
}
if (i->integer) {
if (i->openmin) {
i->min++;
i->openmin = 0;
}
if (i->openmax) {
i->max--;
i->openmax = 0;
}
} else if (!i->openmin && !i->openmax && i->min == i->max)
i->integer = 1;
if (snd_interval_checkempty(i)) {
snd_interval_none(i);
return -EINVAL;
}
return changed;
}
EXPORT_SYMBOL(snd_interval_refine);
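/*
 * Illustrative sketch: refining the closed interval [8000, 48000] with
 * the reference [11025, 96000] raises the minimum and returns 1 (changed).
 */
static int __maybe_unused snd_interval_refine_example(void)
{
	struct snd_interval i = { .min = 8000, .max = 48000, .integer = 1 };
	struct snd_interval v = { .min = 11025, .max = 96000, .integer = 1 };

	return snd_interval_refine(&i, &v);	/* i is now [11025, 48000] */
}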
static int snd_interval_refine_first(struct snd_interval *i)
{
if (snd_BUG_ON(snd_interval_empty(i)))
return -EINVAL;
if (snd_interval_single(i))
return 0;
i->max = i->min;
i->openmax = i->openmin;
if (i->openmax)
i->max++;
return 1;
}
static int snd_interval_refine_last(struct snd_interval *i)
{
if (snd_BUG_ON(snd_interval_empty(i)))
return -EINVAL;
if (snd_interval_single(i))
return 0;
i->min = i->max;
i->openmin = i->openmax;
if (i->openmin)
i->min--;
return 1;
}
void snd_interval_mul(const struct snd_interval *a, const struct snd_interval *b, struct snd_interval *c)
{
if (a->empty || b->empty) {
snd_interval_none(c);
return;
}
c->empty = 0;
c->min = mul(a->min, b->min);
c->openmin = (a->openmin || b->openmin);
c->max = mul(a->max, b->max);
c->openmax = (a->openmax || b->openmax);
c->integer = (a->integer && b->integer);
}
/**
* snd_interval_div - refine the interval value with division
* @a: dividend
* @b: divisor
* @c: quotient
*
* c = a / b
*
* The result of the division is stored in @c.
*/
void snd_interval_div(const struct snd_interval *a, const struct snd_interval *b, struct snd_interval *c)
{
unsigned int r;
if (a->empty || b->empty) {
snd_interval_none(c);
return;
}
c->empty = 0;
c->min = div32(a->min, b->max, &r);
c->openmin = (r || a->openmin || b->openmax);
if (b->min > 0) {
c->max = div32(a->max, b->min, &r);
if (r) {
c->max++;
c->openmax = 1;
} else
c->openmax = (a->openmax || b->openmin);
} else {
c->max = UINT_MAX;
c->openmax = 0;
}
c->integer = 0;
}
/**
* snd_interval_muldivk - refine the interval value
* @a: dividend 1
* @b: dividend 2
* @k: divisor (as integer)
* @c: result
*
* c = a * b / k
*
* The result is stored in @c.
*/
void snd_interval_muldivk(const struct snd_interval *a, const struct snd_interval *b,
unsigned int k, struct snd_interval *c)
{
unsigned int r;
if (a->empty || b->empty) {
snd_interval_none(c);
return;
}
c->empty = 0;
c->min = muldiv32(a->min, b->min, k, &r);
c->openmin = (r || a->openmin || b->openmin);
c->max = muldiv32(a->max, b->max, k, &r);
if (r) {
c->max++;
c->openmax = 1;
} else
c->openmax = (a->openmax || b->openmax);
c->integer = 0;
}
/**
* snd_interval_mulkdiv - refine the interval value
* @a: dividend 1
* @k: dividend 2 (as integer)
* @b: divisor
* @c: result
*
* c = a * k / b
*
* The result is stored in @c.
*/
void snd_interval_mulkdiv(const struct snd_interval *a, unsigned int k,
const struct snd_interval *b, struct snd_interval *c)
{
unsigned int r;
if (a->empty || b->empty) {
snd_interval_none(c);
return;
}
c->empty = 0;
c->min = muldiv32(a->min, k, b->max, &r);
c->openmin = (r || a->openmin || b->openmax);
if (b->min > 0) {
c->max = muldiv32(a->max, k, b->min, &r);
if (r) {
c->max++;
c->openmax = 1;
} else
c->openmax = (a->openmax || b->openmin);
} else {
c->max = UINT_MAX;
c->openmax = 0;
}
c->integer = 0;
}
/* ---- */
/**
* snd_interval_ratnum - refine the interval value
* @i: interval to refine
* @rats_count: number of ratnum_t
* @rats: ratnum_t array
* @nump: pointer to store the resultant numerator
* @denp: pointer to store the resultant denominator
*
* Return: Positive if the value is changed, zero if it's not changed, or a
* negative error code.
*/
int snd_interval_ratnum(struct snd_interval *i,
unsigned int rats_count, const struct snd_ratnum *rats,
unsigned int *nump, unsigned int *denp)
{
unsigned int best_num, best_den;
int best_diff;
unsigned int k;
struct snd_interval t;
int err;
unsigned int result_num, result_den;
int result_diff;
best_num = best_den = best_diff = 0;
for (k = 0; k < rats_count; ++k) {
unsigned int num = rats[k].num;
unsigned int den;
unsigned int q = i->min;
int diff;
if (q == 0)
q = 1;
den = div_up(num, q);
if (den < rats[k].den_min)
continue;
if (den > rats[k].den_max)
den = rats[k].den_max;
else {
unsigned int r;
r = (den - rats[k].den_min) % rats[k].den_step;
if (r != 0)
den -= r;
}
diff = num - q * den;
if (diff < 0)
diff = -diff;
if (best_num == 0 ||
diff * best_den < best_diff * den) {
best_diff = diff;
best_den = den;
best_num = num;
}
}
if (best_den == 0) {
i->empty = 1;
return -EINVAL;
}
t.min = div_down(best_num, best_den);
t.openmin = !!(best_num % best_den);
result_num = best_num;
result_diff = best_diff;
result_den = best_den;
best_num = best_den = best_diff = 0;
for (k = 0; k < rats_count; ++k) {
unsigned int num = rats[k].num;
unsigned int den;
unsigned int q = i->max;
int diff;
if (q == 0) {
i->empty = 1;
return -EINVAL;
}
den = div_down(num, q);
if (den > rats[k].den_max)
continue;
if (den < rats[k].den_min)
den = rats[k].den_min;
else {
unsigned int r;
r = (den - rats[k].den_min) % rats[k].den_step;
if (r != 0)
den += rats[k].den_step - r;
}
diff = q * den - num;
if (diff < 0)
diff = -diff;
if (best_num == 0 ||
diff * best_den < best_diff * den) {
best_diff = diff;
best_den = den;
best_num = num;
}
}
if (best_den == 0) {
i->empty = 1;
return -EINVAL;
}
t.max = div_up(best_num, best_den);
t.openmax = !!(best_num % best_den);
t.integer = 0;
err = snd_interval_refine(i, &t);
if (err < 0)
return err;
if (snd_interval_single(i)) {
if (best_diff * result_den < result_diff * best_den) {
result_num = best_num;
result_den = best_den;
}
if (nump)
*nump = result_num;
if (denp)
*denp = result_den;
}
return err;
}
EXPORT_SYMBOL(snd_interval_ratnum);
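/*
 * Worked example (illustrative): a single ratnum describing a 1 MHz
 * clock with an integer divider 1..64,
 *	{ .num = 1000000, .den_min = 1, .den_max = 64, .den_step = 1 },
 * refines a requested rate interval [8000, 48000] to [15625, 48000]:
 * the slowest reachable rate is 1000000 / 64 = 15625, while the upper
 * bound survives because 1000000 / 20 = 50000 still lies above it.
 * *nump and *denp are only filled in once the interval has collapsed
 * to a single value.
 */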
/**
* snd_interval_ratden - refine the interval value
* @i: interval to refine
* @rats_count: number of struct ratden
* @rats: struct ratden array
* @nump: pointer to store the resultant numerator
* @denp: pointer to store the resultant denominator
*
* Return: Positive if the value is changed, zero if it's not changed, or a
* negative error code.
*/
static int snd_interval_ratden(struct snd_interval *i,
unsigned int rats_count,
const struct snd_ratden *rats,
unsigned int *nump, unsigned int *denp)
{
unsigned int best_num, best_diff, best_den;
unsigned int k;
struct snd_interval t;
int err;
best_num = best_den = best_diff = 0;
for (k = 0; k < rats_count; ++k) {
unsigned int num;
unsigned int den = rats[k].den;
unsigned int q = i->min;
int diff;
num = mul(q, den);
if (num > rats[k].num_max)
continue;
if (num < rats[k].num_min)
num = rats[k].num_max;
else {
unsigned int r;
r = (num - rats[k].num_min) % rats[k].num_step;
if (r != 0)
num += rats[k].num_step - r;
}
diff = num - q * den;
if (best_num == 0 ||
diff * best_den < best_diff * den) {
best_diff = diff;
best_den = den;
best_num = num;
}
}
if (best_den == 0) {
i->empty = 1;
return -EINVAL;
}
t.min = div_down(best_num, best_den);
t.openmin = !!(best_num % best_den);
best_num = best_den = best_diff = 0;
for (k = 0; k < rats_count; ++k) {
unsigned int num;
unsigned int den = rats[k].den;
unsigned int q = i->max;
int diff;
num = mul(q, den);
if (num < rats[k].num_min)
continue;
if (num > rats[k].num_max)
num = rats[k].num_max;
else {
unsigned int r;
r = (num - rats[k].num_min) % rats[k].num_step;
if (r != 0)
num -= r;
}
diff = q * den - num;
if (best_num == 0 ||
diff * best_den < best_diff * den) {
best_diff = diff;
best_den = den;
best_num = num;
}
}
if (best_den == 0) {
i->empty = 1;
return -EINVAL;
}
t.max = div_up(best_num, best_den);
t.openmax = !!(best_num % best_den);
t.integer = 0;
err = snd_interval_refine(i, &t);
if (err < 0)
return err;
if (snd_interval_single(i)) {
if (nump)
*nump = best_num;
if (denp)
*denp = best_den;
}
return err;
}
/**
* snd_interval_list - refine the interval value from the list
* @i: the interval value to refine
* @count: the number of elements in the list
* @list: the value list
* @mask: the bit-mask to evaluate
*
* Refines the interval value from the list.
* When mask is non-zero, only the elements corresponding to bit 1 are
* evaluated.
*
* Return: Positive if the value is changed, zero if it's not changed, or a
* negative error code.
*/
int snd_interval_list(struct snd_interval *i, unsigned int count,
const unsigned int *list, unsigned int mask)
{
unsigned int k;
struct snd_interval list_range;
if (!count) {
i->empty = 1;
return -EINVAL;
}
snd_interval_any(&list_range);
list_range.min = UINT_MAX;
list_range.max = 0;
for (k = 0; k < count; k++) {
if (mask && !(mask & (1 << k)))
continue;
if (!snd_interval_test(i, list[k]))
continue;
list_range.min = min(list_range.min, list[k]);
list_range.max = max(list_range.max, list[k]);
}
return snd_interval_refine(i, &list_range);
}
EXPORT_SYMBOL(snd_interval_list);
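/*
 * Illustrative sketch: refining the interval [8000, 96000] against this
 * rate table narrows it to the envelope [8000, 48000]; 192000 is ignored
 * because it lies outside the current interval and fails snd_interval_test().
 */
static int __maybe_unused snd_interval_list_example(struct snd_interval *i)
{
	static const unsigned int rates[] = { 8000, 44100, 48000, 192000 };

	return snd_interval_list(i, ARRAY_SIZE(rates), rates, 0);
}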
/**
* snd_interval_ranges - refine the interval value from the list of ranges
* @i: the interval value to refine
* @count: the number of elements in the list of ranges
* @ranges: the ranges list
* @mask: the bit-mask to evaluate
*
* Refines the interval value from the list of ranges.
* When mask is non-zero, only the elements corresponding to bit 1 are
* evaluated.
*
* Return: Positive if the value is changed, zero if it's not changed, or a
* negative error code.
*/
int snd_interval_ranges(struct snd_interval *i, unsigned int count,
const struct snd_interval *ranges, unsigned int mask)
{
unsigned int k;
struct snd_interval range_union;
struct snd_interval range;
if (!count) {
snd_interval_none(i);
return -EINVAL;
}
snd_interval_any(&range_union);
range_union.min = UINT_MAX;
range_union.max = 0;
for (k = 0; k < count; k++) {
if (mask && !(mask & (1 << k)))
continue;
snd_interval_copy(&range, &ranges[k]);
if (snd_interval_refine(&range, i) < 0)
continue;
if (snd_interval_empty(&range))
continue;
if (range.min < range_union.min) {
range_union.min = range.min;
range_union.openmin = 1;
}
if (range.min == range_union.min && !range.openmin)
range_union.openmin = 0;
if (range.max > range_union.max) {
range_union.max = range.max;
range_union.openmax = 1;
}
if (range.max == range_union.max && !range.openmax)
range_union.openmax = 0;
}
return snd_interval_refine(i, &range_union);
}
EXPORT_SYMBOL(snd_interval_ranges);
static int snd_interval_step(struct snd_interval *i, unsigned int step)
{
unsigned int n;
int changed = 0;
n = i->min % step;
if (n != 0 || i->openmin) {
i->min += step - n;
i->openmin = 0;
changed = 1;
}
n = i->max % step;
if (n != 0 || i->openmax) {
i->max -= n;
i->openmax = 0;
changed = 1;
}
if (snd_interval_checkempty(i)) {
i->empty = 1;
return -EINVAL;
}
return changed;
}
/* Info constraints helpers */
/**
* snd_pcm_hw_rule_add - add the hw-constraint rule
* @runtime: the pcm runtime instance
* @cond: condition bits
* @var: the variable to evaluate
* @func: the evaluation function
* @private: the private data pointer passed to function
* @dep: the dependent variables
*
* Return: Zero if successful, or a negative error code on failure.
*/
int snd_pcm_hw_rule_add(struct snd_pcm_runtime *runtime, unsigned int cond,
int var,
snd_pcm_hw_rule_func_t func, void *private,
int dep, ...)
{
struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
struct snd_pcm_hw_rule *c;
unsigned int k;
va_list args;
va_start(args, dep);
if (constrs->rules_num >= constrs->rules_all) {
struct snd_pcm_hw_rule *new;
unsigned int new_rules = constrs->rules_all + 16;
new = kcalloc(new_rules, sizeof(*c), GFP_KERNEL);
if (!new) {
va_end(args);
return -ENOMEM;
}
if (constrs->rules) {
memcpy(new, constrs->rules,
constrs->rules_num * sizeof(*c));
kfree(constrs->rules);
}
constrs->rules = new;
constrs->rules_all = new_rules;
}
c = &constrs->rules[constrs->rules_num];
c->cond = cond;
c->func = func;
c->var = var;
c->private = private;
k = 0;
while (1) {
if (snd_BUG_ON(k >= ARRAY_SIZE(c->deps))) {
va_end(args);
return -EINVAL;
}
c->deps[k++] = dep;
if (dep < 0)
break;
dep = va_arg(args, int);
}
constrs->rules_num++;
va_end(args);
return 0;
}
EXPORT_SYMBOL(snd_pcm_hw_rule_add);
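/*
 * Illustrative driver-side sketch (hypothetical hardware limited to
 * stereo above 48 kHz): the rule lists both RATE and CHANNELS as
 * dependencies, so the constraint solver re-runs it whenever either
 * variable changes.
 */
static int __maybe_unused
example_rule_stereo_above_48k(struct snd_pcm_hw_params *params,
			      struct snd_pcm_hw_rule *rule)
{
	struct snd_interval *rate =
		hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
	struct snd_interval t;

	if (rate->min <= 48000)
		return 0;	/* nothing to narrow */
	snd_interval_any(&t);
	t.min = t.max = 2;
	t.integer = 1;
	return snd_interval_refine(hw_param_interval(params, rule->var), &t);
}
/*
 * Registered from a (hypothetical) open callback:
 *
 *	snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
 *			    example_rule_stereo_above_48k, NULL,
 *			    SNDRV_PCM_HW_PARAM_RATE,
 *			    SNDRV_PCM_HW_PARAM_CHANNELS, -1);
 */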
/**
* snd_pcm_hw_constraint_mask - apply the given bitmap mask constraint
* @runtime: PCM runtime instance
* @var: hw_params variable to apply the mask
* @mask: the bitmap mask
*
* Apply the constraint of the given bitmap mask to a 32-bit mask parameter.
*
* Return: Zero if successful, or a negative error code on failure.
*/
int snd_pcm_hw_constraint_mask(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var,
u_int32_t mask)
{
struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
struct snd_mask *maskp = constrs_mask(constrs, var);
*maskp->bits &= mask;
memset(maskp->bits + 1, 0, (SNDRV_MASK_MAX-32) / 8); /* clear rest */
if (*maskp->bits == 0)
return -EINVAL;
return 0;
}
/**
* snd_pcm_hw_constraint_mask64 - apply the given bitmap mask constraint
* @runtime: PCM runtime instance
* @var: hw_params variable to apply the mask
* @mask: the 64bit bitmap mask
*
* Apply the constraint of the given bitmap mask to a 64-bit mask parameter.
*
* Return: Zero if successful, or a negative error code on failure.
*/
int snd_pcm_hw_constraint_mask64(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var,
u_int64_t mask)
{
struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
struct snd_mask *maskp = constrs_mask(constrs, var);
maskp->bits[0] &= (u_int32_t)mask;
maskp->bits[1] &= (u_int32_t)(mask >> 32);
memset(maskp->bits + 2, 0, (SNDRV_MASK_MAX-64) / 8); /* clear rest */
if (! maskp->bits[0] && ! maskp->bits[1])
return -EINVAL;
return 0;
}
EXPORT_SYMBOL(snd_pcm_hw_constraint_mask64);
/**
* snd_pcm_hw_constraint_integer - apply an integer constraint to an interval
* @runtime: PCM runtime instance
* @var: hw_params variable to apply the integer constraint
*
* Apply the constraint of integer to an interval parameter.
*
* Return: Positive if the value is changed, zero if it's not changed, or a
* negative error code.
*/
int snd_pcm_hw_constraint_integer(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var)
{
struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
return snd_interval_setinteger(constrs_interval(constrs, var));
}
EXPORT_SYMBOL(snd_pcm_hw_constraint_integer);
/**
* snd_pcm_hw_constraint_minmax - apply a min/max range constraint to an interval
* @runtime: PCM runtime instance
* @var: hw_params variable to apply the range
* @min: the minimal value
* @max: the maximal value
*
* Apply the min/max range constraint to an interval parameter.
*
* Return: Positive if the value is changed, zero if it's not changed, or a
* negative error code.
*/
int snd_pcm_hw_constraint_minmax(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var,
unsigned int min, unsigned int max)
{
struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
struct snd_interval t;
t.min = min;
t.max = max;
t.openmin = t.openmax = 0;
t.integer = 0;
return snd_interval_refine(constrs_interval(constrs, var), &t);
}
EXPORT_SYMBOL(snd_pcm_hw_constraint_minmax);
static int snd_pcm_hw_rule_list(struct snd_pcm_hw_params *params,
struct snd_pcm_hw_rule *rule)
{
struct snd_pcm_hw_constraint_list *list = rule->private;
return snd_interval_list(hw_param_interval(params, rule->var), list->count, list->list, list->mask);
}
/**
* snd_pcm_hw_constraint_list - apply a list of constraints to a parameter
* @runtime: PCM runtime instance
* @cond: condition bits
* @var: hw_params variable to apply the list constraint
* @l: list
*
* Apply the list of constraints to an interval parameter.
*
* Return: Zero if successful, or a negative error code on failure.
*/
int snd_pcm_hw_constraint_list(struct snd_pcm_runtime *runtime,
unsigned int cond,
snd_pcm_hw_param_t var,
const struct snd_pcm_hw_constraint_list *l)
{
return snd_pcm_hw_rule_add(runtime, cond, var,
snd_pcm_hw_rule_list, (void *)l,
var, -1);
}
EXPORT_SYMBOL(snd_pcm_hw_constraint_list);
static int snd_pcm_hw_rule_ranges(struct snd_pcm_hw_params *params,
struct snd_pcm_hw_rule *rule)
{
struct snd_pcm_hw_constraint_ranges *r = rule->private;
return snd_interval_ranges(hw_param_interval(params, rule->var),
r->count, r->ranges, r->mask);
}
/**
* snd_pcm_hw_constraint_ranges - apply list of range constraints to a parameter
* @runtime: PCM runtime instance
* @cond: condition bits
* @var: hw_params variable to apply the list of range constraints
* @r: ranges
*
* Apply the list of range constraints to an interval parameter.
*
* Return: Zero if successful, or a negative error code on failure.
*/
int snd_pcm_hw_constraint_ranges(struct snd_pcm_runtime *runtime,
unsigned int cond,
snd_pcm_hw_param_t var,
const struct snd_pcm_hw_constraint_ranges *r)
{
return snd_pcm_hw_rule_add(runtime, cond, var,
snd_pcm_hw_rule_ranges, (void *)r,
var, -1);
}
EXPORT_SYMBOL(snd_pcm_hw_constraint_ranges);
static int snd_pcm_hw_rule_ratnums(struct snd_pcm_hw_params *params,
struct snd_pcm_hw_rule *rule)
{
const struct snd_pcm_hw_constraint_ratnums *r = rule->private;
unsigned int num = 0, den = 0;
int err;
err = snd_interval_ratnum(hw_param_interval(params, rule->var),
r->nrats, r->rats, &num, &den);
if (err >= 0 && den && rule->var == SNDRV_PCM_HW_PARAM_RATE) {
params->rate_num = num;
params->rate_den = den;
}
return err;
}
/**
* snd_pcm_hw_constraint_ratnums - apply ratnums constraint to a parameter
* @runtime: PCM runtime instance
* @cond: condition bits
* @var: hw_params variable to apply the ratnums constraint
* @r: struct snd_ratnums constraints
*
* Return: Zero if successful, or a negative error code on failure.
*/
int snd_pcm_hw_constraint_ratnums(struct snd_pcm_runtime *runtime,
unsigned int cond,
snd_pcm_hw_param_t var,
const struct snd_pcm_hw_constraint_ratnums *r)
{
return snd_pcm_hw_rule_add(runtime, cond, var,
snd_pcm_hw_rule_ratnums, (void *)r,
var, -1);
}
EXPORT_SYMBOL(snd_pcm_hw_constraint_ratnums);
static int snd_pcm_hw_rule_ratdens(struct snd_pcm_hw_params *params,
struct snd_pcm_hw_rule *rule)
{
const struct snd_pcm_hw_constraint_ratdens *r = rule->private;
unsigned int num = 0, den = 0;
int err = snd_interval_ratden(hw_param_interval(params, rule->var),
r->nrats, r->rats, &num, &den);
if (err >= 0 && den && rule->var == SNDRV_PCM_HW_PARAM_RATE) {
params->rate_num = num;
params->rate_den = den;
}
return err;
}
/**
* snd_pcm_hw_constraint_ratdens - apply ratdens constraint to a parameter
* @runtime: PCM runtime instance
* @cond: condition bits
* @var: hw_params variable to apply the ratdens constraint
* @r: struct snd_ratdens constraints
*
* Return: Zero if successful, or a negative error code on failure.
*/
int snd_pcm_hw_constraint_ratdens(struct snd_pcm_runtime *runtime,
unsigned int cond,
snd_pcm_hw_param_t var,
const struct snd_pcm_hw_constraint_ratdens *r)
{
return snd_pcm_hw_rule_add(runtime, cond, var,
snd_pcm_hw_rule_ratdens, (void *)r,
var, -1);
}
EXPORT_SYMBOL(snd_pcm_hw_constraint_ratdens);
static int snd_pcm_hw_rule_msbits(struct snd_pcm_hw_params *params,
struct snd_pcm_hw_rule *rule)
{
unsigned int l = (unsigned long) rule->private;
int width = l & 0xffff;
unsigned int msbits = l >> 16;
struct snd_interval *i = hw_param_interval(params, SNDRV_PCM_HW_PARAM_SAMPLE_BITS);
if (!snd_interval_single(i))
return 0;
if ((snd_interval_value(i) == width) ||
(width == 0 && snd_interval_value(i) > msbits))
params->msbits = min_not_zero(params->msbits, msbits);
return 0;
}
/**
* snd_pcm_hw_constraint_msbits - add a hw constraint msbits rule
* @runtime: PCM runtime instance
* @cond: condition bits
* @width: sample bits width
* @msbits: msbits width
*
* This constraint will set the number of most significant bits (msbits) if a
* sample format with the specified width has been selected. If width is set to 0
* the msbits will be set for any sample format with a width larger than the
* specified msbits.
*
* Return: Zero if successful, or a negative error code on failure.
*/
int snd_pcm_hw_constraint_msbits(struct snd_pcm_runtime *runtime,
unsigned int cond,
unsigned int width,
unsigned int msbits)
{
unsigned long l = (msbits << 16) | width;
return snd_pcm_hw_rule_add(runtime, cond, -1,
snd_pcm_hw_rule_msbits,
(void*) l,
SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1);
}
EXPORT_SYMBOL(snd_pcm_hw_constraint_msbits);
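/*
 * Worked example of the packing above (illustrative):
 * snd_pcm_hw_constraint_msbits(runtime, 0, 32, 24) stores
 * l = (24 << 16) | 32 = 0x180020; the rule unpacks width = l & 0xffff = 32
 * and msbits = l >> 16 = 24, marking 32-bit samples as carrying only 24
 * significant bits (typical for S32_LE on a 24-bit converter).
 */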
static int snd_pcm_hw_rule_step(struct snd_pcm_hw_params *params,
struct snd_pcm_hw_rule *rule)
{
unsigned long step = (unsigned long) rule->private;
return snd_interval_step(hw_param_interval(params, rule->var), step);
}
/**
* snd_pcm_hw_constraint_step - add a hw constraint step rule
* @runtime: PCM runtime instance
* @cond: condition bits
* @var: hw_params variable to apply the step constraint
* @step: step size
*
* Return: Zero if successful, or a negative error code on failure.
*/
int snd_pcm_hw_constraint_step(struct snd_pcm_runtime *runtime,
unsigned int cond,
snd_pcm_hw_param_t var,
unsigned long step)
{
return snd_pcm_hw_rule_add(runtime, cond, var,
snd_pcm_hw_rule_step, (void *) step,
var, -1);
}
EXPORT_SYMBOL(snd_pcm_hw_constraint_step);
static int snd_pcm_hw_rule_pow2(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule)
{
static unsigned int pow2_sizes[] = {
1<<0, 1<<1, 1<<2, 1<<3, 1<<4, 1<<5, 1<<6, 1<<7,
1<<8, 1<<9, 1<<10, 1<<11, 1<<12, 1<<13, 1<<14, 1<<15,
1<<16, 1<<17, 1<<18, 1<<19, 1<<20, 1<<21, 1<<22, 1<<23,
1<<24, 1<<25, 1<<26, 1<<27, 1<<28, 1<<29, 1<<30
};
return snd_interval_list(hw_param_interval(params, rule->var),
ARRAY_SIZE(pow2_sizes), pow2_sizes, 0);
}
/**
* snd_pcm_hw_constraint_pow2 - add a hw constraint power-of-2 rule
* @runtime: PCM runtime instance
* @cond: condition bits
* @var: hw_params variable to apply the power-of-2 constraint
*
* Return: Zero if successful, or a negative error code on failure.
*/
int snd_pcm_hw_constraint_pow2(struct snd_pcm_runtime *runtime,
unsigned int cond,
snd_pcm_hw_param_t var)
{
return snd_pcm_hw_rule_add(runtime, cond, var,
snd_pcm_hw_rule_pow2, NULL,
var, -1);
}
EXPORT_SYMBOL(snd_pcm_hw_constraint_pow2);
static int snd_pcm_hw_rule_noresample_func(struct snd_pcm_hw_params *params,
struct snd_pcm_hw_rule *rule)
{
unsigned int base_rate = (unsigned int)(uintptr_t)rule->private;
struct snd_interval *rate;
rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
return snd_interval_list(rate, 1, &base_rate, 0);
}
/**
* snd_pcm_hw_rule_noresample - add a rule to allow disabling hw resampling
* @runtime: PCM runtime instance
* @base_rate: the rate at which the hardware does not resample
*
* Return: Zero if successful, or a negative error code on failure.
*/
int snd_pcm_hw_rule_noresample(struct snd_pcm_runtime *runtime,
unsigned int base_rate)
{
return snd_pcm_hw_rule_add(runtime, SNDRV_PCM_HW_PARAMS_NORESAMPLE,
SNDRV_PCM_HW_PARAM_RATE,
snd_pcm_hw_rule_noresample_func,
(void *)(uintptr_t)base_rate,
SNDRV_PCM_HW_PARAM_RATE, -1);
}
EXPORT_SYMBOL(snd_pcm_hw_rule_noresample);
static void _snd_pcm_hw_param_any(struct snd_pcm_hw_params *params,
snd_pcm_hw_param_t var)
{
if (hw_is_mask(var)) {
snd_mask_any(hw_param_mask(params, var));
params->cmask |= 1 << var;
params->rmask |= 1 << var;
return;
}
if (hw_is_interval(var)) {
snd_interval_any(hw_param_interval(params, var));
params->cmask |= 1 << var;
params->rmask |= 1 << var;
return;
}
snd_BUG();
}
void _snd_pcm_hw_params_any(struct snd_pcm_hw_params *params)
{
unsigned int k;
memset(params, 0, sizeof(*params));
for (k = SNDRV_PCM_HW_PARAM_FIRST_MASK; k <= SNDRV_PCM_HW_PARAM_LAST_MASK; k++)
_snd_pcm_hw_param_any(params, k);
for (k = SNDRV_PCM_HW_PARAM_FIRST_INTERVAL; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++)
_snd_pcm_hw_param_any(params, k);
params->info = ~0U;
}
EXPORT_SYMBOL(_snd_pcm_hw_params_any);
/**
* snd_pcm_hw_param_value - return @params field @var value
* @params: the hw_params instance
* @var: parameter to retrieve
* @dir: pointer to the direction (-1,0,1) or %NULL
*
* Return: The value for field @var if it's fixed in configuration space
* defined by @params. -%EINVAL otherwise.
*/
int snd_pcm_hw_param_value(const struct snd_pcm_hw_params *params,
snd_pcm_hw_param_t var, int *dir)
{
if (hw_is_mask(var)) {
const struct snd_mask *mask = hw_param_mask_c(params, var);
if (!snd_mask_single(mask))
return -EINVAL;
if (dir)
*dir = 0;
return snd_mask_value(mask);
}
if (hw_is_interval(var)) {
const struct snd_interval *i = hw_param_interval_c(params, var);
if (!snd_interval_single(i))
return -EINVAL;
if (dir)
*dir = i->openmin;
return snd_interval_value(i);
}
return -EINVAL;
}
EXPORT_SYMBOL(snd_pcm_hw_param_value);
void _snd_pcm_hw_param_setempty(struct snd_pcm_hw_params *params,
snd_pcm_hw_param_t var)
{
if (hw_is_mask(var)) {
snd_mask_none(hw_param_mask(params, var));
params->cmask |= 1 << var;
params->rmask |= 1 << var;
} else if (hw_is_interval(var)) {
snd_interval_none(hw_param_interval(params, var));
params->cmask |= 1 << var;
params->rmask |= 1 << var;
} else {
snd_BUG();
}
}
EXPORT_SYMBOL(_snd_pcm_hw_param_setempty);
static int _snd_pcm_hw_param_first(struct snd_pcm_hw_params *params,
snd_pcm_hw_param_t var)
{
int changed;
if (hw_is_mask(var))
changed = snd_mask_refine_first(hw_param_mask(params, var));
else if (hw_is_interval(var))
changed = snd_interval_refine_first(hw_param_interval(params, var));
else
return -EINVAL;
if (changed) {
params->cmask |= 1 << var;
params->rmask |= 1 << var;
}
return changed;
}
/**
* snd_pcm_hw_param_first - refine config space and return minimum value
* @pcm: PCM instance
* @params: the hw_params instance
* @var: parameter to retrieve
* @dir: pointer to the direction (-1,0,1) or %NULL
*
* Inside configuration space defined by @params remove from @var all
* values > minimum. Reduce configuration space accordingly.
*
* Return: The minimum, or a negative error code on failure.
*/
int snd_pcm_hw_param_first(struct snd_pcm_substream *pcm,
struct snd_pcm_hw_params *params,
snd_pcm_hw_param_t var, int *dir)
{
int changed = _snd_pcm_hw_param_first(params, var);
if (changed < 0)
return changed;
if (params->rmask) {
int err = snd_pcm_hw_refine(pcm, params);
if (snd_BUG_ON(err < 0))
return err;
}
return snd_pcm_hw_param_value(params, var, dir);
}
EXPORT_SYMBOL(snd_pcm_hw_param_first);
static int _snd_pcm_hw_param_last(struct snd_pcm_hw_params *params,
snd_pcm_hw_param_t var)
{
int changed;
if (hw_is_mask(var))
changed = snd_mask_refine_last(hw_param_mask(params, var));
else if (hw_is_interval(var))
changed = snd_interval_refine_last(hw_param_interval(params, var));
else
return -EINVAL;
if (changed) {
params->cmask |= 1 << var;
params->rmask |= 1 << var;
}
return changed;
}
/**
* snd_pcm_hw_param_last - refine config space and return maximum value
* @pcm: PCM instance
* @params: the hw_params instance
* @var: parameter to retrieve
* @dir: pointer to the direction (-1,0,1) or %NULL
*
* Inside configuration space defined by @params remove from @var all
* values < maximum. Reduce configuration space accordingly.
*
* Return: The maximum, or a negative error code on failure.
*/
int snd_pcm_hw_param_last(struct snd_pcm_substream *pcm,
struct snd_pcm_hw_params *params,
snd_pcm_hw_param_t var, int *dir)
{
int changed = _snd_pcm_hw_param_last(params, var);
if (changed < 0)
return changed;
if (params->rmask) {
int err = snd_pcm_hw_refine(pcm, params);
if (snd_BUG_ON(err < 0))
return err;
}
return snd_pcm_hw_param_value(params, var, dir);
}
EXPORT_SYMBOL(snd_pcm_hw_param_last);
/**
* snd_pcm_hw_param_choose - choose a configuration defined by @params
* @pcm: PCM instance
* @params: the hw_params instance
*
* Choose one configuration from configuration space defined by @params.
* The configuration chosen is that obtained fixing in this order:
* first access, first format, first subformat, min channels,
* min rate, min period time, max buffer size, min tick time
*
* Return: Zero if successful, or a negative error code on failure.
*/
int snd_pcm_hw_params_choose(struct snd_pcm_substream *pcm,
struct snd_pcm_hw_params *params)
{
static int vars[] = {
SNDRV_PCM_HW_PARAM_ACCESS,
SNDRV_PCM_HW_PARAM_FORMAT,
SNDRV_PCM_HW_PARAM_SUBFORMAT,
SNDRV_PCM_HW_PARAM_CHANNELS,
SNDRV_PCM_HW_PARAM_RATE,
SNDRV_PCM_HW_PARAM_PERIOD_TIME,
SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
SNDRV_PCM_HW_PARAM_TICK_TIME,
-1
};
int err, *v;
for (v = vars; *v != -1; v++) {
if (*v != SNDRV_PCM_HW_PARAM_BUFFER_SIZE)
err = snd_pcm_hw_param_first(pcm, params, *v, NULL);
else
err = snd_pcm_hw_param_last(pcm, params, *v, NULL);
if (snd_BUG_ON(err < 0))
return err;
}
return 0;
}
static int snd_pcm_lib_ioctl_reset(struct snd_pcm_substream *substream,
void *arg)
{
struct snd_pcm_runtime *runtime = substream->runtime;
unsigned long flags;
snd_pcm_stream_lock_irqsave(substream, flags);
if (snd_pcm_running(substream) &&
snd_pcm_update_hw_ptr(substream) >= 0)
runtime->status->hw_ptr %= runtime->buffer_size;
else {
runtime->status->hw_ptr = 0;
runtime->hw_ptr_wrap = 0;
}
snd_pcm_stream_unlock_irqrestore(substream, flags);
return 0;
}
static int snd_pcm_lib_ioctl_channel_info(struct snd_pcm_substream *substream,
void *arg)
{
struct snd_pcm_channel_info *info = arg;
struct snd_pcm_runtime *runtime = substream->runtime;
int width;
if (!(runtime->info & SNDRV_PCM_INFO_MMAP)) {
info->offset = -1;
return 0;
}
width = snd_pcm_format_physical_width(runtime->format);
if (width < 0)
return width;
info->offset = 0;
switch (runtime->access) {
case SNDRV_PCM_ACCESS_MMAP_INTERLEAVED:
case SNDRV_PCM_ACCESS_RW_INTERLEAVED:
info->first = info->channel * width;
info->step = runtime->channels * width;
break;
case SNDRV_PCM_ACCESS_MMAP_NONINTERLEAVED:
case SNDRV_PCM_ACCESS_RW_NONINTERLEAVED:
{
size_t size = runtime->dma_bytes / runtime->channels;
info->first = info->channel * size * 8;
info->step = width;
break;
}
default:
snd_BUG();
break;
}
return 0;
}
static int snd_pcm_lib_ioctl_fifo_size(struct snd_pcm_substream *substream,
void *arg)
{
struct snd_pcm_hw_params *params = arg;
snd_pcm_format_t format;
int channels;
ssize_t frame_size;
params->fifo_size = substream->runtime->hw.fifo_size;
if (!(substream->runtime->hw.info & SNDRV_PCM_INFO_FIFO_IN_FRAMES)) {
format = params_format(params);
channels = params_channels(params);
frame_size = snd_pcm_format_size(format, channels);
if (frame_size > 0)
params->fifo_size /= (unsigned)frame_size;
}
return 0;
}
/**
* snd_pcm_lib_ioctl - a generic PCM ioctl callback
* @substream: the pcm substream instance
* @cmd: ioctl command
* @arg: ioctl argument
*
* Processes the generic ioctl commands for PCM.
* Can be passed as the ioctl callback for PCM ops.
*
* Return: Zero if successful, or a negative error code on failure.
*/
int snd_pcm_lib_ioctl(struct snd_pcm_substream *substream,
unsigned int cmd, void *arg)
{
switch (cmd) {
case SNDRV_PCM_IOCTL1_INFO:
return 0;
case SNDRV_PCM_IOCTL1_RESET:
return snd_pcm_lib_ioctl_reset(substream, arg);
case SNDRV_PCM_IOCTL1_CHANNEL_INFO:
return snd_pcm_lib_ioctl_channel_info(substream, arg);
case SNDRV_PCM_IOCTL1_FIFO_SIZE:
return snd_pcm_lib_ioctl_fifo_size(substream, arg);
}
return -ENXIO;
}
EXPORT_SYMBOL(snd_pcm_lib_ioctl);
/**
* snd_pcm_period_elapsed - update the pcm status for the next period
* @substream: the pcm substream instance
*
* This function is called from the interrupt handler when the
* PCM has processed the period size. It will update the current
* pointer, wake up sleepers, etc.
*
* Even if more than one period has elapsed since the last call, you
* have to call this only once.
*/
void snd_pcm_period_elapsed(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime;
unsigned long flags;
if (PCM_RUNTIME_CHECK(substream))
return;
runtime = substream->runtime;
snd_pcm_stream_lock_irqsave(substream, flags);
if (!snd_pcm_running(substream) ||
snd_pcm_update_hw_ptr0(substream, 1) < 0)
goto _end;
#ifdef CONFIG_SND_PCM_TIMER
if (substream->timer_running)
snd_timer_interrupt(substream->timer, 1);
#endif
_end:
snd_pcm_stream_unlock_irqrestore(substream, flags);
kill_fasync(&runtime->fasync, SIGIO, POLL_IN);
}
EXPORT_SYMBOL(snd_pcm_period_elapsed);
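/*
 * Illustrative sketch of the canonical caller (hypothetical driver;
 * assumes <linux/interrupt.h> for irqreturn_t): the interrupt handler
 * acknowledges the hardware, then reports that a period has completed.
 */
static irqreturn_t __maybe_unused example_pcm_irq(int irq, void *dev_id)
{
	struct snd_pcm_substream *substream = dev_id;

	/* ... acknowledge/clear the period interrupt in hardware ... */
	snd_pcm_period_elapsed(substream);
	return IRQ_HANDLED;
}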
/*
 * Wait until avail_min data becomes available
 * Returns a negative error code if any error occurs during the wait.
 * The available space is stored in *availp. When err = 0 and avail = 0
 * on a capture stream, it indicates the stream is in the DRAINING state.
 */
static int wait_for_avail(struct snd_pcm_substream *substream,
snd_pcm_uframes_t *availp)
{
struct snd_pcm_runtime *runtime = substream->runtime;
int is_playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
wait_queue_t wait;
int err = 0;
snd_pcm_uframes_t avail = 0;
long wait_time, tout;
init_waitqueue_entry(&wait, current);
set_current_state(TASK_INTERRUPTIBLE);
add_wait_queue(&runtime->tsleep, &wait);
if (runtime->no_period_wakeup)
wait_time = MAX_SCHEDULE_TIMEOUT;
else {
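/* base timeout: 10 s, or two periods' worth of time if that is longer */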
wait_time = 10;
if (runtime->rate) {
long t = runtime->period_size * 2 / runtime->rate;
wait_time = max(t, wait_time);
}
wait_time = msecs_to_jiffies(wait_time * 1000);
}
for (;;) {
if (signal_pending(current)) {
err = -ERESTARTSYS;
break;
}
/*
 * Check first whether space has already become available (and
 * the wakeup therefore already happened), to close the race of
 * space becoming available before we sleep. This check must
 * happen after we have been added to the waitqueue and the
 * current state has been set to TASK_INTERRUPTIBLE.
 */
if (is_playback)
avail = snd_pcm_playback_avail(runtime);
else
avail = snd_pcm_capture_avail(runtime);
if (avail >= runtime->twake)
break;
snd_pcm_stream_unlock_irq(substream);
tout = schedule_timeout(wait_time);
snd_pcm_stream_lock_irq(substream);
set_current_state(TASK_INTERRUPTIBLE);
switch (runtime->status->state) {
case SNDRV_PCM_STATE_SUSPENDED:
err = -ESTRPIPE;
goto _endloop;
case SNDRV_PCM_STATE_XRUN:
err = -EPIPE;
goto _endloop;
case SNDRV_PCM_STATE_DRAINING:
if (is_playback)
err = -EPIPE;
else
avail = 0; /* indicate draining */
goto _endloop;
case SNDRV_PCM_STATE_OPEN:
case SNDRV_PCM_STATE_SETUP:
case SNDRV_PCM_STATE_DISCONNECTED:
err = -EBADFD;
goto _endloop;
case SNDRV_PCM_STATE_PAUSED:
continue;
}
if (!tout) {
pcm_dbg(substream->pcm,
"%s write error (DMA or IRQ trouble?)\n",
is_playback ? "playback" : "capture");
err = -EIO;
break;
}
}
_endloop:
set_current_state(TASK_RUNNING);
remove_wait_queue(&runtime->tsleep, &wait);
*availp = avail;
return err;
}
static int snd_pcm_lib_write_transfer(struct snd_pcm_substream *substream,
unsigned int hwoff,
unsigned long data, unsigned int off,
snd_pcm_uframes_t frames)
{
struct snd_pcm_runtime *runtime = substream->runtime;
int err;
char __user *buf = (char __user *) data + frames_to_bytes(runtime, off);
if (substream->ops->copy) {
if ((err = substream->ops->copy(substream, -1, hwoff, buf, frames)) < 0)
return err;
} else {
char *hwbuf = runtime->dma_area + frames_to_bytes(runtime, hwoff);
if (copy_from_user(hwbuf, buf, frames_to_bytes(runtime, frames)))
return -EFAULT;
}
return 0;
}
typedef int (*transfer_f)(struct snd_pcm_substream *substream, unsigned int hwoff,
unsigned long data, unsigned int off,
snd_pcm_uframes_t size);
static snd_pcm_sframes_t snd_pcm_lib_write1(struct snd_pcm_substream *substream,
unsigned long data,
snd_pcm_uframes_t size,
int nonblock,
transfer_f transfer)
{
struct snd_pcm_runtime *runtime = substream->runtime;
snd_pcm_uframes_t xfer = 0;
snd_pcm_uframes_t offset = 0;
snd_pcm_uframes_t avail;
int err = 0;
if (size == 0)
return 0;
snd_pcm_stream_lock_irq(substream);
switch (runtime->status->state) {
case SNDRV_PCM_STATE_PREPARED:
case SNDRV_PCM_STATE_RUNNING:
case SNDRV_PCM_STATE_PAUSED:
break;
case SNDRV_PCM_STATE_XRUN:
err = -EPIPE;
goto _end_unlock;
case SNDRV_PCM_STATE_SUSPENDED:
err = -ESTRPIPE;
goto _end_unlock;
default:
err = -EBADFD;
goto _end_unlock;
}
runtime->twake = runtime->control->avail_min ? : 1;
if (runtime->status->state == SNDRV_PCM_STATE_RUNNING)
snd_pcm_update_hw_ptr(substream);
avail = snd_pcm_playback_avail(runtime);
while (size > 0) {
snd_pcm_uframes_t frames, appl_ptr, appl_ofs;
snd_pcm_uframes_t cont;
if (!avail) {
if (nonblock) {
err = -EAGAIN;
goto _end_unlock;
}
runtime->twake = min_t(snd_pcm_uframes_t, size,
runtime->control->avail_min ? : 1);
err = wait_for_avail(substream, &avail);
if (err < 0)
goto _end_unlock;
}
frames = size > avail ? avail : size;
cont = runtime->buffer_size - runtime->control->appl_ptr % runtime->buffer_size;
if (frames > cont)
frames = cont;
if (snd_BUG_ON(!frames)) {
runtime->twake = 0;
snd_pcm_stream_unlock_irq(substream);
return -EINVAL;
}
appl_ptr = runtime->control->appl_ptr;
appl_ofs = appl_ptr % runtime->buffer_size;
snd_pcm_stream_unlock_irq(substream);
err = transfer(substream, appl_ofs, data, offset, frames);
snd_pcm_stream_lock_irq(substream);
if (err < 0)
goto _end_unlock;
switch (runtime->status->state) {
case SNDRV_PCM_STATE_XRUN:
err = -EPIPE;
goto _end_unlock;
case SNDRV_PCM_STATE_SUSPENDED:
err = -ESTRPIPE;
goto _end_unlock;
default:
break;
}
appl_ptr += frames;
if (appl_ptr >= runtime->boundary)
appl_ptr -= runtime->boundary;
runtime->control->appl_ptr = appl_ptr;
if (substream->ops->ack)
substream->ops->ack(substream);
offset += frames;
size -= frames;
xfer += frames;
avail -= frames;
if (runtime->status->state == SNDRV_PCM_STATE_PREPARED &&
snd_pcm_playback_hw_avail(runtime) >= (snd_pcm_sframes_t)runtime->start_threshold) {
err = snd_pcm_start(substream);
if (err < 0)
goto _end_unlock;
}
}
_end_unlock:
runtime->twake = 0;
if (xfer > 0 && err >= 0)
snd_pcm_update_state(substream, runtime);
snd_pcm_stream_unlock_irq(substream);
return xfer > 0 ? (snd_pcm_sframes_t)xfer : err;
}
/* sanity-check for read/write methods */
static int pcm_sanity_check(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime;
if (PCM_RUNTIME_CHECK(substream))
return -ENXIO;
runtime = substream->runtime;
if (snd_BUG_ON(!substream->ops->copy && !runtime->dma_area))
return -EINVAL;
if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
return -EBADFD;
return 0;
}
snd_pcm_sframes_t snd_pcm_lib_write(struct snd_pcm_substream *substream, const void __user *buf, snd_pcm_uframes_t size)
{
struct snd_pcm_runtime *runtime;
int nonblock;
int err;
err = pcm_sanity_check(substream);
if (err < 0)
return err;
runtime = substream->runtime;
nonblock = !!(substream->f_flags & O_NONBLOCK);
if (runtime->access != SNDRV_PCM_ACCESS_RW_INTERLEAVED &&
runtime->channels > 1)
return -EINVAL;
return snd_pcm_lib_write1(substream, (unsigned long)buf, size, nonblock,
snd_pcm_lib_write_transfer);
}
EXPORT_SYMBOL(snd_pcm_lib_write);
static int snd_pcm_lib_writev_transfer(struct snd_pcm_substream *substream,
unsigned int hwoff,
unsigned long data, unsigned int off,
snd_pcm_uframes_t frames)
{
struct snd_pcm_runtime *runtime = substream->runtime;
int err;
void __user **bufs = (void __user **)data;
int channels = runtime->channels;
int c;
if (substream->ops->copy) {
if (snd_BUG_ON(!substream->ops->silence))
return -EINVAL;
for (c = 0; c < channels; ++c, ++bufs) {
if (*bufs == NULL) {
if ((err = substream->ops->silence(substream, c, hwoff, frames)) < 0)
return err;
} else {
char __user *buf = *bufs + samples_to_bytes(runtime, off);
if ((err = substream->ops->copy(substream, c, hwoff, buf, frames)) < 0)
return err;
}
}
} else {
/* default transfer behaviour */
size_t dma_csize = runtime->dma_bytes / channels;
for (c = 0; c < channels; ++c, ++bufs) {
char *hwbuf = runtime->dma_area + (c * dma_csize) + samples_to_bytes(runtime, hwoff);
if (*bufs == NULL) {
snd_pcm_format_set_silence(runtime->format, hwbuf, frames);
} else {
char __user *buf = *bufs + samples_to_bytes(runtime, off);
if (copy_from_user(hwbuf, buf, samples_to_bytes(runtime, frames)))
return -EFAULT;
}
}
}
return 0;
}
snd_pcm_sframes_t snd_pcm_lib_writev(struct snd_pcm_substream *substream,
void __user **bufs,
snd_pcm_uframes_t frames)
{
struct snd_pcm_runtime *runtime;
int nonblock;
int err;
err = pcm_sanity_check(substream);
if (err < 0)
return err;
runtime = substream->runtime;
nonblock = !!(substream->f_flags & O_NONBLOCK);
if (runtime->access != SNDRV_PCM_ACCESS_RW_NONINTERLEAVED)
return -EINVAL;
return snd_pcm_lib_write1(substream, (unsigned long)bufs, frames,
nonblock, snd_pcm_lib_writev_transfer);
}
EXPORT_SYMBOL(snd_pcm_lib_writev);
static int snd_pcm_lib_read_transfer(struct snd_pcm_substream *substream,
unsigned int hwoff,
unsigned long data, unsigned int off,
snd_pcm_uframes_t frames)
{
struct snd_pcm_runtime *runtime = substream->runtime;
int err;
char __user *buf = (char __user *) data + frames_to_bytes(runtime, off);
if (substream->ops->copy) {
if ((err = substream->ops->copy(substream, -1, hwoff, buf, frames)) < 0)
return err;
} else {
char *hwbuf = runtime->dma_area + frames_to_bytes(runtime, hwoff);
if (copy_to_user(buf, hwbuf, frames_to_bytes(runtime, frames)))
return -EFAULT;
}
return 0;
}
static snd_pcm_sframes_t snd_pcm_lib_read1(struct snd_pcm_substream *substream,
unsigned long data,
snd_pcm_uframes_t size,
int nonblock,
transfer_f transfer)
{
struct snd_pcm_runtime *runtime = substream->runtime;
snd_pcm_uframes_t xfer = 0;
snd_pcm_uframes_t offset = 0;
snd_pcm_uframes_t avail;
int err = 0;
if (size == 0)
return 0;
snd_pcm_stream_lock_irq(substream);
switch (runtime->status->state) {
case SNDRV_PCM_STATE_PREPARED:
if (size >= runtime->start_threshold) {
err = snd_pcm_start(substream);
if (err < 0)
goto _end_unlock;
}
break;
case SNDRV_PCM_STATE_DRAINING:
case SNDRV_PCM_STATE_RUNNING:
case SNDRV_PCM_STATE_PAUSED:
break;
case SNDRV_PCM_STATE_XRUN:
err = -EPIPE;
goto _end_unlock;
case SNDRV_PCM_STATE_SUSPENDED:
err = -ESTRPIPE;
goto _end_unlock;
default:
err = -EBADFD;
goto _end_unlock;
}
runtime->twake = runtime->control->avail_min ? : 1;
if (runtime->status->state == SNDRV_PCM_STATE_RUNNING)
snd_pcm_update_hw_ptr(substream);
avail = snd_pcm_capture_avail(runtime);
while (size > 0) {
snd_pcm_uframes_t frames, appl_ptr, appl_ofs;
snd_pcm_uframes_t cont;
if (!avail) {
if (runtime->status->state ==
SNDRV_PCM_STATE_DRAINING) {
snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
goto _end_unlock;
}
if (nonblock) {
err = -EAGAIN;
goto _end_unlock;
}
runtime->twake = min_t(snd_pcm_uframes_t, size,
runtime->control->avail_min ? : 1);
err = wait_for_avail(substream, &avail);
if (err < 0)
goto _end_unlock;
if (!avail)
continue; /* draining */
}
frames = size > avail ? avail : size;
cont = runtime->buffer_size - runtime->control->appl_ptr % runtime->buffer_size;
if (frames > cont)
frames = cont;
if (snd_BUG_ON(!frames)) {
runtime->twake = 0;
snd_pcm_stream_unlock_irq(substream);
return -EINVAL;
}
appl_ptr = runtime->control->appl_ptr;
appl_ofs = appl_ptr % runtime->buffer_size;
snd_pcm_stream_unlock_irq(substream);
err = transfer(substream, appl_ofs, data, offset, frames);
snd_pcm_stream_lock_irq(substream);
if (err < 0)
goto _end_unlock;
switch (runtime->status->state) {
case SNDRV_PCM_STATE_XRUN:
err = -EPIPE;
goto _end_unlock;
case SNDRV_PCM_STATE_SUSPENDED:
err = -ESTRPIPE;
goto _end_unlock;
default:
break;
}
appl_ptr += frames;
if (appl_ptr >= runtime->boundary)
appl_ptr -= runtime->boundary;
runtime->control->appl_ptr = appl_ptr;
if (substream->ops->ack)
substream->ops->ack(substream);
offset += frames;
size -= frames;
xfer += frames;
avail -= frames;
}
_end_unlock:
runtime->twake = 0;
if (xfer > 0 && err >= 0)
snd_pcm_update_state(substream, runtime);
snd_pcm_stream_unlock_irq(substream);
return xfer > 0 ? (snd_pcm_sframes_t)xfer : err;
}
snd_pcm_sframes_t snd_pcm_lib_read(struct snd_pcm_substream *substream, void __user *buf, snd_pcm_uframes_t size)
{
struct snd_pcm_runtime *runtime;
int nonblock;
int err;
err = pcm_sanity_check(substream);
if (err < 0)
return err;
runtime = substream->runtime;
nonblock = !!(substream->f_flags & O_NONBLOCK);
if (runtime->access != SNDRV_PCM_ACCESS_RW_INTERLEAVED)
return -EINVAL;
return snd_pcm_lib_read1(substream, (unsigned long)buf, size, nonblock, snd_pcm_lib_read_transfer);
}
EXPORT_SYMBOL(snd_pcm_lib_read);
static int snd_pcm_lib_readv_transfer(struct snd_pcm_substream *substream,
unsigned int hwoff,
unsigned long data, unsigned int off,
snd_pcm_uframes_t frames)
{
struct snd_pcm_runtime *runtime = substream->runtime;
int err;
void __user **bufs = (void __user **)data;
int channels = runtime->channels;
int c;
if (substream->ops->copy) {
for (c = 0; c < channels; ++c, ++bufs) {
char __user *buf;
if (*bufs == NULL)
continue;
buf = *bufs + samples_to_bytes(runtime, off);
if ((err = substream->ops->copy(substream, c, hwoff, buf, frames)) < 0)
return err;
}
} else {
snd_pcm_uframes_t dma_csize = runtime->dma_bytes / channels;
for (c = 0; c < channels; ++c, ++bufs) {
char *hwbuf;
char __user *buf;
if (*bufs == NULL)
continue;
hwbuf = runtime->dma_area + (c * dma_csize) + samples_to_bytes(runtime, hwoff);
buf = *bufs + samples_to_bytes(runtime, off);
if (copy_to_user(buf, hwbuf, samples_to_bytes(runtime, frames)))
return -EFAULT;
}
}
return 0;
}
snd_pcm_sframes_t snd_pcm_lib_readv(struct snd_pcm_substream *substream,
void __user **bufs,
snd_pcm_uframes_t frames)
{
struct snd_pcm_runtime *runtime;
int nonblock;
int err;
err = pcm_sanity_check(substream);
if (err < 0)
return err;
runtime = substream->runtime;
if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
return -EBADFD;
nonblock = !!(substream->f_flags & O_NONBLOCK);
if (runtime->access != SNDRV_PCM_ACCESS_RW_NONINTERLEAVED)
return -EINVAL;
return snd_pcm_lib_read1(substream, (unsigned long)bufs, frames, nonblock, snd_pcm_lib_readv_transfer);
}
EXPORT_SYMBOL(snd_pcm_lib_readv);
/*
* standard channel mapping helpers
*/
/* default channel maps for multi-channel playbacks, up to 8 channels */
const struct snd_pcm_chmap_elem snd_pcm_std_chmaps[] = {
{ .channels = 1,
.map = { SNDRV_CHMAP_MONO } },
{ .channels = 2,
.map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } },
{ .channels = 4,
.map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
{ .channels = 6,
.map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
SNDRV_CHMAP_RL, SNDRV_CHMAP_RR,
SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE } },
{ .channels = 8,
.map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
SNDRV_CHMAP_RL, SNDRV_CHMAP_RR,
SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE,
SNDRV_CHMAP_SL, SNDRV_CHMAP_SR } },
{ }
};
EXPORT_SYMBOL_GPL(snd_pcm_std_chmaps);
/* alternative channel maps with CLFE <-> surround swapped for 6/8 channels */
const struct snd_pcm_chmap_elem snd_pcm_alt_chmaps[] = {
{ .channels = 1,
.map = { SNDRV_CHMAP_MONO } },
{ .channels = 2,
.map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } },
{ .channels = 4,
.map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
{ .channels = 6,
.map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE,
SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
{ .channels = 8,
.map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE,
SNDRV_CHMAP_RL, SNDRV_CHMAP_RR,
SNDRV_CHMAP_SL, SNDRV_CHMAP_SR } },
{ }
};
EXPORT_SYMBOL_GPL(snd_pcm_alt_chmaps);
static bool valid_chmap_channels(const struct snd_pcm_chmap *info, int ch)
{
if (ch > info->max_channels)
return false;
return !info->channel_mask || (info->channel_mask & (1U << ch));
}
static int pcm_chmap_ctl_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
uinfo->count = info->max_channels;
uinfo->value.integer.min = 0;
uinfo->value.integer.max = SNDRV_CHMAP_LAST;
return 0;
}
/* get callback for the channel map ctl element
 * stores the channel positions of the first map matching
 * the current channel count
 */
static int pcm_chmap_ctl_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
unsigned int idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
struct snd_pcm_substream *substream;
const struct snd_pcm_chmap_elem *map;
if (snd_BUG_ON(!info->chmap))
return -EINVAL;
substream = snd_pcm_chmap_substream(info, idx);
if (!substream)
return -ENODEV;
memset(ucontrol->value.integer.value, 0,
sizeof(ucontrol->value.integer.value));
if (!substream->runtime)
return 0; /* no channels set */
for (map = info->chmap; map->channels; map++) {
int i;
if (map->channels == substream->runtime->channels &&
valid_chmap_channels(info, map->channels)) {
for (i = 0; i < map->channels; i++)
ucontrol->value.integer.value[i] = map->map[i];
return 0;
}
}
return -EINVAL;
}
/* tlv callback for the channel map ctl element
 * expands the pre-defined channel maps into TLV form
 */
static int pcm_chmap_ctl_tlv(struct snd_kcontrol *kcontrol, int op_flag,
unsigned int size, unsigned int __user *tlv)
{
struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
const struct snd_pcm_chmap_elem *map;
unsigned int __user *dst;
int c, count = 0;
if (snd_BUG_ON(!info->chmap))
return -EINVAL;
if (size < 8)
return -ENOMEM;
if (put_user(SNDRV_CTL_TLVT_CONTAINER, tlv))
return -EFAULT;
size -= 8;
dst = tlv + 2;
for (map = info->chmap; map->channels; map++) {
int chs_bytes = map->channels * 4;
if (!valid_chmap_channels(info, map->channels))
continue;
if (size < 8)
return -ENOMEM;
if (put_user(SNDRV_CTL_TLVT_CHMAP_FIXED, dst) ||
put_user(chs_bytes, dst + 1))
return -EFAULT;
dst += 2;
size -= 8;
count += 8;
if (size < chs_bytes)
return -ENOMEM;
size -= chs_bytes;
count += chs_bytes;
for (c = 0; c < map->channels; c++) {
if (put_user(map->map[c], dst))
return -EFAULT;
dst++;
}
}
if (put_user(count, tlv + 1))
return -EFAULT;
return 0;
}
static void pcm_chmap_ctl_private_free(struct snd_kcontrol *kcontrol)
{
struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
info->pcm->streams[info->stream].chmap_kctl = NULL;
kfree(info);
}
/**
* snd_pcm_add_chmap_ctls - create channel-mapping control elements
* @pcm: the assigned PCM instance
* @stream: stream direction
* @chmap: channel map elements (for query)
* @max_channels: the max number of channels for the stream
* @private_value: the value passed to each kcontrol's private_value field
* @info_ret: store struct snd_pcm_chmap instance if non-NULL
*
* Create channel-mapping control elements assigned to the given PCM stream(s).
* Return: Zero if successful, or a negative error value.
*/
int snd_pcm_add_chmap_ctls(struct snd_pcm *pcm, int stream,
const struct snd_pcm_chmap_elem *chmap,
int max_channels,
unsigned long private_value,
struct snd_pcm_chmap **info_ret)
{
struct snd_pcm_chmap *info;
struct snd_kcontrol_new knew = {
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
.access = SNDRV_CTL_ELEM_ACCESS_READ |
SNDRV_CTL_ELEM_ACCESS_TLV_READ |
SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK,
.info = pcm_chmap_ctl_info,
.get = pcm_chmap_ctl_get,
.tlv.c = pcm_chmap_ctl_tlv,
};
int err;
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
info->pcm = pcm;
info->stream = stream;
info->chmap = chmap;
info->max_channels = max_channels;
if (stream == SNDRV_PCM_STREAM_PLAYBACK)
knew.name = "Playback Channel Map";
else
knew.name = "Capture Channel Map";
knew.device = pcm->device;
knew.count = pcm->streams[stream].substream_count;
knew.private_value = private_value;
info->kctl = snd_ctl_new1(&knew, info);
if (!info->kctl) {
kfree(info);
return -ENOMEM;
}
info->kctl->private_free = pcm_chmap_ctl_private_free;
err = snd_ctl_add(pcm->card, info->kctl);
if (err < 0)
return err;
pcm->streams[stream].chmap_kctl = info->kctl;
if (info_ret)
*info_ret = info;
return 0;
}
EXPORT_SYMBOL_GPL(snd_pcm_add_chmap_ctls);
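/*
 * Usage sketch (assumption): registering the standard channel maps above
 * for a playback stream with up to 8 channels.  foo_add_chmaps() is a
 * hypothetical helper and pcm an already-created PCM instance.
 */
#if 0
static int foo_add_chmaps(struct snd_pcm *pcm)
{
	return snd_pcm_add_chmap_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
				      snd_pcm_std_chmaps, 8, 0, NULL);
}
#endif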
| ./CrossVul/dataset_final_sorted/CWE-416/c/bad_5503_0 |
crossvul-cpp_data_good_4797_0 | /*
* bsg.c - block layer implementation of the sg v4 interface
*
* Copyright (C) 2004 Jens Axboe <axboe@suse.de> SUSE Labs
* Copyright (C) 2004 Peter M. Jones <pjones@redhat.com>
*
* This file is subject to the terms and conditions of the GNU General Public
* License version 2. See the file "COPYING" in the main directory of this
* archive for more details.
*
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/jiffies.h>
#include <linux/percpu.h>
#include <linux/uio.h>
#include <linux/idr.h>
#include <linux/bsg.h>
#include <linux/slab.h>
#include <scsi/scsi.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/sg.h>
#define BSG_DESCRIPTION "Block layer SCSI generic (bsg) driver"
#define BSG_VERSION "0.4"
struct bsg_device {
struct request_queue *queue;
spinlock_t lock;
struct list_head busy_list;
struct list_head done_list;
struct hlist_node dev_list;
atomic_t ref_count;
int queued_cmds;
int done_cmds;
wait_queue_head_t wq_done;
wait_queue_head_t wq_free;
char name[20];
int max_queue;
unsigned long flags;
};
enum {
BSG_F_BLOCK = 1,
};
#define BSG_DEFAULT_CMDS 64
#define BSG_MAX_DEVS 32768
#undef BSG_DEBUG
#ifdef BSG_DEBUG
#define dprintk(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ##args)
#else
#define dprintk(fmt, args...)
#endif
static DEFINE_MUTEX(bsg_mutex);
static DEFINE_IDR(bsg_minor_idr);
#define BSG_LIST_ARRAY_SIZE 8
static struct hlist_head bsg_device_list[BSG_LIST_ARRAY_SIZE];
static struct class *bsg_class;
static int bsg_major;
static struct kmem_cache *bsg_cmd_cachep;
/*
* our internal command type
*/
struct bsg_command {
struct bsg_device *bd;
struct list_head list;
struct request *rq;
struct bio *bio;
struct bio *bidi_bio;
int err;
struct sg_io_v4 hdr;
char sense[SCSI_SENSE_BUFFERSIZE];
};
static void bsg_free_command(struct bsg_command *bc)
{
struct bsg_device *bd = bc->bd;
unsigned long flags;
kmem_cache_free(bsg_cmd_cachep, bc);
spin_lock_irqsave(&bd->lock, flags);
bd->queued_cmds--;
spin_unlock_irqrestore(&bd->lock, flags);
wake_up(&bd->wq_free);
}
static struct bsg_command *bsg_alloc_command(struct bsg_device *bd)
{
struct bsg_command *bc = ERR_PTR(-EINVAL);
spin_lock_irq(&bd->lock);
if (bd->queued_cmds >= bd->max_queue)
goto out;
bd->queued_cmds++;
spin_unlock_irq(&bd->lock);
bc = kmem_cache_zalloc(bsg_cmd_cachep, GFP_KERNEL);
if (unlikely(!bc)) {
spin_lock_irq(&bd->lock);
bd->queued_cmds--;
bc = ERR_PTR(-ENOMEM);
goto out;
}
bc->bd = bd;
INIT_LIST_HEAD(&bc->list);
dprintk("%s: returning free cmd %p\n", bd->name, bc);
return bc;
out:
spin_unlock_irq(&bd->lock);
return bc;
}
static inline struct hlist_head *bsg_dev_idx_hash(int index)
{
return &bsg_device_list[index & (BSG_LIST_ARRAY_SIZE - 1)];
}
static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
struct sg_io_v4 *hdr, struct bsg_device *bd,
fmode_t has_write_perm)
{
if (hdr->request_len > BLK_MAX_CDB) {
rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
if (!rq->cmd)
return -ENOMEM;
}
if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
hdr->request_len))
return -EFAULT;
if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
if (blk_verify_command(rq->cmd, has_write_perm))
return -EPERM;
} else if (!capable(CAP_SYS_RAWIO))
return -EPERM;
/*
* fill in request structure
*/
rq->cmd_len = hdr->request_len;
rq->timeout = msecs_to_jiffies(hdr->timeout);
if (!rq->timeout)
rq->timeout = q->sg_timeout;
if (!rq->timeout)
rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
if (rq->timeout < BLK_MIN_SG_TIMEOUT)
rq->timeout = BLK_MIN_SG_TIMEOUT;
return 0;
}
/*
* Check if sg_io_v4 from user is allowed and valid
*/
static int
bsg_validate_sgv4_hdr(struct sg_io_v4 *hdr, int *rw)
{
int ret = 0;
if (hdr->guard != 'Q')
return -EINVAL;
switch (hdr->protocol) {
case BSG_PROTOCOL_SCSI:
switch (hdr->subprotocol) {
case BSG_SUB_PROTOCOL_SCSI_CMD:
case BSG_SUB_PROTOCOL_SCSI_TRANSPORT:
break;
default:
ret = -EINVAL;
}
break;
default:
ret = -EINVAL;
}
*rw = hdr->dout_xfer_len ? WRITE : READ;
return ret;
}
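/*
 * Userspace usage sketch (assumption, not part of this driver): a header
 * that passes the validation above, issuing a SCSI INQUIRY synchronously
 * through the SG_IO ioctl on a /dev/bsg node.  The device path argument is
 * hypothetical and error handling is minimal.
 */
#if 0
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>
#include <linux/bsg.h>

static int bsg_inquiry(const char *path)
{
	unsigned char cdb[6] = { 0x12, 0, 0, 0, 96, 0 }; /* INQUIRY, 96 bytes */
	unsigned char buf[96], sense[32];
	struct sg_io_v4 hdr;
	int ret, fd = open(path, O_RDWR);

	if (fd < 0)
		return -1;
	memset(&hdr, 0, sizeof(hdr));
	hdr.guard = 'Q';			/* checked above */
	hdr.protocol = BSG_PROTOCOL_SCSI;
	hdr.subprotocol = BSG_SUB_PROTOCOL_SCSI_CMD;
	hdr.request_len = sizeof(cdb);
	hdr.request = (unsigned long)cdb;
	hdr.din_xfer_len = sizeof(buf);		/* device-to-host transfer */
	hdr.din_xferp = (unsigned long)buf;
	hdr.max_response_len = sizeof(sense);
	hdr.response = (unsigned long)sense;
	hdr.timeout = 30000;			/* milliseconds */
	ret = ioctl(fd, SG_IO, &hdr);
	close(fd);
	return ret;
}
#endif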
/*
* map sg_io_v4 to a request.
*/
static struct request *
bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
u8 *sense)
{
struct request_queue *q = bd->queue;
struct request *rq, *next_rq = NULL;
int ret, rw;
unsigned int dxfer_len;
void __user *dxferp = NULL;
struct bsg_class_device *bcd = &q->bsg_dev;
/* if the LLD has been removed, bsg_unregister_queue() will
 * eventually be called and the class_dev freed, so we can no
 * longer use this request_queue. Return "no such address".
 */
if (!bcd->class_dev)
return ERR_PTR(-ENXIO);
dprintk("map hdr %llx/%u %llx/%u\n", (unsigned long long) hdr->dout_xferp,
hdr->dout_xfer_len, (unsigned long long) hdr->din_xferp,
hdr->din_xfer_len);
ret = bsg_validate_sgv4_hdr(hdr, &rw);
if (ret)
return ERR_PTR(ret);
/*
* map scatter-gather elements separately and string them to request
*/
rq = blk_get_request(q, rw, GFP_KERNEL);
if (IS_ERR(rq))
return rq;
blk_rq_set_block_pc(rq);
ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, bd, has_write_perm);
if (ret)
goto out;
if (rw == WRITE && hdr->din_xfer_len) {
if (!test_bit(QUEUE_FLAG_BIDI, &q->queue_flags)) {
ret = -EOPNOTSUPP;
goto out;
}
next_rq = blk_get_request(q, READ, GFP_KERNEL);
if (IS_ERR(next_rq)) {
ret = PTR_ERR(next_rq);
next_rq = NULL;
goto out;
}
rq->next_rq = next_rq;
next_rq->cmd_type = rq->cmd_type;
dxferp = (void __user *)(unsigned long)hdr->din_xferp;
ret = blk_rq_map_user(q, next_rq, NULL, dxferp,
hdr->din_xfer_len, GFP_KERNEL);
if (ret)
goto out;
}
if (hdr->dout_xfer_len) {
dxfer_len = hdr->dout_xfer_len;
dxferp = (void __user *)(unsigned long)hdr->dout_xferp;
} else if (hdr->din_xfer_len) {
dxfer_len = hdr->din_xfer_len;
dxferp = (void __user *)(unsigned long)hdr->din_xferp;
} else
dxfer_len = 0;
if (dxfer_len) {
ret = blk_rq_map_user(q, rq, NULL, dxferp, dxfer_len,
GFP_KERNEL);
if (ret)
goto out;
}
rq->sense = sense;
rq->sense_len = 0;
return rq;
out:
if (rq->cmd != rq->__cmd)
kfree(rq->cmd);
blk_put_request(rq);
if (next_rq) {
blk_rq_unmap_user(next_rq->bio);
blk_put_request(next_rq);
}
return ERR_PTR(ret);
}
/*
* async completion call-back from the block layer, when scsi/ide/whatever
* calls end_that_request_last() on a request
*/
static void bsg_rq_end_io(struct request *rq, int uptodate)
{
struct bsg_command *bc = rq->end_io_data;
struct bsg_device *bd = bc->bd;
unsigned long flags;
dprintk("%s: finished rq %p bc %p, bio %p stat %d\n",
bd->name, rq, bc, bc->bio, uptodate);
bc->hdr.duration = jiffies_to_msecs(jiffies - bc->hdr.duration);
spin_lock_irqsave(&bd->lock, flags);
list_move_tail(&bc->list, &bd->done_list);
bd->done_cmds++;
spin_unlock_irqrestore(&bd->lock, flags);
wake_up(&bd->wq_done);
}
/*
* do final setup of a 'bc' and submit the matching 'rq' to the block
* layer for io
*/
static void bsg_add_command(struct bsg_device *bd, struct request_queue *q,
struct bsg_command *bc, struct request *rq)
{
int at_head = (0 == (bc->hdr.flags & BSG_FLAG_Q_AT_TAIL));
/*
* add bc command to busy queue and submit rq for io
*/
bc->rq = rq;
bc->bio = rq->bio;
if (rq->next_rq)
bc->bidi_bio = rq->next_rq->bio;
bc->hdr.duration = jiffies;
spin_lock_irq(&bd->lock);
list_add_tail(&bc->list, &bd->busy_list);
spin_unlock_irq(&bd->lock);
dprintk("%s: queueing rq %p, bc %p\n", bd->name, rq, bc);
rq->end_io_data = bc;
blk_execute_rq_nowait(q, NULL, rq, at_head, bsg_rq_end_io);
}
static struct bsg_command *bsg_next_done_cmd(struct bsg_device *bd)
{
struct bsg_command *bc = NULL;
spin_lock_irq(&bd->lock);
if (bd->done_cmds) {
bc = list_first_entry(&bd->done_list, struct bsg_command, list);
list_del(&bc->list);
bd->done_cmds--;
}
spin_unlock_irq(&bd->lock);
return bc;
}
/*
* Get a finished command from the done list
*/
static struct bsg_command *bsg_get_done_cmd(struct bsg_device *bd)
{
struct bsg_command *bc;
int ret;
do {
bc = bsg_next_done_cmd(bd);
if (bc)
break;
if (!test_bit(BSG_F_BLOCK, &bd->flags)) {
bc = ERR_PTR(-EAGAIN);
break;
}
ret = wait_event_interruptible(bd->wq_done, bd->done_cmds);
if (ret) {
bc = ERR_PTR(-ERESTARTSYS);
break;
}
} while (1);
dprintk("%s: returning done %p\n", bd->name, bc);
return bc;
}
static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
struct bio *bio, struct bio *bidi_bio)
{
int ret = 0;
dprintk("rq %p bio %p 0x%x\n", rq, bio, rq->errors);
/*
* fill in all the output members
*/
hdr->device_status = rq->errors & 0xff;
hdr->transport_status = host_byte(rq->errors);
hdr->driver_status = driver_byte(rq->errors);
hdr->info = 0;
if (hdr->device_status || hdr->transport_status || hdr->driver_status)
hdr->info |= SG_INFO_CHECK;
hdr->response_len = 0;
if (rq->sense_len && hdr->response) {
int len = min_t(unsigned int, hdr->max_response_len,
rq->sense_len);
ret = copy_to_user((void __user *)(unsigned long)hdr->response,
rq->sense, len);
if (!ret)
hdr->response_len = len;
else
ret = -EFAULT;
}
if (rq->next_rq) {
hdr->dout_resid = rq->resid_len;
hdr->din_resid = rq->next_rq->resid_len;
blk_rq_unmap_user(bidi_bio);
blk_put_request(rq->next_rq);
} else if (rq_data_dir(rq) == READ)
hdr->din_resid = rq->resid_len;
else
hdr->dout_resid = rq->resid_len;
/*
 * If the request generated a negative error number, return it
 * (provided we aren't already returning an error); if it's
 * just a protocol response (i.e. non-negative), that gets
 * processed above.
 */
if (!ret && rq->errors < 0)
ret = rq->errors;
blk_rq_unmap_user(bio);
if (rq->cmd != rq->__cmd)
kfree(rq->cmd);
blk_put_request(rq);
return ret;
}
static bool bsg_complete(struct bsg_device *bd)
{
bool ret = false;
bool spin;
do {
spin_lock_irq(&bd->lock);
BUG_ON(bd->done_cmds > bd->queued_cmds);
/*
* All commands consumed.
*/
if (bd->done_cmds == bd->queued_cmds)
ret = true;
spin = !test_bit(BSG_F_BLOCK, &bd->flags);
spin_unlock_irq(&bd->lock);
} while (!ret && spin);
return ret;
}
static int bsg_complete_all_commands(struct bsg_device *bd)
{
struct bsg_command *bc;
int ret, tret;
dprintk("%s: entered\n", bd->name);
/*
* wait for all commands to complete
*/
io_wait_event(bd->wq_done, bsg_complete(bd));
/*
* discard done commands
*/
ret = 0;
do {
spin_lock_irq(&bd->lock);
if (!bd->queued_cmds) {
spin_unlock_irq(&bd->lock);
break;
}
spin_unlock_irq(&bd->lock);
bc = bsg_get_done_cmd(bd);
if (IS_ERR(bc))
break;
tret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
bc->bidi_bio);
if (!ret)
ret = tret;
bsg_free_command(bc);
} while (1);
return ret;
}
static int
__bsg_read(char __user *buf, size_t count, struct bsg_device *bd,
const struct iovec *iov, ssize_t *bytes_read)
{
struct bsg_command *bc;
int nr_commands, ret;
if (count % sizeof(struct sg_io_v4))
return -EINVAL;
ret = 0;
nr_commands = count / sizeof(struct sg_io_v4);
while (nr_commands) {
bc = bsg_get_done_cmd(bd);
if (IS_ERR(bc)) {
ret = PTR_ERR(bc);
break;
}
/*
 * this is the only case where we need to copy data back to
 * userspace after completing the request, so do it here; the
 * async completion callback cannot do that for us
 */
ret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
bc->bidi_bio);
if (copy_to_user(buf, &bc->hdr, sizeof(bc->hdr)))
ret = -EFAULT;
bsg_free_command(bc);
if (ret)
break;
buf += sizeof(struct sg_io_v4);
*bytes_read += sizeof(struct sg_io_v4);
nr_commands--;
}
return ret;
}
static inline void bsg_set_block(struct bsg_device *bd, struct file *file)
{
if (file->f_flags & O_NONBLOCK)
clear_bit(BSG_F_BLOCK, &bd->flags);
else
set_bit(BSG_F_BLOCK, &bd->flags);
}
/*
* Check if the error is a "real" error that we should return.
*/
static inline int err_block_err(int ret)
{
if (ret && ret != -ENOSPC && ret != -ENODATA && ret != -EAGAIN)
return 1;
return 0;
}
static ssize_t
bsg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
struct bsg_device *bd = file->private_data;
int ret;
ssize_t bytes_read;
dprintk("%s: read %Zd bytes\n", bd->name, count);
bsg_set_block(bd, file);
bytes_read = 0;
ret = __bsg_read(buf, count, bd, NULL, &bytes_read);
*ppos = bytes_read;
if (!bytes_read || err_block_err(ret))
bytes_read = ret;
return bytes_read;
}
static int __bsg_write(struct bsg_device *bd, const char __user *buf,
size_t count, ssize_t *bytes_written,
fmode_t has_write_perm)
{
struct bsg_command *bc;
struct request *rq;
int ret, nr_commands;
if (count % sizeof(struct sg_io_v4))
return -EINVAL;
nr_commands = count / sizeof(struct sg_io_v4);
rq = NULL;
bc = NULL;
ret = 0;
while (nr_commands) {
struct request_queue *q = bd->queue;
bc = bsg_alloc_command(bd);
if (IS_ERR(bc)) {
ret = PTR_ERR(bc);
bc = NULL;
break;
}
if (copy_from_user(&bc->hdr, buf, sizeof(bc->hdr))) {
ret = -EFAULT;
break;
}
/*
* get a request, fill in the blanks, and add to request queue
*/
rq = bsg_map_hdr(bd, &bc->hdr, has_write_perm, bc->sense);
if (IS_ERR(rq)) {
ret = PTR_ERR(rq);
rq = NULL;
break;
}
bsg_add_command(bd, q, bc, rq);
bc = NULL;
rq = NULL;
nr_commands--;
buf += sizeof(struct sg_io_v4);
*bytes_written += sizeof(struct sg_io_v4);
}
if (bc)
bsg_free_command(bc);
return ret;
}
static ssize_t
bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
struct bsg_device *bd = file->private_data;
ssize_t bytes_written;
int ret;
dprintk("%s: write %Zd bytes\n", bd->name, count);
if (unlikely(segment_eq(get_fs(), KERNEL_DS)))
return -EINVAL;
bsg_set_block(bd, file);
bytes_written = 0;
ret = __bsg_write(bd, buf, count, &bytes_written,
file->f_mode & FMODE_WRITE);
*ppos = bytes_written;
/*
* return bytes written on non-fatal errors
*/
if (!bytes_written || err_block_err(ret))
bytes_written = ret;
dprintk("%s: returning %Zd\n", bd->name, bytes_written);
return bytes_written;
}
static struct bsg_device *bsg_alloc_device(void)
{
struct bsg_device *bd;
bd = kzalloc(sizeof(struct bsg_device), GFP_KERNEL);
if (unlikely(!bd))
return NULL;
spin_lock_init(&bd->lock);
bd->max_queue = BSG_DEFAULT_CMDS;
INIT_LIST_HEAD(&bd->busy_list);
INIT_LIST_HEAD(&bd->done_list);
INIT_HLIST_NODE(&bd->dev_list);
init_waitqueue_head(&bd->wq_free);
init_waitqueue_head(&bd->wq_done);
return bd;
}
static void bsg_kref_release_function(struct kref *kref)
{
struct bsg_class_device *bcd =
container_of(kref, struct bsg_class_device, ref);
struct device *parent = bcd->parent;
if (bcd->release)
bcd->release(bcd->parent);
put_device(parent);
}
static int bsg_put_device(struct bsg_device *bd)
{
int ret = 0, do_free;
struct request_queue *q = bd->queue;
mutex_lock(&bsg_mutex);
do_free = atomic_dec_and_test(&bd->ref_count);
if (!do_free) {
mutex_unlock(&bsg_mutex);
goto out;
}
hlist_del(&bd->dev_list);
mutex_unlock(&bsg_mutex);
dprintk("%s: tearing down\n", bd->name);
/*
* close can always block
*/
set_bit(BSG_F_BLOCK, &bd->flags);
/*
 * error detection is best-effort here again; it's the responsibility
 * of the app to properly reap commands before close() if it wants
 * fool-proof error detection
 */
ret = bsg_complete_all_commands(bd);
kfree(bd);
out:
kref_put(&q->bsg_dev.ref, bsg_kref_release_function);
if (do_free)
blk_put_queue(q);
return ret;
}
static struct bsg_device *bsg_add_device(struct inode *inode,
struct request_queue *rq,
struct file *file)
{
struct bsg_device *bd;
#ifdef BSG_DEBUG
unsigned char buf[32];
#endif
if (!blk_get_queue(rq))
return ERR_PTR(-ENXIO);
bd = bsg_alloc_device();
if (!bd) {
blk_put_queue(rq);
return ERR_PTR(-ENOMEM);
}
bd->queue = rq;
bsg_set_block(bd, file);
atomic_set(&bd->ref_count, 1);
mutex_lock(&bsg_mutex);
hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(iminor(inode)));
strncpy(bd->name, dev_name(rq->bsg_dev.class_dev), sizeof(bd->name) - 1);
dprintk("bound to <%s>, max queue %d\n",
format_dev_t(buf, inode->i_rdev), bd->max_queue);
mutex_unlock(&bsg_mutex);
return bd;
}
static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q)
{
struct bsg_device *bd;
mutex_lock(&bsg_mutex);
hlist_for_each_entry(bd, bsg_dev_idx_hash(minor), dev_list) {
if (bd->queue == q) {
atomic_inc(&bd->ref_count);
goto found;
}
}
bd = NULL;
found:
mutex_unlock(&bsg_mutex);
return bd;
}
static struct bsg_device *bsg_get_device(struct inode *inode, struct file *file)
{
struct bsg_device *bd;
struct bsg_class_device *bcd;
/*
* find the class device
*/
mutex_lock(&bsg_mutex);
bcd = idr_find(&bsg_minor_idr, iminor(inode));
if (bcd)
kref_get(&bcd->ref);
mutex_unlock(&bsg_mutex);
if (!bcd)
return ERR_PTR(-ENODEV);
bd = __bsg_get_device(iminor(inode), bcd->queue);
if (bd)
return bd;
bd = bsg_add_device(inode, bcd->queue, file);
if (IS_ERR(bd))
kref_put(&bcd->ref, bsg_kref_release_function);
return bd;
}
static int bsg_open(struct inode *inode, struct file *file)
{
struct bsg_device *bd;
bd = bsg_get_device(inode, file);
if (IS_ERR(bd))
return PTR_ERR(bd);
file->private_data = bd;
return 0;
}
static int bsg_release(struct inode *inode, struct file *file)
{
struct bsg_device *bd = file->private_data;
file->private_data = NULL;
return bsg_put_device(bd);
}
static unsigned int bsg_poll(struct file *file, poll_table *wait)
{
struct bsg_device *bd = file->private_data;
unsigned int mask = 0;
poll_wait(file, &bd->wq_done, wait);
poll_wait(file, &bd->wq_free, wait);
spin_lock_irq(&bd->lock);
if (!list_empty(&bd->done_list))
mask |= POLLIN | POLLRDNORM;
if (bd->queued_cmds < bd->max_queue)
mask |= POLLOUT;
spin_unlock_irq(&bd->lock);
return mask;
}
static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct bsg_device *bd = file->private_data;
int __user *uarg = (int __user *) arg;
int ret;
switch (cmd) {
/*
* our own ioctls
*/
case SG_GET_COMMAND_Q:
return put_user(bd->max_queue, uarg);
case SG_SET_COMMAND_Q: {
int queue;
if (get_user(queue, uarg))
return -EFAULT;
if (queue < 1)
return -EINVAL;
spin_lock_irq(&bd->lock);
bd->max_queue = queue;
spin_unlock_irq(&bd->lock);
return 0;
}
/*
* SCSI/sg ioctls
*/
case SG_GET_VERSION_NUM:
case SCSI_IOCTL_GET_IDLUN:
case SCSI_IOCTL_GET_BUS_NUMBER:
case SG_SET_TIMEOUT:
case SG_GET_TIMEOUT:
case SG_GET_RESERVED_SIZE:
case SG_SET_RESERVED_SIZE:
case SG_EMULATED_HOST:
case SCSI_IOCTL_SEND_COMMAND: {
void __user *uarg = (void __user *) arg;
return scsi_cmd_ioctl(bd->queue, NULL, file->f_mode, cmd, uarg);
}
case SG_IO: {
struct request *rq;
struct bio *bio, *bidi_bio = NULL;
struct sg_io_v4 hdr;
int at_head;
u8 sense[SCSI_SENSE_BUFFERSIZE];
if (copy_from_user(&hdr, uarg, sizeof(hdr)))
return -EFAULT;
rq = bsg_map_hdr(bd, &hdr, file->f_mode & FMODE_WRITE, sense);
if (IS_ERR(rq))
return PTR_ERR(rq);
bio = rq->bio;
if (rq->next_rq)
bidi_bio = rq->next_rq->bio;
at_head = (0 == (hdr.flags & BSG_FLAG_Q_AT_TAIL));
blk_execute_rq(bd->queue, NULL, rq, at_head);
ret = blk_complete_sgv4_hdr_rq(rq, &hdr, bio, bidi_bio);
if (copy_to_user(uarg, &hdr, sizeof(hdr)))
return -EFAULT;
return ret;
}
/*
* block device ioctls
*/
default:
#if 0
return ioctl_by_bdev(bd->bdev, cmd, arg);
#else
return -ENOTTY;
#endif
}
}
static const struct file_operations bsg_fops = {
.read = bsg_read,
.write = bsg_write,
.poll = bsg_poll,
.open = bsg_open,
.release = bsg_release,
.unlocked_ioctl = bsg_ioctl,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
void bsg_unregister_queue(struct request_queue *q)
{
struct bsg_class_device *bcd = &q->bsg_dev;
if (!bcd->class_dev)
return;
mutex_lock(&bsg_mutex);
idr_remove(&bsg_minor_idr, bcd->minor);
if (q->kobj.sd)
sysfs_remove_link(&q->kobj, "bsg");
device_unregister(bcd->class_dev);
bcd->class_dev = NULL;
kref_put(&bcd->ref, bsg_kref_release_function);
mutex_unlock(&bsg_mutex);
}
EXPORT_SYMBOL_GPL(bsg_unregister_queue);
int bsg_register_queue(struct request_queue *q, struct device *parent,
const char *name, void (*release)(struct device *))
{
struct bsg_class_device *bcd;
dev_t dev;
int ret;
struct device *class_dev = NULL;
const char *devname;
if (name)
devname = name;
else
devname = dev_name(parent);
/*
* we need a proper transport to send commands, not a stacked device
*/
if (!queue_is_rq_based(q))
return 0;
bcd = &q->bsg_dev;
memset(bcd, 0, sizeof(*bcd));
mutex_lock(&bsg_mutex);
ret = idr_alloc(&bsg_minor_idr, bcd, 0, BSG_MAX_DEVS, GFP_KERNEL);
if (ret < 0) {
if (ret == -ENOSPC) {
printk(KERN_ERR "bsg: too many bsg devices\n");
ret = -EINVAL;
}
goto unlock;
}
bcd->minor = ret;
bcd->queue = q;
bcd->parent = get_device(parent);
bcd->release = release;
kref_init(&bcd->ref);
dev = MKDEV(bsg_major, bcd->minor);
class_dev = device_create(bsg_class, parent, dev, NULL, "%s", devname);
if (IS_ERR(class_dev)) {
ret = PTR_ERR(class_dev);
goto put_dev;
}
bcd->class_dev = class_dev;
if (q->kobj.sd) {
ret = sysfs_create_link(&q->kobj, &bcd->class_dev->kobj, "bsg");
if (ret)
goto unregister_class_dev;
}
mutex_unlock(&bsg_mutex);
return 0;
unregister_class_dev:
device_unregister(class_dev);
put_dev:
put_device(parent);
idr_remove(&bsg_minor_idr, bcd->minor);
unlock:
mutex_unlock(&bsg_mutex);
return ret;
}
EXPORT_SYMBOL_GPL(bsg_register_queue);
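/*
 * Usage sketch (assumption): a transport attaching bsg to its request
 * queue through the interface registered above.  The foo_* names are
 * hypothetical; a real caller pairs this with bsg_unregister_queue()
 * on teardown.
 */
#if 0
static void foo_bsg_release(struct device *dev)
{
	/* hypothetical per-transport cleanup */
}

static int foo_attach_bsg(struct request_queue *q, struct device *dev)
{
	return bsg_register_queue(q, dev, dev_name(dev), foo_bsg_release);
}
#endif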
static struct cdev bsg_cdev;
static char *bsg_devnode(struct device *dev, umode_t *mode)
{
return kasprintf(GFP_KERNEL, "bsg/%s", dev_name(dev));
}
static int __init bsg_init(void)
{
int ret, i;
dev_t devid;
bsg_cmd_cachep = kmem_cache_create("bsg_cmd",
sizeof(struct bsg_command), 0, 0, NULL);
if (!bsg_cmd_cachep) {
printk(KERN_ERR "bsg: failed creating slab cache\n");
return -ENOMEM;
}
for (i = 0; i < BSG_LIST_ARRAY_SIZE; i++)
INIT_HLIST_HEAD(&bsg_device_list[i]);
bsg_class = class_create(THIS_MODULE, "bsg");
if (IS_ERR(bsg_class)) {
ret = PTR_ERR(bsg_class);
goto destroy_kmemcache;
}
bsg_class->devnode = bsg_devnode;
ret = alloc_chrdev_region(&devid, 0, BSG_MAX_DEVS, "bsg");
if (ret)
goto destroy_bsg_class;
bsg_major = MAJOR(devid);
cdev_init(&bsg_cdev, &bsg_fops);
ret = cdev_add(&bsg_cdev, MKDEV(bsg_major, 0), BSG_MAX_DEVS);
if (ret)
goto unregister_chrdev;
printk(KERN_INFO BSG_DESCRIPTION " version " BSG_VERSION
" loaded (major %d)\n", bsg_major);
return 0;
unregister_chrdev:
unregister_chrdev_region(MKDEV(bsg_major, 0), BSG_MAX_DEVS);
destroy_bsg_class:
class_destroy(bsg_class);
destroy_kmemcache:
kmem_cache_destroy(bsg_cmd_cachep);
return ret;
}
MODULE_AUTHOR("Jens Axboe");
MODULE_DESCRIPTION(BSG_DESCRIPTION);
MODULE_LICENSE("GPL");
device_initcall(bsg_init);
| ./CrossVul/dataset_final_sorted/CWE-416/c/good_4797_0 |
crossvul-cpp_data_bad_5021_6 | /*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* Support for INET6 connection oriented protocols.
*
* Authors: See the TCPv6 sources
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or(at your option) any later version.
*/
#include <linux/module.h>
#include <linux/in6.h>
#include <linux/ipv6.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <net/addrconf.h>
#include <net/inet_connection_sock.h>
#include <net/inet_ecn.h>
#include <net/inet_hashtables.h>
#include <net/ip6_route.h>
#include <net/sock.h>
#include <net/inet6_connection_sock.h>
int inet6_csk_bind_conflict(const struct sock *sk,
const struct inet_bind_bucket *tb, bool relax)
{
const struct sock *sk2;
int reuse = sk->sk_reuse;
int reuseport = sk->sk_reuseport;
kuid_t uid = sock_i_uid((struct sock *)sk);
/* We must walk the whole port owner list in this case. -DaveM */
/*
* See comment in inet_csk_bind_conflict about sock lookup
* vs net namespaces issues.
*/
sk_for_each_bound(sk2, &tb->owners) {
if (sk != sk2 &&
(!sk->sk_bound_dev_if ||
!sk2->sk_bound_dev_if ||
sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
if ((!reuse || !sk2->sk_reuse ||
sk2->sk_state == TCP_LISTEN) &&
(!reuseport || !sk2->sk_reuseport ||
(sk2->sk_state != TCP_TIME_WAIT &&
!uid_eq(uid,
sock_i_uid((struct sock *)sk2))))) {
if (ipv6_rcv_saddr_equal(sk, sk2))
break;
}
if (!relax && reuse && sk2->sk_reuse &&
sk2->sk_state != TCP_LISTEN &&
ipv6_rcv_saddr_equal(sk, sk2))
break;
}
}
return sk2 != NULL;
}
EXPORT_SYMBOL_GPL(inet6_csk_bind_conflict);
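/*
 * Userspace sketch (assumption, not part of this file): the sk_reuse and
 * sk_reuseport flags tested above correspond to SO_REUSEADDR/SO_REUSEPORT
 * set before bind(); two listeners created this way by the same UID do
 * not conflict in the walk above.
 */
#if 0
#include <string.h>
#include <unistd.h>
#include <netinet/in.h>
#include <sys/socket.h>

static int bind_reuseport_v6(int port)
{
	struct sockaddr_in6 a;
	int one = 1, fd = socket(AF_INET6, SOCK_STREAM, 0);

	if (fd < 0)
		return -1;
	setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
	memset(&a, 0, sizeof(a));
	a.sin6_family = AF_INET6;
	a.sin6_port = htons(port);
	a.sin6_addr = in6addr_any;
	if (bind(fd, (struct sockaddr *)&a, sizeof(a)) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}
#endif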
struct dst_entry *inet6_csk_route_req(const struct sock *sk,
struct flowi6 *fl6,
const struct request_sock *req,
u8 proto)
{
struct inet_request_sock *ireq = inet_rsk(req);
const struct ipv6_pinfo *np = inet6_sk(sk);
struct in6_addr *final_p, final;
struct dst_entry *dst;
memset(fl6, 0, sizeof(*fl6));
fl6->flowi6_proto = proto;
fl6->daddr = ireq->ir_v6_rmt_addr;
final_p = fl6_update_dst(fl6, np->opt, &final);
fl6->saddr = ireq->ir_v6_loc_addr;
fl6->flowi6_oif = ireq->ir_iif;
fl6->flowi6_mark = ireq->ir_mark;
fl6->fl6_dport = ireq->ir_rmt_port;
fl6->fl6_sport = htons(ireq->ir_num);
security_req_classify_flow(req, flowi6_to_flowi(fl6));
dst = ip6_dst_lookup_flow(sk, fl6, final_p);
if (IS_ERR(dst))
return NULL;
return dst;
}
EXPORT_SYMBOL(inet6_csk_route_req);
void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr)
{
struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) uaddr;
sin6->sin6_family = AF_INET6;
sin6->sin6_addr = sk->sk_v6_daddr;
sin6->sin6_port = inet_sk(sk)->inet_dport;
/* We do not store received flowlabel for TCP */
sin6->sin6_flowinfo = 0;
sin6->sin6_scope_id = ipv6_iface_scope_id(&sin6->sin6_addr,
sk->sk_bound_dev_if);
}
EXPORT_SYMBOL_GPL(inet6_csk_addr2sockaddr);
static inline
void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
const struct in6_addr *daddr,
const struct in6_addr *saddr)
{
__ip6_dst_store(sk, dst, daddr, saddr);
}
static inline
struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
{
return __sk_dst_check(sk, cookie);
}
static struct dst_entry *inet6_csk_route_socket(struct sock *sk,
struct flowi6 *fl6)
{
struct inet_sock *inet = inet_sk(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
struct in6_addr *final_p, final;
struct dst_entry *dst;
memset(fl6, 0, sizeof(*fl6));
fl6->flowi6_proto = sk->sk_protocol;
fl6->daddr = sk->sk_v6_daddr;
fl6->saddr = np->saddr;
fl6->flowlabel = np->flow_label;
IP6_ECN_flow_xmit(sk, fl6->flowlabel);
fl6->flowi6_oif = sk->sk_bound_dev_if;
fl6->flowi6_mark = sk->sk_mark;
fl6->fl6_sport = inet->inet_sport;
fl6->fl6_dport = inet->inet_dport;
security_sk_classify_flow(sk, flowi6_to_flowi(fl6));
final_p = fl6_update_dst(fl6, np->opt, &final);
dst = __inet6_csk_dst_check(sk, np->dst_cookie);
if (!dst) {
dst = ip6_dst_lookup_flow(sk, fl6, final_p);
if (!IS_ERR(dst))
__inet6_csk_dst_store(sk, dst, NULL, NULL);
}
return dst;
}
int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl_unused)
{
struct ipv6_pinfo *np = inet6_sk(sk);
struct flowi6 fl6;
struct dst_entry *dst;
int res;
dst = inet6_csk_route_socket(sk, &fl6);
if (IS_ERR(dst)) {
sk->sk_err_soft = -PTR_ERR(dst);
sk->sk_route_caps = 0;
kfree_skb(skb);
return PTR_ERR(dst);
}
rcu_read_lock();
skb_dst_set_noref(skb, dst);
/* Restore final destination back after routing done */
fl6.daddr = sk->sk_v6_daddr;
res = ip6_xmit(sk, skb, &fl6, np->opt, np->tclass);
rcu_read_unlock();
return res;
}
EXPORT_SYMBOL_GPL(inet6_csk_xmit);
struct dst_entry *inet6_csk_update_pmtu(struct sock *sk, u32 mtu)
{
struct flowi6 fl6;
struct dst_entry *dst = inet6_csk_route_socket(sk, &fl6);
if (IS_ERR(dst))
return NULL;
dst->ops->update_pmtu(dst, sk, NULL, mtu);
dst = inet6_csk_route_socket(sk, &fl6);
return IS_ERR(dst) ? NULL : dst;
}
EXPORT_SYMBOL_GPL(inet6_csk_update_pmtu);
| ./CrossVul/dataset_final_sorted/CWE-416/c/bad_5021_6 |
crossvul-cpp_data_bad_2949_0 | /* xfrm_user.c: User interface to configure xfrm engine.
*
* Copyright (C) 2002 David S. Miller (davem@redhat.com)
*
* Changes:
* Mitsuru KANDA @USAGI
* Kazunori MIYAZAWA @USAGI
* Kunihiro Ishiguro <kunihiro@ipinfusion.com>
* IPv6 support
*
*/
#include <linux/crypto.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/string.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/pfkeyv2.h>
#include <linux/ipsec.h>
#include <linux/init.h>
#include <linux/security.h>
#include <net/sock.h>
#include <net/xfrm.h>
#include <net/netlink.h>
#include <net/ah.h>
#include <linux/uaccess.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <linux/in6.h>
#endif
#include <asm/unaligned.h>
static int verify_one_alg(struct nlattr **attrs, enum xfrm_attr_type_t type)
{
struct nlattr *rt = attrs[type];
struct xfrm_algo *algp;
if (!rt)
return 0;
algp = nla_data(rt);
if (nla_len(rt) < xfrm_alg_len(algp))
return -EINVAL;
switch (type) {
case XFRMA_ALG_AUTH:
case XFRMA_ALG_CRYPT:
case XFRMA_ALG_COMP:
break;
default:
return -EINVAL;
}
algp->alg_name[sizeof(algp->alg_name) - 1] = '\0';
return 0;
}
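/*
 * For reference (assumption: quoted from include/net/xfrm.h of the same
 * kernel era), the length check above guards the variable-length key that
 * trails struct xfrm_algo:
 *
 *	static inline int xfrm_alg_len(const struct xfrm_algo *alg)
 *	{
 *		return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
 *	}
 *
 * so an attribute shorter than header-plus-key bytes is rejected before
 * the key is ever read.
 */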
static int verify_auth_trunc(struct nlattr **attrs)
{
struct nlattr *rt = attrs[XFRMA_ALG_AUTH_TRUNC];
struct xfrm_algo_auth *algp;
if (!rt)
return 0;
algp = nla_data(rt);
if (nla_len(rt) < xfrm_alg_auth_len(algp))
return -EINVAL;
algp->alg_name[sizeof(algp->alg_name) - 1] = '\0';
return 0;
}
static int verify_aead(struct nlattr **attrs)
{
struct nlattr *rt = attrs[XFRMA_ALG_AEAD];
struct xfrm_algo_aead *algp;
if (!rt)
return 0;
algp = nla_data(rt);
if (nla_len(rt) < aead_len(algp))
return -EINVAL;
algp->alg_name[sizeof(algp->alg_name) - 1] = '\0';
return 0;
}
static void verify_one_addr(struct nlattr **attrs, enum xfrm_attr_type_t type,
xfrm_address_t **addrp)
{
struct nlattr *rt = attrs[type];
if (rt && addrp)
*addrp = nla_data(rt);
}
static inline int verify_sec_ctx_len(struct nlattr **attrs)
{
struct nlattr *rt = attrs[XFRMA_SEC_CTX];
struct xfrm_user_sec_ctx *uctx;
if (!rt)
return 0;
uctx = nla_data(rt);
if (uctx->len != (sizeof(struct xfrm_user_sec_ctx) + uctx->ctx_len))
return -EINVAL;
return 0;
}
static inline int verify_replay(struct xfrm_usersa_info *p,
struct nlattr **attrs)
{
struct nlattr *rt = attrs[XFRMA_REPLAY_ESN_VAL];
struct xfrm_replay_state_esn *rs;
if (p->flags & XFRM_STATE_ESN) {
if (!rt)
return -EINVAL;
rs = nla_data(rt);
if (rs->bmp_len > XFRMA_REPLAY_ESN_MAX / sizeof(rs->bmp[0]) / 8)
return -EINVAL;
if (nla_len(rt) < xfrm_replay_state_esn_len(rs) &&
nla_len(rt) != sizeof(*rs))
return -EINVAL;
}
if (!rt)
return 0;
/* Only ESP and AH support the ESN feature. */
if ((p->id.proto != IPPROTO_ESP) && (p->id.proto != IPPROTO_AH))
return -EINVAL;
if (p->replay_window != 0)
return -EINVAL;
return 0;
}
static int verify_newsa_info(struct xfrm_usersa_info *p,
struct nlattr **attrs)
{
int err;
err = -EINVAL;
switch (p->family) {
case AF_INET:
break;
case AF_INET6:
#if IS_ENABLED(CONFIG_IPV6)
break;
#else
err = -EAFNOSUPPORT;
goto out;
#endif
default:
goto out;
}
err = -EINVAL;
switch (p->id.proto) {
case IPPROTO_AH:
if ((!attrs[XFRMA_ALG_AUTH] &&
!attrs[XFRMA_ALG_AUTH_TRUNC]) ||
attrs[XFRMA_ALG_AEAD] ||
attrs[XFRMA_ALG_CRYPT] ||
attrs[XFRMA_ALG_COMP] ||
attrs[XFRMA_TFCPAD])
goto out;
break;
case IPPROTO_ESP:
if (attrs[XFRMA_ALG_COMP])
goto out;
if (!attrs[XFRMA_ALG_AUTH] &&
!attrs[XFRMA_ALG_AUTH_TRUNC] &&
!attrs[XFRMA_ALG_CRYPT] &&
!attrs[XFRMA_ALG_AEAD])
goto out;
if ((attrs[XFRMA_ALG_AUTH] ||
attrs[XFRMA_ALG_AUTH_TRUNC] ||
attrs[XFRMA_ALG_CRYPT]) &&
attrs[XFRMA_ALG_AEAD])
goto out;
if (attrs[XFRMA_TFCPAD] &&
p->mode != XFRM_MODE_TUNNEL)
goto out;
break;
case IPPROTO_COMP:
if (!attrs[XFRMA_ALG_COMP] ||
attrs[XFRMA_ALG_AEAD] ||
attrs[XFRMA_ALG_AUTH] ||
attrs[XFRMA_ALG_AUTH_TRUNC] ||
attrs[XFRMA_ALG_CRYPT] ||
attrs[XFRMA_TFCPAD] ||
(ntohl(p->id.spi) >= 0x10000))
goto out;
break;
#if IS_ENABLED(CONFIG_IPV6)
case IPPROTO_DSTOPTS:
case IPPROTO_ROUTING:
if (attrs[XFRMA_ALG_COMP] ||
attrs[XFRMA_ALG_AUTH] ||
attrs[XFRMA_ALG_AUTH_TRUNC] ||
attrs[XFRMA_ALG_AEAD] ||
attrs[XFRMA_ALG_CRYPT] ||
attrs[XFRMA_ENCAP] ||
attrs[XFRMA_SEC_CTX] ||
attrs[XFRMA_TFCPAD] ||
!attrs[XFRMA_COADDR])
goto out;
break;
#endif
default:
goto out;
}
if ((err = verify_aead(attrs)))
goto out;
if ((err = verify_auth_trunc(attrs)))
goto out;
if ((err = verify_one_alg(attrs, XFRMA_ALG_AUTH)))
goto out;
if ((err = verify_one_alg(attrs, XFRMA_ALG_CRYPT)))
goto out;
if ((err = verify_one_alg(attrs, XFRMA_ALG_COMP)))
goto out;
if ((err = verify_sec_ctx_len(attrs)))
goto out;
if ((err = verify_replay(p, attrs)))
goto out;
err = -EINVAL;
switch (p->mode) {
case XFRM_MODE_TRANSPORT:
case XFRM_MODE_TUNNEL:
case XFRM_MODE_ROUTEOPTIMIZATION:
case XFRM_MODE_BEET:
break;
default:
goto out;
}
err = 0;
out:
return err;
}
static int attach_one_algo(struct xfrm_algo **algpp, u8 *props,
struct xfrm_algo_desc *(*get_byname)(const char *, int),
struct nlattr *rta)
{
struct xfrm_algo *p, *ualg;
struct xfrm_algo_desc *algo;
if (!rta)
return 0;
ualg = nla_data(rta);
algo = get_byname(ualg->alg_name, 1);
if (!algo)
return -ENOSYS;
*props = algo->desc.sadb_alg_id;
p = kmemdup(ualg, xfrm_alg_len(ualg), GFP_KERNEL);
if (!p)
return -ENOMEM;
strcpy(p->alg_name, algo->name);
*algpp = p;
return 0;
}
static int attach_crypt(struct xfrm_state *x, struct nlattr *rta)
{
struct xfrm_algo *p, *ualg;
struct xfrm_algo_desc *algo;
if (!rta)
return 0;
ualg = nla_data(rta);
algo = xfrm_ealg_get_byname(ualg->alg_name, 1);
if (!algo)
return -ENOSYS;
x->props.ealgo = algo->desc.sadb_alg_id;
p = kmemdup(ualg, xfrm_alg_len(ualg), GFP_KERNEL);
if (!p)
return -ENOMEM;
strcpy(p->alg_name, algo->name);
x->ealg = p;
x->geniv = algo->uinfo.encr.geniv;
return 0;
}
static int attach_auth(struct xfrm_algo_auth **algpp, u8 *props,
struct nlattr *rta)
{
struct xfrm_algo *ualg;
struct xfrm_algo_auth *p;
struct xfrm_algo_desc *algo;
if (!rta)
return 0;
ualg = nla_data(rta);
algo = xfrm_aalg_get_byname(ualg->alg_name, 1);
if (!algo)
return -ENOSYS;
*props = algo->desc.sadb_alg_id;
p = kmalloc(sizeof(*p) + (ualg->alg_key_len + 7) / 8, GFP_KERNEL);
if (!p)
return -ENOMEM;
strcpy(p->alg_name, algo->name);
p->alg_key_len = ualg->alg_key_len;
p->alg_trunc_len = algo->uinfo.auth.icv_truncbits;
memcpy(p->alg_key, ualg->alg_key, (ualg->alg_key_len + 7) / 8);
*algpp = p;
return 0;
}
static int attach_auth_trunc(struct xfrm_algo_auth **algpp, u8 *props,
struct nlattr *rta)
{
struct xfrm_algo_auth *p, *ualg;
struct xfrm_algo_desc *algo;
if (!rta)
return 0;
ualg = nla_data(rta);
algo = xfrm_aalg_get_byname(ualg->alg_name, 1);
if (!algo)
return -ENOSYS;
if (ualg->alg_trunc_len > algo->uinfo.auth.icv_fullbits)
return -EINVAL;
*props = algo->desc.sadb_alg_id;
p = kmemdup(ualg, xfrm_alg_auth_len(ualg), GFP_KERNEL);
if (!p)
return -ENOMEM;
strcpy(p->alg_name, algo->name);
if (!p->alg_trunc_len)
p->alg_trunc_len = algo->uinfo.auth.icv_truncbits;
*algpp = p;
return 0;
}
static int attach_aead(struct xfrm_state *x, struct nlattr *rta)
{
struct xfrm_algo_aead *p, *ualg;
struct xfrm_algo_desc *algo;
if (!rta)
return 0;
ualg = nla_data(rta);
algo = xfrm_aead_get_byname(ualg->alg_name, ualg->alg_icv_len, 1);
if (!algo)
return -ENOSYS;
x->props.ealgo = algo->desc.sadb_alg_id;
p = kmemdup(ualg, aead_len(ualg), GFP_KERNEL);
if (!p)
return -ENOMEM;
strcpy(p->alg_name, algo->name);
x->aead = p;
x->geniv = algo->uinfo.aead.geniv;
return 0;
}
static inline int xfrm_replay_verify_len(struct xfrm_replay_state_esn *replay_esn,
struct nlattr *rp)
{
struct xfrm_replay_state_esn *up;
int ulen;
if (!replay_esn || !rp)
return 0;
up = nla_data(rp);
ulen = xfrm_replay_state_esn_len(up);
/* Check the overall length and the internal bitmap length to avoid
* potential overflow. */
if (nla_len(rp) < ulen ||
xfrm_replay_state_esn_len(replay_esn) != ulen ||
replay_esn->bmp_len != up->bmp_len)
return -EINVAL;
if (up->replay_window > up->bmp_len * sizeof(__u32) * 8)
return -EINVAL;
return 0;
}
static int xfrm_alloc_replay_state_esn(struct xfrm_replay_state_esn **replay_esn,
struct xfrm_replay_state_esn **preplay_esn,
struct nlattr *rta)
{
struct xfrm_replay_state_esn *p, *pp, *up;
int klen, ulen;
if (!rta)
return 0;
up = nla_data(rta);
klen = xfrm_replay_state_esn_len(up);
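/*
 * Copy the full ESN state if userspace supplied all of it,
 * otherwise only the fixed-size header of an old-style,
 * bitmap-less attribute.
 */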
ulen = nla_len(rta) >= klen ? klen : sizeof(*up);
p = kzalloc(klen, GFP_KERNEL);
if (!p)
return -ENOMEM;
pp = kzalloc(klen, GFP_KERNEL);
if (!pp) {
kfree(p);
return -ENOMEM;
}
memcpy(p, up, ulen);
memcpy(pp, up, ulen);
*replay_esn = p;
*preplay_esn = pp;
return 0;
}
static inline int xfrm_user_sec_ctx_size(struct xfrm_sec_ctx *xfrm_ctx)
{
int len = 0;
if (xfrm_ctx) {
len += sizeof(struct xfrm_user_sec_ctx);
len += xfrm_ctx->ctx_len;
}
return len;
}
static void copy_from_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
{
memcpy(&x->id, &p->id, sizeof(x->id));
memcpy(&x->sel, &p->sel, sizeof(x->sel));
memcpy(&x->lft, &p->lft, sizeof(x->lft));
x->props.mode = p->mode;
x->props.replay_window = min_t(unsigned int, p->replay_window,
sizeof(x->replay.bitmap) * 8);
x->props.reqid = p->reqid;
x->props.family = p->family;
memcpy(&x->props.saddr, &p->saddr, sizeof(x->props.saddr));
x->props.flags = p->flags;
if (!x->sel.family && !(p->flags & XFRM_STATE_AF_UNSPEC))
x->sel.family = p->family;
}
/*
 * someday when pfkey also has support, this code could be made
 * shareable and moved to xfrm_state.c - JHS
 *
 */
static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs,
int update_esn)
{
struct nlattr *rp = attrs[XFRMA_REPLAY_VAL];
struct nlattr *re = update_esn ? attrs[XFRMA_REPLAY_ESN_VAL] : NULL;
struct nlattr *lt = attrs[XFRMA_LTIME_VAL];
struct nlattr *et = attrs[XFRMA_ETIMER_THRESH];
struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH];
if (re) {
struct xfrm_replay_state_esn *replay_esn;
replay_esn = nla_data(re);
memcpy(x->replay_esn, replay_esn,
xfrm_replay_state_esn_len(replay_esn));
memcpy(x->preplay_esn, replay_esn,
xfrm_replay_state_esn_len(replay_esn));
}
if (rp) {
struct xfrm_replay_state *replay;
replay = nla_data(rp);
memcpy(&x->replay, replay, sizeof(*replay));
memcpy(&x->preplay, replay, sizeof(*replay));
}
if (lt) {
struct xfrm_lifetime_cur *ltime;
ltime = nla_data(lt);
x->curlft.bytes = ltime->bytes;
x->curlft.packets = ltime->packets;
x->curlft.add_time = ltime->add_time;
x->curlft.use_time = ltime->use_time;
}
if (et)
x->replay_maxage = nla_get_u32(et);
if (rt)
x->replay_maxdiff = nla_get_u32(rt);
}
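/* Build a complete xfrm_state from a netlink request: copy the base
 * SA info, attach AEAD/auth/crypt/comp algorithms and the optional
 * encap, coaddr, mark and replay state, then initialize the state.
 * On any failure the half-built state is marked dead and released,
 * and the error is reported through *errp. */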
static struct xfrm_state *xfrm_state_construct(struct net *net,
struct xfrm_usersa_info *p,
struct nlattr **attrs,
int *errp)
{
struct xfrm_state *x = xfrm_state_alloc(net);
int err = -ENOMEM;
if (!x)
goto error_no_put;
copy_from_user_state(x, p);
if (attrs[XFRMA_SA_EXTRA_FLAGS])
x->props.extra_flags = nla_get_u32(attrs[XFRMA_SA_EXTRA_FLAGS]);
if ((err = attach_aead(x, attrs[XFRMA_ALG_AEAD])))
goto error;
if ((err = attach_auth_trunc(&x->aalg, &x->props.aalgo,
attrs[XFRMA_ALG_AUTH_TRUNC])))
goto error;
if (!x->props.aalgo) {
if ((err = attach_auth(&x->aalg, &x->props.aalgo,
attrs[XFRMA_ALG_AUTH])))
goto error;
}
if ((err = attach_crypt(x, attrs[XFRMA_ALG_CRYPT])))
goto error;
if ((err = attach_one_algo(&x->calg, &x->props.calgo,
xfrm_calg_get_byname,
attrs[XFRMA_ALG_COMP])))
goto error;
if (attrs[XFRMA_ENCAP]) {
x->encap = kmemdup(nla_data(attrs[XFRMA_ENCAP]),
sizeof(*x->encap), GFP_KERNEL);
if (x->encap == NULL) {
err = -ENOMEM;
goto error;
}
}
if (attrs[XFRMA_TFCPAD])
x->tfcpad = nla_get_u32(attrs[XFRMA_TFCPAD]);
if (attrs[XFRMA_COADDR]) {
x->coaddr = kmemdup(nla_data(attrs[XFRMA_COADDR]),
sizeof(*x->coaddr), GFP_KERNEL);
if (x->coaddr == NULL) {
err = -ENOMEM;
goto error;
}
}
xfrm_mark_get(attrs, &x->mark);
if (attrs[XFRMA_OUTPUT_MARK])
x->props.output_mark = nla_get_u32(attrs[XFRMA_OUTPUT_MARK]);
err = __xfrm_init_state(x, false, attrs[XFRMA_OFFLOAD_DEV]);
if (err)
goto error;
if (attrs[XFRMA_SEC_CTX]) {
err = security_xfrm_state_alloc(x,
nla_data(attrs[XFRMA_SEC_CTX]));
if (err)
goto error;
}
if (attrs[XFRMA_OFFLOAD_DEV]) {
err = xfrm_dev_state_add(net, x,
nla_data(attrs[XFRMA_OFFLOAD_DEV]));
if (err)
goto error;
}
if ((err = xfrm_alloc_replay_state_esn(&x->replay_esn, &x->preplay_esn,
attrs[XFRMA_REPLAY_ESN_VAL])))
goto error;
x->km.seq = p->seq;
x->replay_maxdiff = net->xfrm.sysctl_aevent_rseqth;
/* sysctl_xfrm_aevent_etime is in 100ms units */
x->replay_maxage = (net->xfrm.sysctl_aevent_etime * HZ) / XFRM_AE_ETH_M;
if ((err = xfrm_init_replay(x)))
goto error;
/* override default values from above */
xfrm_update_ae_params(x, attrs, 0);
return x;
error:
x->km.state = XFRM_STATE_DEAD;
xfrm_state_put(x);
error_no_put:
*errp = err;
return NULL;
}
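/* XFRM_MSG_NEWSA / XFRM_MSG_UPDSA handler: validate the request,
 * construct the state, and either insert it as a new SA or update
 * the existing one. The add is audited either way, and listeners
 * are notified on success. */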
static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr **attrs)
{
struct net *net = sock_net(skb->sk);
struct xfrm_usersa_info *p = nlmsg_data(nlh);
struct xfrm_state *x;
int err;
struct km_event c;
err = verify_newsa_info(p, attrs);
if (err)
return err;
x = xfrm_state_construct(net, p, attrs, &err);
if (!x)
return err;
xfrm_state_hold(x);
if (nlh->nlmsg_type == XFRM_MSG_NEWSA)
err = xfrm_state_add(x);
else
err = xfrm_state_update(x);
xfrm_audit_state_add(x, err ? 0 : 1, true);
if (err < 0) {
x->km.state = XFRM_STATE_DEAD;
xfrm_dev_state_delete(x);
__xfrm_state_put(x);
goto out;
}
c.seq = nlh->nlmsg_seq;
c.portid = nlh->nlmsg_pid;
c.event = nlh->nlmsg_type;
km_state_notify(x, &c);
out:
xfrm_state_put(x);
return err;
}
static struct xfrm_state *xfrm_user_state_lookup(struct net *net,
struct xfrm_usersa_id *p,
struct nlattr **attrs,
int *errp)
{
struct xfrm_state *x = NULL;
struct xfrm_mark m;
int err;
u32 mark = xfrm_mark_get(attrs, &m);
if (xfrm_id_proto_match(p->proto, IPSEC_PROTO_ANY)) {
err = -ESRCH;
x = xfrm_state_lookup(net, mark, &p->daddr, p->spi, p->proto, p->family);
} else {
xfrm_address_t *saddr = NULL;
verify_one_addr(attrs, XFRMA_SRCADDR, &saddr);
if (!saddr) {
err = -EINVAL;
goto out;
}
err = -ESRCH;
x = xfrm_state_lookup_byaddr(net, mark,
&p->daddr, saddr,
p->proto, p->family);
}
out:
if (!x && errp)
*errp = err;
return x;
}
static int xfrm_del_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr **attrs)
{
struct net *net = sock_net(skb->sk);
struct xfrm_state *x;
int err = -ESRCH;
struct km_event c;
struct xfrm_usersa_id *p = nlmsg_data(nlh);
x = xfrm_user_state_lookup(net, p, attrs, &err);
if (x == NULL)
return err;
if ((err = security_xfrm_state_delete(x)) != 0)
goto out;
if (xfrm_state_kern(x)) {
err = -EPERM;
goto out;
}
err = xfrm_state_delete(x);
if (err < 0)
goto out;
c.seq = nlh->nlmsg_seq;
c.portid = nlh->nlmsg_pid;
c.event = nlh->nlmsg_type;
km_state_notify(x, &c);
out:
xfrm_audit_state_delete(x, err ? 0 : 1, true);
xfrm_state_put(x);
return err;
}
static void copy_to_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
{
memset(p, 0, sizeof(*p));
memcpy(&p->id, &x->id, sizeof(p->id));
memcpy(&p->sel, &x->sel, sizeof(p->sel));
memcpy(&p->lft, &x->lft, sizeof(p->lft));
memcpy(&p->curlft, &x->curlft, sizeof(p->curlft));
put_unaligned(x->stats.replay_window, &p->stats.replay_window);
put_unaligned(x->stats.replay, &p->stats.replay);
put_unaligned(x->stats.integrity_failed, &p->stats.integrity_failed);
memcpy(&p->saddr, &x->props.saddr, sizeof(p->saddr));
p->mode = x->props.mode;
p->replay_window = x->props.replay_window;
p->reqid = x->props.reqid;
p->family = x->props.family;
p->flags = x->props.flags;
p->seq = x->km.seq;
}
struct xfrm_dump_info {
struct sk_buff *in_skb;
struct sk_buff *out_skb;
u32 nlmsg_seq;
u16 nlmsg_flags;
};
static int copy_sec_ctx(struct xfrm_sec_ctx *s, struct sk_buff *skb)
{
struct xfrm_user_sec_ctx *uctx;
struct nlattr *attr;
int ctx_size = sizeof(*uctx) + s->ctx_len;
attr = nla_reserve(skb, XFRMA_SEC_CTX, ctx_size);
if (attr == NULL)
return -EMSGSIZE;
uctx = nla_data(attr);
uctx->exttype = XFRMA_SEC_CTX;
uctx->len = ctx_size;
uctx->ctx_doi = s->ctx_doi;
uctx->ctx_alg = s->ctx_alg;
uctx->ctx_len = s->ctx_len;
memcpy(uctx + 1, s->ctx_str, s->ctx_len);
return 0;
}
static int copy_user_offload(struct xfrm_state_offload *xso, struct sk_buff *skb)
{
struct xfrm_user_offload *xuo;
struct nlattr *attr;
attr = nla_reserve(skb, XFRMA_OFFLOAD_DEV, sizeof(*xuo));
if (attr == NULL)
return -EMSGSIZE;
xuo = nla_data(attr);
memset(xuo, 0, sizeof(*xuo));
xuo->ifindex = xso->dev->ifindex;
xuo->flags = xso->flags;
return 0;
}
static int copy_to_user_auth(struct xfrm_algo_auth *auth, struct sk_buff *skb)
{
struct xfrm_algo *algo;
struct nlattr *nla;
nla = nla_reserve(skb, XFRMA_ALG_AUTH,
sizeof(*algo) + (auth->alg_key_len + 7) / 8);
if (!nla)
return -EMSGSIZE;
algo = nla_data(nla);
strncpy(algo->alg_name, auth->alg_name, sizeof(algo->alg_name));
memcpy(algo->alg_key, auth->alg_key, (auth->alg_key_len + 7) / 8);
algo->alg_key_len = auth->alg_key_len;
return 0;
}
/* Don't change this without updating xfrm_sa_len! */
static int copy_to_user_state_extra(struct xfrm_state *x,
struct xfrm_usersa_info *p,
struct sk_buff *skb)
{
int ret = 0;
copy_to_user_state(x, p);
if (x->props.extra_flags) {
ret = nla_put_u32(skb, XFRMA_SA_EXTRA_FLAGS,
x->props.extra_flags);
if (ret)
goto out;
}
if (x->coaddr) {
ret = nla_put(skb, XFRMA_COADDR, sizeof(*x->coaddr), x->coaddr);
if (ret)
goto out;
}
if (x->lastused) {
ret = nla_put_u64_64bit(skb, XFRMA_LASTUSED, x->lastused,
XFRMA_PAD);
if (ret)
goto out;
}
if (x->aead) {
ret = nla_put(skb, XFRMA_ALG_AEAD, aead_len(x->aead), x->aead);
if (ret)
goto out;
}
if (x->aalg) {
ret = copy_to_user_auth(x->aalg, skb);
if (!ret)
ret = nla_put(skb, XFRMA_ALG_AUTH_TRUNC,
xfrm_alg_auth_len(x->aalg), x->aalg);
if (ret)
goto out;
}
if (x->ealg) {
ret = nla_put(skb, XFRMA_ALG_CRYPT, xfrm_alg_len(x->ealg), x->ealg);
if (ret)
goto out;
}
if (x->calg) {
ret = nla_put(skb, XFRMA_ALG_COMP, sizeof(*(x->calg)), x->calg);
if (ret)
goto out;
}
if (x->encap) {
ret = nla_put(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap);
if (ret)
goto out;
}
if (x->tfcpad) {
ret = nla_put_u32(skb, XFRMA_TFCPAD, x->tfcpad);
if (ret)
goto out;
}
ret = xfrm_mark_put(skb, &x->mark);
if (ret)
goto out;
if (x->replay_esn)
ret = nla_put(skb, XFRMA_REPLAY_ESN_VAL,
xfrm_replay_state_esn_len(x->replay_esn),
x->replay_esn);
else
ret = nla_put(skb, XFRMA_REPLAY_VAL, sizeof(x->replay),
&x->replay);
if (ret)
goto out;
if (x->xso.dev)
ret = copy_user_offload(&x->xso, skb);
if (ret)
goto out;
if (x->props.output_mark) {
ret = nla_put_u32(skb, XFRMA_OUTPUT_MARK, x->props.output_mark);
if (ret)
goto out;
}
if (x->security)
ret = copy_sec_ctx(x->security, skb);
out:
return ret;
}
static int dump_one_state(struct xfrm_state *x, int count, void *ptr)
{
struct xfrm_dump_info *sp = ptr;
struct sk_buff *in_skb = sp->in_skb;
struct sk_buff *skb = sp->out_skb;
struct xfrm_usersa_info *p;
struct nlmsghdr *nlh;
int err;
nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, sp->nlmsg_seq,
XFRM_MSG_NEWSA, sizeof(*p), sp->nlmsg_flags);
if (nlh == NULL)
return -EMSGSIZE;
p = nlmsg_data(nlh);
err = copy_to_user_state_extra(x, p, skb);
if (err) {
nlmsg_cancel(skb, nlh);
return err;
}
nlmsg_end(skb, nlh);
return 0;
}
static int xfrm_dump_sa_done(struct netlink_callback *cb)
{
struct xfrm_state_walk *walk = (struct xfrm_state_walk *) &cb->args[1];
struct sock *sk = cb->skb->sk;
struct net *net = sock_net(sk);
if (cb->args[0])
xfrm_state_walk_done(walk, net);
return 0;
}
static const struct nla_policy xfrma_policy[XFRMA_MAX+1];
static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb)
{
struct net *net = sock_net(skb->sk);
struct xfrm_state_walk *walk = (struct xfrm_state_walk *) &cb->args[1];
struct xfrm_dump_info info;
BUILD_BUG_ON(sizeof(struct xfrm_state_walk) >
sizeof(cb->args) - sizeof(cb->args[0]));
info.in_skb = cb->skb;
info.out_skb = skb;
info.nlmsg_seq = cb->nlh->nlmsg_seq;
info.nlmsg_flags = NLM_F_MULTI;
if (!cb->args[0]) {
struct nlattr *attrs[XFRMA_MAX+1];
struct xfrm_address_filter *filter = NULL;
u8 proto = 0;
int err;
err = nlmsg_parse(cb->nlh, 0, attrs, XFRMA_MAX, xfrma_policy,
NULL);
if (err < 0)
return err;
if (attrs[XFRMA_ADDRESS_FILTER]) {
filter = kmemdup(nla_data(attrs[XFRMA_ADDRESS_FILTER]),
sizeof(*filter), GFP_KERNEL);
if (filter == NULL)
return -ENOMEM;
}
if (attrs[XFRMA_PROTO])
proto = nla_get_u8(attrs[XFRMA_PROTO]);
xfrm_state_walk_init(walk, proto, filter);
cb->args[0] = 1;
}
(void) xfrm_state_walk(net, walk, dump_one_state, &info);
return skb->len;
}
static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb,
struct xfrm_state *x, u32 seq)
{
struct xfrm_dump_info info;
struct sk_buff *skb;
int err;
skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
if (!skb)
return ERR_PTR(-ENOMEM);
info.in_skb = in_skb;
info.out_skb = skb;
info.nlmsg_seq = seq;
info.nlmsg_flags = 0;
err = dump_one_state(x, 0, &info);
if (err) {
kfree_skb(skb);
return ERR_PTR(err);
}
return skb;
}
/* A wrapper for nlmsg_multicast() checking that nlsk is still available.
* Must be called with RCU read lock.
*/
static inline int xfrm_nlmsg_multicast(struct net *net, struct sk_buff *skb,
u32 pid, unsigned int group)
{
struct sock *nlsk = rcu_dereference(net->xfrm.nlsk);
if (nlsk)
return nlmsg_multicast(nlsk, skb, pid, group, GFP_ATOMIC);
else
return -1;
}
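/* Worst-case SPD info reply size. Note the hash-threshold attribute
 * is counted twice on purpose: once for XFRMA_SPD_IPV4_HTHRESH and
 * once for XFRMA_SPD_IPV6_HTHRESH. */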
static inline size_t xfrm_spdinfo_msgsize(void)
{
return NLMSG_ALIGN(4)
+ nla_total_size(sizeof(struct xfrmu_spdinfo))
+ nla_total_size(sizeof(struct xfrmu_spdhinfo))
+ nla_total_size(sizeof(struct xfrmu_spdhthresh))
+ nla_total_size(sizeof(struct xfrmu_spdhthresh));
}
static int build_spdinfo(struct sk_buff *skb, struct net *net,
u32 portid, u32 seq, u32 flags)
{
struct xfrmk_spdinfo si;
struct xfrmu_spdinfo spc;
struct xfrmu_spdhinfo sph;
struct xfrmu_spdhthresh spt4, spt6;
struct nlmsghdr *nlh;
int err;
u32 *f;
unsigned int lseq;
nlh = nlmsg_put(skb, portid, seq, XFRM_MSG_NEWSPDINFO, sizeof(u32), 0);
if (nlh == NULL) /* shouldn't really happen ... */
return -EMSGSIZE;
f = nlmsg_data(nlh);
*f = flags;
xfrm_spd_getinfo(net, &si);
spc.incnt = si.incnt;
spc.outcnt = si.outcnt;
spc.fwdcnt = si.fwdcnt;
spc.inscnt = si.inscnt;
spc.outscnt = si.outscnt;
spc.fwdscnt = si.fwdscnt;
sph.spdhcnt = si.spdhcnt;
sph.spdhmcnt = si.spdhmcnt;
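/* Sample both hash thresholds atomically: retry the read if a
 * concurrent xfrm_set_spdinfo() updated them mid-read. */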
do {
lseq = read_seqbegin(&net->xfrm.policy_hthresh.lock);
spt4.lbits = net->xfrm.policy_hthresh.lbits4;
spt4.rbits = net->xfrm.policy_hthresh.rbits4;
spt6.lbits = net->xfrm.policy_hthresh.lbits6;
spt6.rbits = net->xfrm.policy_hthresh.rbits6;
} while (read_seqretry(&net->xfrm.policy_hthresh.lock, lseq));
err = nla_put(skb, XFRMA_SPD_INFO, sizeof(spc), &spc);
if (!err)
err = nla_put(skb, XFRMA_SPD_HINFO, sizeof(sph), &sph);
if (!err)
err = nla_put(skb, XFRMA_SPD_IPV4_HTHRESH, sizeof(spt4), &spt4);
if (!err)
err = nla_put(skb, XFRMA_SPD_IPV6_HTHRESH, sizeof(spt6), &spt6);
if (err) {
nlmsg_cancel(skb, nlh);
return err;
}
nlmsg_end(skb, nlh);
return 0;
}
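/* XFRM_MSG_NEWSPDINFO handler: validate and install new policy
 * hash thresholds under the writer side of the seqlock, then
 * schedule a rebuild of the policy hash tables. */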
static int xfrm_set_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr **attrs)
{
struct net *net = sock_net(skb->sk);
struct xfrmu_spdhthresh *thresh4 = NULL;
struct xfrmu_spdhthresh *thresh6 = NULL;
/* selector prefixlen thresholds to hash policies */
if (attrs[XFRMA_SPD_IPV4_HTHRESH]) {
struct nlattr *rta = attrs[XFRMA_SPD_IPV4_HTHRESH];
if (nla_len(rta) < sizeof(*thresh4))
return -EINVAL;
thresh4 = nla_data(rta);
if (thresh4->lbits > 32 || thresh4->rbits > 32)
return -EINVAL;
}
if (attrs[XFRMA_SPD_IPV6_HTHRESH]) {
struct nlattr *rta = attrs[XFRMA_SPD_IPV6_HTHRESH];
if (nla_len(rta) < sizeof(*thresh6))
return -EINVAL;
thresh6 = nla_data(rta);
if (thresh6->lbits > 128 || thresh6->rbits > 128)
return -EINVAL;
}
if (thresh4 || thresh6) {
write_seqlock(&net->xfrm.policy_hthresh.lock);
if (thresh4) {
net->xfrm.policy_hthresh.lbits4 = thresh4->lbits;
net->xfrm.policy_hthresh.rbits4 = thresh4->rbits;
}
if (thresh6) {
net->xfrm.policy_hthresh.lbits6 = thresh6->lbits;
net->xfrm.policy_hthresh.rbits6 = thresh6->rbits;
}
write_sequnlock(&net->xfrm.policy_hthresh.lock);
xfrm_policy_hash_rebuild(net);
}
return 0;
}
static int xfrm_get_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr **attrs)
{
struct net *net = sock_net(skb->sk);
struct sk_buff *r_skb;
u32 *flags = nlmsg_data(nlh);
u32 sportid = NETLINK_CB(skb).portid;
u32 seq = nlh->nlmsg_seq;
r_skb = nlmsg_new(xfrm_spdinfo_msgsize(), GFP_ATOMIC);
if (r_skb == NULL)
return -ENOMEM;
if (build_spdinfo(r_skb, net, sportid, seq, *flags) < 0)
BUG();
return nlmsg_unicast(net->xfrm.nlsk, r_skb, sportid);
}
static inline size_t xfrm_sadinfo_msgsize(void)
{
return NLMSG_ALIGN(4)
+ nla_total_size(sizeof(struct xfrmu_sadhinfo))
+ nla_total_size(4); /* XFRMA_SAD_CNT */
}
static int build_sadinfo(struct sk_buff *skb, struct net *net,
u32 portid, u32 seq, u32 flags)
{
struct xfrmk_sadinfo si;
struct xfrmu_sadhinfo sh;
struct nlmsghdr *nlh;
int err;
u32 *f;
nlh = nlmsg_put(skb, portid, seq, XFRM_MSG_NEWSADINFO, sizeof(u32), 0);
if (nlh == NULL) /* shouldn't really happen ... */
return -EMSGSIZE;
f = nlmsg_data(nlh);
*f = flags;
xfrm_sad_getinfo(net, &si);
sh.sadhmcnt = si.sadhmcnt;
sh.sadhcnt = si.sadhcnt;
err = nla_put_u32(skb, XFRMA_SAD_CNT, si.sadcnt);
if (!err)
err = nla_put(skb, XFRMA_SAD_HINFO, sizeof(sh), &sh);
if (err) {
nlmsg_cancel(skb, nlh);
return err;
}
nlmsg_end(skb, nlh);
return 0;
}
static int xfrm_get_sadinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr **attrs)
{
struct net *net = sock_net(skb->sk);
struct sk_buff *r_skb;
u32 *flags = nlmsg_data(nlh);
u32 sportid = NETLINK_CB(skb).portid;
u32 seq = nlh->nlmsg_seq;
r_skb = nlmsg_new(xfrm_sadinfo_msgsize(), GFP_ATOMIC);
if (r_skb == NULL)
return -ENOMEM;
if (build_sadinfo(r_skb, net, sportid, seq, *flags) < 0)
BUG();
return nlmsg_unicast(net->xfrm.nlsk, r_skb, sportid);
}
static int xfrm_get_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr **attrs)
{
struct net *net = sock_net(skb->sk);
struct xfrm_usersa_id *p = nlmsg_data(nlh);
struct xfrm_state *x;
struct sk_buff *resp_skb;
int err = -ESRCH;
x = xfrm_user_state_lookup(net, p, attrs, &err);
if (x == NULL)
goto out_noput;
resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq);
if (IS_ERR(resp_skb)) {
err = PTR_ERR(resp_skb);
} else {
err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, NETLINK_CB(skb).portid);
}
xfrm_state_put(x);
out_noput:
return err;
}
static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr **attrs)
{
struct net *net = sock_net(skb->sk);
struct xfrm_state *x;
struct xfrm_userspi_info *p;
struct sk_buff *resp_skb;
xfrm_address_t *daddr;
int family;
int err;
u32 mark;
struct xfrm_mark m;
p = nlmsg_data(nlh);
err = verify_spi_info(p->info.id.proto, p->min, p->max);
if (err)
goto out_noput;
family = p->info.family;
daddr = &p->info.id.daddr;
x = NULL;
mark = xfrm_mark_get(attrs, &m);
if (p->info.seq) {
x = xfrm_find_acq_byseq(net, mark, p->info.seq);
if (x && !xfrm_addr_equal(&x->id.daddr, daddr, family)) {
xfrm_state_put(x);
x = NULL;
}
}
if (!x)
x = xfrm_find_acq(net, &m, p->info.mode, p->info.reqid,
p->info.id.proto, daddr,
&p->info.saddr, 1,
family);
err = -ENOENT;
if (x == NULL)
goto out_noput;
err = xfrm_alloc_spi(x, p->min, p->max);
if (err)
goto out;
resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq);
if (IS_ERR(resp_skb)) {
err = PTR_ERR(resp_skb);
goto out;
}
err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, NETLINK_CB(skb).portid);
out:
xfrm_state_put(x);
out_noput:
return err;
}
static int verify_policy_dir(u8 dir)
{
switch (dir) {
case XFRM_POLICY_IN:
case XFRM_POLICY_OUT:
case XFRM_POLICY_FWD:
break;
default:
return -EINVAL;
}
return 0;
}
static int verify_policy_type(u8 type)
{
switch (type) {
case XFRM_POLICY_TYPE_MAIN:
#ifdef CONFIG_XFRM_SUB_POLICY
case XFRM_POLICY_TYPE_SUB:
#endif
break;
default:
return -EINVAL;
}
return 0;
}
static int verify_newpolicy_info(struct xfrm_userpolicy_info *p)
{
int ret;
switch (p->share) {
case XFRM_SHARE_ANY:
case XFRM_SHARE_SESSION:
case XFRM_SHARE_USER:
case XFRM_SHARE_UNIQUE:
break;
default:
return -EINVAL;
}
switch (p->action) {
case XFRM_POLICY_ALLOW:
case XFRM_POLICY_BLOCK:
break;
default:
return -EINVAL;
}
switch (p->sel.family) {
case AF_INET:
break;
case AF_INET6:
#if IS_ENABLED(CONFIG_IPV6)
break;
#else
return -EAFNOSUPPORT;
#endif
default:
return -EINVAL;
}
ret = verify_policy_dir(p->dir);
if (ret)
return ret;
if (p->index && ((p->index & XFRM_POLICY_MAX) != p->dir))
return -EINVAL;
return 0;
}
static int copy_from_user_sec_ctx(struct xfrm_policy *pol, struct nlattr **attrs)
{
struct nlattr *rt = attrs[XFRMA_SEC_CTX];
struct xfrm_user_sec_ctx *uctx;
if (!rt)
return 0;
uctx = nla_data(rt);
return security_xfrm_policy_alloc(&pol->security, uctx, GFP_KERNEL);
}
static void copy_templates(struct xfrm_policy *xp, struct xfrm_user_tmpl *ut,
int nr)
{
int i;
xp->xfrm_nr = nr;
for (i = 0; i < nr; i++, ut++) {
struct xfrm_tmpl *t = &xp->xfrm_vec[i];
memcpy(&t->id, &ut->id, sizeof(struct xfrm_id));
memcpy(&t->saddr, &ut->saddr,
sizeof(xfrm_address_t));
t->reqid = ut->reqid;
t->mode = ut->mode;
t->share = ut->share;
t->optional = ut->optional;
t->aalgos = ut->aalgos;
t->ealgos = ut->ealgos;
t->calgos = ut->calgos;
/* If all masks are ~0, then we allow all algorithms. */
t->allalgs = !~(t->aalgos & t->ealgos & t->calgos);
t->encap_family = ut->family;
}
}
static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
{
int i;
if (nr > XFRM_MAX_DEPTH)
return -EINVAL;
for (i = 0; i < nr; i++) {
/* We never validated the ut->family value, so many
* applications simply leave it at zero. The check was
* never made and ut->family was ignored because all
* templates could be assumed to have the same family as
* the policy itself. Now that we will have ipv4-in-ipv6
* and ipv6-in-ipv4 tunnels, this is no longer true.
*/
if (!ut[i].family)
ut[i].family = family;
switch (ut[i].family) {
case AF_INET:
break;
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
break;
#endif
default:
return -EINVAL;
}
}
return 0;
}
static int copy_from_user_tmpl(struct xfrm_policy *pol, struct nlattr **attrs)
{
struct nlattr *rt = attrs[XFRMA_TMPL];
if (!rt) {
pol->xfrm_nr = 0;
} else {
struct xfrm_user_tmpl *utmpl = nla_data(rt);
int nr = nla_len(rt) / sizeof(*utmpl);
int err;
err = validate_tmpl(nr, utmpl, pol->family);
if (err)
return err;
copy_templates(pol, utmpl, nr);
}
return 0;
}
static int copy_from_user_policy_type(u8 *tp, struct nlattr **attrs)
{
struct nlattr *rt = attrs[XFRMA_POLICY_TYPE];
struct xfrm_userpolicy_type *upt;
u8 type = XFRM_POLICY_TYPE_MAIN;
int err;
if (rt) {
upt = nla_data(rt);
type = upt->type;
}
err = verify_policy_type(type);
if (err)
return err;
*tp = type;
return 0;
}
static void copy_from_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p)
{
xp->priority = p->priority;
xp->index = p->index;
memcpy(&xp->selector, &p->sel, sizeof(xp->selector));
memcpy(&xp->lft, &p->lft, sizeof(xp->lft));
xp->action = p->action;
xp->flags = p->flags;
xp->family = p->sel.family;
/* XXX xp->share = p->share; */
}
static void copy_to_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p, int dir)
{
memset(p, 0, sizeof(*p));
memcpy(&p->sel, &xp->selector, sizeof(p->sel));
memcpy(&p->lft, &xp->lft, sizeof(p->lft));
memcpy(&p->curlft, &xp->curlft, sizeof(p->curlft));
p->priority = xp->priority;
p->index = xp->index;
p->sel.family = xp->family;
p->dir = dir;
p->action = xp->action;
p->flags = xp->flags;
p->share = XFRM_SHARE_ANY; /* XXX xp->share */
}
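/* Build an xfrm_policy from a netlink request: base fields, policy
 * type, templates, security context and mark. On failure the
 * half-built policy is destroyed and the error reported via *errp. */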
static struct xfrm_policy *xfrm_policy_construct(struct net *net, struct xfrm_userpolicy_info *p, struct nlattr **attrs, int *errp)
{
struct xfrm_policy *xp = xfrm_policy_alloc(net, GFP_KERNEL);
int err;
if (!xp) {
*errp = -ENOMEM;
return NULL;
}
copy_from_user_policy(xp, p);
err = copy_from_user_policy_type(&xp->type, attrs);
if (err)
goto error;
if (!(err = copy_from_user_tmpl(xp, attrs)))
err = copy_from_user_sec_ctx(xp, attrs);
if (err)
goto error;
xfrm_mark_get(attrs, &xp->mark);
return xp;
error:
*errp = err;
xp->walk.dead = 1;
xfrm_policy_destroy(xp);
return NULL;
}
static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr **attrs)
{
struct net *net = sock_net(skb->sk);
struct xfrm_userpolicy_info *p = nlmsg_data(nlh);
struct xfrm_policy *xp;
struct km_event c;
int err;
int excl;
err = verify_newpolicy_info(p);
if (err)
return err;
err = verify_sec_ctx_len(attrs);
if (err)
return err;
xp = xfrm_policy_construct(net, p, attrs, &err);
if (!xp)
return err;
/* shouldn't excl be based on nlh flags??
 * Aha! this is anti-netlink really, i.e. more pfkey-derived:
 * in netlink excl is a flag, and you wouldn't need a separate
 * XFRM_MSG_UPDPOLICY type - JHS */
excl = nlh->nlmsg_type == XFRM_MSG_NEWPOLICY;
err = xfrm_policy_insert(p->dir, xp, excl);
xfrm_audit_policy_add(xp, err ? 0 : 1, true);
if (err) {
security_xfrm_policy_free(xp->security);
kfree(xp);
return err;
}
c.event = nlh->nlmsg_type;
c.seq = nlh->nlmsg_seq;
c.portid = nlh->nlmsg_pid;
km_policy_notify(xp, p->dir, &c);
xfrm_pol_put(xp);
return 0;
}
static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb)
{
struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH];
int i;
if (xp->xfrm_nr == 0)
return 0;
for (i = 0; i < xp->xfrm_nr; i++) {
struct xfrm_user_tmpl *up = &vec[i];
struct xfrm_tmpl *kp = &xp->xfrm_vec[i];
memset(up, 0, sizeof(*up));
memcpy(&up->id, &kp->id, sizeof(up->id));
up->family = kp->encap_family;
memcpy(&up->saddr, &kp->saddr, sizeof(up->saddr));
up->reqid = kp->reqid;
up->mode = kp->mode;
up->share = kp->share;
up->optional = kp->optional;
up->aalgos = kp->aalgos;
up->ealgos = kp->ealgos;
up->calgos = kp->calgos;
}
return nla_put(skb, XFRMA_TMPL,
sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr, vec);
}
static inline int copy_to_user_state_sec_ctx(struct xfrm_state *x, struct sk_buff *skb)
{
if (x->security) {
return copy_sec_ctx(x->security, skb);
}
return 0;
}
static inline int copy_to_user_sec_ctx(struct xfrm_policy *xp, struct sk_buff *skb)
{
if (xp->security)
return copy_sec_ctx(xp->security, skb);
return 0;
}
static inline size_t userpolicy_type_attrsize(void)
{
#ifdef CONFIG_XFRM_SUB_POLICY
return nla_total_size(sizeof(struct xfrm_userpolicy_type));
#else
return 0;
#endif
}
#ifdef CONFIG_XFRM_SUB_POLICY
static int copy_to_user_policy_type(u8 type, struct sk_buff *skb)
{
struct xfrm_userpolicy_type upt = {
.type = type,
};
return nla_put(skb, XFRMA_POLICY_TYPE, sizeof(upt), &upt);
}
#else
static inline int copy_to_user_policy_type(u8 type, struct sk_buff *skb)
{
return 0;
}
#endif
static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr)
{
struct xfrm_dump_info *sp = ptr;
struct xfrm_userpolicy_info *p;
struct sk_buff *in_skb = sp->in_skb;
struct sk_buff *skb = sp->out_skb;
struct nlmsghdr *nlh;
int err;
nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, sp->nlmsg_seq,
XFRM_MSG_NEWPOLICY, sizeof(*p), sp->nlmsg_flags);
if (nlh == NULL)
return -EMSGSIZE;
p = nlmsg_data(nlh);
copy_to_user_policy(xp, p, dir);
err = copy_to_user_tmpl(xp, skb);
if (!err)
err = copy_to_user_sec_ctx(xp, skb);
if (!err)
err = copy_to_user_policy_type(xp->type, skb);
if (!err)
err = xfrm_mark_put(skb, &xp->mark);
if (err) {
nlmsg_cancel(skb, nlh);
return err;
}
nlmsg_end(skb, nlh);
return 0;
}
static int xfrm_dump_policy_done(struct netlink_callback *cb)
{
struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1];
struct net *net = sock_net(cb->skb->sk);
xfrm_policy_walk_done(walk, net);
return 0;
}
static int xfrm_dump_policy(struct sk_buff *skb, struct netlink_callback *cb)
{
struct net *net = sock_net(skb->sk);
struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1];
struct xfrm_dump_info info;
BUILD_BUG_ON(sizeof(struct xfrm_policy_walk) >
sizeof(cb->args) - sizeof(cb->args[0]));
info.in_skb = cb->skb;
info.out_skb = skb;
info.nlmsg_seq = cb->nlh->nlmsg_seq;
info.nlmsg_flags = NLM_F_MULTI;
if (!cb->args[0]) {
cb->args[0] = 1;
xfrm_policy_walk_init(walk, XFRM_POLICY_TYPE_ANY);
}
(void) xfrm_policy_walk(net, walk, dump_one_policy, &info);
return skb->len;
}
static struct sk_buff *xfrm_policy_netlink(struct sk_buff *in_skb,
struct xfrm_policy *xp,
int dir, u32 seq)
{
struct xfrm_dump_info info;
struct sk_buff *skb;
int err;
skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!skb)
return ERR_PTR(-ENOMEM);
info.in_skb = in_skb;
info.out_skb = skb;
info.nlmsg_seq = seq;
info.nlmsg_flags = 0;
err = dump_one_policy(xp, dir, 0, &info);
if (err) {
kfree_skb(skb);
return ERR_PTR(err);
}
return skb;
}
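/* Shared XFRM_MSG_GETPOLICY / XFRM_MSG_DELPOLICY handler. The policy
 * is looked up either by index or by selector plus security context;
 * for GET a copy is unicast back to the caller, for DEL the deletion
 * is audited and listeners are notified. */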
static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr **attrs)
{
struct net *net = sock_net(skb->sk);
struct xfrm_policy *xp;
struct xfrm_userpolicy_id *p;
u8 type = XFRM_POLICY_TYPE_MAIN;
int err;
struct km_event c;
int delete;
struct xfrm_mark m;
u32 mark = xfrm_mark_get(attrs, &m);
p = nlmsg_data(nlh);
delete = nlh->nlmsg_type == XFRM_MSG_DELPOLICY;
err = copy_from_user_policy_type(&type, attrs);
if (err)
return err;
err = verify_policy_dir(p->dir);
if (err)
return err;
if (p->index)
xp = xfrm_policy_byid(net, mark, type, p->dir, p->index, delete, &err);
else {
struct nlattr *rt = attrs[XFRMA_SEC_CTX];
struct xfrm_sec_ctx *ctx;
err = verify_sec_ctx_len(attrs);
if (err)
return err;
ctx = NULL;
if (rt) {
struct xfrm_user_sec_ctx *uctx = nla_data(rt);
err = security_xfrm_policy_alloc(&ctx, uctx, GFP_KERNEL);
if (err)
return err;
}
xp = xfrm_policy_bysel_ctx(net, mark, type, p->dir, &p->sel,
ctx, delete, &err);
security_xfrm_policy_free(ctx);
}
if (xp == NULL)
return -ENOENT;
if (!delete) {
struct sk_buff *resp_skb;
resp_skb = xfrm_policy_netlink(skb, xp, p->dir, nlh->nlmsg_seq);
if (IS_ERR(resp_skb)) {
err = PTR_ERR(resp_skb);
} else {
err = nlmsg_unicast(net->xfrm.nlsk, resp_skb,
NETLINK_CB(skb).portid);
}
} else {
xfrm_audit_policy_delete(xp, err ? 0 : 1, true);
if (err != 0)
goto out;
c.data.byid = p->index;
c.event = nlh->nlmsg_type;
c.seq = nlh->nlmsg_seq;
c.portid = nlh->nlmsg_pid;
km_policy_notify(xp, p->dir, &c);
}
out:
xfrm_pol_put(xp);
return err;
}
static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr **attrs)
{
struct net *net = sock_net(skb->sk);
struct km_event c;
struct xfrm_usersa_flush *p = nlmsg_data(nlh);
int err;
err = xfrm_state_flush(net, p->proto, true);
if (err) {
if (err == -ESRCH) /* empty table */
return 0;
return err;
}
c.data.proto = p->proto;
c.event = nlh->nlmsg_type;
c.seq = nlh->nlmsg_seq;
c.portid = nlh->nlmsg_pid;
c.net = net;
km_state_notify(NULL, &c);
return 0;
}
static inline size_t xfrm_aevent_msgsize(struct xfrm_state *x)
{
size_t replay_size = x->replay_esn ?
xfrm_replay_state_esn_len(x->replay_esn) :
sizeof(struct xfrm_replay_state);
return NLMSG_ALIGN(sizeof(struct xfrm_aevent_id))
+ nla_total_size(replay_size)
+ nla_total_size_64bit(sizeof(struct xfrm_lifetime_cur))
+ nla_total_size(sizeof(struct xfrm_mark))
+ nla_total_size(4) /* XFRM_AE_RTHR */
+ nla_total_size(4); /* XFRM_AE_ETHR */
}
static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, const struct km_event *c)
{
struct xfrm_aevent_id *id;
struct nlmsghdr *nlh;
int err;
nlh = nlmsg_put(skb, c->portid, c->seq, XFRM_MSG_NEWAE, sizeof(*id), 0);
if (nlh == NULL)
return -EMSGSIZE;
id = nlmsg_data(nlh);
memset(&id->sa_id, 0, sizeof(id->sa_id));
memcpy(&id->sa_id.daddr, &x->id.daddr, sizeof(x->id.daddr));
id->sa_id.spi = x->id.spi;
id->sa_id.family = x->props.family;
id->sa_id.proto = x->id.proto;
memcpy(&id->saddr, &x->props.saddr, sizeof(x->props.saddr));
id->reqid = x->props.reqid;
id->flags = c->data.aevent;
if (x->replay_esn) {
err = nla_put(skb, XFRMA_REPLAY_ESN_VAL,
xfrm_replay_state_esn_len(x->replay_esn),
x->replay_esn);
} else {
err = nla_put(skb, XFRMA_REPLAY_VAL, sizeof(x->replay),
&x->replay);
}
if (err)
goto out_cancel;
err = nla_put_64bit(skb, XFRMA_LTIME_VAL, sizeof(x->curlft), &x->curlft,
XFRMA_PAD);
if (err)
goto out_cancel;
if (id->flags & XFRM_AE_RTHR) {
err = nla_put_u32(skb, XFRMA_REPLAY_THRESH, x->replay_maxdiff);
if (err)
goto out_cancel;
}
if (id->flags & XFRM_AE_ETHR) {
err = nla_put_u32(skb, XFRMA_ETIMER_THRESH,
x->replay_maxage * 10 / HZ);
if (err)
goto out_cancel;
}
err = xfrm_mark_put(skb, &x->mark);
if (err)
goto out_cancel;
nlmsg_end(skb, nlh);
return 0;
out_cancel:
nlmsg_cancel(skb, nlh);
return err;
}
static int xfrm_get_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr **attrs)
{
struct net *net = sock_net(skb->sk);
struct xfrm_state *x;
struct sk_buff *r_skb;
int err;
struct km_event c;
u32 mark;
struct xfrm_mark m;
struct xfrm_aevent_id *p = nlmsg_data(nlh);
struct xfrm_usersa_id *id = &p->sa_id;
mark = xfrm_mark_get(attrs, &m);
x = xfrm_state_lookup(net, mark, &id->daddr, id->spi, id->proto, id->family);
if (x == NULL)
return -ESRCH;
r_skb = nlmsg_new(xfrm_aevent_msgsize(x), GFP_ATOMIC);
if (r_skb == NULL) {
xfrm_state_put(x);
return -ENOMEM;
}
/*
 * XXX: is this lock really needed? None of the other
 * get handlers take it (the concern is things getting
 * updated while we are still reading) - jhs
 */
spin_lock_bh(&x->lock);
c.data.aevent = p->flags;
c.seq = nlh->nlmsg_seq;
c.portid = nlh->nlmsg_pid;
if (build_aevent(r_skb, x, &c) < 0)
BUG();
err = nlmsg_unicast(net->xfrm.nlsk, r_skb, NETLINK_CB(skb).portid);
spin_unlock_bh(&x->lock);
xfrm_state_put(x);
return err;
}
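/* XFRM_MSG_NEWAE handler: replace replay/lifetime counters on an
 * existing SA. At least one aevent attribute and the NLM_F_REPLACE
 * flag are required, and an ESN replay attribute must match the
 * SA's configured ESN window exactly. */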
static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr **attrs)
{
struct net *net = sock_net(skb->sk);
struct xfrm_state *x;
struct km_event c;
int err = -EINVAL;
u32 mark = 0;
struct xfrm_mark m;
struct xfrm_aevent_id *p = nlmsg_data(nlh);
struct nlattr *rp = attrs[XFRMA_REPLAY_VAL];
struct nlattr *re = attrs[XFRMA_REPLAY_ESN_VAL];
struct nlattr *lt = attrs[XFRMA_LTIME_VAL];
struct nlattr *et = attrs[XFRMA_ETIMER_THRESH];
struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH];
if (!lt && !rp && !re && !et && !rt)
return err;
/* pedantic mode - thou shalt sayeth replaceth */
if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
return err;
mark = xfrm_mark_get(attrs, &m);
x = xfrm_state_lookup(net, mark, &p->sa_id.daddr, p->sa_id.spi, p->sa_id.proto, p->sa_id.family);
if (x == NULL)
return -ESRCH;
if (x->km.state != XFRM_STATE_VALID)
goto out;
err = xfrm_replay_verify_len(x->replay_esn, re);
if (err)
goto out;
spin_lock_bh(&x->lock);
xfrm_update_ae_params(x, attrs, 1);
spin_unlock_bh(&x->lock);
c.event = nlh->nlmsg_type;
c.seq = nlh->nlmsg_seq;
c.portid = nlh->nlmsg_pid;
c.data.aevent = XFRM_AE_CU;
km_state_notify(x, &c);
err = 0;
out:
xfrm_state_put(x);
return err;
}
static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr **attrs)
{
struct net *net = sock_net(skb->sk);
struct km_event c;
u8 type = XFRM_POLICY_TYPE_MAIN;
int err;
err = copy_from_user_policy_type(&type, attrs);
if (err)
return err;
err = xfrm_policy_flush(net, type, true);
if (err) {
if (err == -ESRCH) /* empty table */
return 0;
return err;
}
c.data.type = type;
c.event = nlh->nlmsg_type;
c.seq = nlh->nlmsg_seq;
c.portid = nlh->nlmsg_pid;
c.net = net;
km_policy_notify(NULL, 0, &c);
return 0;
}
static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr **attrs)
{
struct net *net = sock_net(skb->sk);
struct xfrm_policy *xp;
struct xfrm_user_polexpire *up = nlmsg_data(nlh);
struct xfrm_userpolicy_info *p = &up->pol;
u8 type = XFRM_POLICY_TYPE_MAIN;
int err = -ENOENT;
struct xfrm_mark m;
u32 mark = xfrm_mark_get(attrs, &m);
err = copy_from_user_policy_type(&type, attrs);
if (err)
return err;
err = verify_policy_dir(p->dir);
if (err)
return err;
if (p->index)
xp = xfrm_policy_byid(net, mark, type, p->dir, p->index, 0, &err);
else {
struct nlattr *rt = attrs[XFRMA_SEC_CTX];
struct xfrm_sec_ctx *ctx;
err = verify_sec_ctx_len(attrs);
if (err)
return err;
ctx = NULL;
if (rt) {
struct xfrm_user_sec_ctx *uctx = nla_data(rt);
err = security_xfrm_policy_alloc(&ctx, uctx, GFP_KERNEL);
if (err)
return err;
}
xp = xfrm_policy_bysel_ctx(net, mark, type, p->dir,
&p->sel, ctx, 0, &err);
security_xfrm_policy_free(ctx);
}
if (xp == NULL)
return -ENOENT;
if (unlikely(xp->walk.dead))
goto out;
err = 0;
if (up->hard) {
xfrm_policy_delete(xp, p->dir);
xfrm_audit_policy_delete(xp, 1, true);
}
km_policy_expired(xp, p->dir, up->hard, nlh->nlmsg_pid);
out:
xfrm_pol_put(xp);
return err;
}
static int xfrm_add_sa_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr **attrs)
{
struct net *net = sock_net(skb->sk);
struct xfrm_state *x;
int err;
struct xfrm_user_expire *ue = nlmsg_data(nlh);
struct xfrm_usersa_info *p = &ue->state;
struct xfrm_mark m;
u32 mark = xfrm_mark_get(attrs, &m);
x = xfrm_state_lookup(net, mark, &p->id.daddr, p->id.spi, p->id.proto, p->family);
err = -ENOENT;
if (x == NULL)
return err;
spin_lock_bh(&x->lock);
err = -EINVAL;
if (x->km.state != XFRM_STATE_VALID)
goto out;
km_state_expired(x, ue->hard, nlh->nlmsg_pid);
if (ue->hard) {
__xfrm_state_delete(x);
xfrm_audit_state_delete(x, 1, true);
}
err = 0;
out:
spin_unlock_bh(&x->lock);
xfrm_state_put(x);
return err;
}
static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr **attrs)
{
struct net *net = sock_net(skb->sk);
struct xfrm_policy *xp;
struct xfrm_user_tmpl *ut;
int i;
struct nlattr *rt = attrs[XFRMA_TMPL];
struct xfrm_mark mark;
struct xfrm_user_acquire *ua = nlmsg_data(nlh);
struct xfrm_state *x = xfrm_state_alloc(net);
int err = -ENOMEM;
if (!x)
goto nomem;
xfrm_mark_get(attrs, &mark);
err = verify_newpolicy_info(&ua->policy);
if (err)
goto free_state;
/* build an XP */
xp = xfrm_policy_construct(net, &ua->policy, attrs, &err);
if (!xp)
goto free_state;
memcpy(&x->id, &ua->id, sizeof(ua->id));
memcpy(&x->props.saddr, &ua->saddr, sizeof(ua->saddr));
memcpy(&x->sel, &ua->sel, sizeof(ua->sel));
xp->mark.m = x->mark.m = mark.m;
xp->mark.v = x->mark.v = mark.v;
ut = nla_data(rt);
/* extract the templates and, for each, call km_query */
for (i = 0; i < xp->xfrm_nr; i++, ut++) {
struct xfrm_tmpl *t = &xp->xfrm_vec[i];
memcpy(&x->id, &t->id, sizeof(x->id));
x->props.mode = t->mode;
x->props.reqid = t->reqid;
x->props.family = ut->family;
t->aalgos = ua->aalgos;
t->ealgos = ua->ealgos;
t->calgos = ua->calgos;
err = km_query(x, t, xp);
}
kfree(x);
kfree(xp);
return 0;
free_state:
kfree(x);
nomem:
return err;
}
#ifdef CONFIG_XFRM_MIGRATE
static int copy_from_user_migrate(struct xfrm_migrate *ma,
struct xfrm_kmaddress *k,
struct nlattr **attrs, int *num)
{
struct nlattr *rt = attrs[XFRMA_MIGRATE];
struct xfrm_user_migrate *um;
int i, num_migrate;
if (k != NULL) {
struct xfrm_user_kmaddress *uk;
uk = nla_data(attrs[XFRMA_KMADDRESS]);
memcpy(&k->local, &uk->local, sizeof(k->local));
memcpy(&k->remote, &uk->remote, sizeof(k->remote));
k->family = uk->family;
k->reserved = uk->reserved;
}
um = nla_data(rt);
num_migrate = nla_len(rt) / sizeof(*um);
if (num_migrate <= 0 || num_migrate > XFRM_MAX_DEPTH)
return -EINVAL;
for (i = 0; i < num_migrate; i++, um++, ma++) {
memcpy(&ma->old_daddr, &um->old_daddr, sizeof(ma->old_daddr));
memcpy(&ma->old_saddr, &um->old_saddr, sizeof(ma->old_saddr));
memcpy(&ma->new_daddr, &um->new_daddr, sizeof(ma->new_daddr));
memcpy(&ma->new_saddr, &um->new_saddr, sizeof(ma->new_saddr));
ma->proto = um->proto;
ma->mode = um->mode;
ma->reqid = um->reqid;
ma->old_family = um->old_family;
ma->new_family = um->new_family;
}
*num = i;
return 0;
}
static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr **attrs)
{
struct xfrm_userpolicy_id *pi = nlmsg_data(nlh);
struct xfrm_migrate m[XFRM_MAX_DEPTH];
struct xfrm_kmaddress km, *kmp;
u8 type;
int err;
int n = 0;
struct net *net = sock_net(skb->sk);
struct xfrm_encap_tmpl *encap = NULL;
if (attrs[XFRMA_MIGRATE] == NULL)
return -EINVAL;
kmp = attrs[XFRMA_KMADDRESS] ? &km : NULL;
err = copy_from_user_policy_type(&type, attrs);
if (err)
return err;
err = copy_from_user_migrate((struct xfrm_migrate *)m, kmp, attrs, &n);
if (err)
return err;
if (!n)
return 0;
if (attrs[XFRMA_ENCAP]) {
encap = kmemdup(nla_data(attrs[XFRMA_ENCAP]),
sizeof(*encap), GFP_KERNEL);
if (!encap)
return -ENOMEM;
}
err = xfrm_migrate(&pi->sel, pi->dir, type, m, n, kmp, net, encap);
kfree(encap);
return err;
}
#else
static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr **attrs)
{
return -ENOPROTOOPT;
}
#endif
#ifdef CONFIG_XFRM_MIGRATE
static int copy_to_user_migrate(const struct xfrm_migrate *m, struct sk_buff *skb)
{
struct xfrm_user_migrate um;
memset(&um, 0, sizeof(um));
um.proto = m->proto;
um.mode = m->mode;
um.reqid = m->reqid;
um.old_family = m->old_family;
memcpy(&um.old_daddr, &m->old_daddr, sizeof(um.old_daddr));
memcpy(&um.old_saddr, &m->old_saddr, sizeof(um.old_saddr));
um.new_family = m->new_family;
memcpy(&um.new_daddr, &m->new_daddr, sizeof(um.new_daddr));
memcpy(&um.new_saddr, &m->new_saddr, sizeof(um.new_saddr));
return nla_put(skb, XFRMA_MIGRATE, sizeof(um), &um);
}
static int copy_to_user_kmaddress(const struct xfrm_kmaddress *k, struct sk_buff *skb)
{
struct xfrm_user_kmaddress uk;
memset(&uk, 0, sizeof(uk));
uk.family = k->family;
uk.reserved = k->reserved;
memcpy(&uk.local, &k->local, sizeof(uk.local));
memcpy(&uk.remote, &k->remote, sizeof(uk.remote));
return nla_put(skb, XFRMA_KMADDRESS, sizeof(uk), &uk);
}
static inline size_t xfrm_migrate_msgsize(int num_migrate, int with_kma,
int with_encp)
{
return NLMSG_ALIGN(sizeof(struct xfrm_userpolicy_id))
+ (with_kma ? nla_total_size(sizeof(struct xfrm_kmaddress)) : 0)
+ (with_encp ? nla_total_size(sizeof(struct xfrm_encap_tmpl)) : 0)
+ nla_total_size(sizeof(struct xfrm_user_migrate) * num_migrate)
+ userpolicy_type_attrsize();
}
static int build_migrate(struct sk_buff *skb, const struct xfrm_migrate *m,
int num_migrate, const struct xfrm_kmaddress *k,
const struct xfrm_selector *sel,
const struct xfrm_encap_tmpl *encap, u8 dir, u8 type)
{
const struct xfrm_migrate *mp;
struct xfrm_userpolicy_id *pol_id;
struct nlmsghdr *nlh;
int i, err;
nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_MIGRATE, sizeof(*pol_id), 0);
if (nlh == NULL)
return -EMSGSIZE;
pol_id = nlmsg_data(nlh);
/* copy data from selector, dir, and type to the pol_id */
memset(pol_id, 0, sizeof(*pol_id));
memcpy(&pol_id->sel, sel, sizeof(pol_id->sel));
pol_id->dir = dir;
if (k != NULL) {
err = copy_to_user_kmaddress(k, skb);
if (err)
goto out_cancel;
}
if (encap) {
err = nla_put(skb, XFRMA_ENCAP, sizeof(*encap), encap);
if (err)
goto out_cancel;
}
err = copy_to_user_policy_type(type, skb);
if (err)
goto out_cancel;
for (i = 0, mp = m; i < num_migrate; i++, mp++) {
err = copy_to_user_migrate(mp, skb);
if (err)
goto out_cancel;
}
nlmsg_end(skb, nlh);
return 0;
out_cancel:
nlmsg_cancel(skb, nlh);
return err;
}
static int xfrm_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
const struct xfrm_migrate *m, int num_migrate,
const struct xfrm_kmaddress *k,
const struct xfrm_encap_tmpl *encap)
{
struct net *net = &init_net;
struct sk_buff *skb;
skb = nlmsg_new(xfrm_migrate_msgsize(num_migrate, !!k, !!encap),
GFP_ATOMIC);
if (skb == NULL)
return -ENOMEM;
/* build migrate */
if (build_migrate(skb, m, num_migrate, k, sel, encap, dir, type) < 0)
BUG();
return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_MIGRATE);
}
#else
static int xfrm_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
const struct xfrm_migrate *m, int num_migrate,
const struct xfrm_kmaddress *k,
const struct xfrm_encap_tmpl *encap)
{
return -ENOPROTOOPT;
}
#endif
#define XMSGSIZE(type) sizeof(struct type)
static const int xfrm_msg_min[XFRM_NR_MSGTYPES] = {
[XFRM_MSG_NEWSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info),
[XFRM_MSG_DELSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id),
[XFRM_MSG_GETSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id),
[XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info),
[XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
[XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
[XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userspi_info),
[XFRM_MSG_ACQUIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_acquire),
[XFRM_MSG_EXPIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_expire),
[XFRM_MSG_UPDPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info),
[XFRM_MSG_UPDSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info),
[XFRM_MSG_POLEXPIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_polexpire),
[XFRM_MSG_FLUSHSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_flush),
[XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = 0,
[XFRM_MSG_NEWAE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id),
[XFRM_MSG_GETAE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id),
[XFRM_MSG_REPORT - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_report),
[XFRM_MSG_MIGRATE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
[XFRM_MSG_GETSADINFO - XFRM_MSG_BASE] = sizeof(u32),
[XFRM_MSG_NEWSPDINFO - XFRM_MSG_BASE] = sizeof(u32),
[XFRM_MSG_GETSPDINFO - XFRM_MSG_BASE] = sizeof(u32),
};
#undef XMSGSIZE
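/* Attribute policy for xfrm netlink messages. Entries with .len give
 * the minimum payload size; variable-size attributes such as
 * XFRMA_SEC_CTX, XFRMA_TMPL and XFRMA_REPLAY_ESN_VAL may legally be
 * longer and are re-checked by their consumers. */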
static const struct nla_policy xfrma_policy[XFRMA_MAX+1] = {
[XFRMA_SA] = { .len = sizeof(struct xfrm_usersa_info)},
[XFRMA_POLICY] = { .len = sizeof(struct xfrm_userpolicy_info)},
[XFRMA_LASTUSED] = { .type = NLA_U64},
[XFRMA_ALG_AUTH_TRUNC] = { .len = sizeof(struct xfrm_algo_auth)},
[XFRMA_ALG_AEAD] = { .len = sizeof(struct xfrm_algo_aead) },
[XFRMA_ALG_AUTH] = { .len = sizeof(struct xfrm_algo) },
[XFRMA_ALG_CRYPT] = { .len = sizeof(struct xfrm_algo) },
[XFRMA_ALG_COMP] = { .len = sizeof(struct xfrm_algo) },
[XFRMA_ENCAP] = { .len = sizeof(struct xfrm_encap_tmpl) },
[XFRMA_TMPL] = { .len = sizeof(struct xfrm_user_tmpl) },
[XFRMA_SEC_CTX] = { .len = sizeof(struct xfrm_sec_ctx) },
[XFRMA_LTIME_VAL] = { .len = sizeof(struct xfrm_lifetime_cur) },
[XFRMA_REPLAY_VAL] = { .len = sizeof(struct xfrm_replay_state) },
[XFRMA_REPLAY_THRESH] = { .type = NLA_U32 },
[XFRMA_ETIMER_THRESH] = { .type = NLA_U32 },
[XFRMA_SRCADDR] = { .len = sizeof(xfrm_address_t) },
[XFRMA_COADDR] = { .len = sizeof(xfrm_address_t) },
[XFRMA_POLICY_TYPE] = { .len = sizeof(struct xfrm_userpolicy_type)},
[XFRMA_MIGRATE] = { .len = sizeof(struct xfrm_user_migrate) },
[XFRMA_KMADDRESS] = { .len = sizeof(struct xfrm_user_kmaddress) },
[XFRMA_MARK] = { .len = sizeof(struct xfrm_mark) },
[XFRMA_TFCPAD] = { .type = NLA_U32 },
[XFRMA_REPLAY_ESN_VAL] = { .len = sizeof(struct xfrm_replay_state_esn) },
[XFRMA_SA_EXTRA_FLAGS] = { .type = NLA_U32 },
[XFRMA_PROTO] = { .type = NLA_U8 },
[XFRMA_ADDRESS_FILTER] = { .len = sizeof(struct xfrm_address_filter) },
[XFRMA_OFFLOAD_DEV] = { .len = sizeof(struct xfrm_user_offload) },
[XFRMA_OUTPUT_MARK] = { .type = NLA_U32 },
};
static const struct nla_policy xfrma_spd_policy[XFRMA_SPD_MAX+1] = {
[XFRMA_SPD_IPV4_HTHRESH] = { .len = sizeof(struct xfrmu_spdhthresh) },
[XFRMA_SPD_IPV6_HTHRESH] = { .len = sizeof(struct xfrmu_spdhthresh) },
};
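/* Per-message-type dispatch table. Note XFRM_MSG_DELPOLICY is routed
 * through xfrm_get_policy(), which checks nlmsg_type to decide
 * between lookup and delete, while XFRM_MSG_DELSA has its own
 * handler. Types with a .nla_pol override the default xfrma_policy
 * when the message is parsed. */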
static const struct xfrm_link {
int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **);
int (*dump)(struct sk_buff *, struct netlink_callback *);
int (*done)(struct netlink_callback *);
const struct nla_policy *nla_pol;
int nla_max;
} xfrm_dispatch[XFRM_NR_MSGTYPES] = {
[XFRM_MSG_NEWSA - XFRM_MSG_BASE] = { .doit = xfrm_add_sa },
[XFRM_MSG_DELSA - XFRM_MSG_BASE] = { .doit = xfrm_del_sa },
[XFRM_MSG_GETSA - XFRM_MSG_BASE] = { .doit = xfrm_get_sa,
.dump = xfrm_dump_sa,
.done = xfrm_dump_sa_done },
[XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy },
[XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy },
[XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy,
.dump = xfrm_dump_policy,
.done = xfrm_dump_policy_done },
[XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = { .doit = xfrm_alloc_userspi },
[XFRM_MSG_ACQUIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_acquire },
[XFRM_MSG_EXPIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_sa_expire },
[XFRM_MSG_UPDPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy },
[XFRM_MSG_UPDSA - XFRM_MSG_BASE] = { .doit = xfrm_add_sa },
[XFRM_MSG_POLEXPIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_pol_expire},
[XFRM_MSG_FLUSHSA - XFRM_MSG_BASE] = { .doit = xfrm_flush_sa },
[XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_flush_policy },
[XFRM_MSG_NEWAE - XFRM_MSG_BASE] = { .doit = xfrm_new_ae },
[XFRM_MSG_GETAE - XFRM_MSG_BASE] = { .doit = xfrm_get_ae },
[XFRM_MSG_MIGRATE - XFRM_MSG_BASE] = { .doit = xfrm_do_migrate },
[XFRM_MSG_GETSADINFO - XFRM_MSG_BASE] = { .doit = xfrm_get_sadinfo },
[XFRM_MSG_NEWSPDINFO - XFRM_MSG_BASE] = { .doit = xfrm_set_spdinfo,
.nla_pol = xfrma_spd_policy,
.nla_max = XFRMA_SPD_MAX },
[XFRM_MSG_GETSPDINFO - XFRM_MSG_BASE] = { .doit = xfrm_get_spdinfo },
};
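/* Top-level netlink receive handler. All message types require
 * CAP_NET_ADMIN. GETSA/GETPOLICY requests carrying NLM_F_DUMP are
 * handed to netlink_dump_start(); everything else is parsed against
 * the per-type attribute policy and dispatched via .doit. */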
static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
struct net *net = sock_net(skb->sk);
struct nlattr *attrs[XFRMA_MAX+1];
const struct xfrm_link *link;
int type, err;
#ifdef CONFIG_COMPAT
if (in_compat_syscall())
return -EOPNOTSUPP;
#endif
type = nlh->nlmsg_type;
if (type > XFRM_MSG_MAX)
return -EINVAL;
type -= XFRM_MSG_BASE;
link = &xfrm_dispatch[type];
/* All operations require privileges, even GET */
if (!netlink_net_capable(skb, CAP_NET_ADMIN))
return -EPERM;
if ((type == (XFRM_MSG_GETSA - XFRM_MSG_BASE) ||
type == (XFRM_MSG_GETPOLICY - XFRM_MSG_BASE)) &&
(nlh->nlmsg_flags & NLM_F_DUMP)) {
if (link->dump == NULL)
return -EINVAL;
{
struct netlink_dump_control c = {
.dump = link->dump,
.done = link->done,
};
return netlink_dump_start(net->xfrm.nlsk, skb, nlh, &c);
}
}
err = nlmsg_parse(nlh, xfrm_msg_min[type], attrs,
link->nla_max ? : XFRMA_MAX,
link->nla_pol ? : xfrma_policy, extack);
if (err < 0)
return err;
if (link->doit == NULL)
return -EINVAL;
return link->doit(skb, nlh, attrs);
}
static void xfrm_netlink_rcv(struct sk_buff *skb)
{
struct net *net = sock_net(skb->sk);
mutex_lock(&net->xfrm.xfrm_cfg_mutex);
netlink_rcv_skb(skb, &xfrm_user_rcv_msg);
mutex_unlock(&net->xfrm.xfrm_cfg_mutex);
}
static inline size_t xfrm_expire_msgsize(void)
{
return NLMSG_ALIGN(sizeof(struct xfrm_user_expire))
+ nla_total_size(sizeof(struct xfrm_mark));
}
static int build_expire(struct sk_buff *skb, struct xfrm_state *x, const struct km_event *c)
{
struct xfrm_user_expire *ue;
struct nlmsghdr *nlh;
int err;
nlh = nlmsg_put(skb, c->portid, 0, XFRM_MSG_EXPIRE, sizeof(*ue), 0);
if (nlh == NULL)
return -EMSGSIZE;
ue = nlmsg_data(nlh);
copy_to_user_state(x, &ue->state);
ue->hard = (c->data.hard != 0) ? 1 : 0;
/* clear the padding bytes */
memset(&ue->hard + 1, 0, sizeof(*ue) - offsetofend(typeof(*ue), hard));
err = xfrm_mark_put(skb, &x->mark);
if (err)
return err;
nlmsg_end(skb, nlh);
return 0;
}
static int xfrm_exp_state_notify(struct xfrm_state *x, const struct km_event *c)
{
struct net *net = xs_net(x);
struct sk_buff *skb;
skb = nlmsg_new(xfrm_expire_msgsize(), GFP_ATOMIC);
if (skb == NULL)
return -ENOMEM;
if (build_expire(skb, x, c) < 0) {
kfree_skb(skb);
return -EMSGSIZE;
}
return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_EXPIRE);
}
static int xfrm_aevent_state_notify(struct xfrm_state *x, const struct km_event *c)
{
struct net *net = xs_net(x);
struct sk_buff *skb;
skb = nlmsg_new(xfrm_aevent_msgsize(x), GFP_ATOMIC);
if (skb == NULL)
return -ENOMEM;
if (build_aevent(skb, x, c) < 0)
BUG();
return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_AEVENTS);
}
static int xfrm_notify_sa_flush(const struct km_event *c)
{
struct net *net = c->net;
struct xfrm_usersa_flush *p;
struct nlmsghdr *nlh;
struct sk_buff *skb;
int len = NLMSG_ALIGN(sizeof(struct xfrm_usersa_flush));
skb = nlmsg_new(len, GFP_ATOMIC);
if (skb == NULL)
return -ENOMEM;
nlh = nlmsg_put(skb, c->portid, c->seq, XFRM_MSG_FLUSHSA, sizeof(*p), 0);
if (nlh == NULL) {
kfree_skb(skb);
return -EMSGSIZE;
}
p = nlmsg_data(nlh);
p->proto = c->data.proto;
nlmsg_end(skb, nlh);
return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_SA);
}
static inline size_t xfrm_sa_len(struct xfrm_state *x)
{
size_t l = 0;
if (x->aead)
l += nla_total_size(aead_len(x->aead));
if (x->aalg) {
l += nla_total_size(sizeof(struct xfrm_algo) +
(x->aalg->alg_key_len + 7) / 8);
l += nla_total_size(xfrm_alg_auth_len(x->aalg));
}
if (x->ealg)
l += nla_total_size(xfrm_alg_len(x->ealg));
if (x->calg)
l += nla_total_size(sizeof(*x->calg));
if (x->encap)
l += nla_total_size(sizeof(*x->encap));
if (x->tfcpad)
l += nla_total_size(sizeof(x->tfcpad));
if (x->replay_esn)
l += nla_total_size(xfrm_replay_state_esn_len(x->replay_esn));
else
l += nla_total_size(sizeof(struct xfrm_replay_state));
if (x->security)
l += nla_total_size(sizeof(struct xfrm_user_sec_ctx) +
x->security->ctx_len);
if (x->coaddr)
l += nla_total_size(sizeof(*x->coaddr));
if (x->props.extra_flags)
l += nla_total_size(sizeof(x->props.extra_flags));
if (x->xso.dev)
l += nla_total_size(sizeof(x->xso));
if (x->props.output_mark)
l += nla_total_size(sizeof(x->props.output_mark));
/* Must count x->lastused as it may become non-zero behind our back. */
l += nla_total_size_64bit(sizeof(u64));
return l;
}
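/* Broadcast a NEWSA/UPDSA/DELSA event to XFRMNLGRP_SA listeners.
 * For DELSA the message header is the compact xfrm_usersa_id and the
 * full SA description travels in a nested XFRMA_SA attribute. */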
static int xfrm_notify_sa(struct xfrm_state *x, const struct km_event *c)
{
struct net *net = xs_net(x);
struct xfrm_usersa_info *p;
struct xfrm_usersa_id *id;
struct nlmsghdr *nlh;
struct sk_buff *skb;
int len = xfrm_sa_len(x);
int headlen, err;
headlen = sizeof(*p);
if (c->event == XFRM_MSG_DELSA) {
len += nla_total_size(headlen);
headlen = sizeof(*id);
len += nla_total_size(sizeof(struct xfrm_mark));
}
len += NLMSG_ALIGN(headlen);
skb = nlmsg_new(len, GFP_ATOMIC);
if (skb == NULL)
return -ENOMEM;
nlh = nlmsg_put(skb, c->portid, c->seq, c->event, headlen, 0);
err = -EMSGSIZE;
if (nlh == NULL)
goto out_free_skb;
p = nlmsg_data(nlh);
if (c->event == XFRM_MSG_DELSA) {
struct nlattr *attr;
id = nlmsg_data(nlh);
memset(id, 0, sizeof(*id));
memcpy(&id->daddr, &x->id.daddr, sizeof(id->daddr));
id->spi = x->id.spi;
id->family = x->props.family;
id->proto = x->id.proto;
attr = nla_reserve(skb, XFRMA_SA, sizeof(*p));
err = -EMSGSIZE;
if (attr == NULL)
goto out_free_skb;
p = nla_data(attr);
}
err = copy_to_user_state_extra(x, p, skb);
if (err)
goto out_free_skb;
nlmsg_end(skb, nlh);
return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_SA);
out_free_skb:
kfree_skb(skb);
return err;
}
static int xfrm_send_state_notify(struct xfrm_state *x, const struct km_event *c)
{
switch (c->event) {
case XFRM_MSG_EXPIRE:
return xfrm_exp_state_notify(x, c);
case XFRM_MSG_NEWAE:
return xfrm_aevent_state_notify(x, c);
case XFRM_MSG_DELSA:
case XFRM_MSG_UPDSA:
case XFRM_MSG_NEWSA:
return xfrm_notify_sa(x, c);
case XFRM_MSG_FLUSHSA:
return xfrm_notify_sa_flush(c);
default:
printk(KERN_NOTICE "xfrm_user: Unknown SA event %d\n",
c->event);
break;
}
return 0;
}
static inline size_t xfrm_acquire_msgsize(struct xfrm_state *x,
struct xfrm_policy *xp)
{
return NLMSG_ALIGN(sizeof(struct xfrm_user_acquire))
+ nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr)
+ nla_total_size(sizeof(struct xfrm_mark))
+ nla_total_size(xfrm_user_sec_ctx_size(x->security))
+ userpolicy_type_attrsize();
}
static int build_acquire(struct sk_buff *skb, struct xfrm_state *x,
struct xfrm_tmpl *xt, struct xfrm_policy *xp)
{
__u32 seq = xfrm_get_acqseq();
struct xfrm_user_acquire *ua;
struct nlmsghdr *nlh;
int err;
nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_ACQUIRE, sizeof(*ua), 0);
if (nlh == NULL)
return -EMSGSIZE;
ua = nlmsg_data(nlh);
memcpy(&ua->id, &x->id, sizeof(ua->id));
memcpy(&ua->saddr, &x->props.saddr, sizeof(ua->saddr));
memcpy(&ua->sel, &x->sel, sizeof(ua->sel));
copy_to_user_policy(xp, &ua->policy, XFRM_POLICY_OUT);
ua->aalgos = xt->aalgos;
ua->ealgos = xt->ealgos;
ua->calgos = xt->calgos;
ua->seq = x->km.seq = seq;
err = copy_to_user_tmpl(xp, skb);
if (!err)
err = copy_to_user_state_sec_ctx(x, skb);
if (!err)
err = copy_to_user_policy_type(xp->type, skb);
if (!err)
err = xfrm_mark_put(skb, &xp->mark);
if (err) {
nlmsg_cancel(skb, nlh);
return err;
}
nlmsg_end(skb, nlh);
return 0;
}
static int xfrm_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *xt,
struct xfrm_policy *xp)
{
struct net *net = xs_net(x);
struct sk_buff *skb;
skb = nlmsg_new(xfrm_acquire_msgsize(x, xp), GFP_ATOMIC);
if (skb == NULL)
return -ENOMEM;
if (build_acquire(skb, x, xt, xp) < 0)
BUG();
return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_ACQUIRE);
}
/* User gives us xfrm_user_policy_info followed by an array of 0
* or more templates.
*/
static struct xfrm_policy *xfrm_compile_policy(struct sock *sk, int opt,
u8 *data, int len, int *dir)
{
struct net *net = sock_net(sk);
struct xfrm_userpolicy_info *p = (struct xfrm_userpolicy_info *)data;
struct xfrm_user_tmpl *ut = (struct xfrm_user_tmpl *) (p + 1);
struct xfrm_policy *xp;
int nr;
switch (sk->sk_family) {
case AF_INET:
if (opt != IP_XFRM_POLICY) {
*dir = -EOPNOTSUPP;
return NULL;
}
break;
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
if (opt != IPV6_XFRM_POLICY) {
*dir = -EOPNOTSUPP;
return NULL;
}
break;
#endif
default:
*dir = -EINVAL;
return NULL;
}
*dir = -EINVAL;
if (len < sizeof(*p) ||
verify_newpolicy_info(p))
return NULL;
nr = ((len - sizeof(*p)) / sizeof(*ut));
if (validate_tmpl(nr, ut, p->sel.family))
return NULL;
if (p->dir > XFRM_POLICY_OUT)
return NULL;
xp = xfrm_policy_alloc(net, GFP_ATOMIC);
if (xp == NULL) {
*dir = -ENOBUFS;
return NULL;
}
copy_from_user_policy(xp, p);
xp->type = XFRM_POLICY_TYPE_MAIN;
copy_templates(xp, ut, nr);
*dir = p->dir;
return xp;
}
static inline size_t xfrm_polexpire_msgsize(struct xfrm_policy *xp)
{
return NLMSG_ALIGN(sizeof(struct xfrm_user_polexpire))
+ nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr)
+ nla_total_size(xfrm_user_sec_ctx_size(xp->security))
+ nla_total_size(sizeof(struct xfrm_mark))
+ userpolicy_type_attrsize();
}
static int build_polexpire(struct sk_buff *skb, struct xfrm_policy *xp,
int dir, const struct km_event *c)
{
struct xfrm_user_polexpire *upe;
int hard = c->data.hard;
struct nlmsghdr *nlh;
int err;
nlh = nlmsg_put(skb, c->portid, 0, XFRM_MSG_POLEXPIRE, sizeof(*upe), 0);
if (nlh == NULL)
return -EMSGSIZE;
upe = nlmsg_data(nlh);
copy_to_user_policy(xp, &upe->pol, dir);
err = copy_to_user_tmpl(xp, skb);
if (!err)
err = copy_to_user_sec_ctx(xp, skb);
if (!err)
err = copy_to_user_policy_type(xp->type, skb);
if (!err)
err = xfrm_mark_put(skb, &xp->mark);
if (err) {
nlmsg_cancel(skb, nlh);
return err;
}
upe->hard = !!hard;
nlmsg_end(skb, nlh);
return 0;
}
static int xfrm_exp_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
{
struct net *net = xp_net(xp);
struct sk_buff *skb;
skb = nlmsg_new(xfrm_polexpire_msgsize(xp), GFP_ATOMIC);
if (skb == NULL)
return -ENOMEM;
if (build_polexpire(skb, xp, dir, c) < 0)
BUG();
return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_EXPIRE);
}
static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, const struct km_event *c)
{
int len = nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr);
struct net *net = xp_net(xp);
struct xfrm_userpolicy_info *p;
struct xfrm_userpolicy_id *id;
struct nlmsghdr *nlh;
struct sk_buff *skb;
int headlen, err;
headlen = sizeof(*p);
if (c->event == XFRM_MSG_DELPOLICY) {
len += nla_total_size(headlen);
headlen = sizeof(*id);
}
len += userpolicy_type_attrsize();
len += nla_total_size(sizeof(struct xfrm_mark));
len += NLMSG_ALIGN(headlen);
skb = nlmsg_new(len, GFP_ATOMIC);
if (skb == NULL)
return -ENOMEM;
nlh = nlmsg_put(skb, c->portid, c->seq, c->event, headlen, 0);
err = -EMSGSIZE;
if (nlh == NULL)
goto out_free_skb;
p = nlmsg_data(nlh);
if (c->event == XFRM_MSG_DELPOLICY) {
struct nlattr *attr;
id = nlmsg_data(nlh);
memset(id, 0, sizeof(*id));
id->dir = dir;
if (c->data.byid)
id->index = xp->index;
else
memcpy(&id->sel, &xp->selector, sizeof(id->sel));
attr = nla_reserve(skb, XFRMA_POLICY, sizeof(*p));
err = -EMSGSIZE;
if (attr == NULL)
goto out_free_skb;
p = nla_data(attr);
}
copy_to_user_policy(xp, p, dir);
err = copy_to_user_tmpl(xp, skb);
if (!err)
err = copy_to_user_policy_type(xp->type, skb);
if (!err)
err = xfrm_mark_put(skb, &xp->mark);
if (err)
goto out_free_skb;
nlmsg_end(skb, nlh);
return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_POLICY);
out_free_skb:
kfree_skb(skb);
return err;
}
static int xfrm_notify_policy_flush(const struct km_event *c)
{
struct net *net = c->net;
struct nlmsghdr *nlh;
struct sk_buff *skb;
int err;
skb = nlmsg_new(userpolicy_type_attrsize(), GFP_ATOMIC);
if (skb == NULL)
return -ENOMEM;
nlh = nlmsg_put(skb, c->portid, c->seq, XFRM_MSG_FLUSHPOLICY, 0, 0);
err = -EMSGSIZE;
if (nlh == NULL)
goto out_free_skb;
err = copy_to_user_policy_type(c->data.type, skb);
if (err)
goto out_free_skb;
nlmsg_end(skb, nlh);
return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_POLICY);
out_free_skb:
kfree_skb(skb);
return err;
}
static int xfrm_send_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
{
switch (c->event) {
case XFRM_MSG_NEWPOLICY:
case XFRM_MSG_UPDPOLICY:
case XFRM_MSG_DELPOLICY:
return xfrm_notify_policy(xp, dir, c);
case XFRM_MSG_FLUSHPOLICY:
return xfrm_notify_policy_flush(c);
case XFRM_MSG_POLEXPIRE:
return xfrm_exp_policy_notify(xp, dir, c);
default:
printk(KERN_NOTICE "xfrm_user: Unknown Policy event %d\n",
c->event);
}
return 0;
}
static inline size_t xfrm_report_msgsize(void)
{
return NLMSG_ALIGN(sizeof(struct xfrm_user_report));
}
static int build_report(struct sk_buff *skb, u8 proto,
struct xfrm_selector *sel, xfrm_address_t *addr)
{
struct xfrm_user_report *ur;
struct nlmsghdr *nlh;
nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_REPORT, sizeof(*ur), 0);
if (nlh == NULL)
return -EMSGSIZE;
ur = nlmsg_data(nlh);
ur->proto = proto;
memcpy(&ur->sel, sel, sizeof(ur->sel));
if (addr) {
int err = nla_put(skb, XFRMA_COADDR, sizeof(*addr), addr);
if (err) {
nlmsg_cancel(skb, nlh);
return err;
}
}
nlmsg_end(skb, nlh);
return 0;
}
static int xfrm_send_report(struct net *net, u8 proto,
struct xfrm_selector *sel, xfrm_address_t *addr)
{
struct sk_buff *skb;
skb = nlmsg_new(xfrm_report_msgsize(), GFP_ATOMIC);
if (skb == NULL)
return -ENOMEM;
if (build_report(skb, proto, sel, addr) < 0)
BUG();
return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_REPORT);
}
static inline size_t xfrm_mapping_msgsize(void)
{
return NLMSG_ALIGN(sizeof(struct xfrm_user_mapping));
}
static int build_mapping(struct sk_buff *skb, struct xfrm_state *x,
xfrm_address_t *new_saddr, __be16 new_sport)
{
struct xfrm_user_mapping *um;
struct nlmsghdr *nlh;
nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_MAPPING, sizeof(*um), 0);
if (nlh == NULL)
return -EMSGSIZE;
um = nlmsg_data(nlh);
memcpy(&um->id.daddr, &x->id.daddr, sizeof(um->id.daddr));
um->id.spi = x->id.spi;
um->id.family = x->props.family;
um->id.proto = x->id.proto;
memcpy(&um->new_saddr, new_saddr, sizeof(um->new_saddr));
memcpy(&um->old_saddr, &x->props.saddr, sizeof(um->old_saddr));
um->new_sport = new_sport;
um->old_sport = x->encap->encap_sport;
um->reqid = x->props.reqid;
nlmsg_end(skb, nlh);
return 0;
}
static int xfrm_send_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr,
__be16 sport)
{
struct net *net = xs_net(x);
struct sk_buff *skb;
if (x->id.proto != IPPROTO_ESP)
return -EINVAL;
if (!x->encap)
return -EINVAL;
skb = nlmsg_new(xfrm_mapping_msgsize(), GFP_ATOMIC);
if (skb == NULL)
return -ENOMEM;
if (build_mapping(skb, x, ipaddr, sport) < 0)
BUG();
return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_MAPPING);
}
static bool xfrm_is_alive(const struct km_event *c)
{
return (bool)xfrm_acquire_is_on(c->net);
}
static struct xfrm_mgr netlink_mgr = {
.notify = xfrm_send_state_notify,
.acquire = xfrm_send_acquire,
.compile_policy = xfrm_compile_policy,
.notify_policy = xfrm_send_policy_notify,
.report = xfrm_send_report,
.migrate = xfrm_send_migrate,
.new_mapping = xfrm_send_mapping,
.is_alive = xfrm_is_alive,
};
static int __net_init xfrm_user_net_init(struct net *net)
{
struct sock *nlsk;
struct netlink_kernel_cfg cfg = {
.groups = XFRMNLGRP_MAX,
.input = xfrm_netlink_rcv,
};
nlsk = netlink_kernel_create(net, NETLINK_XFRM, &cfg);
if (nlsk == NULL)
return -ENOMEM;
net->xfrm.nlsk_stash = nlsk; /* Don't set to NULL */
rcu_assign_pointer(net->xfrm.nlsk, nlsk);
return 0;
}
static void __net_exit xfrm_user_net_exit(struct list_head *net_exit_list)
{
struct net *net;
list_for_each_entry(net, net_exit_list, exit_list)
RCU_INIT_POINTER(net->xfrm.nlsk, NULL);
synchronize_net();
list_for_each_entry(net, net_exit_list, exit_list)
netlink_kernel_release(net->xfrm.nlsk_stash);
}
static struct pernet_operations xfrm_user_net_ops = {
.init = xfrm_user_net_init,
.exit_batch = xfrm_user_net_exit,
};
static int __init xfrm_user_init(void)
{
int rv;
printk(KERN_INFO "Initializing XFRM netlink socket\n");
rv = register_pernet_subsys(&xfrm_user_net_ops);
if (rv < 0)
return rv;
rv = xfrm_register_km(&netlink_mgr);
if (rv < 0)
unregister_pernet_subsys(&xfrm_user_net_ops);
return rv;
}
static void __exit xfrm_user_exit(void)
{
xfrm_unregister_km(&netlink_mgr);
unregister_pernet_subsys(&xfrm_user_net_ops);
}
module_init(xfrm_user_init);
module_exit(xfrm_user_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_XFRM);
#include "mongoose.h"
#ifdef MG_MODULE_LINES
#line 1 "mongoose/src/internal.h"
#endif
/*
* Copyright (c) 2014 Cesanta Software Limited
* All rights reserved
*/
#ifndef CS_MONGOOSE_SRC_INTERNAL_H_
#define CS_MONGOOSE_SRC_INTERNAL_H_
/* Amalgamated: #include "common/mg_mem.h" */
#ifndef MBUF_REALLOC
#define MBUF_REALLOC MG_REALLOC
#endif
#ifndef MBUF_FREE
#define MBUF_FREE MG_FREE
#endif
#define MG_SET_PTRPTR(_ptr, _v) \
do { \
if (_ptr) *(_ptr) = _v; \
} while (0)
#ifndef MG_INTERNAL
#define MG_INTERNAL static
#endif
#ifdef PICOTCP
#define NO_LIBC
#define MG_DISABLE_PFS
#endif
/* Amalgamated: #include "mongoose/src/net.h" */
/* Amalgamated: #include "mongoose/src/http.h" */
/* Amalgamated: #include "common/cs_dbg.h" */
#define MG_CTL_MSG_MESSAGE_SIZE 8192
/* internals that need to be accessible in unit tests */
MG_INTERNAL struct mg_connection *mg_do_connect(struct mg_connection *nc,
int proto,
union socket_address *sa);
MG_INTERNAL int mg_parse_address(const char *str, union socket_address *sa,
int *proto, char *host, size_t host_len);
MG_INTERNAL void mg_call(struct mg_connection *nc,
mg_event_handler_t ev_handler, void *user_data, int ev,
void *ev_data);
void mg_forward(struct mg_connection *from, struct mg_connection *to);
MG_INTERNAL void mg_add_conn(struct mg_mgr *mgr, struct mg_connection *c);
MG_INTERNAL void mg_remove_conn(struct mg_connection *c);
MG_INTERNAL struct mg_connection *mg_create_connection(
struct mg_mgr *mgr, mg_event_handler_t callback,
struct mg_add_sock_opts opts);
#ifdef _WIN32
/* Return value is the same as for MultiByteToWideChar. */
int to_wchar(const char *path, wchar_t *wbuf, size_t wbuf_len);
#endif
struct ctl_msg {
mg_event_handler_t callback;
char message[MG_CTL_MSG_MESSAGE_SIZE];
};
#if MG_ENABLE_MQTT
struct mg_mqtt_message;
MG_INTERNAL int parse_mqtt(struct mbuf *io, struct mg_mqtt_message *mm);
#endif
/* Forward declarations for testing. */
extern void *(*test_malloc)(size_t size);
extern void *(*test_calloc)(size_t count, size_t size);
#ifndef MIN
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#endif
#if MG_ENABLE_HTTP
struct mg_serve_http_opts;
/*
* Reassemble the content of the buffer (buf, blen) which should be
* in the HTTP chunked encoding, by collapsing data chunks to the
* beginning of the buffer.
*
 * If chunks get reassembled, modify hm->body to point to the reassembled
 * body and fire the MG_EV_HTTP_CHUNK event. If the handler sets
 * MG_F_DELETE_CHUNK in nc->flags, delete the reassembled body from the
 * mbuf (see the handler sketch below).
*
* Return reassembled body size.
*/
MG_INTERNAL size_t mg_handle_chunked(struct mg_connection *nc,
struct http_message *hm, char *buf,
size_t blen);
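/*
 * Illustrative sketch (an assumption about typical usage, not code from
 * this file): a user event handler that consumes chunks as they arrive
 * and asks the core to drop them from the receive buffer by setting
 * MG_F_DELETE_CHUNK.
 *
 *   static void ev_handler(struct mg_connection *nc, int ev, void *ev_data) {
 *     if (ev == MG_EV_HTTP_CHUNK) {
 *       struct http_message *hm = (struct http_message *) ev_data;
 *       // consume hm->body.p / hm->body.len here, then:
 *       nc->flags |= MG_F_DELETE_CHUNK;
 *     }
 *   }
 */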
MG_INTERNAL int mg_http_common_url_parse(const char *url, const char *schema,
const char *schema_tls, int *use_ssl,
char **user, char **pass, char **addr,
int *port_i, const char **path);
#if MG_ENABLE_FILESYSTEM
MG_INTERNAL int mg_uri_to_local_path(struct http_message *hm,
const struct mg_serve_http_opts *opts,
char **local_path,
struct mg_str *remainder);
MG_INTERNAL time_t mg_parse_date_string(const char *datetime);
MG_INTERNAL int mg_is_not_modified(struct http_message *hm, cs_stat_t *st);
#endif
#if MG_ENABLE_HTTP_CGI
MG_INTERNAL void mg_handle_cgi(struct mg_connection *nc, const char *prog,
const struct mg_str *path_info,
const struct http_message *hm,
const struct mg_serve_http_opts *opts);
struct mg_http_proto_data_cgi;
MG_INTERNAL void mg_http_free_proto_data_cgi(struct mg_http_proto_data_cgi *d);
#endif
#if MG_ENABLE_HTTP_SSI
MG_INTERNAL void mg_handle_ssi_request(struct mg_connection *nc,
struct http_message *hm,
const char *path,
const struct mg_serve_http_opts *opts);
#endif
#if MG_ENABLE_HTTP_WEBDAV
MG_INTERNAL int mg_is_dav_request(const struct mg_str *s);
MG_INTERNAL void mg_handle_propfind(struct mg_connection *nc, const char *path,
cs_stat_t *stp, struct http_message *hm,
struct mg_serve_http_opts *opts);
MG_INTERNAL void mg_handle_lock(struct mg_connection *nc, const char *path);
MG_INTERNAL void mg_handle_mkcol(struct mg_connection *nc, const char *path,
struct http_message *hm);
MG_INTERNAL void mg_handle_move(struct mg_connection *c,
const struct mg_serve_http_opts *opts,
const char *path, struct http_message *hm);
MG_INTERNAL void mg_handle_delete(struct mg_connection *nc,
const struct mg_serve_http_opts *opts,
const char *path);
MG_INTERNAL void mg_handle_put(struct mg_connection *nc, const char *path,
struct http_message *hm);
#endif
#if MG_ENABLE_HTTP_WEBSOCKET
MG_INTERNAL void mg_ws_handler(struct mg_connection *nc, int ev,
void *ev_data MG_UD_ARG(void *user_data));
MG_INTERNAL void mg_ws_handshake(struct mg_connection *nc,
const struct mg_str *key);
#endif
#endif /* MG_ENABLE_HTTP */
MG_INTERNAL int mg_get_errno(void);
MG_INTERNAL void mg_close_conn(struct mg_connection *conn);
MG_INTERNAL int mg_http_common_url_parse(const char *url, const char *schema,
const char *schema_tls, int *use_ssl,
char **user, char **pass, char **addr,
int *port_i, const char **path);
#if MG_ENABLE_SNTP
MG_INTERNAL int mg_sntp_parse_reply(const char *buf, int len,
struct mg_sntp_message *msg);
#endif
#endif /* CS_MONGOOSE_SRC_INTERNAL_H_ */
#ifdef MG_MODULE_LINES
#line 1 "common/mg_mem.h"
#endif
/*
* Copyright (c) 2014-2016 Cesanta Software Limited
* All rights reserved
*/
#ifndef CS_COMMON_MG_MEM_H_
#define CS_COMMON_MG_MEM_H_
#ifndef MG_MALLOC
#define MG_MALLOC malloc
#endif
#ifndef MG_CALLOC
#define MG_CALLOC calloc
#endif
#ifndef MG_REALLOC
#define MG_REALLOC realloc
#endif
#ifndef MG_FREE
#define MG_FREE free
#endif
#endif /* CS_COMMON_MG_MEM_H_ */
#ifdef MG_MODULE_LINES
#line 1 "common/cs_dbg.h"
#endif
/*
* Copyright (c) 2014-2016 Cesanta Software Limited
* All rights reserved
*/
#ifndef CS_COMMON_CS_DBG_H_
#define CS_COMMON_CS_DBG_H_
/* Amalgamated: #include "common/platform.h" */
#if CS_ENABLE_STDIO
#include <stdio.h>
#endif
#ifndef CS_ENABLE_DEBUG
#define CS_ENABLE_DEBUG 0
#endif
#ifndef CS_LOG_ENABLE_TS_DIFF
#define CS_LOG_ENABLE_TS_DIFF 0
#endif
#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */
enum cs_log_level {
LL_NONE = -1,
LL_ERROR = 0,
LL_WARN = 1,
LL_INFO = 2,
LL_DEBUG = 3,
LL_VERBOSE_DEBUG = 4,
_LL_MIN = -2,
_LL_MAX = 5,
};
void cs_log_set_level(enum cs_log_level level);
#if CS_ENABLE_STDIO
void cs_log_set_file(FILE *file);
extern enum cs_log_level cs_log_level;
void cs_log_print_prefix(const char *func);
void cs_log_printf(const char *fmt, ...);
#define LOG(l, x) \
do { \
if (cs_log_level >= l) { \
cs_log_print_prefix(__func__); \
cs_log_printf x; \
} \
} while (0)
#ifndef CS_NDEBUG
#define DBG(x) \
do { \
if (cs_log_level >= LL_VERBOSE_DEBUG) { \
cs_log_print_prefix(__func__); \
cs_log_printf x; \
} \
} while (0)
#else /* NDEBUG */
#define DBG(x)
#endif
#else /* CS_ENABLE_STDIO */
#define LOG(l, x)
#define DBG(x)
#endif
#ifdef __cplusplus
}
#endif /* __cplusplus */
#endif /* CS_COMMON_CS_DBG_H_ */
#ifdef MG_MODULE_LINES
#line 1 "common/cs_dbg.c"
#endif
/*
* Copyright (c) 2014-2016 Cesanta Software Limited
* All rights reserved
*/
/* Amalgamated: #include "common/cs_dbg.h" */
#include <stdarg.h>
#include <stdio.h>
#include <string.h>
/* Amalgamated: #include "common/cs_time.h" */
enum cs_log_level cs_log_level WEAK =
#if CS_ENABLE_DEBUG
LL_VERBOSE_DEBUG;
#else
LL_ERROR;
#endif
#if CS_ENABLE_STDIO
FILE *cs_log_file WEAK = NULL;
#if CS_LOG_ENABLE_TS_DIFF
double cs_log_ts WEAK;
#endif
void cs_log_print_prefix(const char *func) WEAK;
void cs_log_print_prefix(const char *func) {
char prefix[21];
strncpy(prefix, func, 20);
prefix[20] = '\0';
if (cs_log_file == NULL) cs_log_file = stderr;
fprintf(cs_log_file, "%-20s ", prefix);
#if CS_LOG_ENABLE_TS_DIFF
{
double now = cs_time();
fprintf(cs_log_file, "%7u ", (unsigned int) ((now - cs_log_ts) * 1000000));
cs_log_ts = now;
}
#endif
}
void cs_log_printf(const char *fmt, ...) WEAK;
void cs_log_printf(const char *fmt, ...) {
va_list ap;
va_start(ap, fmt);
vfprintf(cs_log_file, fmt, ap);
va_end(ap);
fputc('\n', cs_log_file);
fflush(cs_log_file);
}
void cs_log_set_file(FILE *file) WEAK;
void cs_log_set_file(FILE *file) {
cs_log_file = file;
}
#endif /* CS_ENABLE_STDIO */
void cs_log_set_level(enum cs_log_level level) WEAK;
void cs_log_set_level(enum cs_log_level level) {
cs_log_level = level;
#if CS_LOG_ENABLE_TS_DIFF && CS_ENABLE_STDIO
cs_log_ts = cs_time();
#endif
}
#ifdef MG_MODULE_LINES
#line 1 "common/base64.c"
#endif
/*
* Copyright (c) 2014 Cesanta Software Limited
* All rights reserved
*/
#ifndef EXCLUDE_COMMON
/* Amalgamated: #include "common/base64.h" */
#include <string.h>
/* Amalgamated: #include "common/cs_dbg.h" */
/* ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/ */
#define NUM_UPPERCASES ('Z' - 'A' + 1)
#define NUM_LETTERS (NUM_UPPERCASES * 2)
#define NUM_DIGITS ('9' - '0' + 1)
/*
* Emit a base64 code char.
*
 * Doesn't allocate memory, so it is safe to use when dumping memory in crashdumps
*/
static void cs_base64_emit_code(struct cs_base64_ctx *ctx, int v) {
if (v < NUM_UPPERCASES) {
ctx->b64_putc(v + 'A', ctx->user_data);
} else if (v < (NUM_LETTERS)) {
ctx->b64_putc(v - NUM_UPPERCASES + 'a', ctx->user_data);
} else if (v < (NUM_LETTERS + NUM_DIGITS)) {
ctx->b64_putc(v - NUM_LETTERS + '0', ctx->user_data);
} else {
ctx->b64_putc(v - NUM_LETTERS - NUM_DIGITS == 0 ? '+' : '/',
ctx->user_data);
}
}
static void cs_base64_emit_chunk(struct cs_base64_ctx *ctx) {
int a, b, c;
a = ctx->chunk[0];
b = ctx->chunk[1];
c = ctx->chunk[2];
cs_base64_emit_code(ctx, a >> 2);
cs_base64_emit_code(ctx, ((a & 3) << 4) | (b >> 4));
if (ctx->chunk_size > 1) {
cs_base64_emit_code(ctx, (b & 15) << 2 | (c >> 6));
}
if (ctx->chunk_size > 2) {
cs_base64_emit_code(ctx, c & 63);
}
}
void cs_base64_init(struct cs_base64_ctx *ctx, cs_base64_putc_t b64_putc,
void *user_data) {
ctx->chunk_size = 0;
ctx->b64_putc = b64_putc;
ctx->user_data = user_data;
}
void cs_base64_update(struct cs_base64_ctx *ctx, const char *str, size_t len) {
const unsigned char *src = (const unsigned char *) str;
size_t i;
for (i = 0; i < len; i++) {
ctx->chunk[ctx->chunk_size++] = src[i];
if (ctx->chunk_size == 3) {
cs_base64_emit_chunk(ctx);
ctx->chunk_size = 0;
}
}
}
void cs_base64_finish(struct cs_base64_ctx *ctx) {
if (ctx->chunk_size > 0) {
int i;
memset(&ctx->chunk[ctx->chunk_size], 0, 3 - ctx->chunk_size);
cs_base64_emit_chunk(ctx);
for (i = 0; i < (3 - ctx->chunk_size); i++) {
ctx->b64_putc('=', ctx->user_data);
}
}
}
#define BASE64_ENCODE_BODY \
static const char *b64 = \
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"; \
int i, j, a, b, c; \
\
for (i = j = 0; i < src_len; i += 3) { \
a = src[i]; \
b = i + 1 >= src_len ? 0 : src[i + 1]; \
c = i + 2 >= src_len ? 0 : src[i + 2]; \
\
BASE64_OUT(b64[a >> 2]); \
BASE64_OUT(b64[((a & 3) << 4) | (b >> 4)]); \
if (i + 1 < src_len) { \
BASE64_OUT(b64[(b & 15) << 2 | (c >> 6)]); \
} \
if (i + 2 < src_len) { \
BASE64_OUT(b64[c & 63]); \
} \
} \
\
while (j % 4 != 0) { \
BASE64_OUT('='); \
} \
BASE64_FLUSH()
#define BASE64_OUT(ch) \
do { \
dst[j++] = (ch); \
} while (0)
#define BASE64_FLUSH() \
do { \
dst[j++] = '\0'; \
} while (0)
void cs_base64_encode(const unsigned char *src, int src_len, char *dst) {
BASE64_ENCODE_BODY;
}
#undef BASE64_OUT
#undef BASE64_FLUSH
#if CS_ENABLE_STDIO
#define BASE64_OUT(ch) \
do { \
fprintf(f, "%c", (ch)); \
j++; \
} while (0)
#define BASE64_FLUSH()
void cs_fprint_base64(FILE *f, const unsigned char *src, int src_len) {
BASE64_ENCODE_BODY;
}
#undef BASE64_OUT
#undef BASE64_FLUSH
#endif /* CS_ENABLE_STDIO */
/* Convert one byte of encoded base64 input stream to 6-bit chunk */
static unsigned char from_b64(unsigned char ch) {
/* Inverse lookup map */
static const unsigned char tab[128] = {
255, 255, 255, 255,
255, 255, 255, 255, /* 0 */
255, 255, 255, 255,
255, 255, 255, 255, /* 8 */
255, 255, 255, 255,
255, 255, 255, 255, /* 16 */
255, 255, 255, 255,
255, 255, 255, 255, /* 24 */
255, 255, 255, 255,
255, 255, 255, 255, /* 32 */
255, 255, 255, 62,
255, 255, 255, 63, /* 40 */
52, 53, 54, 55,
56, 57, 58, 59, /* 48 */
60, 61, 255, 255,
255, 200, 255, 255, /* 56 '=' is 200, on index 61 */
255, 0, 1, 2,
3, 4, 5, 6, /* 64 */
7, 8, 9, 10,
11, 12, 13, 14, /* 72 */
15, 16, 17, 18,
19, 20, 21, 22, /* 80 */
23, 24, 25, 255,
255, 255, 255, 255, /* 88 */
255, 26, 27, 28,
29, 30, 31, 32, /* 96 */
33, 34, 35, 36,
37, 38, 39, 40, /* 104 */
41, 42, 43, 44,
45, 46, 47, 48, /* 112 */
49, 50, 51, 255,
255, 255, 255, 255, /* 120 */
};
return tab[ch & 127];
}
int cs_base64_decode(const unsigned char *s, int len, char *dst, int *dec_len) {
unsigned char a, b, c, d;
int orig_len = len;
char *orig_dst = dst;
while (len >= 4 && (a = from_b64(s[0])) != 255 &&
(b = from_b64(s[1])) != 255 && (c = from_b64(s[2])) != 255 &&
(d = from_b64(s[3])) != 255) {
s += 4;
len -= 4;
if (a == 200 || b == 200) break; /* '=' can't be there */
*dst++ = a << 2 | b >> 4;
if (c == 200) break;
*dst++ = b << 4 | c >> 2;
if (d == 200) break;
*dst++ = c << 6 | d;
}
*dst = 0;
if (dec_len != NULL) *dec_len = (dst - orig_dst);
return orig_len - len;
}
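/*
 * Illustrative sketch (not part of the library): a one-shot round trip
 * with the helpers above. "abc" encodes to "YWJj"; the output buffer
 * needs 4 * ((n + 2) / 3) bytes plus a terminating NUL.
 *
 *   char enc[8], dec[8];
 *   int dec_len = 0;
 *   cs_base64_encode((const unsigned char *) "abc", 3, enc);
 *   cs_base64_decode((const unsigned char *) enc, 4, dec, &dec_len);
 *   // enc == "YWJj", dec == "abc", dec_len == 3
 */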
#endif /* EXCLUDE_COMMON */
#ifdef MG_MODULE_LINES
#line 1 "common/cs_dirent.h"
#endif
/*
* Copyright (c) 2014-2016 Cesanta Software Limited
* All rights reserved
*/
#ifndef CS_COMMON_CS_DIRENT_H_
#define CS_COMMON_CS_DIRENT_H_
#include <limits.h>
/* Amalgamated: #include "common/platform.h" */
#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */
#ifdef CS_DEFINE_DIRENT
typedef struct { int dummy; } DIR;
struct dirent {
int d_ino;
#ifdef _WIN32
char d_name[MAX_PATH];
#else
/* TODO(rojer): Use PATH_MAX but make sure it's sane on every platform */
char d_name[256];
#endif
};
DIR *opendir(const char *dir_name);
int closedir(DIR *dir);
struct dirent *readdir(DIR *dir);
#endif /* CS_DEFINE_DIRENT */
#ifdef __cplusplus
}
#endif /* __cplusplus */
#endif /* CS_COMMON_CS_DIRENT_H_ */
#ifdef MG_MODULE_LINES
#line 1 "common/cs_dirent.c"
#endif
/*
* Copyright (c) 2015 Cesanta Software Limited
* All rights reserved
*/
#ifndef EXCLUDE_COMMON
/* Amalgamated: #include "common/mg_mem.h" */
/* Amalgamated: #include "common/cs_dirent.h" */
/*
* This file contains POSIX opendir/closedir/readdir API implementation
* for systems which do not natively support it (e.g. Windows).
*/
#ifdef _WIN32
struct win32_dir {
DIR d;
HANDLE handle;
WIN32_FIND_DATAW info;
struct dirent result;
};
DIR *opendir(const char *name) {
struct win32_dir *dir = NULL;
wchar_t wpath[MAX_PATH];
DWORD attrs;
if (name == NULL) {
SetLastError(ERROR_BAD_ARGUMENTS);
} else if ((dir = (struct win32_dir *) MG_MALLOC(sizeof(*dir))) == NULL) {
SetLastError(ERROR_NOT_ENOUGH_MEMORY);
} else {
to_wchar(name, wpath, ARRAY_SIZE(wpath));
attrs = GetFileAttributesW(wpath);
if (attrs != 0xFFFFFFFF && (attrs & FILE_ATTRIBUTE_DIRECTORY)) {
(void) wcscat(wpath, L"\\*");
dir->handle = FindFirstFileW(wpath, &dir->info);
dir->result.d_name[0] = '\0';
} else {
MG_FREE(dir);
dir = NULL;
}
}
return (DIR *) dir;
}
int closedir(DIR *d) {
struct win32_dir *dir = (struct win32_dir *) d;
int result = 0;
if (dir != NULL) {
if (dir->handle != INVALID_HANDLE_VALUE)
result = FindClose(dir->handle) ? 0 : -1;
MG_FREE(dir);
} else {
result = -1;
SetLastError(ERROR_BAD_ARGUMENTS);
}
return result;
}
struct dirent *readdir(DIR *d) {
struct win32_dir *dir = (struct win32_dir *) d;
struct dirent *result = NULL;
if (dir) {
memset(&dir->result, 0, sizeof(dir->result));
if (dir->handle != INVALID_HANDLE_VALUE) {
result = &dir->result;
(void) WideCharToMultiByte(CP_UTF8, 0, dir->info.cFileName, -1,
result->d_name, sizeof(result->d_name), NULL,
NULL);
if (!FindNextFileW(dir->handle, &dir->info)) {
(void) FindClose(dir->handle);
dir->handle = INVALID_HANDLE_VALUE;
}
} else {
SetLastError(ERROR_FILE_NOT_FOUND);
}
} else {
SetLastError(ERROR_BAD_ARGUMENTS);
}
return result;
}
#endif
#endif /* EXCLUDE_COMMON */
/* ISO C requires a translation unit to contain at least one declaration */
typedef int cs_dirent_dummy;
#ifdef MG_MODULE_LINES
#line 1 "common/cs_time.c"
#endif
/*
* Copyright (c) 2014-2016 Cesanta Software Limited
* All rights reserved
*/
/* Amalgamated: #include "common/cs_time.h" */
#ifndef _WIN32
#include <stddef.h>
/*
* There is no sys/time.h on ARMCC.
*/
#if !(defined(__ARMCC_VERSION) || defined(__ICCARM__)) && \
!defined(__TI_COMPILER_VERSION__) && \
(!defined(CS_PLATFORM) || CS_PLATFORM != CS_P_NXP_LPC)
#include <sys/time.h>
#endif
#else
#include <windows.h>
#endif
double cs_time(void) WEAK;
double cs_time(void) {
double now;
#ifndef _WIN32
struct timeval tv;
if (gettimeofday(&tv, NULL /* tz */) != 0) return 0;
now = (double) tv.tv_sec + (((double) tv.tv_usec) / 1000000.0);
#else
SYSTEMTIME sysnow;
FILETIME ftime;
GetLocalTime(&sysnow);
SystemTimeToFileTime(&sysnow, &ftime);
/*
 * 1. VC 6.0 doesn't support uint64 -> double conversion, so we use int64.
 * This should not cause problems in this (21st) century.
* 2. Windows FILETIME is a number of 100-nanosecond intervals since January
* 1, 1601 while time_t is a number of _seconds_ since January 1, 1970 UTC,
* thus, we need to convert to seconds and adjust amount (subtract 11644473600
* seconds)
*/
now = (double) (((int64_t) ftime.dwLowDateTime +
((int64_t) ftime.dwHighDateTime << 32)) /
10000000.0) -
11644473600;
#endif /* _WIN32 */
return now;
}
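/*
 * Illustrative sketch (not part of the library): cs_time() returns wall
 * clock seconds as a double, so elapsed time is a simple subtraction.
 * do_work() is a hypothetical workload.
 *
 *   double start = cs_time();
 *   do_work();
 *   double elapsed = cs_time() - start; // seconds, sub-second resolution
 */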
#ifdef MG_MODULE_LINES
#line 1 "common/cs_endian.h"
#endif
/*
* Copyright (c) 2014-2016 Cesanta Software Limited
* All rights reserved
*/
#ifndef CS_COMMON_CS_ENDIAN_H_
#define CS_COMMON_CS_ENDIAN_H_
/*
 * clang with -std=c99 defines __LITTLE_ENDIAN by default, while e.g. an
 * RTOS gcc defines LITTLE_ENDIAN by default (it depends on __USE_BSD),
 * so make sure both spellings are available.
*/
#if !defined(BYTE_ORDER) && defined(__BYTE_ORDER)
#define BYTE_ORDER __BYTE_ORDER
#ifndef LITTLE_ENDIAN
#define LITTLE_ENDIAN __LITTLE_ENDIAN
#endif /* LITTLE_ENDIAN */
#ifndef BIG_ENDIAN
#define BIG_ENDIAN __BIG_ENDIAN
#endif /* BIG_ENDIAN */
#endif /* BYTE_ORDER */
#endif /* CS_COMMON_CS_ENDIAN_H_ */
#ifdef MG_MODULE_LINES
#line 1 "common/md5.c"
#endif
/*
* This code implements the MD5 message-digest algorithm.
* The algorithm is due to Ron Rivest. This code was
* written by Colin Plumb in 1993, no copyright is claimed.
* This code is in the public domain; do with it what you wish.
*
* Equivalent code is available from RSA Data Security, Inc.
* This code has been tested against that, and is equivalent,
* except that you don't need to include two pages of legalese
* with every copy.
*
* To compute the message digest of a chunk of bytes, declare an
* MD5Context structure, pass it to MD5Init, call MD5Update as
* needed on buffers full of bytes, and then call MD5Final, which
* will fill a supplied 16-byte array with the digest.
*/
/* Amalgamated: #include "common/md5.h" */
/* Amalgamated: #include "common/str_util.h" */
#if !defined(EXCLUDE_COMMON)
#if !CS_DISABLE_MD5
/* Amalgamated: #include "common/cs_endian.h" */
static void byteReverse(unsigned char *buf, unsigned longs) {
/* Forrest: MD5 expects LITTLE_ENDIAN, swap if BIG_ENDIAN */
#if BYTE_ORDER == BIG_ENDIAN
do {
uint32_t t = (uint32_t)((unsigned) buf[3] << 8 | buf[2]) << 16 |
((unsigned) buf[1] << 8 | buf[0]);
*(uint32_t *) buf = t;
buf += 4;
} while (--longs);
#else
(void) buf;
(void) longs;
#endif
}
#define F1(x, y, z) (z ^ (x & (y ^ z)))
#define F2(x, y, z) F1(z, x, y)
#define F3(x, y, z) (x ^ y ^ z)
#define F4(x, y, z) (y ^ (x | ~z))
#define MD5STEP(f, w, x, y, z, data, s) \
(w += f(x, y, z) + data, w = w << s | w >> (32 - s), w += x)
/*
* Start MD5 accumulation. Set bit count to 0 and buffer to mysterious
* initialization constants.
*/
void MD5_Init(MD5_CTX *ctx) {
ctx->buf[0] = 0x67452301;
ctx->buf[1] = 0xefcdab89;
ctx->buf[2] = 0x98badcfe;
ctx->buf[3] = 0x10325476;
ctx->bits[0] = 0;
ctx->bits[1] = 0;
}
static void MD5Transform(uint32_t buf[4], uint32_t const in[16]) {
register uint32_t a, b, c, d;
a = buf[0];
b = buf[1];
c = buf[2];
d = buf[3];
MD5STEP(F1, a, b, c, d, in[0] + 0xd76aa478, 7);
MD5STEP(F1, d, a, b, c, in[1] + 0xe8c7b756, 12);
MD5STEP(F1, c, d, a, b, in[2] + 0x242070db, 17);
MD5STEP(F1, b, c, d, a, in[3] + 0xc1bdceee, 22);
MD5STEP(F1, a, b, c, d, in[4] + 0xf57c0faf, 7);
MD5STEP(F1, d, a, b, c, in[5] + 0x4787c62a, 12);
MD5STEP(F1, c, d, a, b, in[6] + 0xa8304613, 17);
MD5STEP(F1, b, c, d, a, in[7] + 0xfd469501, 22);
MD5STEP(F1, a, b, c, d, in[8] + 0x698098d8, 7);
MD5STEP(F1, d, a, b, c, in[9] + 0x8b44f7af, 12);
MD5STEP(F1, c, d, a, b, in[10] + 0xffff5bb1, 17);
MD5STEP(F1, b, c, d, a, in[11] + 0x895cd7be, 22);
MD5STEP(F1, a, b, c, d, in[12] + 0x6b901122, 7);
MD5STEP(F1, d, a, b, c, in[13] + 0xfd987193, 12);
MD5STEP(F1, c, d, a, b, in[14] + 0xa679438e, 17);
MD5STEP(F1, b, c, d, a, in[15] + 0x49b40821, 22);
MD5STEP(F2, a, b, c, d, in[1] + 0xf61e2562, 5);
MD5STEP(F2, d, a, b, c, in[6] + 0xc040b340, 9);
MD5STEP(F2, c, d, a, b, in[11] + 0x265e5a51, 14);
MD5STEP(F2, b, c, d, a, in[0] + 0xe9b6c7aa, 20);
MD5STEP(F2, a, b, c, d, in[5] + 0xd62f105d, 5);
MD5STEP(F2, d, a, b, c, in[10] + 0x02441453, 9);
MD5STEP(F2, c, d, a, b, in[15] + 0xd8a1e681, 14);
MD5STEP(F2, b, c, d, a, in[4] + 0xe7d3fbc8, 20);
MD5STEP(F2, a, b, c, d, in[9] + 0x21e1cde6, 5);
MD5STEP(F2, d, a, b, c, in[14] + 0xc33707d6, 9);
MD5STEP(F2, c, d, a, b, in[3] + 0xf4d50d87, 14);
MD5STEP(F2, b, c, d, a, in[8] + 0x455a14ed, 20);
MD5STEP(F2, a, b, c, d, in[13] + 0xa9e3e905, 5);
MD5STEP(F2, d, a, b, c, in[2] + 0xfcefa3f8, 9);
MD5STEP(F2, c, d, a, b, in[7] + 0x676f02d9, 14);
MD5STEP(F2, b, c, d, a, in[12] + 0x8d2a4c8a, 20);
MD5STEP(F3, a, b, c, d, in[5] + 0xfffa3942, 4);
MD5STEP(F3, d, a, b, c, in[8] + 0x8771f681, 11);
MD5STEP(F3, c, d, a, b, in[11] + 0x6d9d6122, 16);
MD5STEP(F3, b, c, d, a, in[14] + 0xfde5380c, 23);
MD5STEP(F3, a, b, c, d, in[1] + 0xa4beea44, 4);
MD5STEP(F3, d, a, b, c, in[4] + 0x4bdecfa9, 11);
MD5STEP(F3, c, d, a, b, in[7] + 0xf6bb4b60, 16);
MD5STEP(F3, b, c, d, a, in[10] + 0xbebfbc70, 23);
MD5STEP(F3, a, b, c, d, in[13] + 0x289b7ec6, 4);
MD5STEP(F3, d, a, b, c, in[0] + 0xeaa127fa, 11);
MD5STEP(F3, c, d, a, b, in[3] + 0xd4ef3085, 16);
MD5STEP(F3, b, c, d, a, in[6] + 0x04881d05, 23);
MD5STEP(F3, a, b, c, d, in[9] + 0xd9d4d039, 4);
MD5STEP(F3, d, a, b, c, in[12] + 0xe6db99e5, 11);
MD5STEP(F3, c, d, a, b, in[15] + 0x1fa27cf8, 16);
MD5STEP(F3, b, c, d, a, in[2] + 0xc4ac5665, 23);
MD5STEP(F4, a, b, c, d, in[0] + 0xf4292244, 6);
MD5STEP(F4, d, a, b, c, in[7] + 0x432aff97, 10);
MD5STEP(F4, c, d, a, b, in[14] + 0xab9423a7, 15);
MD5STEP(F4, b, c, d, a, in[5] + 0xfc93a039, 21);
MD5STEP(F4, a, b, c, d, in[12] + 0x655b59c3, 6);
MD5STEP(F4, d, a, b, c, in[3] + 0x8f0ccc92, 10);
MD5STEP(F4, c, d, a, b, in[10] + 0xffeff47d, 15);
MD5STEP(F4, b, c, d, a, in[1] + 0x85845dd1, 21);
MD5STEP(F4, a, b, c, d, in[8] + 0x6fa87e4f, 6);
MD5STEP(F4, d, a, b, c, in[15] + 0xfe2ce6e0, 10);
MD5STEP(F4, c, d, a, b, in[6] + 0xa3014314, 15);
MD5STEP(F4, b, c, d, a, in[13] + 0x4e0811a1, 21);
MD5STEP(F4, a, b, c, d, in[4] + 0xf7537e82, 6);
MD5STEP(F4, d, a, b, c, in[11] + 0xbd3af235, 10);
MD5STEP(F4, c, d, a, b, in[2] + 0x2ad7d2bb, 15);
MD5STEP(F4, b, c, d, a, in[9] + 0xeb86d391, 21);
buf[0] += a;
buf[1] += b;
buf[2] += c;
buf[3] += d;
}
void MD5_Update(MD5_CTX *ctx, const unsigned char *buf, size_t len) {
uint32_t t;
t = ctx->bits[0];
if ((ctx->bits[0] = t + ((uint32_t) len << 3)) < t) ctx->bits[1]++;
ctx->bits[1] += (uint32_t) len >> 29;
t = (t >> 3) & 0x3f;
if (t) {
unsigned char *p = (unsigned char *) ctx->in + t;
t = 64 - t;
if (len < t) {
memcpy(p, buf, len);
return;
}
memcpy(p, buf, t);
byteReverse(ctx->in, 16);
MD5Transform(ctx->buf, (uint32_t *) ctx->in);
buf += t;
len -= t;
}
while (len >= 64) {
memcpy(ctx->in, buf, 64);
byteReverse(ctx->in, 16);
MD5Transform(ctx->buf, (uint32_t *) ctx->in);
buf += 64;
len -= 64;
}
memcpy(ctx->in, buf, len);
}
void MD5_Final(unsigned char digest[16], MD5_CTX *ctx) {
unsigned count;
unsigned char *p;
uint32_t *a;
count = (ctx->bits[0] >> 3) & 0x3F;
p = ctx->in + count;
*p++ = 0x80;
count = 64 - 1 - count;
if (count < 8) {
memset(p, 0, count);
byteReverse(ctx->in, 16);
MD5Transform(ctx->buf, (uint32_t *) ctx->in);
memset(ctx->in, 0, 56);
} else {
memset(p, 0, count - 8);
}
byteReverse(ctx->in, 14);
a = (uint32_t *) ctx->in;
a[14] = ctx->bits[0];
a[15] = ctx->bits[1];
MD5Transform(ctx->buf, (uint32_t *) ctx->in);
byteReverse((unsigned char *) ctx->buf, 4);
memcpy(digest, ctx->buf, 16);
memset((char *) ctx, 0, sizeof(*ctx));
}
char *cs_md5(char buf[33], ...) {
unsigned char hash[16];
const unsigned char *p;
va_list ap;
MD5_CTX ctx;
MD5_Init(&ctx);
va_start(ap, buf);
while ((p = va_arg(ap, const unsigned char *) ) != NULL) {
size_t len = va_arg(ap, size_t);
MD5_Update(&ctx, p, len);
}
va_end(ap);
MD5_Final(hash, &ctx);
cs_to_hex(buf, hash, sizeof(hash));
return buf;
}
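/*
 * Illustrative sketch (not part of the library): cs_md5() consumes
 * (pointer, length) pairs terminated by a NULL pointer and writes the
 * hex digest into a caller-provided 33-byte buffer. MD5("abc") is the
 * standard RFC 1321 test vector.
 *
 *   char digest[33];
 *   cs_md5(digest, "abc", (size_t) 3, NULL);
 *   // digest == "900150983cd24fb0d6963f7d28e17f72"
 */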
#endif /* CS_DISABLE_MD5 */
#endif /* EXCLUDE_COMMON */
#ifdef MG_MODULE_LINES
#line 1 "common/mbuf.c"
#endif
/*
* Copyright (c) 2014 Cesanta Software Limited
* All rights reserved
*/
#ifndef EXCLUDE_COMMON
#include <assert.h>
#include <string.h>
/* Amalgamated: #include "common/mbuf.h" */
#ifndef MBUF_REALLOC
#define MBUF_REALLOC realloc
#endif
#ifndef MBUF_FREE
#define MBUF_FREE free
#endif
void mbuf_init(struct mbuf *mbuf, size_t initial_size) WEAK;
void mbuf_init(struct mbuf *mbuf, size_t initial_size) {
mbuf->len = mbuf->size = 0;
mbuf->buf = NULL;
mbuf_resize(mbuf, initial_size);
}
void mbuf_free(struct mbuf *mbuf) WEAK;
void mbuf_free(struct mbuf *mbuf) {
if (mbuf->buf != NULL) {
MBUF_FREE(mbuf->buf);
mbuf_init(mbuf, 0);
}
}
void mbuf_resize(struct mbuf *a, size_t new_size) WEAK;
void mbuf_resize(struct mbuf *a, size_t new_size) {
if (new_size > a->size || (new_size < a->size && new_size >= a->len)) {
char *buf = (char *) MBUF_REALLOC(a->buf, new_size);
/*
* In case realloc fails, there's not much we can do, except keep things as
* they are. Note that NULL is a valid return value from realloc when
* size == 0, but that is covered too.
*/
if (buf == NULL && new_size != 0) return;
a->buf = buf;
a->size = new_size;
}
}
void mbuf_trim(struct mbuf *mbuf) WEAK;
void mbuf_trim(struct mbuf *mbuf) {
mbuf_resize(mbuf, mbuf->len);
}
size_t mbuf_insert(struct mbuf *a, size_t off, const void *buf, size_t len) WEAK;
size_t mbuf_insert(struct mbuf *a, size_t off, const void *buf, size_t len) {
char *p = NULL;
assert(a != NULL);
assert(a->len <= a->size);
assert(off <= a->len);
/* check overflow */
if (~(size_t) 0 - (size_t) a->buf < len) return 0;
if (a->len + len <= a->size) {
memmove(a->buf + off + len, a->buf + off, a->len - off);
if (buf != NULL) {
memcpy(a->buf + off, buf, len);
}
a->len += len;
} else {
size_t new_size = (size_t)((a->len + len) * MBUF_SIZE_MULTIPLIER);
if ((p = (char *) MBUF_REALLOC(a->buf, new_size)) != NULL) {
a->buf = p;
memmove(a->buf + off + len, a->buf + off, a->len - off);
if (buf != NULL) memcpy(a->buf + off, buf, len);
a->len += len;
a->size = new_size;
} else {
len = 0;
}
}
return len;
}
size_t mbuf_append(struct mbuf *a, const void *buf, size_t len) WEAK;
size_t mbuf_append(struct mbuf *a, const void *buf, size_t len) {
return mbuf_insert(a, a->len, buf, len);
}
void mbuf_remove(struct mbuf *mb, size_t n) WEAK;
void mbuf_remove(struct mbuf *mb, size_t n) {
if (n > 0 && n <= mb->len) {
memmove(mb->buf, mb->buf + n, mb->len - n);
mb->len -= n;
}
}
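/*
 * Illustrative sketch (not part of the library): a typical mbuf
 * lifecycle. mbuf_append() grows the buffer as needed; mbuf_remove()
 * discards consumed bytes from the front.
 *
 *   struct mbuf mb;
 *   mbuf_init(&mb, 0);
 *   mbuf_append(&mb, "hello", 5);
 *   mbuf_remove(&mb, 2); // mb.buf now holds "llo", mb.len == 3
 *   mbuf_trim(&mb);      // shrink the allocation down to mb.len
 *   mbuf_free(&mb);
 */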
#endif /* EXCLUDE_COMMON */
#ifdef MG_MODULE_LINES
#line 1 "common/mg_str.c"
#endif
/*
* Copyright (c) 2014-2016 Cesanta Software Limited
* All rights reserved
*/
/* Amalgamated: #include "common/mg_mem.h" */
/* Amalgamated: #include "common/mg_str.h" */
#include <stdlib.h>
#include <string.h>
int mg_ncasecmp(const char *s1, const char *s2, size_t len) WEAK;
struct mg_str mg_mk_str(const char *s) WEAK;
struct mg_str mg_mk_str(const char *s) {
struct mg_str ret = {s, 0};
if (s != NULL) ret.len = strlen(s);
return ret;
}
struct mg_str mg_mk_str_n(const char *s, size_t len) WEAK;
struct mg_str mg_mk_str_n(const char *s, size_t len) {
struct mg_str ret = {s, len};
return ret;
}
int mg_vcmp(const struct mg_str *str1, const char *str2) WEAK;
int mg_vcmp(const struct mg_str *str1, const char *str2) {
size_t n2 = strlen(str2), n1 = str1->len;
int r = strncmp(str1->p, str2, (n1 < n2) ? n1 : n2);
if (r == 0) {
return n1 - n2;
}
return r;
}
int mg_vcasecmp(const struct mg_str *str1, const char *str2) WEAK;
int mg_vcasecmp(const struct mg_str *str1, const char *str2) {
size_t n2 = strlen(str2), n1 = str1->len;
int r = mg_ncasecmp(str1->p, str2, (n1 < n2) ? n1 : n2);
if (r == 0) {
return n1 - n2;
}
return r;
}
struct mg_str mg_strdup(const struct mg_str s) WEAK;
struct mg_str mg_strdup(const struct mg_str s) {
struct mg_str r = {NULL, 0};
if (s.len > 0 && s.p != NULL) {
r.p = (char *) MG_MALLOC(s.len);
if (r.p != NULL) {
memcpy((char *) r.p, s.p, s.len);
r.len = s.len;
}
}
return r;
}
int mg_strcmp(const struct mg_str str1, const struct mg_str str2) WEAK;
int mg_strcmp(const struct mg_str str1, const struct mg_str str2) {
size_t i = 0;
while (i < str1.len && i < str2.len) {
if (str1.p[i] < str2.p[i]) return -1;
if (str1.p[i] > str2.p[i]) return 1;
i++;
}
if (i < str1.len) return 1;
if (i < str2.len) return -1;
return 0;
}
int mg_strncmp(const struct mg_str str1, const struct mg_str str2, size_t n) WEAK;
int mg_strncmp(const struct mg_str str1, const struct mg_str str2, size_t n) {
struct mg_str s1 = str1;
struct mg_str s2 = str2;
if (s1.len > n) {
s1.len = n;
}
if (s2.len > n) {
s2.len = n;
}
return mg_strcmp(s1, s2);
}
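/*
 * Illustrative sketch (not part of the library): mg_str is a
 * (pointer, length) view, so it need not be NUL-terminated.
 *
 *   struct mg_str a = mg_mk_str("example");
 *   struct mg_str b = mg_mk_str_n("exam", 4);
 *   int prefix_eq = (mg_strncmp(a, b, 4) == 0); // 1: first 4 bytes match
 *   int full_cmp = mg_vcmp(&a, "example");      // 0: equal
 */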
#ifdef MG_MODULE_LINES
#line 1 "common/sha1.c"
#endif
/* Copyright(c) By Steve Reid <steve@edmweb.com> */
/* 100% Public Domain */
/* Amalgamated: #include "common/sha1.h" */
#if !CS_DISABLE_SHA1 && !defined(EXCLUDE_COMMON)
/* Amalgamated: #include "common/cs_endian.h" */
#define SHA1HANDSOFF
#if defined(__sun)
/* Amalgamated: #include "common/solarisfixes.h" */
#endif
union char64long16 {
unsigned char c[64];
uint32_t l[16];
};
#define rol(value, bits) (((value) << (bits)) | ((value) >> (32 - (bits))))
static uint32_t blk0(union char64long16 *block, int i) {
/* Forrest: SHA expects BIG_ENDIAN, swap if LITTLE_ENDIAN */
#if BYTE_ORDER == LITTLE_ENDIAN
block->l[i] =
(rol(block->l[i], 24) & 0xFF00FF00) | (rol(block->l[i], 8) & 0x00FF00FF);
#endif
return block->l[i];
}
/* Avoid redefine warning (ARM /usr/include/sys/ucontext.h define R0~R4) */
#undef blk
#undef R0
#undef R1
#undef R2
#undef R3
#undef R4
#define blk(i) \
(block->l[i & 15] = rol(block->l[(i + 13) & 15] ^ block->l[(i + 8) & 15] ^ \
block->l[(i + 2) & 15] ^ block->l[i & 15], \
1))
#define R0(v, w, x, y, z, i) \
z += ((w & (x ^ y)) ^ y) + blk0(block, i) + 0x5A827999 + rol(v, 5); \
w = rol(w, 30);
#define R1(v, w, x, y, z, i) \
z += ((w & (x ^ y)) ^ y) + blk(i) + 0x5A827999 + rol(v, 5); \
w = rol(w, 30);
#define R2(v, w, x, y, z, i) \
z += (w ^ x ^ y) + blk(i) + 0x6ED9EBA1 + rol(v, 5); \
w = rol(w, 30);
#define R3(v, w, x, y, z, i) \
z += (((w | x) & y) | (w & x)) + blk(i) + 0x8F1BBCDC + rol(v, 5); \
w = rol(w, 30);
#define R4(v, w, x, y, z, i) \
z += (w ^ x ^ y) + blk(i) + 0xCA62C1D6 + rol(v, 5); \
w = rol(w, 30);
void cs_sha1_transform(uint32_t state[5], const unsigned char buffer[64]) {
uint32_t a, b, c, d, e;
union char64long16 block[1];
memcpy(block, buffer, 64);
a = state[0];
b = state[1];
c = state[2];
d = state[3];
e = state[4];
R0(a, b, c, d, e, 0);
R0(e, a, b, c, d, 1);
R0(d, e, a, b, c, 2);
R0(c, d, e, a, b, 3);
R0(b, c, d, e, a, 4);
R0(a, b, c, d, e, 5);
R0(e, a, b, c, d, 6);
R0(d, e, a, b, c, 7);
R0(c, d, e, a, b, 8);
R0(b, c, d, e, a, 9);
R0(a, b, c, d, e, 10);
R0(e, a, b, c, d, 11);
R0(d, e, a, b, c, 12);
R0(c, d, e, a, b, 13);
R0(b, c, d, e, a, 14);
R0(a, b, c, d, e, 15);
R1(e, a, b, c, d, 16);
R1(d, e, a, b, c, 17);
R1(c, d, e, a, b, 18);
R1(b, c, d, e, a, 19);
R2(a, b, c, d, e, 20);
R2(e, a, b, c, d, 21);
R2(d, e, a, b, c, 22);
R2(c, d, e, a, b, 23);
R2(b, c, d, e, a, 24);
R2(a, b, c, d, e, 25);
R2(e, a, b, c, d, 26);
R2(d, e, a, b, c, 27);
R2(c, d, e, a, b, 28);
R2(b, c, d, e, a, 29);
R2(a, b, c, d, e, 30);
R2(e, a, b, c, d, 31);
R2(d, e, a, b, c, 32);
R2(c, d, e, a, b, 33);
R2(b, c, d, e, a, 34);
R2(a, b, c, d, e, 35);
R2(e, a, b, c, d, 36);
R2(d, e, a, b, c, 37);
R2(c, d, e, a, b, 38);
R2(b, c, d, e, a, 39);
R3(a, b, c, d, e, 40);
R3(e, a, b, c, d, 41);
R3(d, e, a, b, c, 42);
R3(c, d, e, a, b, 43);
R3(b, c, d, e, a, 44);
R3(a, b, c, d, e, 45);
R3(e, a, b, c, d, 46);
R3(d, e, a, b, c, 47);
R3(c, d, e, a, b, 48);
R3(b, c, d, e, a, 49);
R3(a, b, c, d, e, 50);
R3(e, a, b, c, d, 51);
R3(d, e, a, b, c, 52);
R3(c, d, e, a, b, 53);
R3(b, c, d, e, a, 54);
R3(a, b, c, d, e, 55);
R3(e, a, b, c, d, 56);
R3(d, e, a, b, c, 57);
R3(c, d, e, a, b, 58);
R3(b, c, d, e, a, 59);
R4(a, b, c, d, e, 60);
R4(e, a, b, c, d, 61);
R4(d, e, a, b, c, 62);
R4(c, d, e, a, b, 63);
R4(b, c, d, e, a, 64);
R4(a, b, c, d, e, 65);
R4(e, a, b, c, d, 66);
R4(d, e, a, b, c, 67);
R4(c, d, e, a, b, 68);
R4(b, c, d, e, a, 69);
R4(a, b, c, d, e, 70);
R4(e, a, b, c, d, 71);
R4(d, e, a, b, c, 72);
R4(c, d, e, a, b, 73);
R4(b, c, d, e, a, 74);
R4(a, b, c, d, e, 75);
R4(e, a, b, c, d, 76);
R4(d, e, a, b, c, 77);
R4(c, d, e, a, b, 78);
R4(b, c, d, e, a, 79);
state[0] += a;
state[1] += b;
state[2] += c;
state[3] += d;
state[4] += e;
/* Erase working structures. The order of operations is important;
 * it ensures that the compiler doesn't optimize them out. */
memset(block, 0, sizeof(block));
a = b = c = d = e = 0;
(void) a;
(void) b;
(void) c;
(void) d;
(void) e;
}
void cs_sha1_init(cs_sha1_ctx *context) {
context->state[0] = 0x67452301;
context->state[1] = 0xEFCDAB89;
context->state[2] = 0x98BADCFE;
context->state[3] = 0x10325476;
context->state[4] = 0xC3D2E1F0;
context->count[0] = context->count[1] = 0;
}
void cs_sha1_update(cs_sha1_ctx *context, const unsigned char *data,
uint32_t len) {
uint32_t i, j;
j = context->count[0];
if ((context->count[0] += len << 3) < j) context->count[1]++;
context->count[1] += (len >> 29);
j = (j >> 3) & 63;
if ((j + len) > 63) {
memcpy(&context->buffer[j], data, (i = 64 - j));
cs_sha1_transform(context->state, context->buffer);
for (; i + 63 < len; i += 64) {
cs_sha1_transform(context->state, &data[i]);
}
j = 0;
} else
i = 0;
memcpy(&context->buffer[j], &data[i], len - i);
}
void cs_sha1_final(unsigned char digest[20], cs_sha1_ctx *context) {
unsigned i;
unsigned char finalcount[8], c;
for (i = 0; i < 8; i++) {
finalcount[i] = (unsigned char) ((context->count[(i >= 4 ? 0 : 1)] >>
((3 - (i & 3)) * 8)) &
255);
}
c = 0200;
cs_sha1_update(context, &c, 1);
while ((context->count[0] & 504) != 448) {
c = 0000;
cs_sha1_update(context, &c, 1);
}
cs_sha1_update(context, finalcount, 8);
for (i = 0; i < 20; i++) {
digest[i] =
(unsigned char) ((context->state[i >> 2] >> ((3 - (i & 3)) * 8)) & 255);
}
memset(context, '\0', sizeof(*context));
memset(&finalcount, '\0', sizeof(finalcount));
}
void cs_hmac_sha1(const unsigned char *key, size_t keylen,
const unsigned char *data, size_t datalen,
unsigned char out[20]) {
cs_sha1_ctx ctx;
unsigned char buf1[64], buf2[64], tmp_key[20], i;
if (keylen > sizeof(buf1)) {
cs_sha1_init(&ctx);
cs_sha1_update(&ctx, key, keylen);
cs_sha1_final(tmp_key, &ctx);
key = tmp_key;
keylen = sizeof(tmp_key);
}
memset(buf1, 0, sizeof(buf1));
memset(buf2, 0, sizeof(buf2));
memcpy(buf1, key, keylen);
memcpy(buf2, key, keylen);
for (i = 0; i < sizeof(buf1); i++) {
buf1[i] ^= 0x36;
buf2[i] ^= 0x5c;
}
cs_sha1_init(&ctx);
cs_sha1_update(&ctx, buf1, sizeof(buf1));
cs_sha1_update(&ctx, data, datalen);
cs_sha1_final(out, &ctx);
cs_sha1_init(&ctx);
cs_sha1_update(&ctx, buf2, sizeof(buf2));
cs_sha1_update(&ctx, out, 20);
cs_sha1_final(out, &ctx);
}
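/*
 * Illustrative sketch (not part of the library): HMAC-SHA1 with the
 * well-known test vector HMAC("key", "The quick brown fox jumps over
 * the lazy dog") = de7c9b85b8b78aa6bc8a7a36f70a90701c9db4d9. cs_to_hex()
 * is defined further down in this file.
 *
 *   unsigned char mac[20];
 *   char hex[41];
 *   cs_hmac_sha1((const unsigned char *) "key", 3,
 *                (const unsigned char *) "The quick brown fox jumps over "
 *                                        "the lazy dog",
 *                43, mac);
 *   cs_to_hex(hex, mac, sizeof(mac));
 */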
#endif /* EXCLUDE_COMMON */
#ifdef MG_MODULE_LINES
#line 1 "common/str_util.c"
#endif
/*
* Copyright (c) 2015 Cesanta Software Limited
* All rights reserved
*/
#ifndef EXCLUDE_COMMON
/* Amalgamated: #include "common/mg_mem.h" */
/* Amalgamated: #include "common/platform.h" */
/* Amalgamated: #include "common/str_util.h" */
#ifndef C_DISABLE_BUILTIN_SNPRINTF
#define C_DISABLE_BUILTIN_SNPRINTF 0
#endif
/* Amalgamated: #include "common/mg_mem.h" */
size_t c_strnlen(const char *s, size_t maxlen) WEAK;
size_t c_strnlen(const char *s, size_t maxlen) {
size_t l = 0;
for (; l < maxlen && s[l] != '\0'; l++) {
}
return l;
}
#define C_SNPRINTF_APPEND_CHAR(ch) \
do { \
if (i < (int) buf_size) buf[i] = ch; \
i++; \
} while (0)
#define C_SNPRINTF_FLAG_ZERO 1
#if C_DISABLE_BUILTIN_SNPRINTF
int c_vsnprintf(char *buf, size_t buf_size, const char *fmt, va_list ap) WEAK;
int c_vsnprintf(char *buf, size_t buf_size, const char *fmt, va_list ap) {
return vsnprintf(buf, buf_size, fmt, ap);
}
#else
static int c_itoa(char *buf, size_t buf_size, int64_t num, int base, int flags,
int field_width) {
char tmp[40];
int i = 0, k = 0, neg = 0;
if (num < 0) {
neg++;
num = -num;
}
/* Print into temporary buffer - in reverse order */
do {
int rem = num % base;
if (rem < 10) {
tmp[k++] = '0' + rem;
} else {
tmp[k++] = 'a' + (rem - 10);
}
num /= base;
} while (num > 0);
/* Zero padding */
if (flags & C_SNPRINTF_FLAG_ZERO) {
while (k < field_width && k < (int) sizeof(tmp) - 1) {
tmp[k++] = '0';
}
}
/* And sign */
if (neg) {
tmp[k++] = '-';
}
/* Now output */
while (--k >= 0) {
C_SNPRINTF_APPEND_CHAR(tmp[k]);
}
return i;
}
int c_vsnprintf(char *buf, size_t buf_size, const char *fmt, va_list ap) WEAK;
int c_vsnprintf(char *buf, size_t buf_size, const char *fmt, va_list ap) {
int ch, i = 0, len_mod, flags, precision, field_width;
while ((ch = *fmt++) != '\0') {
if (ch != '%') {
C_SNPRINTF_APPEND_CHAR(ch);
} else {
/*
* Conversion specification:
* zero or more flags (one of: # 0 - <space> + ')
* an optional minimum field width (digits)
* an optional precision (. followed by digits, or *)
* an optional length modifier (one of: hh h l ll L q j z t)
* conversion specifier (one of: d i o u x X e E f F g G a A c s p n)
*/
flags = field_width = precision = len_mod = 0;
/* Flags. Only the zero-pad flag is supported. */
if (*fmt == '0') {
flags |= C_SNPRINTF_FLAG_ZERO;
}
/* Field width */
while (*fmt >= '0' && *fmt <= '9') {
field_width *= 10;
field_width += *fmt++ - '0';
}
/* Dynamic field width */
if (*fmt == '*') {
field_width = va_arg(ap, int);
fmt++;
}
/* Precision */
if (*fmt == '.') {
fmt++;
if (*fmt == '*') {
precision = va_arg(ap, int);
fmt++;
} else {
while (*fmt >= '0' && *fmt <= '9') {
precision *= 10;
precision += *fmt++ - '0';
}
}
}
/* Length modifier */
switch (*fmt) {
case 'h':
case 'l':
case 'L':
case 'I':
case 'q':
case 'j':
case 'z':
case 't':
len_mod = *fmt++;
if (*fmt == 'h') {
len_mod = 'H';
fmt++;
}
if (*fmt == 'l') {
len_mod = 'q';
fmt++;
}
break;
}
ch = *fmt++;
if (ch == 's') {
const char *s = va_arg(ap, const char *); /* Always fetch parameter */
int j;
int pad = field_width - (precision >= 0 ? c_strnlen(s, precision) : 0);
for (j = 0; j < pad; j++) {
C_SNPRINTF_APPEND_CHAR(' ');
}
/* `s` may be NULL in case of %.*s */
if (s != NULL) {
/* Ignore negative and 0 precisions */
for (j = 0; (precision <= 0 || j < precision) && s[j] != '\0'; j++) {
C_SNPRINTF_APPEND_CHAR(s[j]);
}
}
} else if (ch == 'c') {
ch = va_arg(ap, int); /* Always fetch parameter */
C_SNPRINTF_APPEND_CHAR(ch);
} else if (ch == 'd' && len_mod == 0) {
i += c_itoa(buf + i, buf_size - i, va_arg(ap, int), 10, flags,
field_width);
} else if (ch == 'd' && len_mod == 'l') {
i += c_itoa(buf + i, buf_size - i, va_arg(ap, long), 10, flags,
field_width);
#ifdef SSIZE_MAX
} else if (ch == 'd' && len_mod == 'z') {
i += c_itoa(buf + i, buf_size - i, va_arg(ap, ssize_t), 10, flags,
field_width);
#endif
} else if (ch == 'd' && len_mod == 'q') {
i += c_itoa(buf + i, buf_size - i, va_arg(ap, int64_t), 10, flags,
field_width);
} else if ((ch == 'x' || ch == 'u') && len_mod == 0) {
i += c_itoa(buf + i, buf_size - i, va_arg(ap, unsigned),
ch == 'x' ? 16 : 10, flags, field_width);
} else if ((ch == 'x' || ch == 'u') && len_mod == 'l') {
i += c_itoa(buf + i, buf_size - i, va_arg(ap, unsigned long),
ch == 'x' ? 16 : 10, flags, field_width);
} else if ((ch == 'x' || ch == 'u') && len_mod == 'z') {
i += c_itoa(buf + i, buf_size - i, va_arg(ap, size_t),
ch == 'x' ? 16 : 10, flags, field_width);
} else if (ch == 'p') {
unsigned long num = (unsigned long) (uintptr_t) va_arg(ap, void *);
C_SNPRINTF_APPEND_CHAR('0');
C_SNPRINTF_APPEND_CHAR('x');
i += c_itoa(buf + i, buf_size - i, num, 16, flags, 0);
} else {
#ifndef NO_LIBC
/*
* TODO(lsm): abort is not nice in a library, remove it
* Also, ESP8266 SDK doesn't have it
*/
abort();
#endif
}
}
}
/* Zero-terminate the result */
if (buf_size > 0) {
buf[i < (int) buf_size ? i : (int) buf_size - 1] = '\0';
}
return i;
}
#endif
int c_snprintf(char *buf, size_t buf_size, const char *fmt, ...) WEAK;
int c_snprintf(char *buf, size_t buf_size, const char *fmt, ...) {
int result;
va_list ap;
va_start(ap, fmt);
result = c_vsnprintf(buf, buf_size, fmt, ap);
va_end(ap);
return result;
}
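/*
 * Illustrative sketch (not part of the library): like C99 snprintf(),
 * c_snprintf() returns the number of characters that would have been
 * written, so a return value >= buf_size signals truncation.
 *
 *   char buf[8];
 *   int n = c_snprintf(buf, sizeof(buf), "%s %d", "port", 8080);
 *   if (n >= (int) sizeof(buf)) {
 *     // output was truncated; buf still holds a NUL-terminated prefix
 *   }
 */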
#ifdef _WIN32
int to_wchar(const char *path, wchar_t *wbuf, size_t wbuf_len) {
int ret;
char buf[MAX_PATH * 2], buf2[MAX_PATH * 2], *p;
strncpy(buf, path, sizeof(buf));
buf[sizeof(buf) - 1] = '\0';
/* Trim trailing slashes. Leave backslash for paths like "X:\" */
p = buf + strlen(buf) - 1;
while (p > buf && p[-1] != ':' && (p[0] == '\\' || p[0] == '/')) *p-- = '\0';
memset(wbuf, 0, wbuf_len * sizeof(wchar_t));
ret = MultiByteToWideChar(CP_UTF8, 0, buf, -1, wbuf, (int) wbuf_len);
/*
 * Convert the wide string back to UTF-8. If the round-tripped string does
 * not match the original, something is fishy; reject.
*/
WideCharToMultiByte(CP_UTF8, 0, wbuf, (int) wbuf_len, buf2, sizeof(buf2),
NULL, NULL);
if (strcmp(buf, buf2) != 0) {
wbuf[0] = L'\0';
ret = 0;
}
return ret;
}
#endif /* _WIN32 */
/* The simplest O(mn) algorithm. Better implementations are GPLed */
const char *c_strnstr(const char *s, const char *find, size_t slen) WEAK;
const char *c_strnstr(const char *s, const char *find, size_t slen) {
size_t find_length = strlen(find);
size_t i;
for (i = 0; i < slen; i++) {
if (i + find_length > slen) {
return NULL;
}
if (strncmp(&s[i], find, find_length) == 0) {
return &s[i];
}
}
return NULL;
}
#if CS_ENABLE_STRDUP
char *strdup(const char *src) WEAK;
char *strdup(const char *src) {
size_t len = strlen(src) + 1;
char *ret = MG_MALLOC(len);
if (ret != NULL) {
strcpy(ret, src);
}
return ret;
}
#endif
void cs_to_hex(char *to, const unsigned char *p, size_t len) WEAK;
void cs_to_hex(char *to, const unsigned char *p, size_t len) {
static const char *hex = "0123456789abcdef";
for (; len--; p++) {
*to++ = hex[p[0] >> 4];
*to++ = hex[p[0] & 0x0f];
}
*to = '\0';
}
static int fourbit(int ch) {
if (ch >= '0' && ch <= '9') {
return ch - '0';
} else if (ch >= 'a' && ch <= 'f') {
return ch - 'a' + 10;
} else if (ch >= 'A' && ch <= 'F') {
return ch - 'A' + 10;
}
return 0;
}
void cs_from_hex(char *to, const char *p, size_t len) WEAK;
void cs_from_hex(char *to, const char *p, size_t len) {
size_t i;
for (i = 0; i < len; i += 2) {
*to++ = (fourbit(p[i]) << 4) + fourbit(p[i + 1]);
}
*to = '\0';
}
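/*
 * Illustrative sketch (not part of the library): a hex round trip. The
 * hex buffer needs 2 * len + 1 bytes for the terminating NUL.
 *
 *   unsigned char raw[3] = {0xde, 0xad, 0x42};
 *   char hex[7], back[4];
 *   cs_to_hex(hex, raw, sizeof(raw));    // hex == "dead42"
 *   cs_from_hex(back, hex, strlen(hex)); // back holds 0xde 0xad 0x42
 */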
#if CS_ENABLE_TO64
int64_t cs_to64(const char *s) WEAK;
int64_t cs_to64(const char *s) {
int64_t result = 0;
int64_t neg = 1;
while (*s && isspace((unsigned char) *s)) s++;
if (*s == '-') {
neg = -1;
s++;
}
while (isdigit((unsigned char) *s)) {
result *= 10;
result += (*s - '0');
s++;
}
return result * neg;
}
#endif
static int str_util_lowercase(const char *s) {
return tolower(*(const unsigned char *) s);
}
int mg_ncasecmp(const char *s1, const char *s2, size_t len) WEAK;
int mg_ncasecmp(const char *s1, const char *s2, size_t len) {
int diff = 0;
if (len > 0) do {
diff = str_util_lowercase(s1++) - str_util_lowercase(s2++);
} while (diff == 0 && s1[-1] != '\0' && --len > 0);
return diff;
}
int mg_casecmp(const char *s1, const char *s2) WEAK;
int mg_casecmp(const char *s1, const char *s2) {
return mg_ncasecmp(s1, s2, (size_t) ~0);
}
int mg_asprintf(char **buf, size_t size, const char *fmt, ...) WEAK;
int mg_asprintf(char **buf, size_t size, const char *fmt, ...) {
int ret;
va_list ap;
va_start(ap, fmt);
ret = mg_avprintf(buf, size, fmt, ap);
va_end(ap);
return ret;
}
int mg_avprintf(char **buf, size_t size, const char *fmt, va_list ap) WEAK;
int mg_avprintf(char **buf, size_t size, const char *fmt, va_list ap) {
va_list ap_copy;
int len;
va_copy(ap_copy, ap);
len = vsnprintf(*buf, size, fmt, ap_copy);
va_end(ap_copy);
if (len < 0) {
/* eCos and Windows are not standard-compliant and return -1 when
 * the buffer is too small. Keep allocating larger buffers until we
 * succeed or run out of memory. */
*buf = NULL; /* LCOV_EXCL_START */
while (len < 0) {
MG_FREE(*buf);
size *= 2;
if ((*buf = (char *) MG_MALLOC(size)) == NULL) break;
va_copy(ap_copy, ap);
len = vsnprintf(*buf, size, fmt, ap_copy);
va_end(ap_copy);
}
/* LCOV_EXCL_STOP */
} else if (len >= (int) size) {
/* Standard-compliant code path. Allocate a buffer that is large enough. */
if ((*buf = (char *) MG_MALLOC(len + 1)) == NULL) {
len = -1; /* LCOV_EXCL_LINE */
} else { /* LCOV_EXCL_LINE */
va_copy(ap_copy, ap);
len = vsnprintf(*buf, len + 1, fmt, ap_copy);
va_end(ap_copy);
}
}
return len;
}
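/*
 * Illustrative sketch (not part of the library): mg_asprintf() formats
 * into the caller's buffer when the result fits in `size`, otherwise it
 * heap-allocates; free *buf when it no longer points at the stack buffer
 * (the same pattern mg_vprintf() uses below).
 *
 *   char mem[10], *buf = mem;
 *   int len = mg_asprintf(&buf, sizeof(mem), "%s=%d", "verbose", 1);
 *   if (len >= 0) {
 *     // use buf[0..len)
 *   }
 *   if (buf != mem && buf != NULL) MG_FREE(buf);
 */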
#endif /* EXCLUDE_COMMON */
#ifdef MG_MODULE_LINES
#line 1 "mongoose/src/tun.h"
#endif
/*
* Copyright (c) 2014-2016 Cesanta Software Limited
* All rights reserved
*/
#ifndef CS_MONGOOSE_SRC_TUN_H_
#define CS_MONGOOSE_SRC_TUN_H_
#if MG_ENABLE_TUN
/* Amalgamated: #include "mongoose/src/net.h" */
/* Amalgamated: #include "common/mg_str.h" */
#ifndef MG_TUN_RECONNECT_INTERVAL
#define MG_TUN_RECONNECT_INTERVAL 1
#endif
#define MG_TUN_PROTO_NAME "mg_tun"
#define MG_TUN_DATA_FRAME 0x0
#define MG_TUN_F_END_STREAM 0x1
/*
* MG TUN frame format is loosely based on HTTP/2.
 * However, since the communication happens over WebSocket, there is no
 * need to encode the frame length: that is handled by WebSocket framing.
*
* TODO(mkm): Detailed description of the protocol.
*/
struct mg_tun_frame {
uint8_t type;
uint8_t flags;
uint32_t stream_id; /* opaque stream identifier */
struct mg_str body;
};
struct mg_tun_ssl_opts {
#if MG_ENABLE_SSL
const char *ssl_cert;
const char *ssl_key;
const char *ssl_ca_cert;
#else
int dummy; /* some compilers don't like empty structs */
#endif
};
struct mg_tun_client {
struct mg_mgr *mgr;
struct mg_iface *iface;
const char *disp_url;
struct mg_tun_ssl_opts ssl;
uint32_t last_stream_id; /* stream id of most recently accepted connection */
struct mg_connection *disp;
struct mg_connection *listener;
struct mg_connection *reconnect;
};
#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */
struct mg_connection *mg_tun_bind_opt(struct mg_mgr *mgr,
const char *dispatcher,
MG_CB(mg_event_handler_t handler,
void *user_data),
struct mg_bind_opts opts);
int mg_tun_parse_frame(void *data, size_t len, struct mg_tun_frame *frame);
void mg_tun_send_frame(struct mg_connection *ws, uint32_t stream_id,
uint8_t type, uint8_t flags, struct mg_str msg);
void mg_tun_destroy_client(struct mg_tun_client *client);
#ifdef __cplusplus
}
#endif /* __cplusplus */
#endif /* MG_ENABLE_TUN */
#endif /* CS_MONGOOSE_SRC_TUN_H_ */
#ifdef MG_MODULE_LINES
#line 1 "mongoose/src/net.c"
#endif
/*
* Copyright (c) 2014 Cesanta Software Limited
* All rights reserved
*
* This software is dual-licensed: you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation. For the terms of this
* license, see <http://www.gnu.org/licenses/>.
*
* You are free to use this software under the terms of the GNU General
* Public License, but WITHOUT ANY WARRANTY; without even the implied
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* Alternatively, you can license this software under a commercial
* license, as set out in <https://www.cesanta.com/license>.
*/
/* Amalgamated: #include "common/cs_time.h" */
/* Amalgamated: #include "mongoose/src/dns.h" */
/* Amalgamated: #include "mongoose/src/internal.h" */
/* Amalgamated: #include "mongoose/src/resolv.h" */
/* Amalgamated: #include "mongoose/src/util.h" */
/* Amalgamated: #include "mongoose/src/tun.h" */
#define MG_MAX_HOST_LEN 200
#define MG_COPY_COMMON_CONNECTION_OPTIONS(dst, src) \
memcpy(dst, src, sizeof(*dst));
/* Which flags can be pre-set by the user at connection creation time. */
#define _MG_ALLOWED_CONNECT_FLAGS_MASK \
(MG_F_USER_1 | MG_F_USER_2 | MG_F_USER_3 | MG_F_USER_4 | MG_F_USER_5 | \
MG_F_USER_6 | MG_F_WEBSOCKET_NO_DEFRAG | MG_F_ENABLE_BROADCAST)
/* Which flags should be modifiable by user's callbacks. */
#define _MG_CALLBACK_MODIFIABLE_FLAGS_MASK \
(MG_F_USER_1 | MG_F_USER_2 | MG_F_USER_3 | MG_F_USER_4 | MG_F_USER_5 | \
MG_F_USER_6 | MG_F_WEBSOCKET_NO_DEFRAG | MG_F_SEND_AND_CLOSE | \
MG_F_CLOSE_IMMEDIATELY | MG_F_IS_WEBSOCKET | MG_F_DELETE_CHUNK)
#ifndef intptr_t
#define intptr_t long
#endif
MG_INTERNAL void mg_add_conn(struct mg_mgr *mgr, struct mg_connection *c) {
DBG(("%p %p", mgr, c));
c->mgr = mgr;
c->next = mgr->active_connections;
mgr->active_connections = c;
c->prev = NULL;
if (c->next != NULL) c->next->prev = c;
if (c->sock != INVALID_SOCKET) {
c->iface->vtable->add_conn(c);
}
}
MG_INTERNAL void mg_remove_conn(struct mg_connection *conn) {
if (conn->prev == NULL) conn->mgr->active_connections = conn->next;
if (conn->prev) conn->prev->next = conn->next;
if (conn->next) conn->next->prev = conn->prev;
conn->prev = conn->next = NULL;
conn->iface->vtable->remove_conn(conn);
}
MG_INTERNAL void mg_call(struct mg_connection *nc,
mg_event_handler_t ev_handler, void *user_data, int ev,
void *ev_data) {
if (ev_handler == NULL) {
/*
* If protocol handler is specified, call it. Otherwise, call user-specified
* event handler.
*/
ev_handler = nc->proto_handler ? nc->proto_handler : nc->handler;
}
if (ev != MG_EV_POLL) {
DBG(("%p %s ev=%d ev_data=%p flags=%lu rmbl=%d smbl=%d", nc,
ev_handler == nc->handler ? "user" : "proto", ev, ev_data, nc->flags,
(int) nc->recv_mbuf.len, (int) nc->send_mbuf.len));
}
#if !defined(NO_LIBC) && MG_ENABLE_HEXDUMP
/* LCOV_EXCL_START */
if (nc->mgr->hexdump_file != NULL && ev != MG_EV_POLL &&
ev != MG_EV_SEND /* handled separately */) {
if (ev == MG_EV_RECV) {
mg_hexdump_connection(nc, nc->mgr->hexdump_file, nc->recv_mbuf.buf,
*(int *) ev_data, ev);
} else {
mg_hexdump_connection(nc, nc->mgr->hexdump_file, NULL, 0, ev);
}
}
/* LCOV_EXCL_STOP */
#endif
if (ev_handler != NULL) {
unsigned long flags_before = nc->flags;
size_t recv_mbuf_before = nc->recv_mbuf.len, recved;
ev_handler(nc, ev, ev_data MG_UD_ARG(user_data));
recved = (recv_mbuf_before - nc->recv_mbuf.len);
/* Prevent user handler from fiddling with system flags. */
if (ev_handler == nc->handler && nc->flags != flags_before) {
nc->flags = (flags_before & ~_MG_CALLBACK_MODIFIABLE_FLAGS_MASK) |
(nc->flags & _MG_CALLBACK_MODIFIABLE_FLAGS_MASK);
}
if (recved > 0 && !(nc->flags & MG_F_UDP)) {
nc->iface->vtable->recved(nc, recved);
}
}
if (ev != MG_EV_POLL) {
DBG(("%p after %s flags=%lu rmbl=%d smbl=%d", nc,
ev_handler == nc->handler ? "user" : "proto", nc->flags,
(int) nc->recv_mbuf.len, (int) nc->send_mbuf.len));
}
#if !MG_ENABLE_CALLBACK_USERDATA
(void) user_data;
#endif
}
void mg_if_timer(struct mg_connection *c, double now) {
if (c->ev_timer_time > 0 && now >= c->ev_timer_time) {
double old_value = c->ev_timer_time;
mg_call(c, NULL, c->user_data, MG_EV_TIMER, &now);
/*
 * To prevent the timer from firing all the time, reset it after delivery.
 * However, if the user handler sets a new value, do not reset.
*/
if (c->ev_timer_time == old_value) {
c->ev_timer_time = 0;
}
}
}
void mg_if_poll(struct mg_connection *nc, time_t now) {
if (!(nc->flags & MG_F_SSL) || (nc->flags & MG_F_SSL_HANDSHAKE_DONE)) {
mg_call(nc, NULL, nc->user_data, MG_EV_POLL, &now);
}
}
static void mg_destroy_conn(struct mg_connection *conn, int destroy_if) {
if (destroy_if) conn->iface->vtable->destroy_conn(conn);
if (conn->proto_data != NULL && conn->proto_data_destructor != NULL) {
conn->proto_data_destructor(conn->proto_data);
}
#if MG_ENABLE_SSL
mg_ssl_if_conn_free(conn);
#endif
mbuf_free(&conn->recv_mbuf);
mbuf_free(&conn->send_mbuf);
memset(conn, 0, sizeof(*conn));
MG_FREE(conn);
}
void mg_close_conn(struct mg_connection *conn) {
DBG(("%p %lu %d", conn, conn->flags, conn->sock));
#if MG_ENABLE_SSL
if (conn->flags & MG_F_SSL_HANDSHAKE_DONE) {
mg_ssl_if_conn_close_notify(conn);
}
#endif
mg_remove_conn(conn);
conn->iface->vtable->destroy_conn(conn);
mg_call(conn, NULL, conn->user_data, MG_EV_CLOSE, NULL);
mg_destroy_conn(conn, 0 /* destroy_if */);
}
void mg_mgr_init(struct mg_mgr *m, void *user_data) {
struct mg_mgr_init_opts opts;
memset(&opts, 0, sizeof(opts));
mg_mgr_init_opt(m, user_data, opts);
}
void mg_mgr_init_opt(struct mg_mgr *m, void *user_data,
struct mg_mgr_init_opts opts) {
memset(m, 0, sizeof(*m));
#if MG_ENABLE_BROADCAST
m->ctl[0] = m->ctl[1] = INVALID_SOCKET;
#endif
m->user_data = user_data;
#ifdef _WIN32
{
WSADATA data;
WSAStartup(MAKEWORD(2, 2), &data);
}
#elif defined(__unix__)
  /* Ignore the SIGPIPE signal, so that if a client cancels a request, it
   * won't kill the whole process. */
signal(SIGPIPE, SIG_IGN);
#endif
#if MG_ENABLE_SSL
{
static int init_done;
if (!init_done) {
mg_ssl_if_init();
init_done++;
}
}
#endif
{
int i;
if (opts.num_ifaces == 0) {
opts.num_ifaces = mg_num_ifaces;
opts.ifaces = mg_ifaces;
}
if (opts.main_iface != NULL) {
opts.ifaces[MG_MAIN_IFACE] = opts.main_iface;
}
m->num_ifaces = opts.num_ifaces;
m->ifaces =
(struct mg_iface **) MG_MALLOC(sizeof(*m->ifaces) * opts.num_ifaces);
    for (i = 0; i < opts.num_ifaces; i++) {
m->ifaces[i] = mg_if_create_iface(opts.ifaces[i], m);
m->ifaces[i]->vtable->init(m->ifaces[i]);
}
}
if (opts.nameserver != NULL) {
m->nameserver = strdup(opts.nameserver);
}
DBG(("=================================="));
DBG(("init mgr=%p", m));
}
#if MG_ENABLE_JAVASCRIPT
static enum v7_err mg_send_js(struct v7 *v7, v7_val_t *res) {
v7_val_t arg0 = v7_arg(v7, 0);
v7_val_t arg1 = v7_arg(v7, 1);
struct mg_connection *c = (struct mg_connection *) v7_get_ptr(v7, arg0);
size_t len = 0;
if (v7_is_string(arg1)) {
const char *data = v7_get_string(v7, &arg1, &len);
mg_send(c, data, len);
}
*res = v7_mk_number(v7, len);
return V7_OK;
}
enum v7_err mg_enable_javascript(struct mg_mgr *m, struct v7 *v7,
const char *init_file_name) {
v7_val_t v;
m->v7 = v7;
v7_set_method(v7, v7_get_global(v7), "mg_send", mg_send_js);
return v7_exec_file(v7, init_file_name, &v);
}
#endif
void mg_mgr_free(struct mg_mgr *m) {
struct mg_connection *conn, *tmp_conn;
DBG(("%p", m));
if (m == NULL) return;
/* Do one last poll, see https://github.com/cesanta/mongoose/issues/286 */
mg_mgr_poll(m, 0);
#if MG_ENABLE_BROADCAST
if (m->ctl[0] != INVALID_SOCKET) closesocket(m->ctl[0]);
if (m->ctl[1] != INVALID_SOCKET) closesocket(m->ctl[1]);
m->ctl[0] = m->ctl[1] = INVALID_SOCKET;
#endif
for (conn = m->active_connections; conn != NULL; conn = tmp_conn) {
tmp_conn = conn->next;
mg_close_conn(conn);
}
{
int i;
for (i = 0; i < m->num_ifaces; i++) {
m->ifaces[i]->vtable->free(m->ifaces[i]);
MG_FREE(m->ifaces[i]);
}
MG_FREE(m->ifaces);
}
MG_FREE((char *) m->nameserver);
}
time_t mg_mgr_poll(struct mg_mgr *m, int timeout_ms) {
int i;
time_t now = 0; /* oh GCC, seriously ? */
if (m->num_ifaces == 0) {
LOG(LL_ERROR, ("cannot poll: no interfaces"));
return 0;
}
for (i = 0; i < m->num_ifaces; i++) {
now = m->ifaces[i]->vtable->poll(m->ifaces[i], timeout_ms);
}
return now;
}
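/*
 * Minimal life-cycle sketch (not compiled in): init the manager, bind a
 * listener, poll in a loop, free. The port and handler are placeholders;
 * see the handler sketch after mg_call() above.
 */
#if 0
static void noop_handler(struct mg_connection *nc, int ev, void *ev_data) {
  (void) nc; (void) ev; (void) ev_data;
}
int main(void) {
  struct mg_mgr mgr;
  mg_mgr_init(&mgr, NULL);
  if (mg_bind(&mgr, ":8000", noop_handler) == NULL) return 1;
  for (;;) mg_mgr_poll(&mgr, 1000); /* 1000 ms select() timeout */
  mg_mgr_free(&mgr); /* unreachable here; in practice, loop on a flag */
  return 0;
}
#endif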
int mg_vprintf(struct mg_connection *nc, const char *fmt, va_list ap) {
char mem[MG_VPRINTF_BUFFER_SIZE], *buf = mem;
int len;
if ((len = mg_avprintf(&buf, sizeof(mem), fmt, ap)) > 0) {
mg_send(nc, buf, len);
}
if (buf != mem && buf != NULL) {
MG_FREE(buf); /* LCOV_EXCL_LINE */
} /* LCOV_EXCL_LINE */
return len;
}
int mg_printf(struct mg_connection *conn, const char *fmt, ...) {
int len;
va_list ap;
va_start(ap, fmt);
len = mg_vprintf(conn, fmt, ap);
va_end(ap);
return len;
}
#if MG_ENABLE_SYNC_RESOLVER
/* TODO(lsm): use non-blocking resolver */
static int mg_resolve2(const char *host, struct in_addr *ina) {
#if MG_ENABLE_GETADDRINFO
int rv = 0;
struct addrinfo hints, *servinfo, *p;
struct sockaddr_in *h = NULL;
memset(&hints, 0, sizeof hints);
hints.ai_family = AF_INET;
hints.ai_socktype = SOCK_STREAM;
  if ((rv = getaddrinfo(host, NULL, &hints, &servinfo)) != 0) {
    DBG(("getaddrinfo(%s) failed: %s", host, gai_strerror(rv)));
return 0;
}
  for (p = servinfo; p != NULL; p = p->ai_next) {
    h = (struct sockaddr_in *) p->ai_addr;
    memcpy(ina, &h->sin_addr, sizeof(*ina));
}
freeaddrinfo(servinfo);
return 1;
#else
struct hostent *he;
if ((he = gethostbyname(host)) == NULL) {
DBG(("gethostbyname(%s) failed: %s", host, strerror(mg_get_errno())));
} else {
memcpy(ina, he->h_addr_list[0], sizeof(*ina));
return 1;
}
return 0;
#endif /* MG_ENABLE_GETADDRINFO */
}
int mg_resolve(const char *host, char *buf, size_t n) {
struct in_addr ad;
return mg_resolve2(host, &ad) ? snprintf(buf, n, "%s", inet_ntoa(ad)) : 0;
}
#endif /* MG_ENABLE_SYNC_RESOLVER */
MG_INTERNAL struct mg_connection *mg_create_connection_base(
struct mg_mgr *mgr, mg_event_handler_t callback,
struct mg_add_sock_opts opts) {
struct mg_connection *conn;
if ((conn = (struct mg_connection *) MG_CALLOC(1, sizeof(*conn))) != NULL) {
conn->sock = INVALID_SOCKET;
conn->handler = callback;
conn->mgr = mgr;
conn->last_io_time = (time_t) mg_time();
conn->iface =
(opts.iface != NULL ? opts.iface : mgr->ifaces[MG_MAIN_IFACE]);
conn->flags = opts.flags & _MG_ALLOWED_CONNECT_FLAGS_MASK;
conn->user_data = opts.user_data;
/*
* SIZE_MAX is defined as a long long constant in
* system headers on some platforms and so it
* doesn't compile with pedantic ansi flags.
*/
conn->recv_mbuf_limit = ~0;
} else {
MG_SET_PTRPTR(opts.error_string, "failed to create connection");
}
return conn;
}
MG_INTERNAL struct mg_connection *mg_create_connection(
struct mg_mgr *mgr, mg_event_handler_t callback,
struct mg_add_sock_opts opts) {
struct mg_connection *conn = mg_create_connection_base(mgr, callback, opts);
if (conn != NULL && !conn->iface->vtable->create_conn(conn)) {
MG_FREE(conn);
conn = NULL;
}
if (conn == NULL) {
MG_SET_PTRPTR(opts.error_string, "failed to init connection");
}
return conn;
}
/*
 * Address format: [PROTO://][HOST]:PORT
 *
 * HOST can be an IPv4/IPv6 address or a host name.
 * `host` is a destination buffer that receives the parsed HOST part; it should
 * be at least MG_MAX_HOST_LEN bytes long.
 * `proto` receives the socket type: either SOCK_STREAM or SOCK_DGRAM.
 *
 * Return:
 *   -1 on parse error
 *    0 if HOST needs a DNS lookup
 *   >0 length of the consumed address string
 */
MG_INTERNAL int mg_parse_address(const char *str, union socket_address *sa,
int *proto, char *host, size_t host_len) {
unsigned int a, b, c, d, port = 0;
int ch, len = 0;
#if MG_ENABLE_IPV6
char buf[100];
#endif
/*
   * macOS needs this. If we do not zero it, a subsequent bind() will fail.
* Also, all-zeroes in the socket address means binding to all addresses
* for both IPv4 and IPv6 (INADDR_ANY and IN6ADDR_ANY_INIT).
*/
memset(sa, 0, sizeof(*sa));
sa->sin.sin_family = AF_INET;
*proto = SOCK_STREAM;
if (strncmp(str, "udp://", 6) == 0) {
str += 6;
*proto = SOCK_DGRAM;
} else if (strncmp(str, "tcp://", 6) == 0) {
str += 6;
}
if (sscanf(str, "%u.%u.%u.%u:%u%n", &a, &b, &c, &d, &port, &len) == 5) {
/* Bind to a specific IPv4 address, e.g. 192.168.1.5:8080 */
sa->sin.sin_addr.s_addr =
htonl(((uint32_t) a << 24) | ((uint32_t) b << 16) | c << 8 | d);
sa->sin.sin_port = htons((uint16_t) port);
#if MG_ENABLE_IPV6
} else if (sscanf(str, "[%99[^]]]:%u%n", buf, &port, &len) == 2 &&
inet_pton(AF_INET6, buf, &sa->sin6.sin6_addr)) {
/* IPv6 address, e.g. [3ffe:2a00:100:7031::1]:8080 */
sa->sin6.sin6_family = AF_INET6;
sa->sin.sin_port = htons((uint16_t) port);
#endif
#if MG_ENABLE_ASYNC_RESOLVER
} else if (strlen(str) < host_len &&
sscanf(str, "%[^ :]:%u%n", host, &port, &len) == 2) {
sa->sin.sin_port = htons((uint16_t) port);
if (mg_resolve_from_hosts_file(host, sa) != 0) {
/*
* if resolving from hosts file failed and the host
* we are trying to resolve is `localhost` - we should
* try to resolve it using `gethostbyname` and do not try
* to resolve it via DNS server if gethostbyname has failed too
*/
if (mg_ncasecmp(host, "localhost", 9) != 0) {
return 0;
}
#if MG_ENABLE_SYNC_RESOLVER
if (!mg_resolve2(host, &sa->sin.sin_addr)) {
return -1;
}
#else
return -1;
#endif
}
#endif
} else if (sscanf(str, ":%u%n", &port, &len) == 1 ||
sscanf(str, "%u%n", &port, &len) == 1) {
/* If only port is specified, bind to IPv4, INADDR_ANY */
sa->sin.sin_port = htons((uint16_t) port);
} else {
return -1;
}
/* Required for MG_ENABLE_ASYNC_RESOLVER=0 */
(void) host;
(void) host_len;
ch = str[len]; /* Character that follows the address */
  return port <= 0xffffUL && (ch == '\0' || ch == ',' || isspace(ch)) ? len : -1;
}
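/*
 * Usage sketch (not compiled in): how the address strings documented above map
 * to results. mg_parse_address() is internal; applications normally pass these
 * strings to mg_bind()/mg_connect() instead.
 */
#if 0
static void parse_address_demo(void) {
  union socket_address sa;
  char host[MG_MAX_HOST_LEN];
  int proto;
  /* IPv4 literal with port: returns the consumed length (> 0). */
  mg_parse_address("tcp://127.0.0.1:8080", &sa, &proto, host, sizeof(host));
  /* Host name: returns 0, i.e. a DNS lookup is required. */
  mg_parse_address("example.com:80", &sa, &proto, host, sizeof(host));
  /* Port only: binds to INADDR_ANY; proto stays SOCK_STREAM. */
  mg_parse_address(":8000", &sa, &proto, host, sizeof(host));
}
#endif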
struct mg_connection *mg_if_accept_new_conn(struct mg_connection *lc) {
struct mg_add_sock_opts opts;
struct mg_connection *nc;
memset(&opts, 0, sizeof(opts));
nc = mg_create_connection(lc->mgr, lc->handler, opts);
if (nc == NULL) return NULL;
nc->listener = lc;
nc->proto_handler = lc->proto_handler;
nc->user_data = lc->user_data;
nc->recv_mbuf_limit = lc->recv_mbuf_limit;
nc->iface = lc->iface;
if (lc->flags & MG_F_SSL) nc->flags |= MG_F_SSL;
mg_add_conn(nc->mgr, nc);
DBG(("%p %p %d %d", lc, nc, nc->sock, (int) nc->flags));
return nc;
}
void mg_if_accept_tcp_cb(struct mg_connection *nc, union socket_address *sa,
size_t sa_len) {
(void) sa_len;
nc->sa = *sa;
mg_call(nc, NULL, nc->user_data, MG_EV_ACCEPT, &nc->sa);
}
void mg_send(struct mg_connection *nc, const void *buf, int len) {
nc->last_io_time = (time_t) mg_time();
if (nc->flags & MG_F_UDP) {
nc->iface->vtable->udp_send(nc, buf, len);
} else {
nc->iface->vtable->tcp_send(nc, buf, len);
}
#if !defined(NO_LIBC) && MG_ENABLE_HEXDUMP
if (nc->mgr && nc->mgr->hexdump_file != NULL) {
mg_hexdump_connection(nc, nc->mgr->hexdump_file, buf, len, MG_EV_SEND);
}
#endif
}
void mg_if_sent_cb(struct mg_connection *nc, int num_sent) {
if (num_sent < 0) {
nc->flags |= MG_F_CLOSE_IMMEDIATELY;
}
mg_call(nc, NULL, nc->user_data, MG_EV_SEND, &num_sent);
}
MG_INTERNAL void mg_recv_common(struct mg_connection *nc, void *buf, int len,
int own) {
DBG(("%p %d %u", nc, len, (unsigned int) nc->recv_mbuf.len));
if (nc->flags & MG_F_CLOSE_IMMEDIATELY) {
DBG(("%p discarded %d bytes", nc, len));
/*
     * This connection will not survive the next poll. Do not deliver events;
     * send the data to /dev/null without acking.
*/
if (own) {
MG_FREE(buf);
}
return;
}
nc->last_io_time = (time_t) mg_time();
if (!own) {
mbuf_append(&nc->recv_mbuf, buf, len);
} else if (nc->recv_mbuf.len == 0) {
/* Adopt buf as recv_mbuf's backing store. */
mbuf_free(&nc->recv_mbuf);
nc->recv_mbuf.buf = (char *) buf;
nc->recv_mbuf.size = nc->recv_mbuf.len = len;
} else {
mbuf_append(&nc->recv_mbuf, buf, len);
MG_FREE(buf);
}
mg_call(nc, NULL, nc->user_data, MG_EV_RECV, &len);
}
void mg_if_recv_tcp_cb(struct mg_connection *nc, void *buf, int len, int own) {
mg_recv_common(nc, buf, len, own);
}
void mg_if_recv_udp_cb(struct mg_connection *nc, void *buf, int len,
union socket_address *sa, size_t sa_len) {
assert(nc->flags & MG_F_UDP);
DBG(("%p %u", nc, (unsigned int) len));
if (nc->flags & MG_F_LISTENING) {
struct mg_connection *lc = nc;
/*
* Do we have an existing connection for this source?
* This is very inefficient for long connection lists.
*/
for (nc = mg_next(lc->mgr, NULL); nc != NULL; nc = mg_next(lc->mgr, nc)) {
if (memcmp(&nc->sa.sa, &sa->sa, sa_len) == 0 && nc->listener == lc) {
break;
}
}
if (nc == NULL) {
struct mg_add_sock_opts opts;
memset(&opts, 0, sizeof(opts));
/* Create fake connection w/out sock initialization */
nc = mg_create_connection_base(lc->mgr, lc->handler, opts);
if (nc != NULL) {
nc->sock = lc->sock;
nc->listener = lc;
nc->sa = *sa;
nc->proto_handler = lc->proto_handler;
nc->user_data = lc->user_data;
nc->recv_mbuf_limit = lc->recv_mbuf_limit;
nc->flags = MG_F_UDP;
/*
* Long-lived UDP "connections" i.e. interactions that involve more
* than one request and response are rare, most are transactional:
* response is sent and the "connection" is closed. Or - should be.
* But users (including ourselves) tend to forget about that part,
* because UDP is connectionless and one does not think about
* processing a UDP request as handling a connection that needs to be
* closed. Thus, we begin with SEND_AND_CLOSE flag set, which should
* be a reasonable default for most use cases, but it is possible to
         * turn it off if the connection should be kept alive after processing.
*/
nc->flags |= MG_F_SEND_AND_CLOSE;
mg_add_conn(lc->mgr, nc);
mg_call(nc, NULL, nc->user_data, MG_EV_ACCEPT, &nc->sa);
} else {
DBG(("OOM"));
/* No return here, we still need to drop on the floor */
}
}
}
if (nc != NULL) {
mg_recv_common(nc, buf, len, 1);
} else {
    /* No connection to deliver to (e.g. OOM above): drop on the floor. */
    MG_FREE(buf);
}
}
/*
* Schedules an async connect for a resolved address and proto.
* Called from two places: `mg_connect_opt()` and from async resolver.
* When called from the async resolver, it must trigger `MG_EV_CONNECT` event
* with a failure flag to indicate connection failure.
*/
MG_INTERNAL struct mg_connection *mg_do_connect(struct mg_connection *nc,
int proto,
union socket_address *sa) {
DBG(("%p %s://%s:%hu", nc, proto == SOCK_DGRAM ? "udp" : "tcp",
inet_ntoa(sa->sin.sin_addr), ntohs(sa->sin.sin_port)));
nc->flags |= MG_F_CONNECTING;
if (proto == SOCK_DGRAM) {
nc->iface->vtable->connect_udp(nc);
} else {
nc->iface->vtable->connect_tcp(nc, sa);
}
mg_add_conn(nc->mgr, nc);
return nc;
}
void mg_if_connect_cb(struct mg_connection *nc, int err) {
DBG(("%p connect, err=%d", nc, err));
nc->flags &= ~MG_F_CONNECTING;
if (err != 0) {
nc->flags |= MG_F_CLOSE_IMMEDIATELY;
}
mg_call(nc, NULL, nc->user_data, MG_EV_CONNECT, &err);
}
#if MG_ENABLE_ASYNC_RESOLVER
/*
 * Callback for the async resolver on a mg_connect_opt() call.
 * The main task of this function is to trigger the MG_EV_CONNECT event with
 * either failure (and deallocate the connection)
 * or success (and proceed with connect()).
*/
static void resolve_cb(struct mg_dns_message *msg, void *data,
enum mg_resolve_err e) {
struct mg_connection *nc = (struct mg_connection *) data;
int i;
int failure = -1;
nc->flags &= ~MG_F_RESOLVING;
if (msg != NULL) {
/*
* Take the first DNS A answer and run...
*/
for (i = 0; i < msg->num_answers; i++) {
if (msg->answers[i].rtype == MG_DNS_A_RECORD) {
/*
* Async resolver guarantees that there is at least one answer.
* TODO(lsm): handle IPv6 answers too
*/
mg_dns_parse_record_data(msg, &msg->answers[i], &nc->sa.sin.sin_addr,
4);
mg_do_connect(nc, nc->flags & MG_F_UDP ? SOCK_DGRAM : SOCK_STREAM,
&nc->sa);
return;
}
}
}
if (e == MG_RESOLVE_TIMEOUT) {
double now = mg_time();
mg_call(nc, NULL, nc->user_data, MG_EV_TIMER, &now);
}
/*
   * If we get here, there was no MG_DNS_A_RECORD in the answer.
*/
mg_call(nc, NULL, nc->user_data, MG_EV_CONNECT, &failure);
mg_call(nc, NULL, nc->user_data, MG_EV_CLOSE, NULL);
mg_destroy_conn(nc, 1 /* destroy_if */);
}
#endif
struct mg_connection *mg_connect(struct mg_mgr *mgr, const char *address,
MG_CB(mg_event_handler_t callback,
void *user_data)) {
struct mg_connect_opts opts;
memset(&opts, 0, sizeof(opts));
return mg_connect_opt(mgr, address, MG_CB(callback, user_data), opts);
}
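/*
 * Client sketch (not compiled in): an outbound TCP connection. MG_EV_CONNECT
 * carries an int * status: 0 on success, non-zero on failure, in which case
 * the connection is closed automatically (see mg_if_connect_cb()). The
 * address and request are placeholders.
 */
#if 0
static void client_handler(struct mg_connection *nc, int ev, void *ev_data) {
  if (ev == MG_EV_CONNECT) {
    if (*(int *) ev_data == 0) {
      mg_printf(nc, "GET / HTTP/1.0\r\n\r\n");
    }
  } else if (ev == MG_EV_RECV) {
    /* Response bytes accumulate in nc->recv_mbuf. */
  }
}
/* Usage: mg_connect(&mgr, "tcp://example.com:80", client_handler); */
#endif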
struct mg_connection *mg_connect_opt(struct mg_mgr *mgr, const char *address,
MG_CB(mg_event_handler_t callback,
void *user_data),
struct mg_connect_opts opts) {
struct mg_connection *nc = NULL;
int proto, rc;
struct mg_add_sock_opts add_sock_opts;
char host[MG_MAX_HOST_LEN];
MG_COPY_COMMON_CONNECTION_OPTIONS(&add_sock_opts, &opts);
if ((nc = mg_create_connection(mgr, callback, add_sock_opts)) == NULL) {
return NULL;
}
if ((rc = mg_parse_address(address, &nc->sa, &proto, host, sizeof(host))) <
0) {
/* Address is malformed */
MG_SET_PTRPTR(opts.error_string, "cannot parse address");
mg_destroy_conn(nc, 1 /* destroy_if */);
return NULL;
}
nc->flags |= opts.flags & _MG_ALLOWED_CONNECT_FLAGS_MASK;
nc->flags |= (proto == SOCK_DGRAM) ? MG_F_UDP : 0;
#if MG_ENABLE_CALLBACK_USERDATA
nc->user_data = user_data;
#else
nc->user_data = opts.user_data;
#endif
#if MG_ENABLE_SSL
DBG(("%p %s %s,%s,%s", nc, address, (opts.ssl_cert ? opts.ssl_cert : "-"),
(opts.ssl_key ? opts.ssl_key : "-"),
(opts.ssl_ca_cert ? opts.ssl_ca_cert : "-")));
if (opts.ssl_cert != NULL || opts.ssl_ca_cert != NULL ||
opts.ssl_psk_identity != NULL) {
const char *err_msg = NULL;
struct mg_ssl_if_conn_params params;
if (nc->flags & MG_F_UDP) {
MG_SET_PTRPTR(opts.error_string, "SSL for UDP is not supported");
mg_destroy_conn(nc, 1 /* destroy_if */);
return NULL;
}
    memset(&params, 0, sizeof(params));
params.cert = opts.ssl_cert;
params.key = opts.ssl_key;
params.ca_cert = opts.ssl_ca_cert;
params.cipher_suites = opts.ssl_cipher_suites;
params.psk_identity = opts.ssl_psk_identity;
params.psk_key = opts.ssl_psk_key;
if (opts.ssl_ca_cert != NULL) {
if (opts.ssl_server_name != NULL) {
if (strcmp(opts.ssl_server_name, "*") != 0) {
params.server_name = opts.ssl_server_name;
}
} else if (rc == 0) { /* If it's a DNS name, use host. */
params.server_name = host;
}
}
    if (mg_ssl_if_conn_init(nc, &params, &err_msg) != MG_SSL_OK) {
MG_SET_PTRPTR(opts.error_string, err_msg);
mg_destroy_conn(nc, 1 /* destroy_if */);
return NULL;
}
nc->flags |= MG_F_SSL;
}
#endif /* MG_ENABLE_SSL */
if (rc == 0) {
#if MG_ENABLE_ASYNC_RESOLVER
/*
* DNS resolution is required for host.
* mg_parse_address() fills port in nc->sa, which we pass to resolve_cb()
*/
struct mg_connection *dns_conn = NULL;
struct mg_resolve_async_opts o;
memset(&o, 0, sizeof(o));
o.dns_conn = &dns_conn;
o.nameserver = opts.nameserver;
if (mg_resolve_async_opt(nc->mgr, host, MG_DNS_A_RECORD, resolve_cb, nc,
o) != 0) {
MG_SET_PTRPTR(opts.error_string, "cannot schedule DNS lookup");
mg_destroy_conn(nc, 1 /* destroy_if */);
return NULL;
}
nc->priv_2 = dns_conn;
nc->flags |= MG_F_RESOLVING;
return nc;
#else
MG_SET_PTRPTR(opts.error_string, "Resolver is disabled");
mg_destroy_conn(nc, 1 /* destroy_if */);
return NULL;
#endif
} else {
/* Address is parsed and resolved to IP. proceed with connect() */
return mg_do_connect(nc, proto, &nc->sa);
}
}
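/*
 * TLS client sketch (not compiled in; requires MG_ENABLE_SSL). The file name,
 * address and handler are placeholders. With ssl_ca_cert set, the peer
 * certificate is verified; for DNS names the host part doubles as the server
 * name unless ssl_server_name overrides it ("*" disables the check), as
 * implemented above.
 */
#if 0
static struct mg_connection *tls_connect(struct mg_mgr *mgr,
                                         mg_event_handler_t handler) {
  struct mg_connect_opts opts;
  memset(&opts, 0, sizeof(opts));
  opts.ssl_ca_cert = "ca.pem"; /* placeholder path */
  return mg_connect_opt(mgr, "tcp://example.com:443", handler, opts);
}
#endif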
struct mg_connection *mg_bind(struct mg_mgr *srv, const char *address,
MG_CB(mg_event_handler_t event_handler,
void *user_data)) {
struct mg_bind_opts opts;
memset(&opts, 0, sizeof(opts));
return mg_bind_opt(srv, address, MG_CB(event_handler, user_data), opts);
}
struct mg_connection *mg_bind_opt(struct mg_mgr *mgr, const char *address,
MG_CB(mg_event_handler_t callback,
void *user_data),
struct mg_bind_opts opts) {
union socket_address sa;
struct mg_connection *nc = NULL;
int proto, rc;
struct mg_add_sock_opts add_sock_opts;
char host[MG_MAX_HOST_LEN];
MG_COPY_COMMON_CONNECTION_OPTIONS(&add_sock_opts, &opts);
#if MG_ENABLE_TUN
if (mg_strncmp(mg_mk_str(address), mg_mk_str("ws://"), 5) == 0 ||
mg_strncmp(mg_mk_str(address), mg_mk_str("wss://"), 6) == 0) {
return mg_tun_bind_opt(mgr, address, MG_CB(callback, user_data), opts);
}
#endif
if (mg_parse_address(address, &sa, &proto, host, sizeof(host)) <= 0) {
MG_SET_PTRPTR(opts.error_string, "cannot parse address");
return NULL;
}
nc = mg_create_connection(mgr, callback, add_sock_opts);
if (nc == NULL) {
return NULL;
}
nc->sa = sa;
nc->flags |= MG_F_LISTENING;
if (proto == SOCK_DGRAM) nc->flags |= MG_F_UDP;
#if MG_ENABLE_SSL
DBG(("%p %s %s,%s,%s", nc, address, (opts.ssl_cert ? opts.ssl_cert : "-"),
(opts.ssl_key ? opts.ssl_key : "-"),
(opts.ssl_ca_cert ? opts.ssl_ca_cert : "-")));
if (opts.ssl_cert != NULL || opts.ssl_ca_cert != NULL) {
const char *err_msg = NULL;
struct mg_ssl_if_conn_params params;
if (nc->flags & MG_F_UDP) {
MG_SET_PTRPTR(opts.error_string, "SSL for UDP is not supported");
mg_destroy_conn(nc, 1 /* destroy_if */);
return NULL;
}
    memset(&params, 0, sizeof(params));
params.cert = opts.ssl_cert;
params.key = opts.ssl_key;
params.ca_cert = opts.ssl_ca_cert;
params.cipher_suites = opts.ssl_cipher_suites;
    if (mg_ssl_if_conn_init(nc, &params, &err_msg) != MG_SSL_OK) {
MG_SET_PTRPTR(opts.error_string, err_msg);
mg_destroy_conn(nc, 1 /* destroy_if */);
return NULL;
}
nc->flags |= MG_F_SSL;
}
#endif /* MG_ENABLE_SSL */
if (nc->flags & MG_F_UDP) {
rc = nc->iface->vtable->listen_udp(nc, &nc->sa);
} else {
rc = nc->iface->vtable->listen_tcp(nc, &nc->sa);
}
if (rc != 0) {
DBG(("Failed to open listener: %d", rc));
MG_SET_PTRPTR(opts.error_string, "failed to open listener");
mg_destroy_conn(nc, 1 /* destroy_if */);
return NULL;
}
mg_add_conn(nc->mgr, nc);
#if MG_ENABLE_CALLBACK_USERDATA
(void) user_data;
#endif
return nc;
}
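/*
 * Listener sketch (not compiled in): mg_bind_opt() with an error_string, which
 * receives a static description when binding fails. Port and handler are
 * placeholders.
 */
#if 0
static struct mg_connection *listen_or_report(struct mg_mgr *mgr,
                                              mg_event_handler_t handler) {
  struct mg_bind_opts opts;
  const char *err = NULL;
  struct mg_connection *lc;
  memset(&opts, 0, sizeof(opts));
  opts.error_string = &err;
  lc = mg_bind_opt(mgr, ":8000", handler, opts);
  if (lc == NULL) fprintf(stderr, "bind failed: %s\n", err);
  return lc;
}
#endif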
struct mg_connection *mg_next(struct mg_mgr *s, struct mg_connection *conn) {
return conn == NULL ? s->active_connections : conn->next;
}
#if MG_ENABLE_BROADCAST
void mg_broadcast(struct mg_mgr *mgr, mg_event_handler_t cb, void *data,
size_t len) {
struct ctl_msg ctl_msg;
/*
   * The Mongoose manager has a socketpair, `struct mg_mgr::ctl`,
   * onto which `mg_broadcast()` pushes the message.
   * `mg_mgr_poll()` wakes up, reads the message from the socket pair, and
   * calls the specified callback for each connection. Thus the callback
   * function executes in the event-manager thread.
*/
if (mgr->ctl[0] != INVALID_SOCKET && data != NULL &&
len < sizeof(ctl_msg.message)) {
size_t dummy;
ctl_msg.callback = cb;
memcpy(ctl_msg.message, data, len);
dummy = MG_SEND_FUNC(mgr->ctl[0], (char *) &ctl_msg,
offsetof(struct ctl_msg, message) + len, 0);
dummy = MG_RECV_FUNC(mgr->ctl[0], (char *) &len, 1, 0);
(void) dummy; /* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=25509 */
}
}
#endif /* MG_ENABLE_BROADCAST */
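/*
 * Broadcast sketch (not compiled in; requires MG_ENABLE_BROADCAST): waking the
 * event loop from another thread. The callback runs in the event-manager
 * thread with ev == MG_EV_POLL and the copied message as ev_data; the payload
 * must be smaller than sizeof(ctl_msg.message).
 */
#if 0
static void bcast_cb(struct mg_connection *nc, int ev, void *ev_data) {
  (void) ev; /* always MG_EV_POLL here */
  mg_printf(nc, "notice: %s\n", (const char *) ev_data);
}
/* From any thread: mg_broadcast(&mgr, bcast_cb, (void *) "hi", 3); */
#endif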
static int isbyte(int n) {
return n >= 0 && n <= 255;
}
static int parse_net(const char *spec, uint32_t *net, uint32_t *mask) {
int n, a, b, c, d, slash = 32, len = 0;
if ((sscanf(spec, "%d.%d.%d.%d/%d%n", &a, &b, &c, &d, &slash, &n) == 5 ||
sscanf(spec, "%d.%d.%d.%d%n", &a, &b, &c, &d, &n) == 4) &&
isbyte(a) && isbyte(b) && isbyte(c) && isbyte(d) && slash >= 0 &&
slash < 33) {
len = n;
*net =
((uint32_t) a << 24) | ((uint32_t) b << 16) | ((uint32_t) c << 8) | d;
*mask = slash ? 0xffffffffU << (32 - slash) : 0;
}
return len;
}
int mg_check_ip_acl(const char *acl, uint32_t remote_ip) {
int allowed, flag;
uint32_t net, mask;
struct mg_str vec;
/* If any ACL is set, deny by default */
allowed = (acl == NULL || *acl == '\0') ? '+' : '-';
while ((acl = mg_next_comma_list_entry(acl, &vec, NULL)) != NULL) {
flag = vec.p[0];
if ((flag != '+' && flag != '-') ||
parse_net(&vec.p[1], &net, &mask) == 0) {
return -1;
}
if (net == (remote_ip & mask)) {
allowed = flag;
}
}
DBG(("%08x %c", remote_ip, allowed));
return allowed == '+';
}
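/*
 * ACL sketch (not compiled in): comma-separated +/- networks, evaluated left
 * to right with the last match winning; a non-empty ACL denies by default.
 * Returns 1 (allowed), 0 (denied) or -1 (malformed spec). remote_ip is in
 * host byte order.
 */
#if 0
static void acl_demo(void) {
  uint32_t ip = 0xc0a80105; /* 192.168.1.5 */
  int a = mg_check_ip_acl("-0.0.0.0/0,+192.168.0.0/16", ip); /* -> 1 */
  int b = mg_check_ip_acl("-0.0.0.0/0,+10.0.0.0/8", ip);     /* -> 0 */
  (void) a; (void) b;
}
#endif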
/* Move data from one connection to another */
void mg_forward(struct mg_connection *from, struct mg_connection *to) {
mg_send(to, from->recv_mbuf.buf, from->recv_mbuf.len);
mbuf_remove(&from->recv_mbuf, from->recv_mbuf.len);
}
double mg_set_timer(struct mg_connection *c, double timestamp) {
double result = c->ev_timer_time;
c->ev_timer_time = timestamp;
/*
* If this connection is resolving, it's not in the list of active
* connections, so not processed yet. It has a DNS resolver connection
* linked to it. Set up a timer for the DNS connection.
*/
DBG(("%p %p %d -> %lu", c, c->priv_2, c->flags & MG_F_RESOLVING,
(unsigned long) timestamp));
if ((c->flags & MG_F_RESOLVING) && c->priv_2 != NULL) {
((struct mg_connection *) c->priv_2)->ev_timer_time = timestamp;
}
return result;
}
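/*
 * Timer sketch (not compiled in): a per-connection inactivity timeout built on
 * mg_set_timer(). MG_EV_TIMER delivers the current time as a double *; timers
 * are one-shot unless re-armed (see mg_if_timer() above).
 */
#if 0
static void idle_timeout_handler(struct mg_connection *nc, int ev,
                                 void *ev_data) {
  if (ev == MG_EV_ACCEPT) {
    mg_set_timer(nc, mg_time() + 10); /* fire MG_EV_TIMER in ~10 seconds */
  } else if (ev == MG_EV_RECV) {
    mg_set_timer(nc, mg_time() + 10); /* activity: push the deadline back */
  } else if (ev == MG_EV_TIMER) {
    nc->flags |= MG_F_SEND_AND_CLOSE; /* idle too long: flush and close */
  }
  (void) ev_data;
}
#endif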
void mg_sock_set(struct mg_connection *nc, sock_t sock) {
if (sock != INVALID_SOCKET) {
nc->iface->vtable->sock_set(nc, sock);
}
}
void mg_if_get_conn_addr(struct mg_connection *nc, int remote,
union socket_address *sa) {
nc->iface->vtable->get_conn_addr(nc, remote, sa);
}
struct mg_connection *mg_add_sock_opt(struct mg_mgr *s, sock_t sock,
MG_CB(mg_event_handler_t callback,
void *user_data),
struct mg_add_sock_opts opts) {
#if MG_ENABLE_CALLBACK_USERDATA
opts.user_data = user_data;
#endif
struct mg_connection *nc = mg_create_connection_base(s, callback, opts);
if (nc != NULL) {
mg_sock_set(nc, sock);
mg_add_conn(nc->mgr, nc);
}
return nc;
}
struct mg_connection *mg_add_sock(struct mg_mgr *s, sock_t sock,
MG_CB(mg_event_handler_t callback,
void *user_data)) {
struct mg_add_sock_opts opts;
memset(&opts, 0, sizeof(opts));
return mg_add_sock_opt(s, sock, MG_CB(callback, user_data), opts);
}
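/*
 * Adoption sketch (not compiled in): wrapping an already-connected socket
 * (e.g. one obtained from a platform-specific accept()) into a manager.
 * mg_sock_set() makes it non-blocking and close-on-exec. The handler is a
 * placeholder.
 */
#if 0
static struct mg_connection *adopt_socket(struct mg_mgr *mgr, sock_t fd,
                                          mg_event_handler_t handler) {
  return mg_add_sock(mgr, fd, handler);
}
#endif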
double mg_time(void) {
return cs_time();
}
#ifdef MG_MODULE_LINES
#line 1 "mongoose/src/net_if_socket.h"
#endif
/*
* Copyright (c) 2014-2016 Cesanta Software Limited
* All rights reserved
*/
#ifndef CS_MONGOOSE_SRC_NET_IF_SOCKET_H_
#define CS_MONGOOSE_SRC_NET_IF_SOCKET_H_
/* Amalgamated: #include "mongoose/src/net_if.h" */
#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */
#ifndef MG_ENABLE_NET_IF_SOCKET
#define MG_ENABLE_NET_IF_SOCKET MG_NET_IF == MG_NET_IF_SOCKET
#endif
extern const struct mg_iface_vtable mg_socket_iface_vtable;
#ifdef __cplusplus
}
#endif /* __cplusplus */
#endif /* CS_MONGOOSE_SRC_NET_IF_SOCKET_H_ */
#ifdef MG_MODULE_LINES
#line 1 "mongoose/src/net_if_tun.h"
#endif
/*
* Copyright (c) 2014-2016 Cesanta Software Limited
* All rights reserved
*/
#ifndef CS_MONGOOSE_SRC_NET_IF_TUN_H_
#define CS_MONGOOSE_SRC_NET_IF_TUN_H_
#if MG_ENABLE_TUN
/* Amalgamated: #include "mongoose/src/net_if.h" */
struct mg_tun_client;
#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */
extern const struct mg_iface_vtable mg_tun_iface_vtable;
struct mg_connection *mg_tun_if_find_conn(struct mg_tun_client *client,
uint32_t stream_id);
#ifdef __cplusplus
}
#endif /* __cplusplus */
#endif /* MG_ENABLE_TUN */
#endif /* CS_MONGOOSE_SRC_NET_IF_TUN_H_ */
#ifdef MG_MODULE_LINES
#line 1 "mongoose/src/net_if.c"
#endif
/* Amalgamated: #include "mongoose/src/net_if.h" */
/* Amalgamated: #include "mongoose/src/internal.h" */
/* Amalgamated: #include "mongoose/src/net_if_socket.h" */
/* Amalgamated: #include "mongoose/src/net_if_tun.h" */
extern const struct mg_iface_vtable mg_default_iface_vtable;
#if MG_ENABLE_TUN
const struct mg_iface_vtable *mg_ifaces[] = {&mg_default_iface_vtable,
&mg_tun_iface_vtable};
#else
const struct mg_iface_vtable *mg_ifaces[] = {&mg_default_iface_vtable};
#endif
int mg_num_ifaces = (int) (sizeof(mg_ifaces) / sizeof(mg_ifaces[0]));
struct mg_iface *mg_if_create_iface(const struct mg_iface_vtable *vtable,
struct mg_mgr *mgr) {
struct mg_iface *iface = (struct mg_iface *) MG_CALLOC(1, sizeof(*iface));
iface->mgr = mgr;
iface->data = NULL;
iface->vtable = vtable;
return iface;
}
struct mg_iface *mg_find_iface(struct mg_mgr *mgr,
const struct mg_iface_vtable *vtable,
struct mg_iface *from) {
int i = 0;
if (from != NULL) {
for (i = 0; i < mgr->num_ifaces; i++) {
if (mgr->ifaces[i] == from) {
i++;
break;
}
}
}
for (; i < mgr->num_ifaces; i++) {
if (mgr->ifaces[i]->vtable == vtable) {
return mgr->ifaces[i];
}
}
return NULL;
}
#ifdef MG_MODULE_LINES
#line 1 "mongoose/src/net_if_socket.c"
#endif
/*
* Copyright (c) 2014-2016 Cesanta Software Limited
* All rights reserved
*/
#if MG_ENABLE_NET_IF_SOCKET
/* Amalgamated: #include "mongoose/src/net_if_socket.h" */
/* Amalgamated: #include "mongoose/src/internal.h" */
/* Amalgamated: #include "mongoose/src/util.h" */
#define MG_TCP_RECV_BUFFER_SIZE 1024
#define MG_UDP_RECV_BUFFER_SIZE 1500
static sock_t mg_open_listening_socket(union socket_address *sa, int type,
int proto);
#if MG_ENABLE_SSL
static void mg_ssl_begin(struct mg_connection *nc);
#endif
void mg_set_non_blocking_mode(sock_t sock) {
#ifdef _WIN32
unsigned long on = 1;
ioctlsocket(sock, FIONBIO, &on);
#else
int flags = fcntl(sock, F_GETFL, 0);
fcntl(sock, F_SETFL, flags | O_NONBLOCK);
#endif
}
static int mg_is_error(int n) {
int err = mg_get_errno();
return (n < 0 && err != EINPROGRESS && err != EWOULDBLOCK
#ifndef WINCE
&& err != EAGAIN && err != EINTR
#endif
#ifdef _WIN32
&& WSAGetLastError() != WSAEINTR &&
WSAGetLastError() != WSAEWOULDBLOCK
#endif
);
}
void mg_socket_if_connect_tcp(struct mg_connection *nc,
const union socket_address *sa) {
int rc, proto = 0;
nc->sock = socket(AF_INET, SOCK_STREAM, proto);
if (nc->sock == INVALID_SOCKET) {
nc->err = mg_get_errno() ? mg_get_errno() : 1;
return;
}
#if !defined(MG_ESP8266)
mg_set_non_blocking_mode(nc->sock);
#endif
rc = connect(nc->sock, &sa->sa, sizeof(sa->sin));
nc->err = mg_is_error(rc) ? mg_get_errno() : 0;
DBG(("%p sock %d rc %d errno %d err %d", nc, nc->sock, rc, mg_get_errno(),
nc->err));
}
void mg_socket_if_connect_udp(struct mg_connection *nc) {
nc->sock = socket(AF_INET, SOCK_DGRAM, 0);
if (nc->sock == INVALID_SOCKET) {
nc->err = mg_get_errno() ? mg_get_errno() : 1;
return;
}
if (nc->flags & MG_F_ENABLE_BROADCAST) {
int optval = 1;
setsockopt(nc->sock, SOL_SOCKET, SO_BROADCAST, (const char *) &optval,
sizeof(optval));
}
nc->err = 0;
}
int mg_socket_if_listen_tcp(struct mg_connection *nc,
union socket_address *sa) {
int proto = 0;
sock_t sock = mg_open_listening_socket(sa, SOCK_STREAM, proto);
if (sock == INVALID_SOCKET) {
return (mg_get_errno() ? mg_get_errno() : 1);
}
mg_sock_set(nc, sock);
return 0;
}
int mg_socket_if_listen_udp(struct mg_connection *nc,
union socket_address *sa) {
sock_t sock = mg_open_listening_socket(sa, SOCK_DGRAM, 0);
if (sock == INVALID_SOCKET) return (mg_get_errno() ? mg_get_errno() : 1);
mg_sock_set(nc, sock);
return 0;
}
void mg_socket_if_tcp_send(struct mg_connection *nc, const void *buf,
size_t len) {
mbuf_append(&nc->send_mbuf, buf, len);
}
void mg_socket_if_udp_send(struct mg_connection *nc, const void *buf,
size_t len) {
mbuf_append(&nc->send_mbuf, buf, len);
}
void mg_socket_if_recved(struct mg_connection *nc, size_t len) {
(void) nc;
(void) len;
}
int mg_socket_if_create_conn(struct mg_connection *nc) {
(void) nc;
return 1;
}
void mg_socket_if_destroy_conn(struct mg_connection *nc) {
if (nc->sock == INVALID_SOCKET) return;
if (!(nc->flags & MG_F_UDP)) {
closesocket(nc->sock);
} else {
/* Only close outgoing UDP sockets or listeners. */
if (nc->listener == NULL) closesocket(nc->sock);
}
nc->sock = INVALID_SOCKET;
}
static int mg_accept_conn(struct mg_connection *lc) {
struct mg_connection *nc;
union socket_address sa;
socklen_t sa_len = sizeof(sa);
/* NOTE(lsm): on Windows, sock is always > FD_SETSIZE */
sock_t sock = accept(lc->sock, &sa.sa, &sa_len);
if (sock == INVALID_SOCKET) {
if (mg_is_error(-1)) DBG(("%p: failed to accept: %d", lc, mg_get_errno()));
return 0;
}
nc = mg_if_accept_new_conn(lc);
if (nc == NULL) {
closesocket(sock);
return 0;
}
DBG(("%p conn from %s:%d", nc, inet_ntoa(sa.sin.sin_addr),
ntohs(sa.sin.sin_port)));
mg_sock_set(nc, sock);
#if MG_ENABLE_SSL
if (lc->flags & MG_F_SSL) {
if (mg_ssl_if_conn_accept(nc, lc) != MG_SSL_OK) mg_close_conn(nc);
} else
#endif
{
mg_if_accept_tcp_cb(nc, &sa, sa_len);
}
return 1;
}
/* 'sa' must be an initialized address to bind to */
static sock_t mg_open_listening_socket(union socket_address *sa, int type,
int proto) {
socklen_t sa_len =
(sa->sa.sa_family == AF_INET) ? sizeof(sa->sin) : sizeof(sa->sin6);
sock_t sock = INVALID_SOCKET;
#if !MG_LWIP
int on = 1;
#endif
if ((sock = socket(sa->sa.sa_family, type, proto)) != INVALID_SOCKET &&
#if !MG_LWIP /* LWIP doesn't support either */
#if defined(_WIN32) && defined(SO_EXCLUSIVEADDRUSE) && !defined(WINCE)
/* "Using SO_REUSEADDR and SO_EXCLUSIVEADDRUSE" http://goo.gl/RmrFTm */
!setsockopt(sock, SOL_SOCKET, SO_EXCLUSIVEADDRUSE, (void *) &on,
sizeof(on)) &&
#endif
#if !defined(_WIN32) || !defined(SO_EXCLUSIVEADDRUSE)
/*
     * SO_REUSEADDR is not enabled on Windows because the semantics of
     * SO_REUSEADDR on UNIX and Windows are different. On Windows,
     * SO_REUSEADDR allows binding a socket to a port without error even if
     * the port is already open by another program. This is not the behavior
     * SO_REUSEADDR was designed for, and it leads to hard-to-track failure
     * scenarios. Therefore, SO_REUSEADDR is disabled on Windows unless
     * SO_EXCLUSIVEADDRUSE is supported and set on the socket.
*/
!setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, (void *) &on, sizeof(on)) &&
#endif
#endif /* !MG_LWIP */
!bind(sock, &sa->sa, sa_len) &&
(type == SOCK_DGRAM || listen(sock, SOMAXCONN) == 0)) {
#if !MG_LWIP
mg_set_non_blocking_mode(sock);
/* In case port was set to 0, get the real port number */
(void) getsockname(sock, &sa->sa, &sa_len);
#endif
} else if (sock != INVALID_SOCKET) {
closesocket(sock);
sock = INVALID_SOCKET;
}
return sock;
}
static void mg_write_to_socket(struct mg_connection *nc) {
struct mbuf *io = &nc->send_mbuf;
int n = 0;
#if MG_LWIP
/* With LWIP we don't know if the socket is ready */
if (io->len == 0) return;
#endif
assert(io->len > 0);
if (nc->flags & MG_F_UDP) {
    n = sendto(nc->sock, io->buf, io->len, 0, &nc->sa.sa, sizeof(nc->sa.sin));
DBG(("%p %d %d %d %s:%hu", nc, nc->sock, n, mg_get_errno(),
inet_ntoa(nc->sa.sin.sin_addr), ntohs(nc->sa.sin.sin_port)));
if (n > 0) {
mbuf_remove(io, n);
mg_if_sent_cb(nc, n);
}
return;
}
#if MG_ENABLE_SSL
if (nc->flags & MG_F_SSL) {
if (nc->flags & MG_F_SSL_HANDSHAKE_DONE) {
n = mg_ssl_if_write(nc, io->buf, io->len);
DBG(("%p %d bytes -> %d (SSL)", nc, n, nc->sock));
if (n < 0) {
if (n != MG_SSL_WANT_READ && n != MG_SSL_WANT_WRITE) {
nc->flags |= MG_F_CLOSE_IMMEDIATELY;
}
return;
} else {
/* Successful SSL operation, clear off SSL wait flags */
nc->flags &= ~(MG_F_WANT_READ | MG_F_WANT_WRITE);
}
} else {
mg_ssl_begin(nc);
return;
}
} else
#endif
{
n = (int) MG_SEND_FUNC(nc->sock, io->buf, io->len, 0);
DBG(("%p %d bytes -> %d", nc, n, nc->sock));
if (n < 0 && mg_is_error(n)) {
/* Something went wrong, drop the connection. */
nc->flags |= MG_F_CLOSE_IMMEDIATELY;
return;
}
}
if (n > 0) {
mbuf_remove(io, n);
mg_if_sent_cb(nc, n);
}
}
MG_INTERNAL size_t recv_avail_size(struct mg_connection *conn, size_t max) {
size_t avail;
if (conn->recv_mbuf_limit < conn->recv_mbuf.len) return 0;
avail = conn->recv_mbuf_limit - conn->recv_mbuf.len;
return avail > max ? max : avail;
}
static void mg_handle_tcp_read(struct mg_connection *conn) {
int n = 0;
char *buf = (char *) MG_MALLOC(MG_TCP_RECV_BUFFER_SIZE);
if (buf == NULL) {
DBG(("OOM"));
return;
}
#if MG_ENABLE_SSL
if (conn->flags & MG_F_SSL) {
if (conn->flags & MG_F_SSL_HANDSHAKE_DONE) {
      /* The SSL library may have more bytes ready to read than we ask for.
       * Therefore, read in a loop until we read everything. Without the loop,
       * we would skip to the next select() cycle, which can simply time out. */
while ((n = mg_ssl_if_read(conn, buf, MG_TCP_RECV_BUFFER_SIZE)) > 0) {
DBG(("%p %d bytes <- %d (SSL)", conn, n, conn->sock));
mg_if_recv_tcp_cb(conn, buf, n, 1 /* own */);
buf = NULL;
if (conn->flags & MG_F_CLOSE_IMMEDIATELY) break;
/* buf has been freed, we need a new one. */
buf = (char *) MG_MALLOC(MG_TCP_RECV_BUFFER_SIZE);
if (buf == NULL) break;
}
MG_FREE(buf);
if (n < 0 && n != MG_SSL_WANT_READ) conn->flags |= MG_F_CLOSE_IMMEDIATELY;
} else {
MG_FREE(buf);
mg_ssl_begin(conn);
return;
}
} else
#endif
{
n = (int) MG_RECV_FUNC(conn->sock, buf,
recv_avail_size(conn, MG_TCP_RECV_BUFFER_SIZE), 0);
DBG(("%p %d bytes (PLAIN) <- %d", conn, n, conn->sock));
if (n > 0) {
mg_if_recv_tcp_cb(conn, buf, n, 1 /* own */);
} else {
MG_FREE(buf);
}
if (n == 0) {
/* Orderly shutdown of the socket, try flushing output. */
conn->flags |= MG_F_SEND_AND_CLOSE;
} else if (mg_is_error(n)) {
conn->flags |= MG_F_CLOSE_IMMEDIATELY;
}
}
}
static int mg_recvfrom(struct mg_connection *nc, union socket_address *sa,
socklen_t *sa_len, char **buf) {
int n;
*buf = (char *) MG_MALLOC(MG_UDP_RECV_BUFFER_SIZE);
if (*buf == NULL) {
DBG(("Out of memory"));
return -ENOMEM;
}
n = recvfrom(nc->sock, *buf, MG_UDP_RECV_BUFFER_SIZE, 0, &sa->sa, sa_len);
  if (n <= 0) {
    DBG(("%p recvfrom: %s", nc, strerror(mg_get_errno())));
    MG_FREE(*buf);
    *buf = NULL; /* do not hand a dangling pointer back to the caller */
  }
return n;
}
static void mg_handle_udp_read(struct mg_connection *nc) {
char *buf = NULL;
union socket_address sa;
socklen_t sa_len = sizeof(sa);
  int n = mg_recvfrom(nc, &sa, &sa_len, &buf);
  if (n > 0) {
    DBG(("%p %d bytes from %s:%d", nc, n, inet_ntoa(sa.sin.sin_addr),
         ntohs(sa.sin.sin_port)));
    mg_if_recv_udp_cb(nc, buf, n, &sa, sa_len);
  }
}
#if MG_ENABLE_SSL
static void mg_ssl_begin(struct mg_connection *nc) {
int server_side = (nc->listener != NULL);
enum mg_ssl_if_result res = mg_ssl_if_handshake(nc);
DBG(("%p %d res %d", nc, server_side, res));
if (res == MG_SSL_OK) {
nc->flags |= MG_F_SSL_HANDSHAKE_DONE;
nc->flags &= ~(MG_F_WANT_READ | MG_F_WANT_WRITE);
if (server_side) {
union socket_address sa;
socklen_t sa_len = sizeof(sa);
(void) getpeername(nc->sock, &sa.sa, &sa_len);
mg_if_accept_tcp_cb(nc, &sa, sa_len);
} else {
mg_if_connect_cb(nc, 0);
}
} else if (res != MG_SSL_WANT_READ && res != MG_SSL_WANT_WRITE) {
if (!server_side) {
mg_if_connect_cb(nc, res);
}
nc->flags |= MG_F_CLOSE_IMMEDIATELY;
}
}
#endif /* MG_ENABLE_SSL */
#define _MG_F_FD_CAN_READ 1
#define _MG_F_FD_CAN_WRITE (1 << 1)
#define _MG_F_FD_ERROR (1 << 2)
void mg_mgr_handle_conn(struct mg_connection *nc, int fd_flags, double now) {
int worth_logging =
fd_flags != 0 || (nc->flags & (MG_F_WANT_READ | MG_F_WANT_WRITE));
if (worth_logging) {
DBG(("%p fd=%d fd_flags=%d nc_flags=%lu rmbl=%d smbl=%d", nc, nc->sock,
fd_flags, nc->flags, (int) nc->recv_mbuf.len,
(int) nc->send_mbuf.len));
}
if (nc->flags & MG_F_CONNECTING) {
if (fd_flags != 0) {
int err = 0;
#if !defined(MG_ESP8266)
if (!(nc->flags & MG_F_UDP)) {
socklen_t len = sizeof(err);
int ret =
getsockopt(nc->sock, SOL_SOCKET, SO_ERROR, (char *) &err, &len);
if (ret != 0) {
err = 1;
} else if (err == EAGAIN || err == EWOULDBLOCK) {
err = 0;
}
}
#else
/*
* On ESP8266 we use blocking connect.
*/
err = nc->err;
#endif
#if MG_ENABLE_SSL
if ((nc->flags & MG_F_SSL) && err == 0) {
mg_ssl_begin(nc);
} else {
mg_if_connect_cb(nc, err);
}
#else
mg_if_connect_cb(nc, err);
#endif
} else if (nc->err != 0) {
mg_if_connect_cb(nc, nc->err);
}
}
if (fd_flags & _MG_F_FD_CAN_READ) {
if (nc->flags & MG_F_UDP) {
mg_handle_udp_read(nc);
} else {
if (nc->flags & MG_F_LISTENING) {
        /*
         * We are not looping here; we accept just one connection at a time.
         * The reason is that eCos does not respect the non-blocking flag on
         * a listening socket and hangs in a loop.
         */
mg_accept_conn(nc);
} else {
mg_handle_tcp_read(nc);
}
}
}
if (!(nc->flags & MG_F_CLOSE_IMMEDIATELY)) {
if ((fd_flags & _MG_F_FD_CAN_WRITE) && nc->send_mbuf.len > 0) {
mg_write_to_socket(nc);
}
mg_if_poll(nc, (time_t) now);
mg_if_timer(nc, now);
}
if (worth_logging) {
DBG(("%p after fd=%d nc_flags=%lu rmbl=%d smbl=%d", nc, nc->sock, nc->flags,
(int) nc->recv_mbuf.len, (int) nc->send_mbuf.len));
}
}
#if MG_ENABLE_BROADCAST
static void mg_mgr_handle_ctl_sock(struct mg_mgr *mgr) {
struct ctl_msg ctl_msg;
int len =
(int) MG_RECV_FUNC(mgr->ctl[1], (char *) &ctl_msg, sizeof(ctl_msg), 0);
size_t dummy = MG_SEND_FUNC(mgr->ctl[1], ctl_msg.message, 1, 0);
DBG(("read %d from ctl socket", len));
(void) dummy; /* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=25509 */
if (len >= (int) sizeof(ctl_msg.callback) && ctl_msg.callback != NULL) {
struct mg_connection *nc;
for (nc = mg_next(mgr, NULL); nc != NULL; nc = mg_next(mgr, nc)) {
ctl_msg.callback(nc, MG_EV_POLL,
ctl_msg.message MG_UD_ARG(nc->user_data));
}
}
}
#endif
/* Associate a socket to a connection. */
void mg_socket_if_sock_set(struct mg_connection *nc, sock_t sock) {
mg_set_non_blocking_mode(sock);
mg_set_close_on_exec(sock);
nc->sock = sock;
DBG(("%p %d", nc, sock));
}
void mg_socket_if_init(struct mg_iface *iface) {
(void) iface;
DBG(("%p using select()", iface->mgr));
#if MG_ENABLE_BROADCAST
do {
mg_socketpair(iface->mgr->ctl, SOCK_DGRAM);
} while (iface->mgr->ctl[0] == INVALID_SOCKET);
#endif
}
void mg_socket_if_free(struct mg_iface *iface) {
(void) iface;
}
void mg_socket_if_add_conn(struct mg_connection *nc) {
(void) nc;
}
void mg_socket_if_remove_conn(struct mg_connection *nc) {
(void) nc;
}
void mg_add_to_set(sock_t sock, fd_set *set, sock_t *max_fd) {
if (sock != INVALID_SOCKET
#ifdef __unix__
&& sock < (sock_t) FD_SETSIZE
#endif
) {
FD_SET(sock, set);
if (*max_fd == INVALID_SOCKET || sock > *max_fd) {
*max_fd = sock;
}
}
}
time_t mg_socket_if_poll(struct mg_iface *iface, int timeout_ms) {
struct mg_mgr *mgr = iface->mgr;
double now = mg_time();
double min_timer;
struct mg_connection *nc, *tmp;
struct timeval tv;
fd_set read_set, write_set, err_set;
sock_t max_fd = INVALID_SOCKET;
int num_fds, num_ev, num_timers = 0;
#ifdef __unix__
int try_dup = 1;
#endif
FD_ZERO(&read_set);
FD_ZERO(&write_set);
FD_ZERO(&err_set);
#if MG_ENABLE_BROADCAST
mg_add_to_set(mgr->ctl[1], &read_set, &max_fd);
#endif
/*
* Note: it is ok to have connections with sock == INVALID_SOCKET in the list,
* e.g. timer-only "connections".
*/
min_timer = 0;
for (nc = mgr->active_connections, num_fds = 0; nc != NULL; nc = tmp) {
tmp = nc->next;
if (nc->sock != INVALID_SOCKET) {
num_fds++;
#ifdef __unix__
      /* A hack to make sure all our file descriptors fit into FD_SETSIZE. */
if (nc->sock >= (sock_t) FD_SETSIZE && try_dup) {
int new_sock = dup(nc->sock);
if (new_sock >= 0 && new_sock < (sock_t) FD_SETSIZE) {
closesocket(nc->sock);
DBG(("new sock %d -> %d", nc->sock, new_sock));
nc->sock = new_sock;
} else {
try_dup = 0;
}
}
#endif
if (!(nc->flags & MG_F_WANT_WRITE) &&
nc->recv_mbuf.len < nc->recv_mbuf_limit &&
(!(nc->flags & MG_F_UDP) || nc->listener == NULL)) {
mg_add_to_set(nc->sock, &read_set, &max_fd);
}
if (((nc->flags & MG_F_CONNECTING) && !(nc->flags & MG_F_WANT_READ)) ||
(nc->send_mbuf.len > 0 && !(nc->flags & MG_F_CONNECTING))) {
mg_add_to_set(nc->sock, &write_set, &max_fd);
mg_add_to_set(nc->sock, &err_set, &max_fd);
}
}
if (nc->ev_timer_time > 0) {
if (num_timers == 0 || nc->ev_timer_time < min_timer) {
min_timer = nc->ev_timer_time;
}
num_timers++;
}
}
/*
* If there is a timer to be fired earlier than the requested timeout,
* adjust the timeout.
*/
if (num_timers > 0) {
double timer_timeout_ms = (min_timer - mg_time()) * 1000 + 1 /* rounding */;
if (timer_timeout_ms < timeout_ms) {
timeout_ms = (int) timer_timeout_ms;
}
}
if (timeout_ms < 0) timeout_ms = 0;
tv.tv_sec = timeout_ms / 1000;
tv.tv_usec = (timeout_ms % 1000) * 1000;
num_ev = select((int) max_fd + 1, &read_set, &write_set, &err_set, &tv);
now = mg_time();
#if 0
DBG(("select @ %ld num_ev=%d of %d, timeout=%d", (long) now, num_ev, num_fds,
timeout_ms));
#endif
#if MG_ENABLE_BROADCAST
if (num_ev > 0 && mgr->ctl[1] != INVALID_SOCKET &&
FD_ISSET(mgr->ctl[1], &read_set)) {
mg_mgr_handle_ctl_sock(mgr);
}
#endif
for (nc = mgr->active_connections; nc != NULL; nc = tmp) {
int fd_flags = 0;
if (nc->sock != INVALID_SOCKET) {
if (num_ev > 0) {
fd_flags = (FD_ISSET(nc->sock, &read_set) &&
(!(nc->flags & MG_F_UDP) || nc->listener == NULL)
? _MG_F_FD_CAN_READ
: 0) |
(FD_ISSET(nc->sock, &write_set) ? _MG_F_FD_CAN_WRITE : 0) |
(FD_ISSET(nc->sock, &err_set) ? _MG_F_FD_ERROR : 0);
}
#if MG_LWIP
/* With LWIP socket emulation layer, we don't get write events for UDP */
if ((nc->flags & MG_F_UDP) && nc->listener == NULL) {
fd_flags |= _MG_F_FD_CAN_WRITE;
}
#endif
}
tmp = nc->next;
mg_mgr_handle_conn(nc, fd_flags, now);
}
for (nc = mgr->active_connections; nc != NULL; nc = tmp) {
tmp = nc->next;
if ((nc->flags & MG_F_CLOSE_IMMEDIATELY) ||
(nc->send_mbuf.len == 0 && (nc->flags & MG_F_SEND_AND_CLOSE))) {
mg_close_conn(nc);
}
}
return (time_t) now;
}
#if MG_ENABLE_BROADCAST
int mg_socketpair(sock_t sp[2], int sock_type) {
union socket_address sa;
sock_t sock;
socklen_t len = sizeof(sa.sin);
int ret = 0;
sock = sp[0] = sp[1] = INVALID_SOCKET;
(void) memset(&sa, 0, sizeof(sa));
sa.sin.sin_family = AF_INET;
sa.sin.sin_port = htons(0);
sa.sin.sin_addr.s_addr = htonl(0x7f000001); /* 127.0.0.1 */
if ((sock = socket(AF_INET, sock_type, 0)) == INVALID_SOCKET) {
} else if (bind(sock, &sa.sa, len) != 0) {
} else if (sock_type == SOCK_STREAM && listen(sock, 1) != 0) {
} else if (getsockname(sock, &sa.sa, &len) != 0) {
} else if ((sp[0] = socket(AF_INET, sock_type, 0)) == INVALID_SOCKET) {
} else if (connect(sp[0], &sa.sa, len) != 0) {
} else if (sock_type == SOCK_DGRAM &&
(getsockname(sp[0], &sa.sa, &len) != 0 ||
connect(sock, &sa.sa, len) != 0)) {
} else if ((sp[1] = (sock_type == SOCK_DGRAM ? sock
: accept(sock, &sa.sa, &len))) ==
INVALID_SOCKET) {
} else {
mg_set_close_on_exec(sp[0]);
mg_set_close_on_exec(sp[1]);
if (sock_type == SOCK_STREAM) closesocket(sock);
ret = 1;
}
if (!ret) {
if (sp[0] != INVALID_SOCKET) closesocket(sp[0]);
if (sp[1] != INVALID_SOCKET) closesocket(sp[1]);
if (sock != INVALID_SOCKET) closesocket(sock);
sock = sp[0] = sp[1] = INVALID_SOCKET;
}
return ret;
}
#endif /* MG_ENABLE_BROADCAST */
static void mg_sock_get_addr(sock_t sock, int remote,
union socket_address *sa) {
socklen_t slen = sizeof(*sa);
memset(sa, 0, slen);
if (remote) {
getpeername(sock, &sa->sa, &slen);
} else {
getsockname(sock, &sa->sa, &slen);
}
}
void mg_sock_to_str(sock_t sock, char *buf, size_t len, int flags) {
union socket_address sa;
mg_sock_get_addr(sock, flags & MG_SOCK_STRINGIFY_REMOTE, &sa);
mg_sock_addr_to_str(&sa, buf, len, flags);
}
void mg_socket_if_get_conn_addr(struct mg_connection *nc, int remote,
union socket_address *sa) {
mg_sock_get_addr(nc->sock, remote, sa);
}
/* clang-format off */
#define MG_SOCKET_IFACE_VTABLE \
{ \
mg_socket_if_init, \
mg_socket_if_free, \
mg_socket_if_add_conn, \
mg_socket_if_remove_conn, \
mg_socket_if_poll, \
mg_socket_if_listen_tcp, \
mg_socket_if_listen_udp, \
mg_socket_if_connect_tcp, \
mg_socket_if_connect_udp, \
mg_socket_if_tcp_send, \
mg_socket_if_udp_send, \
mg_socket_if_recved, \
mg_socket_if_create_conn, \
mg_socket_if_destroy_conn, \
mg_socket_if_sock_set, \
mg_socket_if_get_conn_addr, \
}
/* clang-format on */
const struct mg_iface_vtable mg_socket_iface_vtable = MG_SOCKET_IFACE_VTABLE;
#if MG_NET_IF == MG_NET_IF_SOCKET
const struct mg_iface_vtable mg_default_iface_vtable = MG_SOCKET_IFACE_VTABLE;
#endif
#endif /* MG_ENABLE_NET_IF_SOCKET */
#ifdef MG_MODULE_LINES
#line 1 "mongoose/src/net_if_tun.c"
#endif
/*
* Copyright (c) 2014-2016 Cesanta Software Limited
* All rights reserved
*/
#if MG_ENABLE_TUN
/* Amalgamated: #include "common/cs_dbg.h" */
/* Amalgamated: #include "common/cs_time.h" */
/* Amalgamated: #include "mongoose/src/internal.h" */
/* Amalgamated: #include "mongoose/src/net_if_tun.h" */
/* Amalgamated: #include "mongoose/src/tun.h" */
/* Amalgamated: #include "mongoose/src/util.h" */
#define MG_TCP_RECV_BUFFER_SIZE 1024
#define MG_UDP_RECV_BUFFER_SIZE 1500
void mg_tun_if_connect_tcp(struct mg_connection *nc,
const union socket_address *sa) {
(void) nc;
(void) sa;
}
void mg_tun_if_connect_udp(struct mg_connection *nc) {
(void) nc;
}
int mg_tun_if_listen_tcp(struct mg_connection *nc, union socket_address *sa) {
(void) nc;
(void) sa;
return 0;
}
int mg_tun_if_listen_udp(struct mg_connection *nc, union socket_address *sa) {
(void) nc;
(void) sa;
return -1;
}
void mg_tun_if_tcp_send(struct mg_connection *nc, const void *buf, size_t len) {
struct mg_tun_client *client = (struct mg_tun_client *) nc->iface->data;
uint32_t stream_id = (uint32_t)(uintptr_t) nc->mgr_data;
struct mg_str msg = {(char *) buf, len};
#if MG_ENABLE_HEXDUMP
char hex[512];
mg_hexdump(buf, len, hex, sizeof(hex));
LOG(LL_DEBUG, ("sending to stream %zu:\n%s", stream_id, hex));
#endif
mg_tun_send_frame(client->disp, stream_id, MG_TUN_DATA_FRAME, 0, msg);
}
void mg_tun_if_udp_send(struct mg_connection *nc, const void *buf, size_t len) {
(void) nc;
(void) buf;
(void) len;
}
void mg_tun_if_recved(struct mg_connection *nc, size_t len) {
(void) nc;
(void) len;
}
int mg_tun_if_create_conn(struct mg_connection *nc) {
(void) nc;
return 1;
}
void mg_tun_if_destroy_conn(struct mg_connection *nc) {
struct mg_tun_client *client = (struct mg_tun_client *) nc->iface->data;
if (nc->flags & MG_F_LISTENING) {
mg_tun_destroy_client(client);
} else if (client->disp) {
uint32_t stream_id = (uint32_t)(uintptr_t) nc->mgr_data;
struct mg_str msg = {NULL, 0};
LOG(LL_DEBUG, ("closing %zu:", stream_id));
mg_tun_send_frame(client->disp, stream_id, MG_TUN_DATA_FRAME,
MG_TUN_F_END_STREAM, msg);
}
}
/* Associate a socket to a connection. */
void mg_tun_if_sock_set(struct mg_connection *nc, sock_t sock) {
(void) nc;
(void) sock;
}
void mg_tun_if_init(struct mg_iface *iface) {
(void) iface;
}
void mg_tun_if_free(struct mg_iface *iface) {
(void) iface;
}
void mg_tun_if_add_conn(struct mg_connection *nc) {
nc->sock = INVALID_SOCKET;
}
void mg_tun_if_remove_conn(struct mg_connection *nc) {
(void) nc;
}
time_t mg_tun_if_poll(struct mg_iface *iface, int timeout_ms) {
(void) iface;
(void) timeout_ms;
return (time_t) cs_time();
}
void mg_tun_if_get_conn_addr(struct mg_connection *nc, int remote,
union socket_address *sa) {
(void) nc;
(void) remote;
(void) sa;
}
struct mg_connection *mg_tun_if_find_conn(struct mg_tun_client *client,
uint32_t stream_id) {
struct mg_connection *nc = NULL;
for (nc = client->mgr->active_connections; nc != NULL; nc = nc->next) {
if (nc->iface != client->iface || (nc->flags & MG_F_LISTENING)) {
continue;
}
if (stream_id == (uint32_t)(uintptr_t) nc->mgr_data) {
return nc;
}
}
  if (stream_id > client->last_stream_id) {
    /* create a new connection */
    LOG(LL_DEBUG, ("new stream 0x%lx, accepting", (unsigned long) stream_id));
    nc = mg_if_accept_new_conn(client->listener);
    if (nc != NULL) { /* mg_if_accept_new_conn() may return NULL on OOM */
      nc->mgr_data = (void *) (uintptr_t) stream_id;
      client->last_stream_id = stream_id;
    }
  } else {
    LOG(LL_DEBUG, ("Ignoring stream 0x%lx (last_stream_id 0x%lx)",
                   (unsigned long) stream_id,
                   (unsigned long) client->last_stream_id));
}
return nc;
}
/* clang-format off */
#define MG_TUN_IFACE_VTABLE \
{ \
mg_tun_if_init, \
mg_tun_if_free, \
mg_tun_if_add_conn, \
mg_tun_if_remove_conn, \
mg_tun_if_poll, \
mg_tun_if_listen_tcp, \
mg_tun_if_listen_udp, \
mg_tun_if_connect_tcp, \
mg_tun_if_connect_udp, \
mg_tun_if_tcp_send, \
mg_tun_if_udp_send, \
mg_tun_if_recved, \
mg_tun_if_create_conn, \
mg_tun_if_destroy_conn, \
mg_tun_if_sock_set, \
mg_tun_if_get_conn_addr, \
}
/* clang-format on */
const struct mg_iface_vtable mg_tun_iface_vtable = MG_TUN_IFACE_VTABLE;
#endif /* MG_ENABLE_TUN */
#ifdef MG_MODULE_LINES
#line 1 "mongoose/src/ssl_if_openssl.c"
#endif
/*
* Copyright (c) 2014-2016 Cesanta Software Limited
* All rights reserved
*/
#if MG_ENABLE_SSL && MG_SSL_IF == MG_SSL_IF_OPENSSL
#ifdef __APPLE__
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
#endif
#include <openssl/ssl.h>
struct mg_ssl_if_ctx {
SSL *ssl;
SSL_CTX *ssl_ctx;
struct mbuf psk;
size_t identity_len;
};
void mg_ssl_if_init() {
SSL_library_init();
}
enum mg_ssl_if_result mg_ssl_if_conn_accept(struct mg_connection *nc,
struct mg_connection *lc) {
struct mg_ssl_if_ctx *ctx =
(struct mg_ssl_if_ctx *) MG_CALLOC(1, sizeof(*ctx));
struct mg_ssl_if_ctx *lc_ctx = (struct mg_ssl_if_ctx *) lc->ssl_if_data;
nc->ssl_if_data = ctx;
if (ctx == NULL || lc_ctx == NULL) return MG_SSL_ERROR;
ctx->ssl_ctx = lc_ctx->ssl_ctx;
if ((ctx->ssl = SSL_new(ctx->ssl_ctx)) == NULL) {
return MG_SSL_ERROR;
}
return MG_SSL_OK;
}
static enum mg_ssl_if_result mg_use_cert(SSL_CTX *ctx, const char *cert,
const char *key, const char **err_msg);
static enum mg_ssl_if_result mg_use_ca_cert(SSL_CTX *ctx, const char *cert);
static enum mg_ssl_if_result mg_set_cipher_list(SSL_CTX *ctx, const char *cl);
static enum mg_ssl_if_result mg_ssl_if_ossl_set_psk(struct mg_ssl_if_ctx *ctx,
const char *identity,
const char *key_str);
enum mg_ssl_if_result mg_ssl_if_conn_init(
struct mg_connection *nc, const struct mg_ssl_if_conn_params *params,
const char **err_msg) {
struct mg_ssl_if_ctx *ctx =
(struct mg_ssl_if_ctx *) MG_CALLOC(1, sizeof(*ctx));
DBG(("%p %s,%s,%s", nc, (params->cert ? params->cert : ""),
(params->key ? params->key : ""),
(params->ca_cert ? params->ca_cert : "")));
if (ctx == NULL) {
MG_SET_PTRPTR(err_msg, "Out of memory");
return MG_SSL_ERROR;
}
nc->ssl_if_data = ctx;
if (nc->flags & MG_F_LISTENING) {
ctx->ssl_ctx = SSL_CTX_new(SSLv23_server_method());
} else {
ctx->ssl_ctx = SSL_CTX_new(SSLv23_client_method());
}
if (ctx->ssl_ctx == NULL) {
MG_SET_PTRPTR(err_msg, "Failed to create SSL context");
return MG_SSL_ERROR;
}
if (params->cert != NULL &&
mg_use_cert(ctx->ssl_ctx, params->cert, params->key, err_msg) !=
MG_SSL_OK) {
return MG_SSL_ERROR;
}
if (params->ca_cert != NULL &&
mg_use_ca_cert(ctx->ssl_ctx, params->ca_cert) != MG_SSL_OK) {
MG_SET_PTRPTR(err_msg, "Invalid SSL CA cert");
return MG_SSL_ERROR;
}
if (params->server_name != NULL) {
#ifdef KR_VERSION
SSL_CTX_kr_set_verify_name(ctx->ssl_ctx, params->server_name);
#else
/* TODO(rojer): Implement server name verification on OpenSSL. */
#endif
}
if (mg_set_cipher_list(ctx->ssl_ctx, params->cipher_suites) != MG_SSL_OK) {
MG_SET_PTRPTR(err_msg, "Invalid cipher suite list");
return MG_SSL_ERROR;
}
mbuf_init(&ctx->psk, 0);
if (mg_ssl_if_ossl_set_psk(ctx, params->psk_identity, params->psk_key) !=
MG_SSL_OK) {
MG_SET_PTRPTR(err_msg, "Invalid PSK settings");
return MG_SSL_ERROR;
}
if (!(nc->flags & MG_F_LISTENING) &&
(ctx->ssl = SSL_new(ctx->ssl_ctx)) == NULL) {
MG_SET_PTRPTR(err_msg, "Failed to create SSL session");
return MG_SSL_ERROR;
}
nc->flags |= MG_F_SSL;
return MG_SSL_OK;
}
static enum mg_ssl_if_result mg_ssl_if_ssl_err(struct mg_connection *nc,
int res) {
struct mg_ssl_if_ctx *ctx = (struct mg_ssl_if_ctx *) nc->ssl_if_data;
int err = SSL_get_error(ctx->ssl, res);
if (err == SSL_ERROR_WANT_READ) return MG_SSL_WANT_READ;
if (err == SSL_ERROR_WANT_WRITE) return MG_SSL_WANT_WRITE;
DBG(("%p %p SSL error: %d %d", nc, ctx->ssl_ctx, res, err));
nc->err = err;
return MG_SSL_ERROR;
}
enum mg_ssl_if_result mg_ssl_if_handshake(struct mg_connection *nc) {
struct mg_ssl_if_ctx *ctx = (struct mg_ssl_if_ctx *) nc->ssl_if_data;
int server_side = (nc->listener != NULL);
int res;
/* If descriptor is not yet set, do it now. */
if (SSL_get_fd(ctx->ssl) < 0) {
if (SSL_set_fd(ctx->ssl, nc->sock) != 1) return MG_SSL_ERROR;
}
res = server_side ? SSL_accept(ctx->ssl) : SSL_connect(ctx->ssl);
if (res != 1) return mg_ssl_if_ssl_err(nc, res);
return MG_SSL_OK;
}
int mg_ssl_if_read(struct mg_connection *nc, void *buf, size_t buf_size) {
struct mg_ssl_if_ctx *ctx = (struct mg_ssl_if_ctx *) nc->ssl_if_data;
int n = SSL_read(ctx->ssl, buf, buf_size);
DBG(("%p %d -> %d", nc, (int) buf_size, n));
if (n < 0) return mg_ssl_if_ssl_err(nc, n);
if (n == 0) nc->flags |= MG_F_CLOSE_IMMEDIATELY;
return n;
}
int mg_ssl_if_write(struct mg_connection *nc, const void *data, size_t len) {
struct mg_ssl_if_ctx *ctx = (struct mg_ssl_if_ctx *) nc->ssl_if_data;
int n = SSL_write(ctx->ssl, data, len);
DBG(("%p %d -> %d", nc, (int) len, n));
if (n <= 0) return mg_ssl_if_ssl_err(nc, n);
return n;
}
void mg_ssl_if_conn_close_notify(struct mg_connection *nc) {
struct mg_ssl_if_ctx *ctx = (struct mg_ssl_if_ctx *) nc->ssl_if_data;
if (ctx == NULL) return;
SSL_shutdown(ctx->ssl);
}
void mg_ssl_if_conn_free(struct mg_connection *nc) {
struct mg_ssl_if_ctx *ctx = (struct mg_ssl_if_ctx *) nc->ssl_if_data;
if (ctx == NULL) return;
nc->ssl_if_data = NULL;
if (ctx->ssl != NULL) SSL_free(ctx->ssl);
if (ctx->ssl_ctx != NULL && nc->listener == NULL) SSL_CTX_free(ctx->ssl_ctx);
mbuf_free(&ctx->psk);
memset(ctx, 0, sizeof(*ctx));
MG_FREE(ctx);
}
/*
* Cipher suite options used for TLS negotiation.
* https://wiki.mozilla.org/Security/Server_Side_TLS#Recommended_configurations
*/
static const char mg_s_cipher_list[] =
#if defined(MG_SSL_CRYPTO_MODERN)
"ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:"
"ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:"
"DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:kEDH+AESGCM:"
"ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA:"
"ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:"
"ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:DHE-RSA-AES128-SHA256:"
"DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA256:"
"DHE-DSS-AES256-SHA:DHE-RSA-AES256-SHA:"
"!aNULL:!eNULL:!EXPORT:!DES:!RC4:!3DES:!MD5:!PSK"
#elif defined(MG_SSL_CRYPTO_OLD)
"ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:"
"ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384:"
"DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:kEDH+AESGCM:"
"ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA:"
"ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:"
"ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:DHE-RSA-AES128-SHA256:"
"DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA256:"
"DHE-DSS-AES256-SHA:DHE-RSA-AES256-SHA:ECDHE-RSA-DES-CBC3-SHA:"
"ECDHE-ECDSA-DES-CBC3-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:"
"AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:AES:DES-CBC3-SHA:"
"HIGH:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!MD5:!PSK:!aECDH:"
"!EDH-DSS-DES-CBC3-SHA:!EDH-RSA-DES-CBC3-SHA:!KRB5-DES-CBC3-SHA"
#else /* Default - intermediate. */
"ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:"
"ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384:"
"DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:kEDH+AESGCM:"
"ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA:"
"ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:"
"ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:DHE-RSA-AES128-SHA256:"
"DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA256:"
"DHE-DSS-AES256-SHA:DHE-RSA-AES256-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:"
"AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:AES:CAMELLIA:"
"DES-CBC3-SHA:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!MD5:!PSK:!aECDH:"
"!EDH-DSS-DES-CBC3-SHA:!EDH-RSA-DES-CBC3-SHA:!KRB5-DES-CBC3-SHA"
#endif
;
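/*
* The profile is selected at compile time by the #if chain above: defining
* MG_SSL_CRYPTO_MODERN picks the first list, MG_SSL_CRYPTO_OLD the second,
* and leaving both undefined falls back to the intermediate default. An
* illustrative build line (exact flags depend on the build system):
*
*   cc -DMG_ENABLE_SSL=1 -DMG_SSL_CRYPTO_MODERN ... mongoose.c
*/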
/*
* Default DH params for PFS cipher negotiation. This is a 2048-bit group.
* Will be used if none are provided by the user in the certificate file.
*/
#if !MG_DISABLE_PFS && !defined(KR_VERSION)
static const char mg_s_default_dh_params[] =
"\
-----BEGIN DH PARAMETERS-----\n\
MIIBCAKCAQEAlvbgD/qh9znWIlGFcV0zdltD7rq8FeShIqIhkQ0C7hYFThrBvF2E\n\
Z9bmgaP+sfQwGpVlv9mtaWjvERbu6mEG7JTkgmVUJrUt/wiRzwTaCXBqZkdUO8Tq\n\
+E6VOEQAilstG90ikN1Tfo+K6+X68XkRUIlgawBTKuvKVwBhuvlqTGerOtnXWnrt\n\
ym//hd3cd5PBYGBix0i7oR4xdghvfR2WLVu0LgdThTBb6XP7gLd19cQ1JuBtAajZ\n\
wMuPn7qlUkEFDIkAZy59/Hue/H2Q2vU/JsvVhHWCQBL4F1ofEAt50il6ZxR1QfFK\n\
9VGKDC4oOgm9DlxwwBoC2FjqmvQlqVV3kwIBAg==\n\
-----END DH PARAMETERS-----\n";
#endif
static enum mg_ssl_if_result mg_use_ca_cert(SSL_CTX *ctx, const char *cert) {
if (cert == NULL || strcmp(cert, "*") == 0) {
return MG_SSL_OK;
}
SSL_CTX_set_verify(ctx, SSL_VERIFY_PEER | SSL_VERIFY_FAIL_IF_NO_PEER_CERT, 0);
return SSL_CTX_load_verify_locations(ctx, cert, NULL) == 1 ? MG_SSL_OK
: MG_SSL_ERROR;
}
static enum mg_ssl_if_result mg_use_cert(SSL_CTX *ctx, const char *cert,
const char *key,
const char **err_msg) {
if (key == NULL) key = cert;
if (cert == NULL || cert[0] == '\0' || key == NULL || key[0] == '\0') {
return MG_SSL_OK;
} else if (SSL_CTX_use_certificate_file(ctx, cert, 1) == 0) {
MG_SET_PTRPTR(err_msg, "Invalid SSL cert");
return MG_SSL_ERROR;
} else if (SSL_CTX_use_PrivateKey_file(ctx, key, 1) == 0) {
MG_SET_PTRPTR(err_msg, "Invalid SSL key");
return MG_SSL_ERROR;
} else if (SSL_CTX_use_certificate_chain_file(ctx, cert) == 0) {
MG_SET_PTRPTR(err_msg, "Invalid CA bundle");
return MG_SSL_ERROR;
} else {
SSL_CTX_set_mode(ctx, SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER);
#if !MG_DISABLE_PFS && !defined(KR_VERSION)
BIO *bio = NULL;
DH *dh = NULL;
/* Try to read DH parameters from the cert/key file. */
bio = BIO_new_file(cert, "r");
if (bio != NULL) {
dh = PEM_read_bio_DHparams(bio, NULL, NULL, NULL);
BIO_free(bio);
}
/*
* If there are no DH params in the file, fall back to hard-coded ones.
* Not ideal, but better than nothing.
*/
if (dh == NULL) {
bio = BIO_new_mem_buf((void *) mg_s_default_dh_params, -1);
dh = PEM_read_bio_DHparams(bio, NULL, NULL, NULL);
BIO_free(bio);
}
if (dh != NULL) {
SSL_CTX_set_tmp_dh(ctx, dh);
SSL_CTX_set_options(ctx, SSL_OP_SINGLE_DH_USE);
DH_free(dh);
}
#if OPENSSL_VERSION_NUMBER > 0x10002000L
SSL_CTX_set_ecdh_auto(ctx, 1);
#endif
#endif
}
return MG_SSL_OK;
}
static enum mg_ssl_if_result mg_set_cipher_list(SSL_CTX *ctx, const char *cl) {
return (SSL_CTX_set_cipher_list(ctx, cl ? cl : mg_s_cipher_list) == 1
? MG_SSL_OK
: MG_SSL_ERROR);
}
#ifndef KR_VERSION
static unsigned int mg_ssl_if_ossl_psk_cb(SSL *ssl, const char *hint,
char *identity,
unsigned int max_identity_len,
unsigned char *psk,
unsigned int max_psk_len) {
struct mg_ssl_if_ctx *ctx =
(struct mg_ssl_if_ctx *) ssl->ctx->msg_callback_arg;
size_t key_len = ctx->psk.len - ctx->identity_len - 1;
DBG(("hint: '%s'", (hint ? hint : "")));
if (ctx->identity_len + 1 > max_identity_len) {
DBG(("identity too long"));
return 0;
}
if (key_len > max_psk_len) {
DBG(("key too long"));
return 0;
}
memcpy(identity, ctx->psk.buf, ctx->identity_len + 1);
memcpy(psk, ctx->psk.buf + ctx->identity_len + 1, key_len);
(void) ssl;
return key_len;
}
static enum mg_ssl_if_result mg_ssl_if_ossl_set_psk(struct mg_ssl_if_ctx *ctx,
const char *identity,
const char *key_str) {
unsigned char key[32];
size_t key_len;
size_t i = 0;
if (identity == NULL && key_str == NULL) return MG_SSL_OK;
if (identity == NULL || key_str == NULL) return MG_SSL_ERROR;
key_len = strlen(key_str);
if (key_len != 32 && key_len != 64) return MG_SSL_ERROR;
memset(key, 0, sizeof(key));
key_len = 0;
for (i = 0; key_str[i] != '\0'; i++) {
unsigned char c;
char hc = tolower((int) key_str[i]);
if (hc >= '0' && hc <= '9') {
c = hc - '0';
} else if (hc >= 'a' && hc <= 'f') {
c = hc - 'a' + 0xa;
} else {
return MG_SSL_ERROR;
}
key_len = i / 2;
key[key_len] <<= 4;
key[key_len] |= c;
}
key_len++;
DBG(("identity = '%s', key = (%u)", identity, (unsigned int) key_len));
ctx->identity_len = strlen(identity);
mbuf_append(&ctx->psk, identity, ctx->identity_len + 1);
mbuf_append(&ctx->psk, key, key_len);
SSL_CTX_set_psk_client_callback(ctx->ssl_ctx, mg_ssl_if_ossl_psk_cb);
/* Hack: there is no field for us to keep this, so we use msg_callback_arg */
ctx->ssl_ctx->msg_callback_arg = ctx;
return MG_SSL_OK;
}
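/*
* Input-format sketch for the PSK setter above (illustrative, kept out of
* the build; the identity string is a placeholder): the key is given as 32
* or 64 hex characters, i.e. a 16- or 32-byte PSK.
*/
#if 0
static void example_psk(struct mg_ssl_if_ctx *ctx) {
/* 32 hex chars -> 16-byte key; a 64-char string would give 32 bytes. */
mg_ssl_if_ossl_set_psk(ctx, "device-1",
"0123456789abcdef0123456789abcdef");
}
#endif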
#else
static enum mg_ssl_if_result mg_ssl_if_ossl_set_psk(struct mg_ssl_if_ctx *ctx,
const char *identity,
const char *key_str) {
(void) ctx;
(void) identity;
(void) key_str;
/* Krypton does not support PSK. */
return MG_SSL_ERROR;
}
#endif /* defined(KR_VERSION) */
const char *mg_set_ssl(struct mg_connection *nc, const char *cert,
const char *ca_cert) {
const char *err_msg = NULL;
struct mg_ssl_if_conn_params params;
memset(&params, 0, sizeof(params));
params.cert = cert;
params.ca_cert = ca_cert;
if (mg_ssl_if_conn_init(nc, &params, &err_msg) != MG_SSL_OK) {
return err_msg;
}
return NULL;
}
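/*
* Client-side usage sketch (illustrative, kept out of the build; `ev_handler`
* is a placeholder and signatures are shown without the optional user_data
* argument): passing NULL for `cert` skips the client certificate, while a
* non-NULL `ca_cert` enables peer verification against that CA bundle.
*/
#if 0
static void example_tls_client(struct mg_mgr *mgr,
mg_event_handler_t ev_handler) {
struct mg_connection *nc = mg_connect(mgr, "example.com:443", ev_handler);
const char *err = mg_set_ssl(nc, NULL /* no client cert */, "ca.pem");
if (err != NULL) {
/* err points to a static message such as "Invalid SSL cert". */
}
}
#endif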
#endif /* MG_ENABLE_SSL && MG_SSL_IF == MG_SSL_IF_OPENSSL */
#ifdef MG_MODULE_LINES
#line 1 "mongoose/src/ssl_if_mbedtls.c"
#endif
/*
* Copyright (c) 2014-2016 Cesanta Software Limited
* All rights reserved
*/
#if MG_ENABLE_SSL && MG_SSL_IF == MG_SSL_IF_MBEDTLS
#include <mbedtls/debug.h>
#include <mbedtls/ecp.h>
#include <mbedtls/platform.h>
#include <mbedtls/ssl.h>
#include <mbedtls/x509_crt.h>
static void mg_ssl_mbed_log(void *ctx, int level, const char *file, int line,
const char *str) {
enum cs_log_level cs_level;
switch (level) {
case 1:
cs_level = LL_ERROR;
break;
case 2:
case 3:
cs_level = LL_DEBUG;
break;
default:
cs_level = LL_VERBOSE_DEBUG;
}
/* mbedTLS passes strings with \n at the end, strip it. */
LOG(cs_level, ("%p %.*s", ctx, (int) (strlen(str) - 1), str));
(void) file;
(void) line;
}
struct mg_ssl_if_ctx {
mbedtls_ssl_config *conf;
mbedtls_ssl_context *ssl;
mbedtls_x509_crt *cert;
mbedtls_pk_context *key;
mbedtls_x509_crt *ca_cert;
struct mbuf cipher_suites;
};
/* Must be provided by the platform. ctx is struct mg_connection. */
extern int mg_ssl_if_mbed_random(void *ctx, unsigned char *buf, size_t len);
void mg_ssl_if_init() {
}
enum mg_ssl_if_result mg_ssl_if_conn_accept(struct mg_connection *nc,
struct mg_connection *lc) {
struct mg_ssl_if_ctx *ctx =
(struct mg_ssl_if_ctx *) MG_CALLOC(1, sizeof(*ctx));
struct mg_ssl_if_ctx *lc_ctx = (struct mg_ssl_if_ctx *) lc->ssl_if_data;
nc->ssl_if_data = ctx;
if (ctx == NULL || lc_ctx == NULL) return MG_SSL_ERROR;
ctx->ssl = (mbedtls_ssl_context *) MG_CALLOC(1, sizeof(*ctx->ssl));
if (mbedtls_ssl_setup(ctx->ssl, lc_ctx->conf) != 0) {
return MG_SSL_ERROR;
}
return MG_SSL_OK;
}
static enum mg_ssl_if_result mg_use_cert(struct mg_ssl_if_ctx *ctx,
const char *cert, const char *key,
const char **err_msg);
static enum mg_ssl_if_result mg_use_ca_cert(struct mg_ssl_if_ctx *ctx,
const char *cert);
static enum mg_ssl_if_result mg_set_cipher_list(struct mg_ssl_if_ctx *ctx,
const char *ciphers);
static enum mg_ssl_if_result mg_ssl_if_mbed_set_psk(struct mg_ssl_if_ctx *ctx,
const char *identity,
const char *key);
enum mg_ssl_if_result mg_ssl_if_conn_init(
struct mg_connection *nc, const struct mg_ssl_if_conn_params *params,
const char **err_msg) {
struct mg_ssl_if_ctx *ctx =
(struct mg_ssl_if_ctx *) MG_CALLOC(1, sizeof(*ctx));
DBG(("%p %s,%s,%s", nc, (params->cert ? params->cert : ""),
(params->key ? params->key : ""),
(params->ca_cert ? params->ca_cert : "")));
if (ctx == NULL) {
MG_SET_PTRPTR(err_msg, "Out of memory");
return MG_SSL_ERROR;
}
nc->ssl_if_data = ctx;
ctx->conf = (mbedtls_ssl_config *) MG_CALLOC(1, sizeof(*ctx->conf));
mbuf_init(&ctx->cipher_suites, 0);
mbedtls_ssl_config_init(ctx->conf);
mbedtls_ssl_conf_dbg(ctx->conf, mg_ssl_mbed_log, nc);
if (mbedtls_ssl_config_defaults(
ctx->conf, (nc->flags & MG_F_LISTENING ? MBEDTLS_SSL_IS_SERVER
: MBEDTLS_SSL_IS_CLIENT),
MBEDTLS_SSL_TRANSPORT_STREAM, MBEDTLS_SSL_PRESET_DEFAULT) != 0) {
MG_SET_PTRPTR(err_msg, "Failed to init SSL config");
return MG_SSL_ERROR;
}
/* TLS 1.2 and up */
mbedtls_ssl_conf_min_version(ctx->conf, MBEDTLS_SSL_MAJOR_VERSION_3,
MBEDTLS_SSL_MINOR_VERSION_3);
mbedtls_ssl_conf_rng(ctx->conf, mg_ssl_if_mbed_random, nc);
if (params->cert != NULL &&
mg_use_cert(ctx, params->cert, params->key, err_msg) != MG_SSL_OK) {
return MG_SSL_ERROR;
}
if (params->ca_cert != NULL &&
mg_use_ca_cert(ctx, params->ca_cert) != MG_SSL_OK) {
MG_SET_PTRPTR(err_msg, "Invalid SSL CA cert");
return MG_SSL_ERROR;
}
if (mg_set_cipher_list(ctx, params->cipher_suites) != MG_SSL_OK) {
MG_SET_PTRPTR(err_msg, "Invalid cipher suite list");
return MG_SSL_ERROR;
}
if (mg_ssl_if_mbed_set_psk(ctx, params->psk_identity, params->psk_key) !=
MG_SSL_OK) {
MG_SET_PTRPTR(err_msg, "Invalid PSK settings");
return MG_SSL_ERROR;
}
if (!(nc->flags & MG_F_LISTENING)) {
ctx->ssl = (mbedtls_ssl_context *) MG_CALLOC(1, sizeof(*ctx->ssl));
mbedtls_ssl_init(ctx->ssl);
if (mbedtls_ssl_setup(ctx->ssl, ctx->conf) != 0) {
MG_SET_PTRPTR(err_msg, "Failed to create SSL session");
return MG_SSL_ERROR;
}
if (params->server_name != NULL &&
mbedtls_ssl_set_hostname(ctx->ssl, params->server_name) != 0) {
return MG_SSL_ERROR;
}
}
#ifdef MG_SSL_IF_MBEDTLS_MAX_FRAG_LEN
if (mbedtls_ssl_conf_max_frag_len(ctx->conf,
#if MG_SSL_IF_MBEDTLS_MAX_FRAG_LEN == 512
MBEDTLS_SSL_MAX_FRAG_LEN_512
#elif MG_SSL_IF_MBEDTLS_MAX_FRAG_LEN == 1024
MBEDTLS_SSL_MAX_FRAG_LEN_1024
#elif MG_SSL_IF_MBEDTLS_MAX_FRAG_LEN == 2048
MBEDTLS_SSL_MAX_FRAG_LEN_2048
#elif MG_SSL_IF_MBEDTLS_MAX_FRAG_LEN == 4096
MBEDTLS_SSL_MAX_FRAG_LEN_4096
#else
#error Invalid MG_SSL_IF_MBEDTLS_MAX_FRAG_LEN
#endif
) != 0) {
return MG_SSL_ERROR;
}
#endif
nc->flags |= MG_F_SSL;
return MG_SSL_OK;
}
#if MG_NET_IF == MG_NET_IF_LWIP_LOW_LEVEL
int ssl_socket_send(void *ctx, const unsigned char *buf, size_t len);
int ssl_socket_recv(void *ctx, unsigned char *buf, size_t len);
#else
static int ssl_socket_send(void *ctx, const unsigned char *buf, size_t len) {
struct mg_connection *nc = (struct mg_connection *) ctx;
int n = (int) MG_SEND_FUNC(nc->sock, buf, len, 0);
LOG(LL_DEBUG, ("%p %d -> %d", nc, (int) len, n));
if (n >= 0) return n;
n = mg_get_errno();
return ((n == EAGAIN || n == EINPROGRESS) ? MBEDTLS_ERR_SSL_WANT_WRITE : -1);
}
static int ssl_socket_recv(void *ctx, unsigned char *buf, size_t len) {
struct mg_connection *nc = (struct mg_connection *) ctx;
int n = (int) MG_RECV_FUNC(nc->sock, buf, len, 0);
LOG(LL_DEBUG, ("%p %d <- %d", nc, (int) len, n));
if (n >= 0) return n;
n = mg_get_errno();
return ((n == EAGAIN || n == EINPROGRESS) ? MBEDTLS_ERR_SSL_WANT_READ : -1);
}
#endif
static enum mg_ssl_if_result mg_ssl_if_mbed_err(struct mg_connection *nc,
int ret) {
if (ret == MBEDTLS_ERR_SSL_WANT_READ) return MG_SSL_WANT_READ;
if (ret == MBEDTLS_ERR_SSL_WANT_WRITE) return MG_SSL_WANT_WRITE;
if (ret !=
MBEDTLS_ERR_SSL_PEER_CLOSE_NOTIFY) { /* CLOSE_NOTIFY = Normal shutdown */
LOG(LL_ERROR, ("%p SSL error: %d", nc, ret));
}
nc->err = ret;
nc->flags |= MG_F_CLOSE_IMMEDIATELY;
return MG_SSL_ERROR;
}
static void mg_ssl_if_mbed_free_certs_and_keys(struct mg_ssl_if_ctx *ctx) {
if (ctx->cert != NULL) {
mbedtls_x509_crt_free(ctx->cert);
MG_FREE(ctx->cert);
ctx->cert = NULL;
mbedtls_pk_free(ctx->key);
MG_FREE(ctx->key);
ctx->key = NULL;
}
if (ctx->ca_cert != NULL) {
mbedtls_ssl_conf_ca_chain(ctx->conf, NULL, NULL);
mbedtls_x509_crt_free(ctx->ca_cert);
MG_FREE(ctx->ca_cert);
ctx->ca_cert = NULL;
}
}
enum mg_ssl_if_result mg_ssl_if_handshake(struct mg_connection *nc) {
struct mg_ssl_if_ctx *ctx = (struct mg_ssl_if_ctx *) nc->ssl_if_data;
int err;
/* If bio is not yet set, do it now. */
if (ctx->ssl->p_bio == NULL) {
mbedtls_ssl_set_bio(ctx->ssl, nc, ssl_socket_send, ssl_socket_recv, NULL);
}
err = mbedtls_ssl_handshake(ctx->ssl);
if (err != 0) return mg_ssl_if_mbed_err(nc, err);
#ifdef MG_SSL_IF_MBEDTLS_FREE_CERTS
/*
* Free the peer certificate, we don't need it after handshake.
* Note that this effectively disables renegotiation.
*/
mbedtls_x509_crt_free(ctx->ssl->session->peer_cert);
mbedtls_free(ctx->ssl->session->peer_cert);
ctx->ssl->session->peer_cert = NULL;
/* On a client connection we can also free our own and CA certs. */
if (nc->listener == NULL) {
if (ctx->conf->key_cert != NULL) {
/* Note that this assumes one key_cert entry, which matches our init. */
MG_FREE(ctx->conf->key_cert);
ctx->conf->key_cert = NULL;
}
mbedtls_ssl_conf_ca_chain(ctx->conf, NULL, NULL);
mg_ssl_if_mbed_free_certs_and_keys(ctx);
}
#endif
return MG_SSL_OK;
}
int mg_ssl_if_read(struct mg_connection *nc, void *buf, size_t buf_size) {
struct mg_ssl_if_ctx *ctx = (struct mg_ssl_if_ctx *) nc->ssl_if_data;
int n = mbedtls_ssl_read(ctx->ssl, (unsigned char *) buf, buf_size);
DBG(("%p %d -> %d", nc, (int) buf_size, n));
if (n < 0) return mg_ssl_if_mbed_err(nc, n);
if (n == 0) nc->flags |= MG_F_CLOSE_IMMEDIATELY;
return n;
}
int mg_ssl_if_write(struct mg_connection *nc, const void *data, size_t len) {
struct mg_ssl_if_ctx *ctx = (struct mg_ssl_if_ctx *) nc->ssl_if_data;
int n = mbedtls_ssl_write(ctx->ssl, (const unsigned char *) data, len);
DBG(("%p %d -> %d", nc, (int) len, n));
if (n < 0) return mg_ssl_if_mbed_err(nc, n);
return n;
}
void mg_ssl_if_conn_close_notify(struct mg_connection *nc) {
struct mg_ssl_if_ctx *ctx = (struct mg_ssl_if_ctx *) nc->ssl_if_data;
if (ctx == NULL) return;
mbedtls_ssl_close_notify(ctx->ssl);
}
void mg_ssl_if_conn_free(struct mg_connection *nc) {
struct mg_ssl_if_ctx *ctx = (struct mg_ssl_if_ctx *) nc->ssl_if_data;
if (ctx == NULL) return;
nc->ssl_if_data = NULL;
if (ctx->ssl != NULL) {
mbedtls_ssl_free(ctx->ssl);
MG_FREE(ctx->ssl);
}
mg_ssl_if_mbed_free_certs_and_keys(ctx);
if (ctx->conf != NULL) {
mbedtls_ssl_config_free(ctx->conf);
MG_FREE(ctx->conf);
}
mbuf_free(&ctx->cipher_suites);
memset(ctx, 0, sizeof(*ctx));
MG_FREE(ctx);
}
static enum mg_ssl_if_result mg_use_ca_cert(struct mg_ssl_if_ctx *ctx,
const char *ca_cert) {
if (ca_cert == NULL || strcmp(ca_cert, "*") == 0) {
return MG_SSL_OK;
}
ctx->ca_cert = (mbedtls_x509_crt *) MG_CALLOC(1, sizeof(*ctx->ca_cert));
mbedtls_x509_crt_init(ctx->ca_cert);
if (mbedtls_x509_crt_parse_file(ctx->ca_cert, ca_cert) != 0) {
return MG_SSL_ERROR;
}
mbedtls_ssl_conf_ca_chain(ctx->conf, ctx->ca_cert, NULL);
mbedtls_ssl_conf_authmode(ctx->conf, MBEDTLS_SSL_VERIFY_REQUIRED);
return MG_SSL_OK;
}
static enum mg_ssl_if_result mg_use_cert(struct mg_ssl_if_ctx *ctx,
const char *cert, const char *key,
const char **err_msg) {
if (key == NULL) key = cert;
if (cert == NULL || cert[0] == '\0' || key == NULL || key[0] == '\0') {
return MG_SSL_OK;
}
ctx->cert = (mbedtls_x509_crt *) MG_CALLOC(1, sizeof(*ctx->cert));
mbedtls_x509_crt_init(ctx->cert);
ctx->key = (mbedtls_pk_context *) MG_CALLOC(1, sizeof(*ctx->key));
mbedtls_pk_init(ctx->key);
if (mbedtls_x509_crt_parse_file(ctx->cert, cert) != 0) {
MG_SET_PTRPTR(err_msg, "Invalid SSL cert");
return MG_SSL_ERROR;
}
if (mbedtls_pk_parse_keyfile(ctx->key, key, NULL) != 0) {
MG_SET_PTRPTR(err_msg, "Invalid SSL key");
return MG_SSL_ERROR;
}
if (mbedtls_ssl_conf_own_cert(ctx->conf, ctx->cert, ctx->key) != 0) {
MG_SET_PTRPTR(err_msg, "Invalid SSL key or cert");
return MG_SSL_ERROR;
}
return MG_SSL_OK;
}
static const int mg_s_cipher_list[] = {
MBEDTLS_TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
MBEDTLS_TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
MBEDTLS_TLS_DHE_RSA_WITH_AES_128_GCM_SHA256,
MBEDTLS_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256,
MBEDTLS_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256,
MBEDTLS_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256,
MBEDTLS_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA,
MBEDTLS_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256,
MBEDTLS_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256,
MBEDTLS_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA,
MBEDTLS_TLS_RSA_WITH_AES_128_GCM_SHA256,
MBEDTLS_TLS_RSA_WITH_AES_128_CBC_SHA256,
MBEDTLS_TLS_RSA_WITH_AES_128_CBC_SHA, 0};
/*
* Ciphers can be specified as a colon-separated list of cipher suite names.
* These can be found in
* https://github.com/ARMmbed/mbedtls/blob/development/library/ssl_ciphersuites.c#L267
* E.g.: TLS-ECDHE-ECDSA-WITH-AES-128-GCM-SHA256:TLS-DHE-RSA-WITH-AES-256-CCM
*/
static enum mg_ssl_if_result mg_set_cipher_list(struct mg_ssl_if_ctx *ctx,
const char *ciphers) {
if (ciphers != NULL) {
int l, id;
const char *s = ciphers, *e;
char tmp[50];
while (s != NULL) {
e = strchr(s, ':');
l = (e != NULL ? (int) (e - s) : (int) strlen(s));
/* Clamp to the buffer; an over-long name cannot match a known suite. */
if (l >= (int) sizeof(tmp)) l = (int) sizeof(tmp) - 1;
strncpy(tmp, s, l);
tmp[l] = '\0';
id = mbedtls_ssl_get_ciphersuite_id(tmp);
DBG(("%s -> %04x", tmp, id));
if (id != 0) {
mbuf_append(&ctx->cipher_suites, &id, sizeof(id));
}
s = (e != NULL ? e + 1 : NULL);
}
if (ctx->cipher_suites.len == 0) return MG_SSL_ERROR;
id = 0;
mbuf_append(&ctx->cipher_suites, &id, sizeof(id));
mbuf_trim(&ctx->cipher_suites);
mbedtls_ssl_conf_ciphersuites(ctx->conf,
(const int *) ctx->cipher_suites.buf);
} else {
mbedtls_ssl_conf_ciphersuites(ctx->conf, mg_s_cipher_list);
}
return MG_SSL_OK;
}
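/*
* A sketch of the colon-separated cipher list format accepted above
* (illustrative, kept out of the build): unknown names are skipped, and a
* list that yields no known suites is an error.
*/
#if 0
static const char *example_ciphers =
"TLS-ECDHE-ECDSA-WITH-AES-128-GCM-SHA256:"
"TLS-ECDHE-RSA-WITH-AES-128-GCM-SHA256";
/* ... params.cipher_suites = example_ciphers; ... */
#endif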
static enum mg_ssl_if_result mg_ssl_if_mbed_set_psk(struct mg_ssl_if_ctx *ctx,
const char *identity,
const char *key_str) {
unsigned char key[32];
size_t key_len;
size_t i = 0;
if (identity == NULL && key_str == NULL) return MG_SSL_OK;
if (identity == NULL || key_str == NULL) return MG_SSL_ERROR;
key_len = strlen(key_str);
if (key_len != 32 && key_len != 64) return MG_SSL_ERROR;
memset(key, 0, sizeof(key));
key_len = 0;
for (i = 0; key_str[i] != '\0'; i++) {
unsigned char c;
char hc = tolower((int) key_str[i]);
if (hc >= '0' && hc <= '9') {
c = hc - '0';
} else if (hc >= 'a' && hc <= 'f') {
c = hc - 'a' + 0xa;
} else {
return MG_SSL_ERROR;
}
key_len = i / 2;
key[key_len] <<= 4;
key[key_len] |= c;
}
key_len++;
DBG(("identity = '%s', key = (%u)", identity, (unsigned int) key_len));
/* mbedTLS makes copies of psk and identity. */
if (mbedtls_ssl_conf_psk(ctx->conf, (const unsigned char *) key, key_len,
(const unsigned char *) identity,
strlen(identity)) != 0) {
return MG_SSL_ERROR;
}
return MG_SSL_OK;
}
const char *mg_set_ssl(struct mg_connection *nc, const char *cert,
const char *ca_cert) {
const char *err_msg = NULL;
struct mg_ssl_if_conn_params params;
memset(&params, 0, sizeof(params));
params.cert = cert;
params.ca_cert = ca_cert;
if (mg_ssl_if_conn_init(nc, &params, &err_msg) != MG_SSL_OK) {
return err_msg;
}
return NULL;
}
/* Lazy RNG. Warning: it would be a bad idea to do this in production! */
#ifdef MG_SSL_MBED_DUMMY_RANDOM
int mg_ssl_if_mbed_random(void *ctx, unsigned char *buf, size_t len) {
(void) ctx;
while (len--) *buf++ = rand();
return 0;
}
#endif
#endif /* MG_ENABLE_SSL && MG_SSL_IF == MG_SSL_IF_MBEDTLS */
#ifdef MG_MODULE_LINES
#line 1 "mongoose/src/uri.c"
#endif
/*
* Copyright (c) 2014 Cesanta Software Limited
* All rights reserved
*/
/* Amalgamated: #include "mongoose/src/internal.h" */
/* Amalgamated: #include "mongoose/src/uri.h" */
/*
* Scan the string until `sep`, keeping track of component boundaries in `res`.
*
* On return, `*p` points to the char after the separator, or to `end`.
*/
static void parse_uri_component(const char **p, const char *end, char sep,
struct mg_str *res) {
res->p = *p;
for (; *p < end; (*p)++) {
if (**p == sep) {
break;
}
}
res->len = (*p) - res->p;
if (*p < end) (*p)++;
}
int mg_parse_uri(struct mg_str uri, struct mg_str *scheme,
struct mg_str *user_info, struct mg_str *host,
unsigned int *port, struct mg_str *path, struct mg_str *query,
struct mg_str *fragment) {
struct mg_str rscheme = {0, 0}, ruser_info = {0, 0}, rhost = {0, 0},
rpath = {0, 0}, rquery = {0, 0}, rfragment = {0, 0};
unsigned int rport = 0;
enum {
P_START,
P_SCHEME_OR_PORT,
P_USER_INFO,
P_HOST,
P_PORT,
P_REST
} state = P_START;
const char *p = uri.p, *end = p + uri.len;
while (p < end) {
switch (state) {
case P_START:
/*
* expecting one of:
* - `scheme://xxxx`
* - `xxxx:port`
* - `xxxx/path`
*/
for (; p < end; p++) {
if (*p == ':') {
state = P_SCHEME_OR_PORT;
break;
} else if (*p == '/') {
state = P_REST;
break;
}
}
if (state == P_START || state == P_REST) {
rhost.p = uri.p;
rhost.len = p - uri.p;
}
break;
case P_SCHEME_OR_PORT:
if (end - p >= 3 && strncmp(p, "://", 3) == 0) {
rscheme.p = uri.p;
rscheme.len = p - uri.p;
state = P_USER_INFO;
p += 2; /* point to last separator char */
} else {
rhost.p = uri.p;
rhost.len = p - uri.p;
state = P_PORT;
}
break;
case P_USER_INFO:
p++;
ruser_info.p = p;
for (; p < end; p++) {
if (*p == '@') {
state = P_HOST;
break;
} else if (*p == '/') {
break;
}
}
if (p == end || *p == '/') {
/* backtrack and parse as host */
state = P_HOST;
p = ruser_info.p;
}
ruser_info.len = p - ruser_info.p;
break;
case P_HOST:
if (*p == '@') p++;
rhost.p = p;
for (; p < end; p++) {
if (*p == ':') {
state = P_PORT;
break;
} else if (*p == '/') {
state = P_REST;
break;
}
}
rhost.len = p - rhost.p;
break;
case P_PORT:
p++;
for (; p < end; p++) {
if (*p == '/') {
state = P_REST;
break;
}
rport *= 10;
rport += *p - '0';
}
break;
case P_REST:
/* `p` points to separator. `path` includes the separator */
parse_uri_component(&p, end, '?', &rpath);
parse_uri_component(&p, end, '#', &rquery);
parse_uri_component(&p, end, '\0', &rfragment);
break;
}
}
if (scheme != 0) *scheme = rscheme;
if (user_info != 0) *user_info = ruser_info;
if (host != 0) *host = rhost;
if (port != 0) *port = rport;
if (path != 0) *path = rpath;
if (query != 0) *query = rquery;
if (fragment != 0) *fragment = rfragment;
return 0;
}
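/*
* Worked example (illustrative, kept out of the build): every output
* argument is optional and may be NULL.
*/
#if 0
static void example_parse_uri(void) {
struct mg_str scheme, user_info, host, path, query, fragment;
unsigned int port = 0;
mg_parse_uri(mg_mk_str("http://user@example.com:8080/path?q=1#frag"),
&scheme, &user_info, &host, &port, &path, &query, &fragment);
/* scheme = "http", user_info = "user", host = "example.com", port = 8080,
* path = "/path" (separator included), query = "q=1", fragment = "frag" */
}
#endif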
/* Normalize the URI path. Remove/resolve "." and "..". */
int mg_normalize_uri_path(const struct mg_str *in, struct mg_str *out) {
const char *s = in->p, *se = s + in->len;
char *cp = (char *) out->p, *d;
if (in->len == 0 || *s != '/') {
out->len = 0;
return 0;
}
d = cp;
while (s < se) {
const char *next = s;
struct mg_str component;
parse_uri_component(&next, se, '/', &component);
if (mg_vcmp(&component, ".") == 0) {
/* Yum. */
} else if (mg_vcmp(&component, "..") == 0) {
/* Backtrack to previous slash. */
if (d > cp + 1 && *(d - 1) == '/') d--;
while (d > cp && *(d - 1) != '/') d--;
} else {
memmove(d, s, next - s);
d += next - s;
}
s = next;
}
if (d == cp) *d++ = '/';
out->p = cp;
out->len = d - cp;
return 1;
}
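/*
* Worked example (illustrative, kept out of the build): `out->p` must point
* to a writable buffer of at least `in->len` bytes; `in` and `out` may alias.
*/
#if 0
static void example_normalize_path(void) {
char buf[32];
struct mg_str in = mg_mk_str("/a/./b/../c");
struct mg_str out;
out.p = buf;
out.len = 0;
mg_normalize_uri_path(&in, &out); /* out is now "/a/c", out.len == 4 */
}
#endif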
#ifdef MG_MODULE_LINES
#line 1 "mongoose/src/http.c"
#endif
/*
* Copyright (c) 2014 Cesanta Software Limited
* All rights reserved
*/
#if MG_ENABLE_HTTP
/* Amalgamated: #include "common/md5.h" */
/* Amalgamated: #include "common/sha1.h" */
/* Amalgamated: #include "mongoose/src/internal.h" */
/* Amalgamated: #include "mongoose/src/util.h" */
static const char *mg_version_header = "Mongoose/" MG_VERSION;
enum mg_http_proto_data_type { DATA_NONE, DATA_FILE, DATA_PUT };
struct mg_http_proto_data_file {
FILE *fp; /* Opened file. */
int64_t cl; /* Content-Length. How many bytes to send. */
int64_t sent; /* How many bytes have been already sent. */
int keepalive; /* Keep connection open after sending. */
enum mg_http_proto_data_type type;
};
#if MG_ENABLE_HTTP_CGI
struct mg_http_proto_data_cgi {
struct mg_connection *cgi_nc;
};
#endif
struct mg_http_proto_data_chunked {
int64_t body_len; /* How many bytes of the chunked body have been reassembled. */
};
struct mg_http_endpoint {
struct mg_http_endpoint *next;
const char *name;
size_t name_len;
mg_event_handler_t handler;
#if MG_ENABLE_CALLBACK_USERDATA
void *user_data;
#endif
};
enum mg_http_multipart_stream_state {
MPS_BEGIN,
MPS_WAITING_FOR_BOUNDARY,
MPS_WAITING_FOR_CHUNK,
MPS_GOT_CHUNK,
MPS_GOT_BOUNDARY,
MPS_FINALIZE,
MPS_FINISHED
};
struct mg_http_multipart_stream {
const char *boundary;
int boundary_len;
const char *var_name;
const char *file_name;
void *user_data;
int prev_io_len;
enum mg_http_multipart_stream_state state;
int processing_part;
};
struct mg_reverse_proxy_data {
struct mg_connection *linked_conn;
};
struct mg_http_proto_data {
#if MG_ENABLE_FILESYSTEM
struct mg_http_proto_data_file file;
#endif
#if MG_ENABLE_HTTP_CGI
struct mg_http_proto_data_cgi cgi;
#endif
#if MG_ENABLE_HTTP_STREAMING_MULTIPART
struct mg_http_multipart_stream mp_stream;
#endif
struct mg_http_proto_data_chunked chunk;
struct mg_http_endpoint *endpoints;
mg_event_handler_t endpoint_handler;
struct mg_reverse_proxy_data reverse_proxy_data;
};
static void mg_http_conn_destructor(void *proto_data);
struct mg_connection *mg_connect_http_base(
struct mg_mgr *mgr, MG_CB(mg_event_handler_t ev_handler, void *user_data),
struct mg_connect_opts opts, const char *schema, const char *schema_ssl,
const char *url, const char **path, char **user, char **pass, char **addr);
static struct mg_http_proto_data *mg_http_get_proto_data(
struct mg_connection *c) {
if (c->proto_data == NULL) {
c->proto_data = MG_CALLOC(1, sizeof(struct mg_http_proto_data));
c->proto_data_destructor = mg_http_conn_destructor;
}
return (struct mg_http_proto_data *) c->proto_data;
}
#if MG_ENABLE_HTTP_STREAMING_MULTIPART
static void mg_http_free_proto_data_mp_stream(
struct mg_http_multipart_stream *mp) {
MG_FREE((void *) mp->boundary);
MG_FREE((void *) mp->var_name);
MG_FREE((void *) mp->file_name);
memset(mp, 0, sizeof(*mp));
}
#endif
#if MG_ENABLE_FILESYSTEM
static void mg_http_free_proto_data_file(struct mg_http_proto_data_file *d) {
if (d != NULL) {
if (d->fp != NULL) {
fclose(d->fp);
}
memset(d, 0, sizeof(struct mg_http_proto_data_file));
}
}
#endif
static void mg_http_free_proto_data_endpoints(struct mg_http_endpoint **ep) {
struct mg_http_endpoint *current = *ep;
while (current != NULL) {
struct mg_http_endpoint *tmp = current->next;
MG_FREE((void *) current->name);
MG_FREE(current);
current = tmp;
}
*ep = NULL;
}
static void mg_http_free_reverse_proxy_data(struct mg_reverse_proxy_data *rpd) {
if (rpd->linked_conn != NULL) {
/*
* This connection has a linked one; unlink and close it, since _this_
* connection is going to die and it makes no sense to keep the other.
*/
struct mg_http_proto_data *pd = mg_http_get_proto_data(rpd->linked_conn);
if (pd->reverse_proxy_data.linked_conn != NULL) {
pd->reverse_proxy_data.linked_conn->flags |= MG_F_SEND_AND_CLOSE;
pd->reverse_proxy_data.linked_conn = NULL;
}
rpd->linked_conn = NULL;
}
}
static void mg_http_conn_destructor(void *proto_data) {
struct mg_http_proto_data *pd = (struct mg_http_proto_data *) proto_data;
#if MG_ENABLE_FILESYSTEM
mg_http_free_proto_data_file(&pd->file);
#endif
#if MG_ENABLE_HTTP_CGI
mg_http_free_proto_data_cgi(&pd->cgi);
#endif
#if MG_ENABLE_HTTP_STREAMING_MULTIPART
mg_http_free_proto_data_mp_stream(&pd->mp_stream);
#endif
mg_http_free_proto_data_endpoints(&pd->endpoints);
mg_http_free_reverse_proxy_data(&pd->reverse_proxy_data);
MG_FREE(proto_data);
}
#if MG_ENABLE_FILESYSTEM
#define MIME_ENTRY(_ext, _type) \
{ _ext, sizeof(_ext) - 1, _type }
static const struct {
const char *extension;
size_t ext_len;
const char *mime_type;
} mg_static_builtin_mime_types[] = {
MIME_ENTRY("html", "text/html"),
MIME_ENTRY("html", "text/html"),
MIME_ENTRY("htm", "text/html"),
MIME_ENTRY("shtm", "text/html"),
MIME_ENTRY("shtml", "text/html"),
MIME_ENTRY("css", "text/css"),
MIME_ENTRY("js", "application/x-javascript"),
MIME_ENTRY("ico", "image/x-icon"),
MIME_ENTRY("gif", "image/gif"),
MIME_ENTRY("jpg", "image/jpeg"),
MIME_ENTRY("jpeg", "image/jpeg"),
MIME_ENTRY("png", "image/png"),
MIME_ENTRY("svg", "image/svg+xml"),
MIME_ENTRY("txt", "text/plain"),
MIME_ENTRY("torrent", "application/x-bittorrent"),
MIME_ENTRY("wav", "audio/x-wav"),
MIME_ENTRY("mp3", "audio/x-mp3"),
MIME_ENTRY("mid", "audio/mid"),
MIME_ENTRY("m3u", "audio/x-mpegurl"),
MIME_ENTRY("ogg", "application/ogg"),
MIME_ENTRY("ram", "audio/x-pn-realaudio"),
MIME_ENTRY("xml", "text/xml"),
MIME_ENTRY("ttf", "application/x-font-ttf"),
MIME_ENTRY("json", "application/json"),
MIME_ENTRY("xslt", "application/xml"),
MIME_ENTRY("xsl", "application/xml"),
MIME_ENTRY("ra", "audio/x-pn-realaudio"),
MIME_ENTRY("doc", "application/msword"),
MIME_ENTRY("exe", "application/octet-stream"),
MIME_ENTRY("zip", "application/x-zip-compressed"),
MIME_ENTRY("xls", "application/excel"),
MIME_ENTRY("tgz", "application/x-tar-gz"),
MIME_ENTRY("tar", "application/x-tar"),
MIME_ENTRY("gz", "application/x-gunzip"),
MIME_ENTRY("arj", "application/x-arj-compressed"),
MIME_ENTRY("rar", "application/x-rar-compressed"),
MIME_ENTRY("rtf", "application/rtf"),
MIME_ENTRY("pdf", "application/pdf"),
MIME_ENTRY("swf", "application/x-shockwave-flash"),
MIME_ENTRY("mpg", "video/mpeg"),
MIME_ENTRY("webm", "video/webm"),
MIME_ENTRY("mpeg", "video/mpeg"),
MIME_ENTRY("mov", "video/quicktime"),
MIME_ENTRY("mp4", "video/mp4"),
MIME_ENTRY("m4v", "video/x-m4v"),
MIME_ENTRY("asf", "video/x-ms-asf"),
MIME_ENTRY("avi", "video/x-msvideo"),
MIME_ENTRY("bmp", "image/bmp"),
{NULL, 0, NULL}};
static struct mg_str mg_get_mime_type(const char *path, const char *dflt,
const struct mg_serve_http_opts *opts) {
const char *ext, *overrides;
size_t i, path_len;
struct mg_str r, k, v;
path_len = strlen(path);
overrides = opts->custom_mime_types;
while ((overrides = mg_next_comma_list_entry(overrides, &k, &v)) != NULL) {
ext = path + (path_len - k.len);
if (path_len > k.len && mg_vcasecmp(&k, ext) == 0) {
return v;
}
}
for (i = 0; mg_static_builtin_mime_types[i].extension != NULL; i++) {
ext = path + (path_len - mg_static_builtin_mime_types[i].ext_len);
if (path_len > mg_static_builtin_mime_types[i].ext_len && ext[-1] == '.' &&
mg_casecmp(ext, mg_static_builtin_mime_types[i].extension) == 0) {
r.p = mg_static_builtin_mime_types[i].mime_type;
r.len = strlen(r.p);
return r;
}
}
r.p = dflt;
r.len = strlen(r.p);
return r;
}
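/*
* Override sketch (illustrative, kept out of the build; the entry format is
* inferred from the suffix comparison above): each comma-separated entry maps
* a path suffix, typically ".ext", to a MIME type, and overrides take
* precedence over the built-in table.
*/
#if 0
static void example_mime_overrides(struct mg_serve_http_opts *opts) {
opts->custom_mime_types = ".md=text/markdown,.log=text/plain";
}
#endif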
#endif
/*
* Check whether full request is buffered. Return:
* -1 if request is malformed
* 0 if request is not yet fully buffered
* >0 actual request length, including last \r\n\r\n
*/
static int mg_http_get_request_len(const char *s, int buf_len) {
const unsigned char *buf = (unsigned char *) s;
int i;
for (i = 0; i < buf_len; i++) {
if (!isprint(buf[i]) && buf[i] != '\r' && buf[i] != '\n' && buf[i] < 128) {
return -1;
} else if (buf[i] == '\n' && i + 1 < buf_len && buf[i + 1] == '\n') {
return i + 2;
} else if (buf[i] == '\n' && i + 2 < buf_len && buf[i + 1] == '\r' &&
buf[i + 2] == '\n') {
return i + 3;
}
}
return 0;
}
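/*
* Worked example (illustrative, kept out of the build): a complete
* "GET / HTTP/1.0\r\n\r\n" request is 18 bytes, so 18 is returned; drop the
* final "\n" and the request is not fully buffered, so 0 is returned.
*/
#if 0
static void example_request_len(void) {
int n = mg_http_get_request_len("GET / HTTP/1.0\r\n\r\n", 18); /* 18 */
int m = mg_http_get_request_len("GET / HTTP/1.0\r\n\r", 17);   /* 0 */
(void) n;
(void) m;
}
#endif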
static const char *mg_http_parse_headers(const char *s, const char *end,
int len, struct http_message *req) {
int i = 0;
while (i < (int) ARRAY_SIZE(req->header_names) - 1) {
struct mg_str *k = &req->header_names[i], *v = &req->header_values[i];
s = mg_skip(s, end, ": ", k);
s = mg_skip(s, end, "\r\n", v);
while (v->len > 0 && v->p[v->len - 1] == ' ') {
v->len--; /* Trim trailing spaces in header value */
}
/*
* If the header value is empty, skip it and go to the next one (if any).
* NOTE: do not add it to header_values, because doing so would change the
* API behaviour.
*/
if (k->len != 0 && v->len == 0) {
continue;
}
if (k->len == 0 || v->len == 0) {
k->p = v->p = NULL;
k->len = v->len = 0;
break;
}
if (!mg_ncasecmp(k->p, "Content-Length", 14)) {
req->body.len = (size_t) to64(v->p);
req->message.len = len + req->body.len;
}
i++;
}
return s;
}
int mg_parse_http(const char *s, int n, struct http_message *hm, int is_req) {
const char *end, *qs;
int len = mg_http_get_request_len(s, n);
if (len <= 0) return len;
memset(hm, 0, sizeof(*hm));
hm->message.p = s;
hm->body.p = s + len;
hm->message.len = hm->body.len = (size_t) ~0;
end = s + len;
/* Request is fully buffered. Skip leading whitespaces. */
while (s < end && isspace(*(unsigned char *) s)) s++;
if (is_req) {
/* Parse request line: method, URI, proto */
s = mg_skip(s, end, " ", &hm->method);
s = mg_skip(s, end, " ", &hm->uri);
s = mg_skip(s, end, "\r\n", &hm->proto);
if (hm->uri.p <= hm->method.p || hm->proto.p <= hm->uri.p) return -1;
/* If URI contains '?' character, initialize query_string */
if ((qs = (char *) memchr(hm->uri.p, '?', hm->uri.len)) != NULL) {
hm->query_string.p = qs + 1;
hm->query_string.len = &hm->uri.p[hm->uri.len] - (qs + 1);
hm->uri.len = qs - hm->uri.p;
}
} else {
s = mg_skip(s, end, " ", &hm->proto);
if (end - s < 4 || s[3] != ' ') return -1;
hm->resp_code = atoi(s);
if (hm->resp_code < 100 || hm->resp_code >= 600) return -1;
s += 4;
s = mg_skip(s, end, "\r\n", &hm->resp_status_msg);
}
s = mg_http_parse_headers(s, end, len, hm);
/*
* mg_parse_http() is used to parse both HTTP requests and HTTP
* responses. If an HTTP response does not have Content-Length set, the
* body is read until the socket is closed, i.e. body.len is infinite (~0).
*
* For HTTP requests though, according to
* http://tools.ietf.org/html/rfc7231#section-8.1.3,
* only the POST and PUT methods have defined body semantics. Therefore,
* if this is a request, Content-Length is not set, and the method is
* neither PUT nor POST, reset the body length to zero.
*/
if (hm->body.len == (size_t) ~0 && is_req &&
mg_vcasecmp(&hm->method, "PUT") != 0 &&
mg_vcasecmp(&hm->method, "POST") != 0) {
hm->body.len = 0;
hm->message.len = len;
}
return len;
}
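/*
* Usage sketch (illustrative, kept out of the build): parse a request, then
* look up a header with mg_get_http_header() below.
*/
#if 0
static void example_parse_http(void) {
const char *msg = "GET /index.html HTTP/1.1\r\nHost: example.com\r\n\r\n";
struct http_message hm;
int len = mg_parse_http(msg, (int) strlen(msg), &hm, 1 /* is_req */);
if (len > 0) {
/* hm.method = "GET", hm.uri = "/index.html", hm.proto = "HTTP/1.1";
* no Content-Length and not PUT/POST, so hm.body.len == 0. */
struct mg_str *host = mg_get_http_header(&hm, "Host"); /* "example.com" */
(void) host;
}
}
#endif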
struct mg_str *mg_get_http_header(struct http_message *hm, const char *name) {
size_t i, len = strlen(name);
for (i = 0; hm->header_names[i].len > 0; i++) {
struct mg_str *h = &hm->header_names[i], *v = &hm->header_values[i];
if (h->p != NULL && h->len == len && !mg_ncasecmp(h->p, name, len))
return v;
}
return NULL;
}
#if MG_ENABLE_FILESYSTEM
static void mg_http_transfer_file_data(struct mg_connection *nc) {
struct mg_http_proto_data *pd = mg_http_get_proto_data(nc);
char buf[MG_MAX_HTTP_SEND_MBUF];
size_t n = 0, to_read = 0, left = (size_t)(pd->file.cl - pd->file.sent);
if (pd->file.type == DATA_FILE) {
struct mbuf *io = &nc->send_mbuf;
if (io->len < sizeof(buf)) {
to_read = sizeof(buf) - io->len;
}
if (left > 0 && to_read > left) {
to_read = left;
}
if (to_read == 0) {
/* Rate limiting. send_mbuf is too full, wait until it's drained. */
} else if (pd->file.sent < pd->file.cl &&
(n = mg_fread(buf, 1, to_read, pd->file.fp)) > 0) {
mg_send(nc, buf, n);
pd->file.sent += n;
} else {
if (!pd->file.keepalive) nc->flags |= MG_F_SEND_AND_CLOSE;
mg_http_free_proto_data_file(&pd->file);
}
} else if (pd->file.type == DATA_PUT) {
struct mbuf *io = &nc->recv_mbuf;
size_t to_write = left <= 0 ? 0 : left < io->len ? (size_t) left : io->len;
size_t n = mg_fwrite(io->buf, 1, to_write, pd->file.fp);
if (n > 0) {
mbuf_remove(io, n);
pd->file.sent += n;
}
if (n == 0 || pd->file.sent >= pd->file.cl) {
if (!pd->file.keepalive) nc->flags |= MG_F_SEND_AND_CLOSE;
mg_http_free_proto_data_file(&pd->file);
}
}
#if MG_ENABLE_HTTP_CGI
else if (pd->cgi.cgi_nc != NULL) {
/* This is POST data that needs to be forwarded to the CGI process. */
mg_forward(nc, pd->cgi.cgi_nc);
}
#endif
}
#endif /* MG_ENABLE_FILESYSTEM */
/*
* Parse a chunked-encoded buffer. Return 0 if the buffer is not chunk-encoded
* or the chunk is incomplete. If the chunk is fully buffered, return the
* total number of bytes it occupies (including framing), and store the
* payload location and length in `chunk_data` and `chunk_len`.
*/
static size_t mg_http_parse_chunk(char *buf, size_t len, char **chunk_data,
size_t *chunk_len) {
unsigned char *s = (unsigned char *) buf;
size_t n = 0; /* scanned chunk length */
size_t i = 0; /* index in s */
/* Scan chunk length. That should be a hexadecimal number. */
while (i < len && isxdigit(s[i])) {
n *= 16;
n += (s[i] >= '0' && s[i] <= '9') ? s[i] - '0' : tolower(s[i]) - 'a' + 10;
i++;
}
/* Skip new line */
if (i == 0 || i + 2 > len || s[i] != '\r' || s[i + 1] != '\n') {
return 0;
}
i += 2;
/* Record where the data is */
*chunk_data = (char *) s + i;
*chunk_len = n;
/* Skip data */
i += n;
/* Skip new line */
if (i == 0 || i + 2 > len || s[i] != '\r' || s[i + 1] != '\n') {
return 0;
}
return i + 2;
}
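/*
* Worked example (illustrative, kept out of the build): a fully buffered
* chunk "4\r\nWiki\r\n" (9 bytes) yields chunk_data -> "Wiki",
* chunk_len == 4 and a return value of 9; an incomplete "4\r\nWi" returns 0.
*/
#if 0
static void example_parse_chunk(void) {
char buf[] = "4\r\nWiki\r\n";
char *data;
size_t dlen;
size_t n = mg_http_parse_chunk(buf, sizeof(buf) - 1, &data, &dlen);
(void) n; /* 9 */
}
#endif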
MG_INTERNAL size_t mg_handle_chunked(struct mg_connection *nc,
struct http_message *hm, char *buf,
size_t blen) {
struct mg_http_proto_data *pd = mg_http_get_proto_data(nc);
char *data;
size_t i, n, data_len, body_len, zero_chunk_received = 0;
/* Find the piece of received data that is not yet reassembled */
body_len = (size_t) pd->chunk.body_len;
assert(blen >= body_len);
/* Traverse all fully buffered chunks */
for (i = body_len;
(n = mg_http_parse_chunk(buf + i, blen - i, &data, &data_len)) > 0;
i += n) {
/* Collapse chunk data to the rest of HTTP body */
memmove(buf + body_len, data, data_len);
body_len += data_len;
hm->body.len = body_len;
if (data_len == 0) {
zero_chunk_received = 1;
i += n;
break;
}
}
if (i > body_len) {
/* Shift unparsed content to the parsed body */
assert(i <= blen);
memmove(buf + body_len, buf + i, blen - i);
memset(buf + body_len + blen - i, 0, i - body_len);
nc->recv_mbuf.len -= i - body_len;
pd->chunk.body_len = body_len;
/* Send MG_EV_HTTP_CHUNK event */
nc->flags &= ~MG_F_DELETE_CHUNK;
mg_call(nc, nc->handler, nc->user_data, MG_EV_HTTP_CHUNK, hm);
/* Delete processed data if user set MG_F_DELETE_CHUNK flag */
if (nc->flags & MG_F_DELETE_CHUNK) {
memset(buf, 0, body_len);
memmove(buf, buf + body_len, blen - i);
nc->recv_mbuf.len -= body_len;
hm->body.len = 0;
pd->chunk.body_len = 0;
}
if (zero_chunk_received) {
/* Total message size is len(body) + len(headers) */
hm->message.len =
(size_t) pd->chunk.body_len + blen - i + (hm->body.p - hm->message.p);
}
}
return body_len;
}
struct mg_http_endpoint *mg_http_get_endpoint_handler(struct mg_connection *nc,
struct mg_str *uri_path) {
struct mg_http_proto_data *pd;
struct mg_http_endpoint *ret = NULL;
int matched, matched_max = 0;
struct mg_http_endpoint *ep;
if (nc == NULL) {
return NULL;
}
pd = mg_http_get_proto_data(nc);
ep = pd->endpoints;
while (ep != NULL) {
const struct mg_str name_s = {ep->name, ep->name_len};
if ((matched = mg_match_prefix_n(name_s, *uri_path)) != -1) {
if (matched > matched_max) {
/* Looking for the longest suitable handler */
ret = ep;
matched_max = matched;
}
}
ep = ep->next;
}
return ret;
}
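/*
* Routing sketch (illustrative, kept out of the build; handler names are
* placeholders and signatures are shown without the optional user_data
* argument): the longest matching prefix wins, so a request for
* "/api/v1/users" goes to the "/api" handler rather than the listener's
* default handler.
*/
#if 0
static void example_routing(struct mg_mgr *mgr,
mg_event_handler_t root_handler,
mg_event_handler_t api_handler) {
struct mg_connection *lc = mg_bind(mgr, "8000", root_handler);
mg_set_protocol_http_websocket(lc);
mg_register_http_endpoint(lc, "/api", api_handler);
}
#endif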
static void mg_http_call_endpoint_handler(struct mg_connection *nc, int ev,
struct http_message *hm) {
struct mg_http_proto_data *pd = mg_http_get_proto_data(nc);
void *user_data = nc->user_data;
if (ev == MG_EV_HTTP_REQUEST) {
struct mg_http_endpoint *ep =
mg_http_get_endpoint_handler(nc->listener, &hm->uri);
if (ep != NULL) {
pd->endpoint_handler = ep->handler;
#if MG_ENABLE_CALLBACK_USERDATA
user_data = ep->user_data;
#endif
}
}
mg_call(nc, pd->endpoint_handler ? pd->endpoint_handler : nc->handler,
user_data, ev, hm);
}
#if MG_ENABLE_HTTP_STREAMING_MULTIPART
static void mg_http_multipart_continue(struct mg_connection *nc);
static void mg_http_multipart_begin(struct mg_connection *nc,
struct http_message *hm, int req_len);
#endif
/*
* The lx106 compiler has a bug (TODO(mkm): report and insert tracking bug
* here): if a big structure is declared in a big function, lx106 gcc makes
* it even bigger, rounding the frame up to 4k from ~700 bytes of actual size.
*/
#ifdef __xtensa__
static void mg_http_handler2(struct mg_connection *nc, int ev,
void *ev_data MG_UD_ARG(void *user_data),
struct http_message *hm) __attribute__((noinline));
void mg_http_handler(struct mg_connection *nc, int ev,
void *ev_data MG_UD_ARG(void *user_data)) {
struct http_message hm;
mg_http_handler2(nc, ev, ev_data MG_UD_ARG(user_data), &hm);
}
static void mg_http_handler2(struct mg_connection *nc, int ev,
void *ev_data MG_UD_ARG(void *user_data),
struct http_message *hm) {
#else /* !__XTENSA__ */
void mg_http_handler(struct mg_connection *nc, int ev,
void *ev_data MG_UD_ARG(void *user_data)) {
struct http_message shm;
struct http_message *hm = &shm;
#endif /* __XTENSA__ */
struct mg_http_proto_data *pd = mg_http_get_proto_data(nc);
struct mbuf *io = &nc->recv_mbuf;
int req_len;
const int is_req = (nc->listener != NULL);
#if MG_ENABLE_HTTP_WEBSOCKET
struct mg_str *vec;
#endif
if (ev == MG_EV_CLOSE) {
#if MG_ENABLE_HTTP_CGI
/* Close associated CGI forwarder connection */
if (pd->cgi.cgi_nc != NULL) {
pd->cgi.cgi_nc->user_data = NULL;
pd->cgi.cgi_nc->flags |= MG_F_CLOSE_IMMEDIATELY;
}
#endif
#if MG_ENABLE_HTTP_STREAMING_MULTIPART
if (pd->mp_stream.boundary != NULL) {
/*
* A multipart message is in progress, but the connection is closed.
* Finish the part and the request with an error flag.
*/
struct mg_http_multipart_part mp;
memset(&mp, 0, sizeof(mp));
mp.status = -1;
mp.var_name = pd->mp_stream.var_name;
mp.file_name = pd->mp_stream.file_name;
mg_call(nc, (pd->endpoint_handler ? pd->endpoint_handler : nc->handler),
nc->user_data, MG_EV_HTTP_PART_END, &mp);
mp.var_name = NULL;
mp.file_name = NULL;
mg_call(nc, (pd->endpoint_handler ? pd->endpoint_handler : nc->handler),
nc->user_data, MG_EV_HTTP_MULTIPART_REQUEST_END, &mp);
} else
#endif
if (io->len > 0 && mg_parse_http(io->buf, io->len, hm, is_req) > 0) {
/*
* For HTTP messages without Content-Length, always deliver the HTTP
* message event before the MG_EV_CLOSE event.
*/
int ev2 = is_req ? MG_EV_HTTP_REQUEST : MG_EV_HTTP_REPLY;
hm->message.len = io->len;
hm->body.len = io->buf + io->len - hm->body.p;
mg_http_call_endpoint_handler(nc, ev2, hm);
}
}
#if MG_ENABLE_FILESYSTEM
if (pd->file.fp != NULL) {
mg_http_transfer_file_data(nc);
}
#endif
mg_call(nc, nc->handler, nc->user_data, ev, ev_data);
if (ev == MG_EV_RECV) {
struct mg_str *s;
#if MG_ENABLE_HTTP_STREAMING_MULTIPART
if (pd->mp_stream.boundary != NULL) {
mg_http_multipart_continue(nc);
return;
}
#endif /* MG_ENABLE_HTTP_STREAMING_MULTIPART */
req_len = mg_parse_http(io->buf, io->len, hm, is_req);
if (req_len > 0 &&
(s = mg_get_http_header(hm, "Transfer-Encoding")) != NULL &&
mg_vcasecmp(s, "chunked") == 0) {
mg_handle_chunked(nc, hm, io->buf + req_len, io->len - req_len);
}
#if MG_ENABLE_HTTP_STREAMING_MULTIPART
if (req_len > 0 && (s = mg_get_http_header(hm, "Content-Type")) != NULL &&
s->len >= 9 && strncmp(s->p, "multipart", 9) == 0) {
mg_http_multipart_begin(nc, hm, req_len);
mg_http_multipart_continue(nc);
return;
}
#endif /* MG_ENABLE_HTTP_STREAMING_MULTIPART */
/* TODO(alashkin): refactor this ifelseifelseifelseifelse */
if ((req_len < 0 ||
(req_len == 0 && io->len >= MG_MAX_HTTP_REQUEST_SIZE))) {
DBG(("invalid request"));
nc->flags |= MG_F_CLOSE_IMMEDIATELY;
} else if (req_len == 0) {
/* Do nothing, request is not yet fully buffered */
}
#if MG_ENABLE_HTTP_WEBSOCKET
else if (nc->listener == NULL &&
mg_get_http_header(hm, "Sec-WebSocket-Accept")) {
/* We're websocket client, got handshake response from server. */
/* TODO(lsm): check the validity of accept Sec-WebSocket-Accept */
mbuf_remove(io, req_len);
nc->proto_handler = mg_ws_handler;
nc->flags |= MG_F_IS_WEBSOCKET;
mg_call(nc, nc->handler, nc->user_data, MG_EV_WEBSOCKET_HANDSHAKE_DONE,
NULL);
mg_ws_handler(nc, MG_EV_RECV, ev_data MG_UD_ARG(user_data));
} else if (nc->listener != NULL &&
(vec = mg_get_http_header(hm, "Sec-WebSocket-Key")) != NULL) {
struct mg_http_endpoint *ep;
/* This is a websocket request. Switch protocol handlers. */
mbuf_remove(io, req_len);
nc->proto_handler = mg_ws_handler;
nc->flags |= MG_F_IS_WEBSOCKET;
/*
* If we have a handler set up with mg_register_http_endpoint(),
* deliver subsequent websocket events to this handler after the
* protocol switch.
*/
ep = mg_http_get_endpoint_handler(nc->listener, &hm->uri);
if (ep != NULL) {
nc->handler = ep->handler;
#if MG_ENABLE_CALLBACK_USERDATA
nc->user_data = ep->user_data;
#endif
}
/* Send handshake */
mg_call(nc, nc->handler, nc->user_data, MG_EV_WEBSOCKET_HANDSHAKE_REQUEST,
hm);
if (!(nc->flags & (MG_F_CLOSE_IMMEDIATELY | MG_F_SEND_AND_CLOSE))) {
if (nc->send_mbuf.len == 0) {
mg_ws_handshake(nc, vec);
}
mg_call(nc, nc->handler, nc->user_data, MG_EV_WEBSOCKET_HANDSHAKE_DONE,
NULL);
mg_ws_handler(nc, MG_EV_RECV, ev_data MG_UD_ARG(user_data));
}
}
#endif /* MG_ENABLE_HTTP_WEBSOCKET */
else if (hm->message.len <= io->len) {
int trigger_ev = nc->listener ? MG_EV_HTTP_REQUEST : MG_EV_HTTP_REPLY;
char addr[32];
mg_sock_addr_to_str(&nc->sa, addr, sizeof(addr),
MG_SOCK_STRINGIFY_IP | MG_SOCK_STRINGIFY_PORT);
DBG(("%p %s %.*s %.*s", nc, addr, (int) hm->method.len, hm->method.p,
(int) hm->uri.len, hm->uri.p));
/* Whole HTTP message is fully buffered, call event handler */
#if MG_ENABLE_JAVASCRIPT
v7_val_t v1, v2, headers, req, args, res;
struct v7 *v7 = nc->mgr->v7;
const char *ev_name = trigger_ev == MG_EV_HTTP_REPLY ? "onsnd" : "onrcv";
int i, js_callback_handled_request = 0;
if (v7 != NULL) {
/* Lookup JS callback */
v1 = v7_get(v7, v7_get_global(v7), "Http", ~0);
v2 = v7_get(v7, v1, ev_name, ~0);
/* Create callback params. TODO(lsm): own/disown those */
args = v7_mk_array(v7);
req = v7_mk_object(v7);
headers = v7_mk_object(v7);
/* Populate request object */
v7_set(v7, req, "method", ~0,
v7_mk_string(v7, hm->method.p, hm->method.len, 1));
v7_set(v7, req, "uri", ~0, v7_mk_string(v7, hm->uri.p, hm->uri.len, 1));
v7_set(v7, req, "body", ~0,
v7_mk_string(v7, hm->body.p, hm->body.len, 1));
v7_set(v7, req, "headers", ~0, headers);
for (i = 0; hm->header_names[i].len > 0; i++) {
const struct mg_str *name = &hm->header_names[i];
const struct mg_str *value = &hm->header_values[i];
v7_set(v7, headers, name->p, name->len,
v7_mk_string(v7, value->p, value->len, 1));
}
/* Invoke callback. TODO(lsm): report errors */
v7_array_push(v7, args, v7_mk_foreign(v7, nc));
v7_array_push(v7, args, req);
if (v7_apply(v7, v2, V7_UNDEFINED, args, &res) == V7_OK &&
v7_is_truthy(v7, res)) {
js_callback_handled_request++;
}
}
/* If JS callback returns true, stop request processing */
if (js_callback_handled_request) {
nc->flags |= MG_F_SEND_AND_CLOSE;
} else {
mg_http_call_endpoint_handler(nc, trigger_ev, hm);
}
#else
mg_http_call_endpoint_handler(nc, trigger_ev, hm);
#endif
mbuf_remove(io, hm->message.len);
}
}
(void) pd;
}
static size_t mg_get_line_len(const char *buf, size_t buf_len) {
size_t len = 0;
while (len < buf_len && buf[len] != '\n') len++;
return len == buf_len ? 0 : len + 1;
}
#if MG_ENABLE_HTTP_STREAMING_MULTIPART
static void mg_http_multipart_begin(struct mg_connection *nc,
struct http_message *hm, int req_len) {
struct mg_http_proto_data *pd = mg_http_get_proto_data(nc);
struct mg_str *ct;
struct mbuf *io = &nc->recv_mbuf;
void *user_data = nc->user_data;
char boundary[100];
int boundary_len;
ct = mg_get_http_header(hm, "Content-Type");
if (ct == NULL) {
/* We need more data, or it isn't a multipart message */
goto exit_mp;
}
/* Content-type should start with "multipart" */
if (ct->len < 9 || strncmp(ct->p, "multipart", 9) != 0) {
goto exit_mp;
}
boundary_len =
mg_http_parse_header(ct, "boundary", boundary, sizeof(boundary));
if (boundary_len == 0) {
/*
* Content type is multipart, but there is no boundary;
* probably a malformed request.
*/
nc->flags |= MG_F_CLOSE_IMMEDIATELY;
DBG(("invalid request"));
goto exit_mp;
}
/* If we reached this point, this is a multipart request. */
if (pd->mp_stream.boundary != NULL) {
/*
* Another streaming request was already in progress;
* this looks like a protocol error.
*/
nc->flags |= MG_F_CLOSE_IMMEDIATELY;
} else {
struct mg_http_endpoint *ep = NULL;
pd->mp_stream.state = MPS_BEGIN;
pd->mp_stream.boundary = strdup(boundary);
pd->mp_stream.boundary_len = strlen(boundary);
pd->mp_stream.var_name = pd->mp_stream.file_name = NULL;
pd->endpoint_handler = nc->handler;
ep = mg_http_get_endpoint_handler(nc->listener, &hm->uri);
if (ep != NULL) {
pd->endpoint_handler = ep->handler;
#if MG_ENABLE_CALLBACK_USERDATA
user_data = ep->user_data;
#endif
}
mg_call(nc, pd->endpoint_handler, user_data, MG_EV_HTTP_MULTIPART_REQUEST,
hm);
mbuf_remove(io, req_len);
}
exit_mp:
;
}
#define CONTENT_DISPOSITION "Content-Disposition: "
static void mg_http_multipart_call_handler(struct mg_connection *c, int ev,
const char *data, size_t data_len) {
struct mg_http_multipart_part mp;
struct mg_http_proto_data *pd = mg_http_get_proto_data(c);
memset(&mp, 0, sizeof(mp));
mp.var_name = pd->mp_stream.var_name;
mp.file_name = pd->mp_stream.file_name;
mp.user_data = pd->mp_stream.user_data;
mp.data.p = data;
mp.data.len = data_len;
mg_call(c, pd->endpoint_handler, c->user_data, ev, &mp);
pd->mp_stream.user_data = mp.user_data;
}
static int mg_http_multipart_got_chunk(struct mg_connection *c) {
struct mg_http_proto_data *pd = mg_http_get_proto_data(c);
struct mbuf *io = &c->recv_mbuf;
mg_http_multipart_call_handler(c, MG_EV_HTTP_PART_DATA, io->buf,
pd->mp_stream.prev_io_len);
mbuf_remove(io, pd->mp_stream.prev_io_len);
pd->mp_stream.prev_io_len = 0;
pd->mp_stream.state = MPS_WAITING_FOR_CHUNK;
return 0;
}
static int mg_http_multipart_finalize(struct mg_connection *c) {
struct mg_http_proto_data *pd = mg_http_get_proto_data(c);
mg_http_multipart_call_handler(c, MG_EV_HTTP_PART_END, NULL, 0);
MG_FREE((void *) pd->mp_stream.file_name);
pd->mp_stream.file_name = NULL;
MG_FREE((void *) pd->mp_stream.var_name);
pd->mp_stream.var_name = NULL;
mg_http_multipart_call_handler(c, MG_EV_HTTP_MULTIPART_REQUEST_END, NULL, 0);
mg_http_free_proto_data_mp_stream(&pd->mp_stream);
pd->mp_stream.state = MPS_FINISHED;
return 1;
}
static int mg_http_multipart_wait_for_boundary(struct mg_connection *c) {
const char *boundary;
struct mbuf *io = &c->recv_mbuf;
struct mg_http_proto_data *pd = mg_http_get_proto_data(c);
if ((int) io->len < pd->mp_stream.boundary_len + 2) {
return 0;
}
boundary = c_strnstr(io->buf, pd->mp_stream.boundary, io->len);
if (boundary != NULL) {
const char *boundary_end = (boundary + pd->mp_stream.boundary_len);
if (io->len - (boundary_end - io->buf) < 4) {
return 0;
}
if (strncmp(boundary_end, "--\r\n", 4) == 0) {
pd->mp_stream.state = MPS_FINALIZE;
mbuf_remove(io, (boundary_end - io->buf) + 4);
} else {
pd->mp_stream.state = MPS_GOT_BOUNDARY;
}
} else {
return 0;
}
return 1;
}
static int mg_http_multipart_process_boundary(struct mg_connection *c) {
int data_size;
const char *boundary, *block_begin;
struct mbuf *io = &c->recv_mbuf;
struct mg_http_proto_data *pd = mg_http_get_proto_data(c);
char file_name[100], var_name[100];
int line_len;
boundary = c_strnstr(io->buf, pd->mp_stream.boundary, io->len);
block_begin = boundary + pd->mp_stream.boundary_len + 2;
data_size = io->len - (block_begin - io->buf);
while (data_size > 0 &&
(line_len = mg_get_line_len(block_begin, data_size)) != 0) {
if (line_len > (int) sizeof(CONTENT_DISPOSITION) &&
mg_ncasecmp(block_begin, CONTENT_DISPOSITION,
sizeof(CONTENT_DISPOSITION) - 1) == 0) {
struct mg_str header;
header.p = block_begin + sizeof(CONTENT_DISPOSITION) - 1;
header.len = line_len - sizeof(CONTENT_DISPOSITION) - 1;
mg_http_parse_header(&header, "name", var_name, sizeof(var_name) - 2);
mg_http_parse_header(&header, "filename", file_name,
sizeof(file_name) - 2);
block_begin += line_len;
data_size -= line_len;
continue;
}
if (line_len == 2 && mg_ncasecmp(block_begin, "\r\n", 2) == 0) {
mbuf_remove(io, block_begin - io->buf + 2);
if (pd->mp_stream.processing_part != 0) {
mg_http_multipart_call_handler(c, MG_EV_HTTP_PART_END, NULL, 0);
}
MG_FREE((void *) pd->mp_stream.file_name);
pd->mp_stream.file_name = strdup(file_name);
MG_FREE((void *) pd->mp_stream.var_name);
pd->mp_stream.var_name = strdup(var_name);
mg_http_multipart_call_handler(c, MG_EV_HTTP_PART_BEGIN, NULL, 0);
pd->mp_stream.state = MPS_WAITING_FOR_CHUNK;
pd->mp_stream.processing_part++;
return 1;
}
block_begin += line_len;
}
pd->mp_stream.state = MPS_WAITING_FOR_BOUNDARY;
return 0;
}
static int mg_http_multipart_continue_wait_for_chunk(struct mg_connection *c) {
struct mg_http_proto_data *pd = mg_http_get_proto_data(c);
struct mbuf *io = &c->recv_mbuf;
const char *boundary;
if ((int) io->len < pd->mp_stream.boundary_len + 6 /* \r\n, --, -- */) {
return 0;
}
boundary = c_strnstr(io->buf, pd->mp_stream.boundary, io->len);
if (boundary == NULL && pd->mp_stream.prev_io_len == 0) {
pd->mp_stream.prev_io_len = io->len;
return 0;
} else if (boundary == NULL &&
(int) io->len >
pd->mp_stream.prev_io_len + pd->mp_stream.boundary_len + 4) {
pd->mp_stream.state = MPS_GOT_CHUNK;
return 1;
} else if (boundary != NULL) {
int data_size = (boundary - io->buf - 4);
mg_http_multipart_call_handler(c, MG_EV_HTTP_PART_DATA, io->buf, data_size);
mbuf_remove(io, (boundary - io->buf));
pd->mp_stream.prev_io_len = 0;
pd->mp_stream.state = MPS_WAITING_FOR_BOUNDARY;
return 1;
} else {
return 0;
}
}
static void mg_http_multipart_continue(struct mg_connection *c) {
struct mg_http_proto_data *pd = mg_http_get_proto_data(c);
while (1) {
switch (pd->mp_stream.state) {
case MPS_BEGIN: {
pd->mp_stream.state = MPS_WAITING_FOR_BOUNDARY;
break;
}
case MPS_WAITING_FOR_BOUNDARY: {
if (mg_http_multipart_wait_for_boundary(c) == 0) {
return;
}
break;
}
case MPS_GOT_BOUNDARY: {
if (mg_http_multipart_process_boundary(c) == 0) {
return;
}
break;
}
case MPS_WAITING_FOR_CHUNK: {
if (mg_http_multipart_continue_wait_for_chunk(c) == 0) {
return;
}
break;
}
case MPS_GOT_CHUNK: {
if (mg_http_multipart_got_chunk(c) == 0) {
return;
}
break;
}
case MPS_FINALIZE: {
if (mg_http_multipart_finalize(c) == 0) {
return;
}
break;
}
case MPS_FINISHED: {
mbuf_remove(&c->recv_mbuf, c->recv_mbuf.len);
return;
}
}
}
}
struct file_upload_state {
char *lfn;
size_t num_recd;
FILE *fp;
};
#endif /* MG_ENABLE_HTTP_STREAMING_MULTIPART */
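/*
* Multipart handler sketch (illustrative, kept out of the build): these are
* the events fired by mg_http_multipart_call_handler() above; what the
* application does with each part is up to it.
*/
#if 0
static void example_upload_handler(struct mg_connection *nc, int ev,
void *ev_data MG_UD_ARG(void *user_data)) {
struct mg_http_multipart_part *mp = (struct mg_http_multipart_part *) ev_data;
switch (ev) {
case MG_EV_HTTP_PART_BEGIN:
/* mp->var_name / mp->file_name identify the part; state stored in
* mp->user_data is carried across events of the same part. */
break;
case MG_EV_HTTP_PART_DATA:
/* Consume mp->data.p / mp->data.len. */
break;
case MG_EV_HTTP_PART_END:
/* mp->status < 0 means the connection closed mid-part. */
break;
}
(void) nc;
}
#endif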
void mg_set_protocol_http_websocket(struct mg_connection *nc) {
nc->proto_handler = mg_http_handler;
}
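/*
* Minimal server sketch (illustrative, kept out of the build; signatures are
* shown without the optional user_data argument):
*/
#if 0
static void example_ev_handler(struct mg_connection *nc, int ev,
void *ev_data MG_UD_ARG(void *user_data)) {
if (ev == MG_EV_HTTP_REQUEST) {
struct http_message *hm = (struct http_message *) ev_data;
(void) hm; /* route on hm->uri, serve files, etc. */
mg_http_send_error(nc, 404, NULL);
}
}
static void example_server(void) {
struct mg_mgr mgr;
struct mg_connection *lc;
mg_mgr_init(&mgr, NULL);
lc = mg_bind(&mgr, "8000", example_ev_handler);
mg_set_protocol_http_websocket(lc);
for (;;) mg_mgr_poll(&mgr, 1000);
}
#endif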
const char *mg_status_message(int status_code) {
switch (status_code) {
case 206:
return "Partial Content";
case 301:
return "Moved";
case 302:
return "Found";
case 400:
return "Bad Request";
case 401:
return "Unauthorized";
case 403:
return "Forbidden";
case 404:
return "Not Found";
case 416:
return "Requested Range Not Satisfiable";
case 418:
return "I'm a teapot";
case 500:
return "Internal Server Error";
case 502:
return "Bad Gateway";
case 503:
return "Service Unavailable";
#if MG_ENABLE_EXTRA_ERRORS_DESC
case 100:
return "Continue";
case 101:
return "Switching Protocols";
case 102:
return "Processing";
case 200:
return "OK";
case 201:
return "Created";
case 202:
return "Accepted";
case 203:
return "Non-Authoritative Information";
case 204:
return "No Content";
case 205:
return "Reset Content";
case 207:
return "Multi-Status";
case 208:
return "Already Reported";
case 226:
return "IM Used";
case 300:
return "Multiple Choices";
case 303:
return "See Other";
case 304:
return "Not Modified";
case 305:
return "Use Proxy";
case 306:
return "Switch Proxy";
case 307:
return "Temporary Redirect";
case 308:
return "Permanent Redirect";
case 402:
return "Payment Required";
case 405:
return "Method Not Allowed";
case 406:
return "Not Acceptable";
case 407:
return "Proxy Authentication Required";
case 408:
return "Request Timeout";
case 409:
return "Conflict";
case 410:
return "Gone";
case 411:
return "Length Required";
case 412:
return "Precondition Failed";
case 413:
return "Payload Too Large";
case 414:
return "URI Too Long";
case 415:
return "Unsupported Media Type";
case 417:
return "Expectation Failed";
case 422:
return "Unprocessable Entity";
case 423:
return "Locked";
case 424:
return "Failed Dependency";
case 426:
return "Upgrade Required";
case 428:
return "Precondition Required";
case 429:
return "Too Many Requests";
case 431:
return "Request Header Fields Too Large";
case 451:
return "Unavailable For Legal Reasons";
case 501:
return "Not Implemented";
case 504:
return "Gateway Timeout";
case 505:
return "HTTP Version Not Supported";
case 506:
return "Variant Also Negotiates";
case 507:
return "Insufficient Storage";
case 508:
return "Loop Detected";
case 510:
return "Not Extended";
case 511:
return "Network Authentication Required";
#endif /* MG_ENABLE_EXTRA_ERRORS_DESC */
default:
return "OK";
}
}
void mg_send_response_line_s(struct mg_connection *nc, int status_code,
const struct mg_str extra_headers) {
mg_printf(nc, "HTTP/1.1 %d %s\r\nServer: %s\r\n", status_code,
mg_status_message(status_code), mg_version_header);
if (extra_headers.len > 0) {
mg_printf(nc, "%.*s\r\n", (int) extra_headers.len, extra_headers.p);
}
}
void mg_send_response_line(struct mg_connection *nc, int status_code,
const char *extra_headers) {
mg_send_response_line_s(nc, status_code, mg_mk_str(extra_headers));
}
void mg_http_send_redirect(struct mg_connection *nc, int status_code,
const struct mg_str location,
const struct mg_str extra_headers) {
char bbody[100], *pbody = bbody;
int bl = mg_asprintf(&pbody, sizeof(bbody),
"<p>Moved <a href='%.*s'>here</a>.\r\n",
(int) location.len, location.p);
char bhead[150], *phead = bhead;
mg_asprintf(&phead, sizeof(bhead),
"Location: %.*s\r\n"
"Content-Type: text/html\r\n"
"Content-Length: %d\r\n"
"Cache-Control: no-cache\r\n"
"%.*s%s",
(int) location.len, location.p, bl, (int) extra_headers.len,
extra_headers.p, (extra_headers.len > 0 ? "\r\n" : ""));
mg_send_response_line(nc, status_code, phead);
if (phead != bhead) MG_FREE(phead);
mg_send(nc, pbody, bl);
if (pbody != bbody) MG_FREE(pbody);
}
void mg_send_head(struct mg_connection *c, int status_code,
int64_t content_length, const char *extra_headers) {
mg_send_response_line(c, status_code, extra_headers);
if (content_length < 0) {
mg_printf(c, "%s", "Transfer-Encoding: chunked\r\n");
} else {
mg_printf(c, "Content-Length: %" INT64_FMT "\r\n", content_length);
}
mg_send(c, "\r\n", 2);
}
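/*
* Reply sketch (illustrative, kept out of the build): a non-negative
* content_length emits a Content-Length header; passing -1 switches the
* response to chunked transfer encoding instead.
*/
#if 0
static void example_reply(struct mg_connection *nc) {
const char *body = "hello";
mg_send_head(nc, 200, (int64_t) strlen(body), "Content-Type: text/plain");
mg_send(nc, body, strlen(body));
}
#endif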
void mg_http_send_error(struct mg_connection *nc, int code,
const char *reason) {
if (!reason) reason = mg_status_message(code);
LOG(LL_DEBUG, ("%p %d %s", nc, code, reason));
mg_send_head(nc, code, strlen(reason),
"Content-Type: text/plain\r\nConnection: close");
mg_send(nc, reason, strlen(reason));
nc->flags |= MG_F_SEND_AND_CLOSE;
}
#if MG_ENABLE_FILESYSTEM
static void mg_http_construct_etag(char *buf, size_t buf_len,
const cs_stat_t *st) {
snprintf(buf, buf_len, "\"%lx.%" INT64_FMT "\"", (unsigned long) st->st_mtime,
(int64_t) st->st_size);
}
#ifndef WINCE
static void mg_gmt_time_string(char *buf, size_t buf_len, time_t *t) {
strftime(buf, buf_len, "%a, %d %b %Y %H:%M:%S GMT", gmtime(t));
}
#else
/* See wince_lib.c for the WindowsCE implementation */
static void mg_gmt_time_string(char *buf, size_t buf_len, time_t *t);
#endif
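/* Returns the number of range values parsed: 2 for "bytes=A-B", 1 for the
* open-ended form "bytes=A-", 0 on parse failure. */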
static int mg_http_parse_range_header(const struct mg_str *header, int64_t *a,
int64_t *b) {
/*
* There is no snscanf. Headers are not guaranteed to be NUL-terminated,
* so we have this. Ugh.
*/
int result;
char *p = (char *) MG_MALLOC(header->len + 1);
if (p == NULL) return 0;
memcpy(p, header->p, header->len);
p[header->len] = '\0';
result = sscanf(p, "bytes=%" INT64_FMT "-%" INT64_FMT, a, b);
MG_FREE(p);
return result;
}
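/*
* Serve a single file: handles the Range header (replying 206 or 416 as
* appropriate), keep-alive negotiation, ETag and Last-Modified headers,
* then streams the body from the FILE pointer stashed in the connection's
* proto data.
*/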
void mg_http_serve_file(struct mg_connection *nc, struct http_message *hm,
const char *path, const struct mg_str mime_type,
const struct mg_str extra_headers) {
struct mg_http_proto_data *pd = mg_http_get_proto_data(nc);
cs_stat_t st;
LOG(LL_DEBUG, ("%p [%s] %.*s", nc, path, (int) mime_type.len, mime_type.p));
if (mg_stat(path, &st) != 0 || (pd->file.fp = mg_fopen(path, "rb")) == NULL) {
int code, err = mg_get_errno();
switch (err) {
case EACCES:
code = 403;
break;
case ENOENT:
code = 404;
break;
default:
code = 500;
}
mg_http_send_error(nc, code, "Open failed");
} else {
char etag[50], current_time[50], last_modified[50], range[70];
time_t t = (time_t) mg_time();
int64_t r1 = 0, r2 = 0, cl = st.st_size;
struct mg_str *range_hdr = mg_get_http_header(hm, "Range");
int n, status_code = 200;
/* Handle Range header */
range[0] = '\0';
if (range_hdr != NULL &&
(n = mg_http_parse_range_header(range_hdr, &r1, &r2)) > 0 && r1 >= 0 &&
r2 >= 0) {
/* If range is specified like "400-", set second limit to content len */
if (n == 1) {
r2 = cl - 1;
}
if (r1 > r2 || r2 >= cl) {
status_code = 416;
cl = 0;
snprintf(range, sizeof(range),
"Content-Range: bytes */%" INT64_FMT "\r\n",
(int64_t) st.st_size);
} else {
status_code = 206;
cl = r2 - r1 + 1;
snprintf(range, sizeof(range), "Content-Range: bytes %" INT64_FMT
"-%" INT64_FMT "/%" INT64_FMT "\r\n",
r1, r1 + cl - 1, (int64_t) st.st_size);
#if _FILE_OFFSET_BITS == 64 || _POSIX_C_SOURCE >= 200112L || \
_XOPEN_SOURCE >= 600
fseeko(pd->file.fp, r1, SEEK_SET);
#else
fseek(pd->file.fp, (long) r1, SEEK_SET);
#endif
}
}
#if !MG_DISABLE_HTTP_KEEP_ALIVE
{
struct mg_str *conn_hdr = mg_get_http_header(hm, "Connection");
if (conn_hdr != NULL) {
pd->file.keepalive = (mg_vcasecmp(conn_hdr, "keep-alive") == 0);
} else {
pd->file.keepalive = (mg_vcmp(&hm->proto, "HTTP/1.1") == 0);
}
}
#endif
mg_http_construct_etag(etag, sizeof(etag), &st);
mg_gmt_time_string(current_time, sizeof(current_time), &t);
mg_gmt_time_string(last_modified, sizeof(last_modified), &st.st_mtime);
/*
* Content length is cast to size_t because:
* 1) that's the maximum buffer size anyway
* 2) the ESP8266 RTOS SDK newlib vprintf cannot handle a 64-bit arg in a
* non-last position
* TODO(mkm): fix ESP8266 RTOS SDK
*/
mg_send_response_line_s(nc, status_code, extra_headers);
mg_printf(nc,
"Date: %s\r\n"
"Last-Modified: %s\r\n"
"Accept-Ranges: bytes\r\n"
"Content-Type: %.*s\r\n"
"Connection: %s\r\n"
"Content-Length: %" SIZE_T_FMT
"\r\n"
"%sEtag: %s\r\n\r\n",
current_time, last_modified, (int) mime_type.len, mime_type.p,
(pd->file.keepalive ? "keep-alive" : "close"), (size_t) cl, range,
etag);
pd->file.cl = cl;
pd->file.type = DATA_FILE;
mg_http_transfer_file_data(nc);
}
}
static void mg_http_serve_file2(struct mg_connection *nc, const char *path,
struct http_message *hm,
struct mg_serve_http_opts *opts) {
#if MG_ENABLE_HTTP_SSI
if (mg_match_prefix(opts->ssi_pattern, strlen(opts->ssi_pattern), path) > 0) {
mg_handle_ssi_request(nc, hm, path, opts);
return;
}
#endif
mg_http_serve_file(nc, hm, path, mg_get_mime_type(path, "text/plain", opts),
mg_mk_str(opts->extra_headers));
}
#endif
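/*
* Percent-decode `src` into `dst`, also mapping '+' to ' ' when
* `is_form_url_encoded` is set. Returns the decoded length, or -1 on a
* malformed %XX sequence or when `dst` is too small for the result.
*/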
int mg_url_decode(const char *src, int src_len, char *dst, int dst_len,
int is_form_url_encoded) {
int i, j, a, b;
#define HEXTOI(x) (isdigit(x) ? x - '0' : x - 'W')
for (i = j = 0; i < src_len && j < dst_len - 1; i++, j++) {
if (src[i] == '%') {
if (i < src_len - 2 && isxdigit(*(const unsigned char *) (src + i + 1)) &&
isxdigit(*(const unsigned char *) (src + i + 2))) {
a = tolower(*(const unsigned char *) (src + i + 1));
b = tolower(*(const unsigned char *) (src + i + 2));
dst[j] = (char) ((HEXTOI(a) << 4) | HEXTOI(b));
i += 2;
} else {
return -1;
}
} else if (is_form_url_encoded && src[i] == '+') {
dst[j] = ' ';
} else {
dst[j] = src[i];
}
}
dst[j] = '\0'; /* Null-terminate the destination */
return i >= src_len ? j : -1;
}
int mg_get_http_var(const struct mg_str *buf, const char *name, char *dst,
size_t dst_len) {
const char *p, *e, *s;
size_t name_len;
int len;
/*
* According to the documentation, the function returns a negative
* value in case of error. For debug purposes it returns:
* -1 - src is wrong (NULL)
* -2 - dst is wrong (NULL)
* -3 - failed to decode the URL, or dst is too small
*/
if (dst == NULL || dst_len == 0) {
len = -2;
} else if (buf->p == NULL || name == NULL || buf->len == 0) {
len = -1;
dst[0] = '\0';
} else {
name_len = strlen(name);
e = buf->p + buf->len;
len = 0;
dst[0] = '\0';
for (p = buf->p; p + name_len < e; p++) {
if ((p == buf->p || p[-1] == '&') && p[name_len] == '=' &&
!mg_ncasecmp(name, p, name_len)) {
p += name_len + 1;
s = (const char *) memchr(p, '&', (size_t)(e - p));
if (s == NULL) {
s = e;
}
len = mg_url_decode(p, (size_t)(s - p), dst, dst_len, 1);
/* -1 means: failed to decode or dst is too small */
if (len == -1) {
len = -3;
}
break;
}
}
}
return len;
}
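/* Emit one HTTP chunk: a hex size line, the payload, and a trailing CRLF.
* Calling this with len == 0 sends the terminating zero-length chunk. */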
void mg_send_http_chunk(struct mg_connection *nc, const char *buf, size_t len) {
char chunk_size[50];
int n;
n = snprintf(chunk_size, sizeof(chunk_size), "%lX\r\n", (unsigned long) len);
mg_send(nc, chunk_size, n);
mg_send(nc, buf, len);
mg_send(nc, "\r\n", 2);
}
void mg_printf_http_chunk(struct mg_connection *nc, const char *fmt, ...) {
char mem[MG_VPRINTF_BUFFER_SIZE], *buf = mem;
int len;
va_list ap;
va_start(ap, fmt);
len = mg_avprintf(&buf, sizeof(mem), fmt, ap);
va_end(ap);
if (len >= 0) {
mg_send_http_chunk(nc, buf, len);
}
/* LCOV_EXCL_START */
if (buf != mem && buf != NULL) {
MG_FREE(buf);
}
/* LCOV_EXCL_STOP */
}
void mg_printf_html_escape(struct mg_connection *nc, const char *fmt, ...) {
char mem[MG_VPRINTF_BUFFER_SIZE], *buf = mem;
int i, j, len;
va_list ap;
va_start(ap, fmt);
len = mg_avprintf(&buf, sizeof(mem), fmt, ap);
va_end(ap);
if (len >= 0) {
for (i = j = 0; i < len; i++) {
if (buf[i] == '<' || buf[i] == '>') {
mg_send(nc, buf + j, i - j);
mg_send(nc, buf[i] == '<' ? "&lt;" : "&gt;", 4);
j = i + 1;
}
}
mg_send(nc, buf + j, i - j);
}
/* LCOV_EXCL_START */
if (buf != mem && buf != NULL) {
MG_FREE(buf);
}
/* LCOV_EXCL_STOP */
}
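/*
* Extract the value of `var_name` from a comma/semicolon-separated header
* value (e.g. Content-Disposition or a Digest Authorization header).
* Handles single- and double-quoted values with backslash escapes.
* Returns the value length, or 0 if the variable is absent or `buf` is
* too small.
*/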
int mg_http_parse_header(struct mg_str *hdr, const char *var_name, char *buf,
size_t buf_size) {
int ch = ' ', ch1 = ',', len = 0, n = strlen(var_name);
const char *p, *end = hdr ? hdr->p + hdr->len : NULL, *s = NULL;
if (buf != NULL && buf_size > 0) buf[0] = '\0';
if (hdr == NULL) return 0;
/* Find where variable starts */
for (s = hdr->p; s != NULL && s + n < end; s++) {
if ((s == hdr->p || s[-1] == ch || s[-1] == ch1 || s[-1] == ';') &&
s[n] == '=' && !strncmp(s, var_name, n))
break;
}
if (s != NULL && &s[n + 1] < end) {
s += n + 1;
if (*s == '"' || *s == '\'') {
ch = ch1 = *s++;
}
p = s;
while (p < end && p[0] != ch && p[0] != ch1 && len < (int) buf_size) {
if (ch != ' ' && p[0] == '\\' && p[1] == ch) p++;
buf[len++] = *p++;
}
/* Fail on overflow or an unterminated quote (do not read past `end`) */
if (len >= (int) buf_size || (ch != ' ' && (p >= end || *p != ch))) {
len = 0;
} else {
if (len > 0 && s[len - 1] == ',') len--;
if (len > 0 && s[len - 1] == ';') len--;
buf[len] = '\0';
}
}
return len;
}
int mg_get_http_basic_auth(struct http_message *hm, char *user, size_t user_len,
char *pass, size_t pass_len) {
struct mg_str *hdr = mg_get_http_header(hm, "Authorization");
if (hdr == NULL) return -1;
return mg_parse_http_basic_auth(hdr, user, user_len, pass, pass_len);
}
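/* Decode a "Basic " Authorization header (base64-encoded "user:pass")
* into the caller-provided buffers. Returns 0 on success, -1 otherwise. */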
int mg_parse_http_basic_auth(struct mg_str *hdr, char *user, size_t user_len,
char *pass, size_t pass_len) {
char *buf = NULL;
char fmt[64];
int res = 0;
if (mg_strncmp(*hdr, mg_mk_str("Basic "), 6) != 0) return -1;
buf = (char *) MG_MALLOC(hdr->len);
if (buf == NULL) return -1;
/* Decode only the payload after the 6-byte "Basic " prefix. */
cs_base64_decode((unsigned char *) hdr->p + 6, hdr->len - 6, buf, NULL);
/* e.g. "%123[^:]:%321[^\n]" */
snprintf(fmt, sizeof(fmt), "%%%" SIZE_T_FMT "[^:]:%%%" SIZE_T_FMT "[^\n]",
user_len - 1, pass_len - 1);
if (sscanf(buf, fmt, user, pass) == 0) {
res = -1;
}
MG_FREE(buf);
return res;
}
#if MG_ENABLE_FILESYSTEM
static int mg_is_file_hidden(const char *path,
const struct mg_serve_http_opts *opts,
int exclude_specials) {
const char *p1 = opts->per_directory_auth_file;
const char *p2 = opts->hidden_file_pattern;
/* Strip directory path from the file name */
const char *pdir = strrchr(path, DIRSEP);
if (pdir != NULL) {
path = pdir + 1;
}
return (exclude_specials && (!strcmp(path, ".") || !strcmp(path, ".."))) ||
(p1 != NULL &&
mg_match_prefix(p1, strlen(p1), path) == (int) strlen(p1)) ||
(p2 != NULL && mg_match_prefix(p2, strlen(p2), path) > 0);
}
#if !MG_DISABLE_HTTP_DIGEST_AUTH
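/*
* Compute the RFC 2617 digest response as a hex string:
* HA2 = MD5(method:uri), resp = MD5(HA1:nonce:nc:cnonce:qop:HA2).
*/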
static void mg_mkmd5resp(const char *method, size_t method_len, const char *uri,
size_t uri_len, const char *ha1, size_t ha1_len,
const char *nonce, size_t nonce_len, const char *nc,
size_t nc_len, const char *cnonce, size_t cnonce_len,
const char *qop, size_t qop_len, char *resp) {
static const char colon[] = ":";
static const size_t one = 1;
char ha2[33];
cs_md5(ha2, method, method_len, colon, one, uri, uri_len, NULL);
cs_md5(resp, ha1, ha1_len, colon, one, nonce, nonce_len, colon, one, nc,
nc_len, colon, one, cnonce, cnonce_len, colon, one, qop, qop_len,
colon, one, ha2, sizeof(ha2) - 1, NULL);
}
int mg_http_create_digest_auth_header(char *buf, size_t buf_len,
const char *method, const char *uri,
const char *auth_domain, const char *user,
const char *passwd) {
static const char colon[] = ":", qop[] = "auth";
static const size_t one = 1;
char ha1[33], resp[33], cnonce[40];
snprintf(cnonce, sizeof(cnonce), "%x", (unsigned int) mg_time());
cs_md5(ha1, user, (size_t) strlen(user), colon, one, auth_domain,
(size_t) strlen(auth_domain), colon, one, passwd,
(size_t) strlen(passwd), NULL);
mg_mkmd5resp(method, strlen(method), uri, strlen(uri), ha1, sizeof(ha1) - 1,
cnonce, strlen(cnonce), "1", one, cnonce, strlen(cnonce), qop,
sizeof(qop) - 1, resp);
return snprintf(buf, buf_len,
"Authorization: Digest username=\"%s\","
"realm=\"%s\",uri=\"%s\",qop=%s,nc=1,cnonce=%s,"
"nonce=%s,response=%s\r\n",
user, auth_domain, uri, qop, cnonce, cnonce, resp);
}
/*
* Check for authentication timeout.
* Clients send a timestamp encoded in the nonce. Make sure it is not too old,
* to prevent replay attacks.
* Assumption: nonce is a hexadecimal number of seconds since 1970.
*/
static int mg_check_nonce(const char *nonce) {
unsigned long now = (unsigned long) mg_time();
unsigned long val = (unsigned long) strtoul(nonce, NULL, 16);
return now < val || now - val < 3600;
}
int mg_http_check_digest_auth(struct http_message *hm, const char *auth_domain,
FILE *fp) {
struct mg_str *hdr;
char buf[128], f_user[sizeof(buf)], f_ha1[sizeof(buf)], f_domain[sizeof(buf)];
char user[50], cnonce[33], response[40], uri[200], qop[20], nc[20], nonce[30];
char expected_response[33];
/* Parse "Authorization:" header, fail fast on parse error */
if (hm == NULL || fp == NULL ||
(hdr = mg_get_http_header(hm, "Authorization")) == NULL ||
mg_http_parse_header(hdr, "username", user, sizeof(user)) == 0 ||
mg_http_parse_header(hdr, "cnonce", cnonce, sizeof(cnonce)) == 0 ||
mg_http_parse_header(hdr, "response", response, sizeof(response)) == 0 ||
mg_http_parse_header(hdr, "uri", uri, sizeof(uri)) == 0 ||
mg_http_parse_header(hdr, "qop", qop, sizeof(qop)) == 0 ||
mg_http_parse_header(hdr, "nc", nc, sizeof(nc)) == 0 ||
mg_http_parse_header(hdr, "nonce", nonce, sizeof(nonce)) == 0 ||
mg_check_nonce(nonce) == 0) {
return 0;
}
/*
* Read the passwords file line by line. It should have htdigest format,
* i.e. each line should be a colon-separated sequence:
* USER_NAME:DOMAIN_NAME:HA1_HASH_OF_USER_DOMAIN_AND_PASSWORD
*/
while (fgets(buf, sizeof(buf), fp) != NULL) {
if (sscanf(buf, "%[^:]:%[^:]:%s", f_user, f_domain, f_ha1) == 3 &&
strcmp(user, f_user) == 0 &&
/* NOTE(lsm): due to a bug in MSIE, we do not compare URIs */
strcmp(auth_domain, f_domain) == 0) {
/* User and domain matched, check the password */
mg_mkmd5resp(
hm->method.p, hm->method.len, hm->uri.p,
hm->uri.len + (hm->query_string.len ? hm->query_string.len + 1 : 0),
f_ha1, strlen(f_ha1), nonce, strlen(nonce), nc, strlen(nc), cnonce,
strlen(cnonce), qop, strlen(qop), expected_response);
return mg_casecmp(response, expected_response) == 0;
}
}
/* None of the entries in the passwords file matched - return failure */
return 0;
}
static int mg_is_authorized(struct http_message *hm, const char *path,
int is_directory, const char *domain,
const char *passwords_file,
int is_global_pass_file) {
char buf[MG_MAX_PATH];
const char *p;
FILE *fp;
int authorized = 1;
if (domain != NULL && passwords_file != NULL) {
if (is_global_pass_file) {
fp = mg_fopen(passwords_file, "r");
} else if (is_directory) {
snprintf(buf, sizeof(buf), "%s%c%s", path, DIRSEP, passwords_file);
fp = mg_fopen(buf, "r");
} else {
p = strrchr(path, DIRSEP);
if (p == NULL) p = path;
snprintf(buf, sizeof(buf), "%.*s%c%s", (int) (p - path), path, DIRSEP,
passwords_file);
fp = mg_fopen(buf, "r");
}
if (fp != NULL) {
authorized = mg_http_check_digest_auth(hm, domain, fp);
fclose(fp);
}
}
LOG(LL_DEBUG, ("%s '%s' %d %d", path, passwords_file ? passwords_file : "",
is_global_pass_file, authorized));
return authorized;
}
#else
static int mg_is_authorized(struct http_message *hm, const char *path,
int is_directory, const char *domain,
const char *passwords_file,
int is_global_pass_file) {
(void) hm;
(void) path;
(void) is_directory;
(void) domain;
(void) passwords_file;
(void) is_global_pass_file;
return 1;
}
#endif
#if MG_ENABLE_DIRECTORY_LISTING
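/* Percent-encode `src` into `dst`, passing alphanumerics and the characters
* in `dont_escape` through unchanged. Returns the encoded length. */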
static size_t mg_url_encode(const char *src, size_t s_len, char *dst,
size_t dst_len) {
static const char *dont_escape = "._-$,;~()/";
static const char *hex = "0123456789abcdef";
size_t i = 0, j = 0;
for (i = j = 0; dst_len > 0 && i < s_len && j + 2 < dst_len - 1; i++, j++) {
if (isalnum(*(const unsigned char *) (src + i)) ||
strchr(dont_escape, *(const unsigned char *) (src + i)) != NULL) {
dst[j] = src[i];
} else if (j + 3 < dst_len) {
dst[j] = '%';
dst[j + 1] = hex[(*(const unsigned char *) (src + i)) >> 4];
dst[j + 2] = hex[(*(const unsigned char *) (src + i)) & 0xf];
j += 2;
}
}
dst[j] = '\0';
return j;
}
static void mg_escape(const char *src, char *dst, size_t dst_len) {
size_t n = 0;
while (*src != '\0' && n + 5 < dst_len) {
unsigned char ch = *(unsigned char *) src++;
if (ch == '<') {
n += snprintf(dst + n, dst_len - n, "%s", "&lt;");
} else {
dst[n++] = ch;
}
}
dst[n] = '\0';
}
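/* Emit one listing table row: link, modification time and human-readable
* size. The raw size is kept in the cell's "name" attribute so the
* client-side script can sort rows numerically. */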
static void mg_print_dir_entry(struct mg_connection *nc, const char *file_name,
cs_stat_t *stp) {
char size[64], mod[64], href[MAX_PATH_SIZE * 3], path[MAX_PATH_SIZE];
int64_t fsize = stp->st_size;
int is_dir = S_ISDIR(stp->st_mode);
const char *slash = is_dir ? "/" : "";
if (is_dir) {
snprintf(size, sizeof(size), "%s", "[DIRECTORY]");
} else {
/*
* We use (double) cast below because MSVC 6 compiler cannot
* convert unsigned __int64 to double.
*/
if (fsize < 1024) {
snprintf(size, sizeof(size), "%d", (int) fsize);
} else if (fsize < 0x100000) {
snprintf(size, sizeof(size), "%.1fk", (double) fsize / 1024.0);
} else if (fsize < 0x40000000) {
snprintf(size, sizeof(size), "%.1fM", (double) fsize / 1048576);
} else {
snprintf(size, sizeof(size), "%.1fG", (double) fsize / 1073741824);
}
}
strftime(mod, sizeof(mod), "%d-%b-%Y %H:%M", localtime(&stp->st_mtime));
mg_escape(file_name, path, sizeof(path));
mg_url_encode(file_name, strlen(file_name), href, sizeof(href));
mg_printf_http_chunk(nc,
"<tr><td><a href=\"%s%s\">%s%s</a></td>"
"<td>%s</td><td name=%" INT64_FMT ">%s</td></tr>\n",
href, slash, path, slash, mod, is_dir ? -1 : fsize,
size);
}
static void mg_scan_directory(struct mg_connection *nc, const char *dir,
const struct mg_serve_http_opts *opts,
void (*func)(struct mg_connection *, const char *,
cs_stat_t *)) {
char path[MAX_PATH_SIZE];
cs_stat_t st;
struct dirent *dp;
DIR *dirp;
LOG(LL_DEBUG, ("%p [%s]", nc, dir));
if ((dirp = (opendir(dir))) != NULL) {
while ((dp = readdir(dirp)) != NULL) {
/* Do not show current dir and hidden files */
if (mg_is_file_hidden((const char *) dp->d_name, opts, 1)) {
continue;
}
snprintf(path, sizeof(path), "%s/%s", dir, dp->d_name);
if (mg_stat(path, &st) == 0) {
func(nc, (const char *) dp->d_name, &st);
}
}
closedir(dirp);
} else {
LOG(LL_DEBUG, ("%p opendir(%s) -> %d", nc, dir, mg_get_errno()));
}
}
static void mg_send_directory_listing(struct mg_connection *nc, const char *dir,
struct http_message *hm,
struct mg_serve_http_opts *opts) {
static const char *sort_js_code =
"<script>function srt(tb, sc, so, d) {"
"var tr = Array.prototype.slice.call(tb.rows, 0),"
"tr = tr.sort(function (a, b) { var c1 = a.cells[sc], c2 = b.cells[sc],"
"n1 = c1.getAttribute('name'), n2 = c2.getAttribute('name'), "
"t1 = a.cells[2].getAttribute('name'), "
"t2 = b.cells[2].getAttribute('name'); "
"return so * (t1 < 0 && t2 >= 0 ? -1 : t2 < 0 && t1 >= 0 ? 1 : "
"n1 ? parseInt(n2) - parseInt(n1) : "
"c1.textContent.trim().localeCompare(c2.textContent.trim())); });";
static const char *sort_js_code2 =
"for (var i = 0; i < tr.length; i++) tb.appendChild(tr[i]); "
"if (!d) window.location.hash = ('sc=' + sc + '&so=' + so); "
"};"
"window.onload = function() {"
"var tb = document.getElementById('tb');"
"var m = /sc=([012]).so=(1|-1)/.exec(window.location.hash) || [0, 2, 1];"
"var sc = m[1], so = m[2]; document.onclick = function(ev) { "
"var c = ev.target.rel; if (c) {if (c == sc) so *= -1; srt(tb, c, so); "
"sc = c; ev.preventDefault();}};"
"srt(tb, sc, so, true);"
"}"
"</script>";
mg_send_response_line(nc, 200, opts->extra_headers);
mg_printf(nc, "%s: %s\r\n%s: %s\r\n\r\n", "Transfer-Encoding", "chunked",
"Content-Type", "text/html; charset=utf-8");
mg_printf_http_chunk(
nc,
"<html><head><title>Index of %.*s</title>%s%s"
"<style>th,td {text-align: left; padding-right: 1em; "
"font-family: monospace; }</style></head>\n"
"<body><h1>Index of %.*s</h1>\n<table cellpadding=0><thead>"
"<tr><th><a href=# rel=0>Name</a></th><th>"
"<a href=# rel=1>Modified</a</th>"
"<th><a href=# rel=2>Size</a></th></tr>"
"<tr><td colspan=3><hr></td></tr>\n"
"</thead>\n"
"<tbody id=tb>",
(int) hm->uri.len, hm->uri.p, sort_js_code, sort_js_code2,
(int) hm->uri.len, hm->uri.p);
mg_scan_directory(nc, dir, opts, mg_print_dir_entry);
mg_printf_http_chunk(nc,
"</tbody><tr><td colspan=3><hr></td></tr>\n"
"</table>\n"
"<address>%s</address>\n"
"</body></html>",
mg_version_header);
mg_send_http_chunk(nc, "", 0);
/* TODO(rojer): Remove when cesanta/dev/issues/197 is fixed. */
nc->flags |= MG_F_SEND_AND_CLOSE;
}
#endif /* MG_ENABLE_DIRECTORY_LISTING */
/*
* Given a directory path, find one of the files specified in the
* comma-separated list of index files `list`. The first index file found
* wins. If one is found, `*index_file` is set to its full heap-allocated
* path and the result of `stat()` is stored in `stp`. If no index file is
* found, `*index_file` is set to NULL and `stp` is left unchanged.
*/
MG_INTERNAL void mg_find_index_file(const char *path, const char *list,
char **index_file, cs_stat_t *stp) {
struct mg_str vec;
size_t path_len = strlen(path);
int found = 0;
*index_file = NULL;
/* Traverse index files list. For each entry, append it to the given */
/* path and see if the file exists. If it exists, break the loop */
while ((list = mg_next_comma_list_entry(list, &vec, NULL)) != NULL) {
cs_stat_t st;
size_t len = path_len + 1 + vec.len + 1;
*index_file = (char *) MG_REALLOC(*index_file, len);
if (*index_file == NULL) break;
snprintf(*index_file, len, "%s%c%.*s", path, DIRSEP, (int) vec.len, vec.p);
/* Does it exist? Is it a file? */
if (mg_stat(*index_file, &st) == 0 && S_ISREG(st.st_mode)) {
/* Yes it does, break the loop */
*stp = st;
found = 1;
break;
}
}
if (!found) {
MG_FREE(*index_file);
*index_file = NULL;
}
LOG(LL_DEBUG, ("[%s] [%s]", path, (*index_file ? *index_file : "")));
}
#if MG_ENABLE_HTTP_URL_REWRITES
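/* Handle "%PORT=URL" entries in url_rewrites: if the request arrived on
* the given local port, reply with a 301 redirect to URL plus the original
* URI and stop further processing. */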
static int mg_http_send_port_based_redirect(
struct mg_connection *c, struct http_message *hm,
const struct mg_serve_http_opts *opts) {
const char *rewrites = opts->url_rewrites;
struct mg_str a, b;
char local_port[20] = {'%'};
mg_conn_addr_to_str(c, local_port + 1, sizeof(local_port) - 1,
MG_SOCK_STRINGIFY_PORT);
while ((rewrites = mg_next_comma_list_entry(rewrites, &a, &b)) != NULL) {
if (mg_vcmp(&a, local_port) == 0) {
mg_send_response_line(c, 301, NULL);
mg_printf(c, "Content-Length: 0\r\nLocation: %.*s%.*s\r\n\r\n",
(int) b.len, b.p, (int) (hm->proto.p - hm->uri.p - 1),
hm->uri.p);
return 1;
}
}
return 0;
}
static void mg_reverse_proxy_handler(struct mg_connection *nc, int ev,
void *ev_data MG_UD_ARG(void *user_data)) {
struct http_message *hm = (struct http_message *) ev_data;
struct mg_http_proto_data *pd = mg_http_get_proto_data(nc);
if (pd == NULL || pd->reverse_proxy_data.linked_conn == NULL) {
DBG(("%p: upstream closed", nc));
return;
}
switch (ev) {
case MG_EV_CONNECT:
if (*(int *) ev_data != 0) {
mg_http_send_error(pd->reverse_proxy_data.linked_conn, 502, NULL);
}
break;
/* TODO(mkm): handle streaming */
case MG_EV_HTTP_REPLY:
mg_send(pd->reverse_proxy_data.linked_conn, hm->message.p,
hm->message.len);
pd->reverse_proxy_data.linked_conn->flags |= MG_F_SEND_AND_CLOSE;
nc->flags |= MG_F_CLOSE_IMMEDIATELY;
break;
case MG_EV_CLOSE:
pd->reverse_proxy_data.linked_conn->flags |= MG_F_SEND_AND_CLOSE;
break;
}
#if MG_ENABLE_CALLBACK_USERDATA
(void) user_data;
#endif
}
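/*
* Proxy the request to `upstream`: strip the `mount` prefix from the URI,
* connect to the backend, link the two connections, then replay the request
* with a rewritten Host header, dropping chunked Transfer-Encoding (the
* body is already dechunked) and Expect: 100-continue.
*/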
void mg_http_reverse_proxy(struct mg_connection *nc,
const struct http_message *hm, struct mg_str mount,
struct mg_str upstream) {
struct mg_connection *be;
char burl[256], *purl = burl;
char *addr = NULL;
const char *path = NULL;
int i;
const char *error;
struct mg_connect_opts opts;
memset(&opts, 0, sizeof(opts));
opts.error_string = &error;
mg_asprintf(&purl, sizeof(burl), "%.*s%.*s", (int) upstream.len, upstream.p,
(int) (hm->uri.len - mount.len), hm->uri.p + mount.len);
be = mg_connect_http_base(nc->mgr, MG_CB(mg_reverse_proxy_handler, NULL),
opts, "http://", "https://", purl, &path,
NULL /* user */, NULL /* pass */, &addr);
LOG(LL_DEBUG, ("Proxying %.*s to %s (rule: %.*s)", (int) hm->uri.len,
hm->uri.p, purl, (int) mount.len, mount.p));
if (be == NULL) {
LOG(LL_ERROR, ("Error connecting to %s: %s", purl, error));
mg_http_send_error(nc, 502, NULL);
goto cleanup;
}
/* link connections to each other, they must live and die together */
mg_http_get_proto_data(be)->reverse_proxy_data.linked_conn = nc;
mg_http_get_proto_data(nc)->reverse_proxy_data.linked_conn = be;
/* send request upstream */
mg_printf(be, "%.*s %s HTTP/1.1\r\n", (int) hm->method.len, hm->method.p,
path);
mg_printf(be, "Host: %s\r\n", addr);
for (i = 0; i < MG_MAX_HTTP_HEADERS && hm->header_names[i].len > 0; i++) {
struct mg_str hn = hm->header_names[i];
struct mg_str hv = hm->header_values[i];
/* we rewrite the host header */
if (mg_vcasecmp(&hn, "Host") == 0) continue;
/*
* Don't pass chunked transfer encoding upstream because hm->body has
* already been dechunked by the time we arrive here.
*/
if (mg_vcasecmp(&hn, "Transfer-encoding") == 0 &&
mg_vcasecmp(&hv, "chunked") == 0) {
mg_printf(be, "Content-Length: %" SIZE_T_FMT "\r\n", hm->body.len);
continue;
}
/* We don't support proxying Expect: 100-continue. */
if (mg_vcasecmp(&hn, "Expect") == 0 &&
mg_vcasecmp(&hv, "100-continue") == 0) {
continue;
}
mg_printf(be, "%.*s: %.*s\r\n", (int) hn.len, hn.p, (int) hv.len, hv.p);
}
mg_send(be, "\r\n", 2);
mg_send(be, hm->body.p, hm->body.len);
cleanup:
if (purl != burl) MG_FREE(purl);
}
static int mg_http_handle_forwarding(struct mg_connection *nc,
struct http_message *hm,
const struct mg_serve_http_opts *opts) {
const char *rewrites = opts->url_rewrites;
struct mg_str a, b;
struct mg_str p1 = MG_MK_STR("http://"), p2 = MG_MK_STR("https://");
while ((rewrites = mg_next_comma_list_entry(rewrites, &a, &b)) != NULL) {
if (mg_strncmp(a, hm->uri, a.len) == 0) {
if (mg_strncmp(b, p1, p1.len) == 0 || mg_strncmp(b, p2, p2.len) == 0) {
mg_http_reverse_proxy(nc, hm, a, b);
return 1;
}
}
}
return 0;
}
#endif
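/*
* Map the request URI onto a local filesystem path. Picks the root
* (honoring @host and prefix entries in url_rewrites, falling back to the
* DAV or regular document root), then URL-decodes and validates the path
* one component at a time, rejecting embedded NULs and directory
* separators. On success `*local_path` is heap-allocated and `*remainder`
* holds the unconsumed URI suffix (the PATH_INFO part).
*/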
MG_INTERNAL int mg_uri_to_local_path(struct http_message *hm,
const struct mg_serve_http_opts *opts,
char **local_path,
struct mg_str *remainder) {
int ok = 1;
const char *cp = hm->uri.p, *cp_end = hm->uri.p + hm->uri.len;
struct mg_str root = {NULL, 0};
const char *file_uri_start = cp;
*local_path = NULL;
remainder->p = NULL;
remainder->len = 0;
{ /* 1. Determine which root to use. */
#if MG_ENABLE_HTTP_URL_REWRITES
const char *rewrites = opts->url_rewrites;
#else
const char *rewrites = "";
#endif
struct mg_str *hh = mg_get_http_header(hm, "Host");
struct mg_str a, b;
/* Check rewrites first. */
while ((rewrites = mg_next_comma_list_entry(rewrites, &a, &b)) != NULL) {
if (a.len > 1 && a.p[0] == '@') {
/* Host rewrite. */
if (hh != NULL && hh->len == a.len - 1 &&
mg_ncasecmp(a.p + 1, hh->p, a.len - 1) == 0) {
root = b;
break;
}
} else {
/* Regular rewrite, URI=directory */
int match_len = mg_match_prefix_n(a, hm->uri);
if (match_len > 0) {
file_uri_start = hm->uri.p + match_len;
if (*file_uri_start == '/' || file_uri_start == cp_end) {
/* Match ended at component boundary, ok. */
} else if (*(file_uri_start - 1) == '/') {
/* Pattern ends with '/', backtrack. */
file_uri_start--;
} else {
/* No match: must fall on the component boundary. */
continue;
}
root = b;
break;
}
}
}
/* If no rewrite rules matched, use DAV or regular document root. */
if (root.p == NULL) {
#if MG_ENABLE_HTTP_WEBDAV
if (opts->dav_document_root != NULL && mg_is_dav_request(&hm->method)) {
root.p = opts->dav_document_root;
root.len = strlen(opts->dav_document_root);
} else
#endif
{
root.p = opts->document_root;
root.len = strlen(opts->document_root);
}
}
assert(root.p != NULL && root.len > 0);
}
{ /* 2. Find where in the canonical URI path the local path ends. */
const char *u = file_uri_start + 1;
char *lp = (char *) MG_MALLOC(root.len + hm->uri.len + 1);
char *lp_end = lp + root.len + hm->uri.len + 1;
char *p = lp, *ps;
int exists = 1;
if (lp == NULL) {
ok = 0;
goto out;
}
memcpy(p, root.p, root.len);
p += root.len;
if (*(p - 1) == DIRSEP) p--;
*p = '\0';
ps = p;
/* Chop off URI path components one by one and build local path. */
while (u <= cp_end) {
const char *next = u;
struct mg_str component;
if (exists) {
cs_stat_t st;
exists = (mg_stat(lp, &st) == 0);
if (exists && S_ISREG(st.st_mode)) {
/* We found the terminal file; the rest of the URI (if any) is path_info.
*/
if (*(u - 1) == '/') u--;
break;
}
}
if (u >= cp_end) break;
parse_uri_component((const char **) &next, cp_end, '/', &component);
if (component.len > 0) {
int len;
memmove(p + 1, component.p, component.len);
len = mg_url_decode(p + 1, component.len, p + 1, lp_end - p - 1, 0);
if (len <= 0) {
ok = 0;
break;
}
component.p = p + 1;
component.len = len;
if (mg_vcmp(&component, ".") == 0) {
/* Yum. */
} else if (mg_vcmp(&component, "..") == 0) {
while (p > ps && *p != DIRSEP) p--;
*p = '\0';
} else {
size_t i;
#ifdef _WIN32
/* On Windows, make sure it's valid Unicode (no funny stuff). */
wchar_t buf[MG_MAX_PATH * 2];
if (to_wchar(component.p, buf, MG_MAX_PATH) == 0) {
DBG(("[%.*s] smells funny", (int) component.len, component.p));
ok = 0;
break;
}
#endif
*p++ = DIRSEP;
/* No NULs and DIRSEPs in the component (percent-encoded). */
for (i = 0; i < component.len; i++, p++) {
if (*p == '\0' || *p == DIRSEP
#ifdef _WIN32
/* On Windows, "/" is also accepted, so check for that too. */
||
*p == '/'
#endif
) {
ok = 0;
break;
}
}
}
}
u = next;
}
if (ok) {
*local_path = lp;
if (u > cp_end) u = cp_end;
remainder->p = u;
remainder->len = cp_end - u;
} else {
MG_FREE(lp);
}
}
out:
LOG(LL_DEBUG,
("'%.*s' -> '%s' + '%.*s'", (int) hm->uri.len, hm->uri.p,
*local_path ? *local_path : "", (int) remainder->len, remainder->p));
return ok;
}
static int mg_get_month_index(const char *s) {
static const char *month_names[] = {"Jan", "Feb", "Mar", "Apr", "May", "Jun",
"Jul", "Aug", "Sep", "Oct", "Nov", "Dec"};
size_t i;
for (i = 0; i < ARRAY_SIZE(month_names); i++)
if (!strcmp(s, month_names[i])) return (int) i;
return -1;
}
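/* Number of leap years up to `year`, by the Gregorian rule: every 4th
* year, except centuries that are not divisible by 400. */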
static int mg_num_leap_years(int year) {
return year / 4 - year / 100 + year / 400;
}
/* Parse UTC date-time string, and return the corresponding time_t value. */
MG_INTERNAL time_t mg_parse_date_string(const char *datetime) {
static const unsigned short days_before_month[] = {
0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334};
char month_str[32];
int second, minute, hour, day, month, year, leap_days, days;
time_t result = (time_t) 0;
if (((sscanf(datetime, "%d/%3s/%d %d:%d:%d", &day, month_str, &year, &hour,
&minute, &second) == 6) ||
(sscanf(datetime, "%d %3s %d %d:%d:%d", &day, month_str, &year, &hour,
&minute, &second) == 6) ||
(sscanf(datetime, "%*3s, %d %3s %d %d:%d:%d", &day, month_str, &year,
&hour, &minute, &second) == 6) ||
(sscanf(datetime, "%d-%3s-%d %d:%d:%d", &day, month_str, &year, &hour,
&minute, &second) == 6)) &&
year > 1970 && (month = mg_get_month_index(month_str)) != -1) {
leap_days = mg_num_leap_years(year) - mg_num_leap_years(1970);
year -= 1970;
days = year * 365 + days_before_month[month] + (day - 1) + leap_days;
result = days * 24 * 3600 + hour * 3600 + minute * 60 + second;
}
return result;
}
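/* Conditional-request check: compares If-None-Match against our ETag, or
* If-Modified-Since against the file mtime. Non-zero means a 304 reply is
* appropriate. */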
MG_INTERNAL int mg_is_not_modified(struct http_message *hm, cs_stat_t *st) {
struct mg_str *hdr;
if ((hdr = mg_get_http_header(hm, "If-None-Match")) != NULL) {
char etag[64];
mg_http_construct_etag(etag, sizeof(etag), st);
return mg_vcasecmp(hdr, etag) == 0;
} else if ((hdr = mg_get_http_header(hm, "If-Modified-Since")) != NULL) {
return st->st_mtime <= mg_parse_date_string(hdr->p);
} else {
return 0;
}
}
static void mg_http_send_digest_auth_request(struct mg_connection *c,
const char *domain) {
mg_printf(c,
"HTTP/1.1 401 Unauthorized\r\n"
"WWW-Authenticate: Digest qop=\"auth\", "
"realm=\"%s\", nonce=\"%lu\"\r\n"
"Content-Length: 0\r\n\r\n",
domain, (unsigned long) mg_time());
}
static void mg_http_send_options(struct mg_connection *nc) {
mg_printf(nc, "%s",
"HTTP/1.1 200 OK\r\nAllow: GET, POST, HEAD, CONNECT, OPTIONS"
#if MG_ENABLE_HTTP_WEBDAV
", MKCOL, PUT, DELETE, PROPFIND, MOVE\r\nDAV: 1,2"
#endif
"\r\n\r\n");
nc->flags |= MG_F_SEND_AND_CLOSE;
}
static int mg_is_creation_request(const struct http_message *hm) {
return mg_vcmp(&hm->method, "MKCOL") == 0 || mg_vcmp(&hm->method, "PUT") == 0;
}
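/*
* Central dispatcher for filesystem requests: resolves index files, checks
* digest authorization, then routes to CGI, the WebDAV handlers, OPTIONS,
* directory listing, a 304 reply, or plain file serving.
*/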
MG_INTERNAL void mg_send_http_file(struct mg_connection *nc, char *path,
const struct mg_str *path_info,
struct http_message *hm,
struct mg_serve_http_opts *opts) {
int exists, is_directory, is_cgi;
#if MG_ENABLE_HTTP_WEBDAV
int is_dav = mg_is_dav_request(&hm->method);
#else
int is_dav = 0;
#endif
char *index_file = NULL;
cs_stat_t st;
exists = (mg_stat(path, &st) == 0);
is_directory = exists && S_ISDIR(st.st_mode);
if (is_directory)
mg_find_index_file(path, opts->index_files, &index_file, &st);
is_cgi =
(mg_match_prefix(opts->cgi_file_pattern, strlen(opts->cgi_file_pattern),
index_file ? index_file : path) > 0);
LOG(LL_DEBUG,
("%p %.*s [%s] exists=%d is_dir=%d is_dav=%d is_cgi=%d index=%s", nc,
(int) hm->method.len, hm->method.p, path, exists, is_directory, is_dav,
is_cgi, index_file ? index_file : ""));
if (is_directory && hm->uri.p[hm->uri.len - 1] != '/' && !is_dav) {
mg_printf(nc,
"HTTP/1.1 301 Moved\r\nLocation: %.*s/\r\n"
"Content-Length: 0\r\n\r\n",
(int) hm->uri.len, hm->uri.p);
MG_FREE(index_file);
return;
}
/* If we have path_info, the only way to handle it is CGI. */
if (path_info->len > 0 && !is_cgi) {
mg_http_send_error(nc, 501, NULL);
MG_FREE(index_file);
return;
}
if (is_dav && opts->dav_document_root == NULL) {
mg_http_send_error(nc, 501, NULL);
} else if (!mg_is_authorized(hm, path, is_directory, opts->auth_domain,
opts->global_auth_file, 1) ||
!mg_is_authorized(hm, path, is_directory, opts->auth_domain,
opts->per_directory_auth_file, 0)) {
mg_http_send_digest_auth_request(nc, opts->auth_domain);
} else if (is_cgi) {
#if MG_ENABLE_HTTP_CGI
mg_handle_cgi(nc, index_file ? index_file : path, path_info, hm, opts);
#else
mg_http_send_error(nc, 501, NULL);
#endif /* MG_ENABLE_HTTP_CGI */
} else if ((!exists ||
mg_is_file_hidden(path, opts, 0 /* specials are ok */)) &&
!mg_is_creation_request(hm)) {
mg_http_send_error(nc, 404, NULL);
#if MG_ENABLE_HTTP_WEBDAV
} else if (!mg_vcmp(&hm->method, "PROPFIND")) {
mg_handle_propfind(nc, path, &st, hm, opts);
#if !MG_DISABLE_DAV_AUTH
} else if (is_dav &&
(opts->dav_auth_file == NULL ||
(strcmp(opts->dav_auth_file, "-") != 0 &&
!mg_is_authorized(hm, path, is_directory, opts->auth_domain,
opts->dav_auth_file, 1)))) {
mg_http_send_digest_auth_request(nc, opts->auth_domain);
#endif
} else if (!mg_vcmp(&hm->method, "MKCOL")) {
mg_handle_mkcol(nc, path, hm);
} else if (!mg_vcmp(&hm->method, "DELETE")) {
mg_handle_delete(nc, opts, path);
} else if (!mg_vcmp(&hm->method, "PUT")) {
mg_handle_put(nc, path, hm);
} else if (!mg_vcmp(&hm->method, "MOVE")) {
mg_handle_move(nc, opts, path, hm);
#if MG_ENABLE_FAKE_DAVLOCK
} else if (!mg_vcmp(&hm->method, "LOCK")) {
mg_handle_lock(nc, path);
#endif
#endif /* MG_ENABLE_HTTP_WEBDAV */
} else if (!mg_vcmp(&hm->method, "OPTIONS")) {
mg_http_send_options(nc);
} else if (is_directory && index_file == NULL) {
#if MG_ENABLE_DIRECTORY_LISTING
if (strcmp(opts->enable_directory_listing, "yes") == 0) {
mg_send_directory_listing(nc, path, hm, opts);
} else {
mg_http_send_error(nc, 403, NULL);
}
#else
mg_http_send_error(nc, 501, NULL);
#endif
} else if (mg_is_not_modified(hm, &st)) {
mg_http_send_error(nc, 304, "Not Modified");
} else {
mg_http_serve_file2(nc, index_file ? index_file : path, hm, opts);
}
MG_FREE(index_file);
}
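/*
* Top-level static-serving entry point: applies the IP ACL, URL forwarding
* and port-based redirects, fills in option defaults, normalizes the URI
* and hands off to mg_send_http_file().
*/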
void mg_serve_http(struct mg_connection *nc, struct http_message *hm,
struct mg_serve_http_opts opts) {
char *path = NULL;
struct mg_str *hdr, path_info;
uint32_t remote_ip = ntohl(*(uint32_t *) &nc->sa.sin.sin_addr);
if (mg_check_ip_acl(opts.ip_acl, remote_ip) != 1) {
/* Not allowed to connect */
mg_http_send_error(nc, 403, NULL);
nc->flags |= MG_F_SEND_AND_CLOSE;
return;
}
#if MG_ENABLE_HTTP_URL_REWRITES
if (mg_http_handle_forwarding(nc, hm, &opts)) {
return;
}
if (mg_http_send_port_based_redirect(nc, hm, &opts)) {
return;
}
#endif
if (opts.document_root == NULL) {
opts.document_root = ".";
}
if (opts.per_directory_auth_file == NULL) {
opts.per_directory_auth_file = ".htpasswd";
}
if (opts.enable_directory_listing == NULL) {
opts.enable_directory_listing = "yes";
}
if (opts.cgi_file_pattern == NULL) {
opts.cgi_file_pattern = "**.cgi$|**.php$";
}
if (opts.ssi_pattern == NULL) {
opts.ssi_pattern = "**.shtml$|**.shtm$";
}
if (opts.index_files == NULL) {
opts.index_files = "index.html,index.htm,index.shtml,index.cgi,index.php";
}
/* Normalize path - resolve "." and ".." (in-place). */
if (!mg_normalize_uri_path(&hm->uri, &hm->uri)) {
mg_http_send_error(nc, 400, NULL);
return;
}
if (mg_uri_to_local_path(hm, &opts, &path, &path_info) == 0) {
mg_http_send_error(nc, 404, NULL);
return;
}
mg_send_http_file(nc, path, &path_info, hm, &opts);
MG_FREE(path);
path = NULL;
/* Close connection for non-keep-alive requests */
if (mg_vcmp(&hm->proto, "HTTP/1.1") != 0 ||
((hdr = mg_get_http_header(hm, "Connection")) != NULL &&
mg_vcmp(hdr, "keep-alive") != 0)) {
#if 0
nc->flags |= MG_F_SEND_AND_CLOSE;
#endif
}
}
#if MG_ENABLE_HTTP_STREAMING_MULTIPART
void mg_file_upload_handler(struct mg_connection *nc, int ev, void *ev_data,
mg_fu_fname_fn local_name_fn
MG_UD_ARG(void *user_data)) {
switch (ev) {
case MG_EV_HTTP_PART_BEGIN: {
struct mg_http_multipart_part *mp =
(struct mg_http_multipart_part *) ev_data;
struct file_upload_state *fus =
(struct file_upload_state *) MG_CALLOC(1, sizeof(*fus));
struct mg_str lfn = local_name_fn(nc, mg_mk_str(mp->file_name));
mp->user_data = NULL;
if (fus == NULL) {
if (lfn.p != mp->file_name) MG_FREE((char *) lfn.p);
nc->flags |= MG_F_CLOSE_IMMEDIATELY;
return;
}
if (lfn.p == NULL || lfn.len == 0) {
MG_FREE(fus);
LOG(LL_ERROR, ("%p Not allowed to upload %s", nc, mp->file_name));
mg_printf(nc,
"HTTP/1.1 403 Not Allowed\r\n"
"Content-Type: text/plain\r\n"
"Connection: close\r\n\r\n"
"Not allowed to upload %s\r\n",
mp->file_name);
nc->flags |= MG_F_SEND_AND_CLOSE;
return;
}
fus->lfn = (char *) MG_MALLOC(lfn.len + 1);
if (fus->lfn == NULL) {
if (lfn.p != mp->file_name) MG_FREE((char *) lfn.p);
MG_FREE(fus);
nc->flags |= MG_F_CLOSE_IMMEDIATELY;
return;
}
memcpy(fus->lfn, lfn.p, lfn.len);
fus->lfn[lfn.len] = '\0';
if (lfn.p != mp->file_name) MG_FREE((char *) lfn.p);
LOG(LL_DEBUG,
("%p Receiving file %s -> %s", nc, mp->file_name, fus->lfn));
fus->fp = mg_fopen(fus->lfn, "w");
if (fus->fp == NULL) {
mg_printf(nc,
"HTTP/1.1 500 Internal Server Error\r\n"
"Content-Type: text/plain\r\n"
"Connection: close\r\n\r\n");
LOG(LL_ERROR, ("Failed to open %s: %d\n", fus->lfn, mg_get_errno()));
mg_printf(nc, "Failed to open %s: %d\n", fus->lfn, mg_get_errno());
/* Do not close the connection just yet, discard remainder of the data.
* This is because at the time of writing some browsers (Chrome) fail to
* render response before all the data is sent. */
}
mp->user_data = (void *) fus;
break;
}
case MG_EV_HTTP_PART_DATA: {
struct mg_http_multipart_part *mp =
(struct mg_http_multipart_part *) ev_data;
struct file_upload_state *fus =
(struct file_upload_state *) mp->user_data;
if (fus == NULL || fus->fp == NULL) break;
if (mg_fwrite(mp->data.p, 1, mp->data.len, fus->fp) != mp->data.len) {
LOG(LL_ERROR, ("Failed to write to %s: %d, wrote %d", fus->lfn,
mg_get_errno(), (int) fus->num_recd));
if (mg_get_errno() == ENOSPC
#ifdef SPIFFS_ERR_FULL
|| mg_get_errno() == SPIFFS_ERR_FULL
#endif
) {
mg_printf(nc,
"HTTP/1.1 413 Payload Too Large\r\n"
"Content-Type: text/plain\r\n"
"Connection: close\r\n\r\n");
mg_printf(nc, "Failed to write to %s: no space left; wrote %d\r\n",
fus->lfn, (int) fus->num_recd);
} else {
mg_printf(nc,
"HTTP/1.1 500 Internal Server Error\r\n"
"Content-Type: text/plain\r\n"
"Connection: close\r\n\r\n");
mg_printf(nc, "Failed to write to %s: %d, wrote %d", mp->file_name,
mg_get_errno(), (int) fus->num_recd);
}
fclose(fus->fp);
remove(fus->lfn);
fus->fp = NULL;
/* Do not close the connection just yet, discard remainder of the data.
* This is because at the time of writing some browsers (Chrome) fail to
* render response before all the data is sent. */
return;
}
fus->num_recd += mp->data.len;
LOG(LL_DEBUG, ("%p rec'd %d bytes, %d total", nc, (int) mp->data.len,
(int) fus->num_recd));
break;
}
case MG_EV_HTTP_PART_END: {
struct mg_http_multipart_part *mp =
(struct mg_http_multipart_part *) ev_data;
struct file_upload_state *fus =
(struct file_upload_state *) mp->user_data;
if (fus == NULL) break;
if (mp->status >= 0 && fus->fp != NULL) {
LOG(LL_DEBUG, ("%p Uploaded %s (%s), %d bytes", nc, mp->file_name,
fus->lfn, (int) fus->num_recd));
mg_printf(nc,
"HTTP/1.1 200 OK\r\n"
"Content-Type: text/plain\r\n"
"Connection: close\r\n\r\n"
"Ok, %s - %d bytes.\r\n",
mp->file_name, (int) fus->num_recd);
} else {
LOG(LL_ERROR, ("Failed to store %s (%s)", mp->file_name, fus->lfn));
/*
* mp->status < 0 means connection was terminated, so no reason to send
* HTTP reply
*/
}
if (fus->fp != NULL) fclose(fus->fp);
MG_FREE(fus->lfn);
MG_FREE(fus);
mp->user_data = NULL;
nc->flags |= MG_F_SEND_AND_CLOSE;
break;
}
}
#if MG_ENABLE_CALLBACK_USERDATA
(void) user_data;
#endif
}
#endif /* MG_ENABLE_HTTP_STREAMING_MULTIPART */
#endif /* MG_ENABLE_FILESYSTEM */
/* returns 0 on success, -1 on error */
MG_INTERNAL int mg_http_common_url_parse(const char *url, const char *schema,
const char *schema_tls, int *use_ssl,
char **user, char **pass, char **addr,
int *port_i, const char **path) {
int addr_len = 0;
int auth_sep_pos = -1;
int user_sep_pos = -1;
int port_pos = -1;
(void) user;
(void) pass;
if (strncmp(url, schema, strlen(schema)) == 0) {
url += strlen(schema);
} else if (strncmp(url, schema_tls, strlen(schema_tls)) == 0) {
url += strlen(schema_tls);
*use_ssl = 1;
#if !MG_ENABLE_SSL
return -1; /* SSL is not enabled, cannot do HTTPS URLs */
#endif
}
while (*url != '\0') {
*addr = (char *) MG_REALLOC(*addr, addr_len + 6 /* space for port too. */);
if (*addr == NULL) {
DBG(("OOM"));
return -1;
}
if (*url == '/') {
break;
}
if (*url == '@') {
auth_sep_pos = addr_len;
user_sep_pos = port_pos;
port_pos = -1;
}
if (*url == ':') port_pos = addr_len;
(*addr)[addr_len++] = *url;
(*addr)[addr_len] = '\0';
url++;
}
if (addr_len == 0) goto cleanup;
if (port_pos < 0) {
*port_i = addr_len;
addr_len += sprintf(*addr + addr_len, ":%d", *use_ssl ? 443 : 80);
} else {
*port_i = -1;
}
if (*path == NULL) *path = url;
if (**path == '\0') *path = "/";
if (user != NULL && pass != NULL) {
/* A userinfo without ':' leaves user_sep_pos at -1; treat it as no
* credentials rather than reading out of bounds below. */
if (auth_sep_pos == -1 || user_sep_pos == -1) {
*user = NULL;
*pass = NULL;
} else {
/* user is from 0 to user_sep_pos */
*user = (char *) MG_MALLOC(user_sep_pos + 1);
memcpy(*user, *addr, user_sep_pos);
(*user)[user_sep_pos] = '\0';
/* pass is from user_sep_pos + 1 to auth_sep_pos */
*pass = (char *) MG_MALLOC(auth_sep_pos - user_sep_pos - 1 + 1);
memcpy(*pass, *addr + user_sep_pos + 1, auth_sep_pos - user_sep_pos - 1);
(*pass)[auth_sep_pos - user_sep_pos - 1] = '\0';
/* move address proper to the front */
memmove(*addr, *addr + auth_sep_pos + 1, addr_len - auth_sep_pos);
}
}
DBG(("%s %s", *addr, *path));
return 0;
cleanup:
MG_FREE(*addr);
return -1;
}
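/*
* Parse `url`, establish the outgoing connection and attach the HTTP
* protocol handler. `addr`, `user` and `pass` are heap-allocated outputs
* that the caller must free.
*/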
struct mg_connection *mg_connect_http_base(
struct mg_mgr *mgr, MG_CB(mg_event_handler_t ev_handler, void *user_data),
struct mg_connect_opts opts, const char *schema, const char *schema_ssl,
const char *url, const char **path, char **user, char **pass, char **addr) {
struct mg_connection *nc = NULL;
int port_i = -1;
int use_ssl = 0;
if (mg_http_common_url_parse(url, schema, schema_ssl, &use_ssl, user, pass,
addr, &port_i, path) < 0) {
MG_SET_PTRPTR(opts.error_string, "cannot parse url");
return NULL;
}
LOG(LL_DEBUG, ("%s use_ssl? %d", url, use_ssl));
if (use_ssl) {
#if MG_ENABLE_SSL
/*
* Schema requires SSL, but no SSL parameters were provided in opts.
* In order to maintain backward compatibility, use a faux-SSL with no
* verification.
*/
if (opts.ssl_ca_cert == NULL) {
opts.ssl_ca_cert = "*";
}
#else
MG_SET_PTRPTR(opts.error_string, "ssl is disabled");
if (user != NULL) MG_FREE(*user);
if (pass != NULL) MG_FREE(*pass);
MG_FREE(*addr);
return NULL;
#endif
}
if ((nc = mg_connect_opt(mgr, *addr, MG_CB(ev_handler, user_data), opts)) !=
NULL) {
mg_set_protocol_http_websocket(nc);
/* If the port was added by us, restore the original host. */
if (port_i >= 0) (*addr)[port_i] = '\0';
}
return nc;
}
struct mg_connection *mg_connect_http_opt(
struct mg_mgr *mgr, MG_CB(mg_event_handler_t ev_handler, void *user_data),
struct mg_connect_opts opts, const char *url, const char *extra_headers,
const char *post_data) {
char *user = NULL, *pass = NULL, *addr = NULL;
const char *path = NULL;
struct mbuf auth;
struct mg_connection *nc =
mg_connect_http_base(mgr, MG_CB(ev_handler, user_data), opts, "http://",
"https://", url, &path, &user, &pass, &addr);
if (nc == NULL) {
return NULL;
}
mbuf_init(&auth, 0);
if (user != NULL) {
mg_basic_auth_header(user, pass, &auth);
}
if (post_data == NULL) post_data = "";
if (extra_headers == NULL) extra_headers = "";
mg_printf(nc, "%s %s HTTP/1.1\r\nHost: %s\r\nContent-Length: %" SIZE_T_FMT
"\r\n%.*s%s\r\n%s",
post_data[0] == '\0' ? "GET" : "POST", path, addr,
strlen(post_data), (int) auth.len,
(auth.buf == NULL ? "" : auth.buf), extra_headers, post_data);
mbuf_free(&auth);
MG_FREE(user);
MG_FREE(pass);
MG_FREE(addr);
return nc;
}
struct mg_connection *mg_connect_http(
struct mg_mgr *mgr, MG_CB(mg_event_handler_t ev_handler, void *user_data),
const char *url, const char *extra_headers, const char *post_data) {
struct mg_connect_opts opts;
memset(&opts, 0, sizeof(opts));
return mg_connect_http_opt(mgr, MG_CB(ev_handler, user_data), opts, url,
extra_headers, post_data);
}
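/*
* Parse one part of a multipart/form-data body: extracts the field and
* file names from Content-Disposition and points `data`/`data_len` at the
* payload. Returns the offset of the next boundary (pass buf + offset on
* the next call), or 0 when no complete part is available.
*/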
size_t mg_parse_multipart(const char *buf, size_t buf_len, char *var_name,
size_t var_name_len, char *file_name,
size_t file_name_len, const char **data,
size_t *data_len) {
static const char cd[] = "Content-Disposition: ";
size_t hl, bl, n, ll, pos, cdl = sizeof(cd) - 1;
int req_len;
if (buf == NULL || buf_len <= 0) return 0;
/* mg_http_get_request_len() returns an int; assigning a negative result
* straight into the size_t `hl` would defeat the <= 0 check. */
req_len = mg_http_get_request_len(buf, buf_len);
if (req_len <= 0) return 0;
hl = (size_t) req_len;
if (buf[0] != '-' || buf[1] != '-' || buf[2] == '\n') return 0;
/* Get boundary length */
bl = mg_get_line_len(buf, buf_len);
/* Loop through headers, fetch variable name and file name */
var_name[0] = file_name[0] = '\0';
for (n = bl; (ll = mg_get_line_len(buf + n, hl - n)) > 0; n += ll) {
if (mg_ncasecmp(cd, buf + n, cdl) == 0) {
struct mg_str header;
header.p = buf + n + cdl;
header.len = ll - (cdl + 2);
mg_http_parse_header(&header, "name", var_name, var_name_len);
mg_http_parse_header(&header, "filename", file_name, file_name_len);
}
}
/* Scan through the body, search for terminating boundary */
for (pos = hl; pos + (bl - 2) < buf_len; pos++) {
if (buf[pos] == '-' && !strncmp(buf, &buf[pos], bl - 2)) {
if (data_len != NULL) *data_len = (pos - 2) - hl;
if (data != NULL) *data = buf + hl;
return pos;
}
}
return 0;
}
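/* Register `handler` for requests whose URI matches `uri_path`; the new
* endpoint is prepended to the connection's endpoint list. */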
void mg_register_http_endpoint(struct mg_connection *nc, const char *uri_path,
MG_CB(mg_event_handler_t handler,
void *user_data)) {
struct mg_http_proto_data *pd = NULL;
struct mg_http_endpoint *new_ep = NULL;
if (nc == NULL) return;
new_ep = (struct mg_http_endpoint *) MG_CALLOC(1, sizeof(*new_ep));
if (new_ep == NULL) return;
pd = mg_http_get_proto_data(nc);
new_ep->name = strdup(uri_path);
if (new_ep->name == NULL) {
MG_FREE(new_ep);
return;
}
new_ep->name_len = strlen(new_ep->name);
new_ep->handler = handler;
#if MG_ENABLE_CALLBACK_USERDATA
new_ep->user_data = user_data;
#endif
new_ep->next = pd->endpoints;
pd->endpoints = new_ep;
}
#endif /* MG_ENABLE_HTTP */
#ifdef MG_MODULE_LINES
#line 1 "mongoose/src/http_cgi.c"
#endif
/*
* Copyright (c) 2014-2016 Cesanta Software Limited
* All rights reserved
*/
#if MG_ENABLE_HTTP && MG_ENABLE_HTTP_CGI
#ifndef MG_MAX_CGI_ENVIR_VARS
#define MG_MAX_CGI_ENVIR_VARS 64
#endif
#ifndef MG_ENV_EXPORT_TO_CGI
#define MG_ENV_EXPORT_TO_CGI "MONGOOSE_CGI"
#endif
/*
* This structure helps to create an environment for the spawned CGI program.
* Environment is an array of "VARIABLE=VALUE\0" ASCIIZ strings,
* last element must be NULL.
* However, on Windows there is a requirement that all these VARIABLE=VALUE\0
* strings must reside in a contiguous buffer. The end of the buffer is
* marked by two '\0' characters.
* We satisfy both worlds: we create an envp array (which is vars), all
* entries are actually pointers inside buf.
*/
struct mg_cgi_env_block {
struct mg_connection *nc;
char buf[MG_CGI_ENVIRONMENT_SIZE]; /* Environment buffer */
const char *vars[MG_MAX_CGI_ENVIR_VARS]; /* char *envp[] */
int len; /* Space taken */
int nvars; /* Number of variables in envp[] */
};
#ifdef _WIN32
struct mg_threadparam {
sock_t s;
HANDLE hPipe;
};
static int mg_wait_until_ready(sock_t sock, int for_read) {
fd_set set;
FD_ZERO(&set);
FD_SET(sock, &set);
return select(sock + 1, for_read ? &set : 0, for_read ? 0 : &set, 0, 0) == 1;
}
static void *mg_push_to_stdin(void *arg) {
struct mg_threadparam *tp = (struct mg_threadparam *) arg;
int n, sent, stop = 0;
DWORD k;
char buf[BUFSIZ];
while (!stop && mg_wait_until_ready(tp->s, 1) &&
(n = recv(tp->s, buf, sizeof(buf), 0)) > 0) {
if (n == -1 && GetLastError() == WSAEWOULDBLOCK) continue;
for (sent = 0; !stop && sent < n; sent += k) {
if (!WriteFile(tp->hPipe, buf + sent, n - sent, &k, 0)) stop = 1;
}
}
DBG(("%s", "FORWARED EVERYTHING TO CGI"));
CloseHandle(tp->hPipe);
MG_FREE(tp);
return NULL;
}
static void *mg_pull_from_stdout(void *arg) {
struct mg_threadparam *tp = (struct mg_threadparam *) arg;
int k = 0, stop = 0;
DWORD n, sent;
char buf[BUFSIZ];
while (!stop && ReadFile(tp->hPipe, buf, sizeof(buf), &n, NULL)) {
for (sent = 0; !stop && sent < n; sent += k) {
if (mg_wait_until_ready(tp->s, 0) &&
(k = send(tp->s, buf + sent, n - sent, 0)) <= 0)
stop = 1;
}
}
DBG(("%s", "EOF FROM CGI"));
CloseHandle(tp->hPipe);
shutdown(tp->s, 2); // Without this, IO thread may get truncated data
closesocket(tp->s);
MG_FREE(tp);
return NULL;
}
static void mg_spawn_stdio_thread(sock_t sock, HANDLE hPipe,
void *(*func)(void *)) {
struct mg_threadparam *tp = (struct mg_threadparam *) MG_MALLOC(sizeof(*tp));
if (tp != NULL) {
tp->s = sock;
tp->hPipe = hPipe;
mg_start_thread(func, tp);
}
}
static void mg_abs_path(const char *utf8_path, char *abs_path, size_t len) {
wchar_t buf[MAX_PATH_SIZE], buf2[MAX_PATH_SIZE];
to_wchar(utf8_path, buf, ARRAY_SIZE(buf));
GetFullPathNameW(buf, ARRAY_SIZE(buf2), buf2, NULL);
WideCharToMultiByte(CP_UTF8, 0, buf2, wcslen(buf2) + 1, abs_path, len, 0, 0);
}
static int mg_start_process(const char *interp, const char *cmd,
const char *env, const char *envp[],
const char *dir, sock_t sock) {
STARTUPINFOW si;
PROCESS_INFORMATION pi;
HANDLE a[2], b[2], me = GetCurrentProcess();
wchar_t wcmd[MAX_PATH_SIZE], full_dir[MAX_PATH_SIZE];
char buf[MAX_PATH_SIZE], buf2[MAX_PATH_SIZE], buf5[MAX_PATH_SIZE],
buf4[MAX_PATH_SIZE], cmdline[MAX_PATH_SIZE];
DWORD flags = DUPLICATE_CLOSE_SOURCE | DUPLICATE_SAME_ACCESS;
FILE *fp;
memset(&si, 0, sizeof(si));
memset(&pi, 0, sizeof(pi));
si.cb = sizeof(si);
si.dwFlags = STARTF_USESTDHANDLES | STARTF_USESHOWWINDOW;
si.wShowWindow = SW_HIDE;
si.hStdError = GetStdHandle(STD_ERROR_HANDLE);
CreatePipe(&a[0], &a[1], NULL, 0);
CreatePipe(&b[0], &b[1], NULL, 0);
DuplicateHandle(me, a[0], me, &si.hStdInput, 0, TRUE, flags);
DuplicateHandle(me, b[1], me, &si.hStdOutput, 0, TRUE, flags);
if (interp == NULL && (fp = mg_fopen(cmd, "r")) != NULL) {
buf[0] = buf[1] = '\0';
fgets(buf, sizeof(buf), fp);
buf[sizeof(buf) - 1] = '\0';
if (buf[0] == '#' && buf[1] == '!') {
interp = buf + 2;
/* Trim leading spaces: https://github.com/cesanta/mongoose/issues/489 */
while (*interp != '\0' && isspace(*(unsigned char *) interp)) {
interp++;
}
}
fclose(fp);
}
snprintf(buf, sizeof(buf), "%s/%s", dir, cmd);
mg_abs_path(buf, buf2, ARRAY_SIZE(buf2));
mg_abs_path(dir, buf5, ARRAY_SIZE(buf5));
to_wchar(dir, full_dir, ARRAY_SIZE(full_dir));
if (interp != NULL) {
mg_abs_path(interp, buf4, ARRAY_SIZE(buf4));
snprintf(cmdline, sizeof(cmdline), "%s \"%s\"", buf4, buf2);
} else {
snprintf(cmdline, sizeof(cmdline), "\"%s\"", buf2);
}
to_wchar(cmdline, wcmd, ARRAY_SIZE(wcmd));
if (CreateProcessW(NULL, wcmd, NULL, NULL, TRUE, CREATE_NEW_PROCESS_GROUP,
(void *) env, full_dir, &si, &pi) != 0) {
mg_spawn_stdio_thread(sock, a[1], mg_push_to_stdin);
mg_spawn_stdio_thread(sock, b[0], mg_pull_from_stdout);
CloseHandle(si.hStdOutput);
CloseHandle(si.hStdInput);
CloseHandle(pi.hThread);
CloseHandle(pi.hProcess);
} else {
CloseHandle(a[1]);
CloseHandle(b[0]);
closesocket(sock);
}
DBG(("CGI command: [%ls] -> %p", wcmd, pi.hProcess));
/* Not closing a[0] and b[1] because we've used DUPLICATE_CLOSE_SOURCE */
(void) envp;
return (pi.hProcess != NULL);
}
#else
static int mg_start_process(const char *interp, const char *cmd,
const char *env, const char *envp[],
const char *dir, sock_t sock) {
char buf[500];
pid_t pid = fork();
(void) env;
if (pid == 0) {
/*
* On Linux, `chdir` is declared with the `warn_unused_result` attribute.
* To quiet the compiler we have to use the result in some way.
*/
int tmp = chdir(dir);
(void) tmp;
(void) dup2(sock, 0);
(void) dup2(sock, 1);
closesocket(sock);
/*
* After exec, all signal handlers are restored to their default values,
* with the exception of SIGCHLD. According to POSIX.1-2001 and Linux's
* implementation, SIGCHLD's handler is left unchanged after exec
* if it was set to be ignored. Restore it to the default action.
*/
signal(SIGCHLD, SIG_DFL);
if (interp == NULL) {
execle(cmd, cmd, (char *) 0, envp); /* (char *) 0 to squash warning */
} else {
execle(interp, interp, cmd, (char *) 0, envp);
}
snprintf(buf, sizeof(buf),
"Status: 500\r\n\r\n"
"500 Server Error: %s%s%s: %s",
interp == NULL ? "" : interp, interp == NULL ? "" : " ", cmd,
strerror(errno));
send(1, buf, strlen(buf), 0);
_exit(EXIT_FAILURE); /* exec call failed */
}
return (pid != 0);
}
#endif /* _WIN32 */
/*
* Append VARIABLE=VALUE\0 string to the buffer, and add a respective
* pointer into the vars array.
*/
static char *mg_addenv(struct mg_cgi_env_block *block, const char *fmt, ...) {
int n, space;
char *added = block->buf + block->len;
va_list ap;
/* Calculate how much space is left in the buffer */
space = sizeof(block->buf) - (block->len + 2);
if (space > 0) {
/* Copy VARIABLE=VALUE\0 string into the free space */
va_start(ap, fmt);
n = vsnprintf(added, (size_t) space, fmt, ap);
va_end(ap);
/* Make sure we do not overflow buffer and the envp array */
if (n > 0 && n + 1 < space &&
block->nvars < (int) ARRAY_SIZE(block->vars) - 2) {
/* Append a pointer to the added string into the envp array */
block->vars[block->nvars++] = added;
/* Bump up used length counter. Include \0 terminator */
block->len += n + 1;
}
}
return added;
}
static void mg_addenv2(struct mg_cgi_env_block *blk, const char *name) {
const char *s;
if ((s = getenv(name)) != NULL) mg_addenv(blk, "%s=%s", name, s);
}
static void mg_prepare_cgi_environment(struct mg_connection *nc,
const char *prog,
const struct mg_str *path_info,
const struct http_message *hm,
const struct mg_serve_http_opts *opts,
struct mg_cgi_env_block *blk) {
const char *s;
struct mg_str *h;
char *p;
size_t i;
char buf[100];
blk->len = blk->nvars = 0;
blk->nc = nc;
if ((s = getenv("SERVER_NAME")) != NULL) {
mg_addenv(blk, "SERVER_NAME=%s", s);
} else {
mg_sock_to_str(nc->sock, buf, sizeof(buf), 3);
mg_addenv(blk, "SERVER_NAME=%s", buf);
}
mg_addenv(blk, "SERVER_ROOT=%s", opts->document_root);
mg_addenv(blk, "DOCUMENT_ROOT=%s", opts->document_root);
mg_addenv(blk, "SERVER_SOFTWARE=%s/%s", "Mongoose", MG_VERSION);
/* Prepare the environment block */
mg_addenv(blk, "%s", "GATEWAY_INTERFACE=CGI/1.1");
mg_addenv(blk, "%s", "SERVER_PROTOCOL=HTTP/1.1");
mg_addenv(blk, "%s", "REDIRECT_STATUS=200"); /* For PHP */
mg_addenv(blk, "REQUEST_METHOD=%.*s", (int) hm->method.len, hm->method.p);
mg_addenv(blk, "REQUEST_URI=%.*s%s%.*s", (int) hm->uri.len, hm->uri.p,
hm->query_string.len == 0 ? "" : "?", (int) hm->query_string.len,
hm->query_string.p);
mg_conn_addr_to_str(nc, buf, sizeof(buf),
MG_SOCK_STRINGIFY_REMOTE | MG_SOCK_STRINGIFY_IP);
mg_addenv(blk, "REMOTE_ADDR=%s", buf);
mg_conn_addr_to_str(nc, buf, sizeof(buf), MG_SOCK_STRINGIFY_PORT);
mg_addenv(blk, "SERVER_PORT=%s", buf);
s = hm->uri.p + hm->uri.len - path_info->len - 1;
if (*s == '/') {
const char *base_name = strrchr(prog, DIRSEP);
mg_addenv(blk, "SCRIPT_NAME=%.*s/%s", (int) (s - hm->uri.p), hm->uri.p,
(base_name != NULL ? base_name + 1 : prog));
} else {
mg_addenv(blk, "SCRIPT_NAME=%.*s", (int) (s - hm->uri.p + 1), hm->uri.p);
}
mg_addenv(blk, "SCRIPT_FILENAME=%s", prog);
if (path_info != NULL && path_info->len > 0) {
mg_addenv(blk, "PATH_INFO=%.*s", (int) path_info->len, path_info->p);
/* Not really translated... */
mg_addenv(blk, "PATH_TRANSLATED=%.*s", (int) path_info->len, path_info->p);
}
#if MG_ENABLE_SSL
mg_addenv(blk, "HTTPS=%s", (nc->flags & MG_F_SSL ? "on" : "off"));
#else
mg_addenv(blk, "HTTPS=off");
#endif
if ((h = mg_get_http_header((struct http_message *) hm, "Content-Type")) !=
NULL) {
mg_addenv(blk, "CONTENT_TYPE=%.*s", (int) h->len, h->p);
}
if (hm->query_string.len > 0) {
mg_addenv(blk, "QUERY_STRING=%.*s", (int) hm->query_string.len,
hm->query_string.p);
}
if ((h = mg_get_http_header((struct http_message *) hm, "Content-Length")) !=
NULL) {
mg_addenv(blk, "CONTENT_LENGTH=%.*s", (int) h->len, h->p);
}
mg_addenv2(blk, "PATH");
mg_addenv2(blk, "TMP");
mg_addenv2(blk, "TEMP");
mg_addenv2(blk, "TMPDIR");
mg_addenv2(blk, "PERLLIB");
mg_addenv2(blk, MG_ENV_EXPORT_TO_CGI);
#ifdef _WIN32
mg_addenv2(blk, "COMSPEC");
mg_addenv2(blk, "SYSTEMROOT");
mg_addenv2(blk, "SystemDrive");
mg_addenv2(blk, "ProgramFiles");
mg_addenv2(blk, "ProgramFiles(x86)");
mg_addenv2(blk, "CommonProgramFiles(x86)");
#else
mg_addenv2(blk, "LD_LIBRARY_PATH");
#endif /* _WIN32 */
/* Add all headers as HTTP_* variables */
for (i = 0; hm->header_names[i].len > 0; i++) {
p = mg_addenv(blk, "HTTP_%.*s=%.*s", (int) hm->header_names[i].len,
hm->header_names[i].p, (int) hm->header_values[i].len,
hm->header_values[i].p);
/* Convert variable name into uppercase, and change - to _ */
for (; *p != '=' && *p != '\0'; p++) {
if (*p == '-') *p = '_';
*p = (char) toupper(*(unsigned char *) p);
}
}
blk->vars[blk->nvars++] = NULL;
blk->buf[blk->len++] = '\0';
}
static void mg_cgi_ev_handler(struct mg_connection *cgi_nc, int ev,
void *ev_data MG_UD_ARG(void *user_data)) {
#if !MG_ENABLE_CALLBACK_USERDATA
void *user_data = cgi_nc->user_data;
#endif
struct mg_connection *nc = (struct mg_connection *) user_data;
(void) ev_data;
if (nc == NULL) {
cgi_nc->flags |= MG_F_CLOSE_IMMEDIATELY;
return;
}
switch (ev) {
case MG_EV_RECV:
/*
* A CGI script does not output a reply line such as "HTTP/1.1 CODE XXXXX\n".
* It outputs headers, then body. Headers might include "Status"
* header, which changes CODE, and it might include "Location" header
* which changes CODE to 302.
*
* Therefore we do not send the output from the CGI script to the user
* until all CGI headers are received.
*
* Here we parse the output from the CGI script, and if all headers has
* been received, send appropriate reply line, and forward all
* received headers to the client.
*/
if (nc->flags & MG_F_USER_1) {
struct mbuf *io = &cgi_nc->recv_mbuf;
int len = mg_http_get_request_len(io->buf, io->len);
if (len == 0) break;
if (len < 0 || io->len > MG_MAX_HTTP_REQUEST_SIZE) {
cgi_nc->flags |= MG_F_CLOSE_IMMEDIATELY;
mg_http_send_error(nc, 500, "Bad headers");
} else {
struct http_message hm;
struct mg_str *h;
mg_http_parse_headers(io->buf, io->buf + io->len, io->len, &hm);
if (mg_get_http_header(&hm, "Location") != NULL) {
mg_printf(nc, "%s", "HTTP/1.1 302 Moved\r\n");
} else if ((h = mg_get_http_header(&hm, "Status")) != NULL) {
mg_printf(nc, "HTTP/1.1 %.*s\r\n", (int) h->len, h->p);
} else {
mg_printf(nc, "%s", "HTTP/1.1 200 OK\r\n");
}
}
nc->flags &= ~MG_F_USER_1;
}
if (!(nc->flags & MG_F_USER_1)) {
mg_forward(cgi_nc, nc);
}
break;
case MG_EV_CLOSE:
mg_http_free_proto_data_cgi(&mg_http_get_proto_data(nc)->cgi);
nc->flags |= MG_F_SEND_AND_CLOSE;
break;
}
}
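/*
 * Spawn the CGI program `prog`, wire its stdio to one end of a
 * socketpair, and push the buffered POST data to it; replies are relayed
 * back to the HTTP client by mg_cgi_ev_handler() above.
 */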
MG_INTERNAL void mg_handle_cgi(struct mg_connection *nc, const char *prog,
const struct mg_str *path_info,
const struct http_message *hm,
const struct mg_serve_http_opts *opts) {
struct mg_cgi_env_block blk;
char dir[MAX_PATH_SIZE];
const char *p;
sock_t fds[2];
DBG(("%p [%s]", nc, prog));
mg_prepare_cgi_environment(nc, prog, path_info, hm, opts, &blk);
/*
* CGI must be executed in its own directory. 'dir' must point to the
* directory containing executable program, 'p' must point to the
* executable program name relative to 'dir'.
*/
if ((p = strrchr(prog, DIRSEP)) == NULL) {
snprintf(dir, sizeof(dir), "%s", ".");
} else {
snprintf(dir, sizeof(dir), "%.*s", (int) (p - prog), prog);
prog = p + 1;
}
/*
* Try to create socketpair in a loop until success. mg_socketpair()
* can be interrupted by a signal and fail.
* TODO(lsm): use sigaction to restart interrupted syscall
*/
do {
mg_socketpair(fds, SOCK_STREAM);
} while (fds[0] == INVALID_SOCKET);
if (mg_start_process(opts->cgi_interpreter, prog, blk.buf, blk.vars, dir,
fds[1]) != 0) {
size_t n = nc->recv_mbuf.len - (hm->message.len - hm->body.len);
struct mg_connection *cgi_nc =
mg_add_sock(nc->mgr, fds[0], mg_cgi_ev_handler MG_UD_ARG(nc));
struct mg_http_proto_data *cgi_pd = mg_http_get_proto_data(nc);
cgi_pd->cgi.cgi_nc = cgi_nc;
#if !MG_ENABLE_CALLBACK_USERDATA
cgi_pd->cgi.cgi_nc->user_data = nc;
#endif
nc->flags |= MG_F_USER_1;
/* Push POST data to the CGI */
if (n > 0 && n < nc->recv_mbuf.len) {
mg_send(cgi_pd->cgi.cgi_nc, hm->body.p, n);
}
mbuf_remove(&nc->recv_mbuf, nc->recv_mbuf.len);
} else {
closesocket(fds[0]);
mg_http_send_error(nc, 500, "CGI failure");
}
#ifndef _WIN32
closesocket(fds[1]); /* On Windows, CGI stdio thread closes that socket */
#endif
}
MG_INTERNAL void mg_http_free_proto_data_cgi(struct mg_http_proto_data_cgi *d) {
if (d != NULL) {
if (d->cgi_nc != NULL) d->cgi_nc->flags |= MG_F_CLOSE_IMMEDIATELY;
memset(d, 0, sizeof(struct mg_http_proto_data_cgi));
}
}
#endif /* MG_ENABLE_HTTP && MG_ENABLE_HTTP_CGI */
#ifdef MG_MODULE_LINES
#line 1 "mongoose/src/http_ssi.c"
#endif
/*
* Copyright (c) 2014-2016 Cesanta Software Limited
* All rights reserved
*/
#if MG_ENABLE_HTTP && MG_ENABLE_HTTP_SSI && MG_ENABLE_FILESYSTEM
static void mg_send_ssi_file(struct mg_connection *nc, struct http_message *hm,
const char *path, FILE *fp, int include_level,
const struct mg_serve_http_opts *opts);
static void mg_send_file_data(struct mg_connection *nc, FILE *fp) {
char buf[BUFSIZ];
size_t n;
while ((n = mg_fread(buf, 1, sizeof(buf), fp)) > 0) {
mg_send(nc, buf, n);
}
}
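/*
 * Handle an SSI include directive: resolve the included file path
 * according to its "virtual=", "abspath=" or "file=" attribute, then
 * either recurse (if the file matches ssi_pattern) or send it verbatim.
 */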
static void mg_do_ssi_include(struct mg_connection *nc, struct http_message *hm,
const char *ssi, char *tag, int include_level,
const struct mg_serve_http_opts *opts) {
char file_name[BUFSIZ], path[MAX_PATH_SIZE], *p;
FILE *fp;
/*
 * sscanf() is safe here, since mg_send_ssi_file() also uses a buffer of
 * size BUFSIZ to read the tag, so strlen(tag) is always < BUFSIZ.
 */
if (sscanf(tag, " virtual=\"%[^\"]\"", file_name) == 1) {
/* File name is relative to the webserver root */
snprintf(path, sizeof(path), "%s/%s", opts->document_root, file_name);
} else if (sscanf(tag, " abspath=\"%[^\"]\"", file_name) == 1) {
/*
 * File name is relative to the webserver working directory,
 * or is an absolute system path
 */
snprintf(path, sizeof(path), "%s", file_name);
} else if (sscanf(tag, " file=\"%[^\"]\"", file_name) == 1 ||
sscanf(tag, " \"%[^\"]\"", file_name) == 1) {
/* File name is relative to the current document */
snprintf(path, sizeof(path), "%s", ssi);
if ((p = strrchr(path, DIRSEP)) != NULL) {
p[1] = '\0';
}
snprintf(path + strlen(path), sizeof(path) - strlen(path), "%s", file_name);
} else {
mg_printf(nc, "Bad SSI #include: [%s]", tag);
return;
}
if ((fp = mg_fopen(path, "rb")) == NULL) {
mg_printf(nc, "SSI include error: mg_fopen(%s): %s", path,
strerror(mg_get_errno()));
} else {
mg_set_close_on_exec((sock_t) fileno(fp));
if (mg_match_prefix(opts->ssi_pattern, strlen(opts->ssi_pattern), path) >
0) {
mg_send_ssi_file(nc, hm, path, fp, include_level + 1, opts);
} else {
mg_send_file_data(nc, fp);
}
fclose(fp);
}
}
#if MG_ENABLE_HTTP_SSI_EXEC
static void do_ssi_exec(struct mg_connection *nc, char *tag) {
char cmd[BUFSIZ];
FILE *fp;
if (sscanf(tag, " \"%[^\"]\"", cmd) != 1) {
mg_printf(nc, "Bad SSI #exec: [%s]", tag);
} else if ((fp = popen(cmd, "r")) == NULL) {
mg_printf(nc, "Cannot SSI #exec: [%s]: %s", cmd, strerror(mg_get_errno()));
} else {
mg_send_file_data(nc, fp);
pclose(fp);
}
}
#endif /* MG_ENABLE_HTTP_SSI_EXEC */
/*
* SSI directive has the following format:
* <!--#directive parameter=value parameter=value -->
*/
static void mg_send_ssi_file(struct mg_connection *nc, struct http_message *hm,
const char *path, FILE *fp, int include_level,
const struct mg_serve_http_opts *opts) {
static const struct mg_str btag = MG_MK_STR("<!--#");
static const struct mg_str d_include = MG_MK_STR("include");
static const struct mg_str d_call = MG_MK_STR("call");
#if MG_ENABLE_HTTP_SSI_EXEC
static const struct mg_str d_exec = MG_MK_STR("exec");
#endif
char buf[BUFSIZ], *p = buf + btag.len; /* p points to SSI directive */
int ch, len, in_ssi_tag;
if (include_level > 10) {
mg_printf(nc, "SSI #include level is too deep (%s)", path);
return;
}
in_ssi_tag = len = 0;
while ((ch = fgetc(fp)) != EOF) {
if (in_ssi_tag && ch == '>' && buf[len - 1] == '-' && buf[len - 2] == '-') {
size_t i = len - 2;
in_ssi_tag = 0;
/* Trim closing --> */
buf[i--] = '\0';
while (i > 0 && buf[i] == ' ') {
buf[i--] = '\0';
}
/* Handle known SSI directives */
if (strncmp(p, d_include.p, d_include.len) == 0) {
mg_do_ssi_include(nc, hm, path, p + d_include.len + 1, include_level,
opts);
} else if (strncmp(p, d_call.p, d_call.len) == 0) {
struct mg_ssi_call_ctx cctx;
memset(&cctx, 0, sizeof(cctx));
cctx.req = hm;
cctx.file = mg_mk_str(path);
cctx.arg = mg_mk_str(p + d_call.len + 1);
mg_call(nc, NULL, nc->user_data, MG_EV_SSI_CALL,
(void *) cctx.arg.p); /* NUL added above */
mg_call(nc, NULL, nc->user_data, MG_EV_SSI_CALL_CTX, &cctx);
#if MG_ENABLE_HTTP_SSI_EXEC
} else if (strncmp(p, d_exec.p, d_exec.len) == 0) {
do_ssi_exec(nc, p + d_exec.len + 1);
#endif
} else {
/* Silently ignore unknown SSI directive. */
}
len = 0;
} else if (ch == '<') {
in_ssi_tag = 1;
if (len > 0) {
mg_send(nc, buf, (size_t) len);
}
len = 0;
buf[len++] = ch & 0xff;
} else if (in_ssi_tag) {
if (len == (int) btag.len && strncmp(buf, btag.p, btag.len) != 0) {
/* Not an SSI tag */
in_ssi_tag = 0;
} else if (len == (int) sizeof(buf) - 2) {
mg_printf(nc, "%s: SSI tag is too large", path);
len = 0;
}
buf[len++] = ch & 0xff;
} else {
buf[len++] = ch & 0xff;
if (len == (int) sizeof(buf)) {
mg_send(nc, buf, (size_t) len);
len = 0;
}
}
}
/* Send the rest of buffered data */
if (len > 0) {
mg_send(nc, buf, (size_t) len);
}
}
MG_INTERNAL void mg_handle_ssi_request(struct mg_connection *nc,
struct http_message *hm,
const char *path,
const struct mg_serve_http_opts *opts) {
FILE *fp;
struct mg_str mime_type;
DBG(("%p %s", nc, path));
if ((fp = mg_fopen(path, "rb")) == NULL) {
mg_http_send_error(nc, 404, NULL);
} else {
mg_set_close_on_exec((sock_t) fileno(fp));
mime_type = mg_get_mime_type(path, "text/plain", opts);
mg_send_response_line(nc, 200, opts->extra_headers);
mg_printf(nc,
"Content-Type: %.*s\r\n"
"Connection: close\r\n\r\n",
(int) mime_type.len, mime_type.p);
mg_send_ssi_file(nc, hm, path, fp, 0, opts);
fclose(fp);
nc->flags |= MG_F_SEND_AND_CLOSE;
}
}
#endif /* MG_ENABLE_HTTP_SSI && MG_ENABLE_HTTP && MG_ENABLE_FILESYSTEM */
#ifdef MG_MODULE_LINES
#line 1 "mongoose/src/http_webdav.c"
#endif
/*
* Copyright (c) 2014-2016 Cesanta Software Limited
* All rights reserved
*/
#if MG_ENABLE_HTTP && MG_ENABLE_HTTP_WEBDAV
MG_INTERNAL int mg_is_dav_request(const struct mg_str *s) {
static const char *methods[] = {
"PUT",
"DELETE",
"MKCOL",
"PROPFIND",
"MOVE"
#if MG_ENABLE_FAKE_DAVLOCK
,
"LOCK",
"UNLOCK"
#endif
};
size_t i;
for (i = 0; i < ARRAY_SIZE(methods); i++) {
if (mg_vcmp(s, methods[i]) == 0) {
return 1;
}
}
return 0;
}
static int mg_mkdir(const char *path, uint32_t mode) {
#ifndef _WIN32
return mkdir(path, mode);
#else
(void) mode;
return _mkdir(path);
#endif
}
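/* Emit a WebDAV <d:response> element for one file; used by PROPFIND */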
static void mg_print_props(struct mg_connection *nc, const char *name,
cs_stat_t *stp) {
char mtime[64], buf[MAX_PATH_SIZE * 3];
time_t t = stp->st_mtime; /* store in local variable for NDK compile */
mg_gmt_time_string(mtime, sizeof(mtime), &t);
mg_url_encode(name, strlen(name), buf, sizeof(buf));
mg_printf(nc,
"<d:response>"
"<d:href>%s</d:href>"
"<d:propstat>"
"<d:prop>"
"<d:resourcetype>%s</d:resourcetype>"
"<d:getcontentlength>%" INT64_FMT
"</d:getcontentlength>"
"<d:getlastmodified>%s</d:getlastmodified>"
"</d:prop>"
"<d:status>HTTP/1.1 200 OK</d:status>"
"</d:propstat>"
"</d:response>\n",
buf, S_ISDIR(stp->st_mode) ? "<d:collection/>" : "",
(int64_t) stp->st_size, mtime);
}
MG_INTERNAL void mg_handle_propfind(struct mg_connection *nc, const char *path,
cs_stat_t *stp, struct http_message *hm,
struct mg_serve_http_opts *opts) {
static const char header[] =
"HTTP/1.1 207 Multi-Status\r\n"
"Connection: close\r\n"
"Content-Type: text/xml; charset=utf-8\r\n\r\n"
"<?xml version=\"1.0\" encoding=\"utf-8\"?>"
"<d:multistatus xmlns:d='DAV:'>\n";
static const char footer[] = "</d:multistatus>\n";
const struct mg_str *depth = mg_get_http_header(hm, "Depth");
/* Print properties for the requested resource itself */
if (S_ISDIR(stp->st_mode) &&
strcmp(opts->enable_directory_listing, "yes") != 0) {
mg_printf(nc, "%s", "HTTP/1.1 403 Directory Listing Denied\r\n\r\n");
} else {
char uri[MAX_PATH_SIZE];
mg_send(nc, header, sizeof(header) - 1);
snprintf(uri, sizeof(uri), "%.*s", (int) hm->uri.len, hm->uri.p);
mg_print_props(nc, uri, stp);
if (S_ISDIR(stp->st_mode) && (depth == NULL || mg_vcmp(depth, "0") != 0)) {
mg_scan_directory(nc, path, opts, mg_print_props);
}
mg_send(nc, footer, sizeof(footer) - 1);
nc->flags |= MG_F_SEND_AND_CLOSE;
}
}
#if MG_ENABLE_FAKE_DAVLOCK
/*
 * Windows Explorer (and probably other WebDAV clients) requires LOCK
 * support in WebDAV. Without it, saving a file still works, but Explorer
 * shows an error message and offers "Save As"; "Save As" works, but the
 * message is very annoying.
 * This is a fake lock: it does not actually lock anything, it just
 * returns a LOCK token, and UNLOCK always answers "OK".
 * With this fake LOCK, Windows Explorer is happy and saves the file.
 * NOTE: this is not a real DAV LOCK implementation, it is just a way to
 * quiet the Windows native DAV client. This is why FAKE LOCK is not
 * enabled by default.
 */
MG_INTERNAL void mg_handle_lock(struct mg_connection *nc, const char *path) {
static const char *reply =
"HTTP/1.1 207 Multi-Status\r\n"
"Connection: close\r\n"
"Content-Type: text/xml; charset=utf-8\r\n\r\n"
"<?xml version=\"1.0\" encoding=\"utf-8\"?>"
"<d:multistatus xmlns:d='DAV:'>\n"
"<D:lockdiscovery>\n"
"<D:activelock>\n"
"<D:locktoken>\n"
"<D:href>\n"
"opaquelocktoken:%s%u"
"</D:href>"
"</D:locktoken>"
"</D:activelock>\n"
"</D:lockdiscovery>"
"</d:multistatus>\n";
mg_printf(nc, reply, path, (unsigned int) mg_time());
nc->flags |= MG_F_SEND_AND_CLOSE;
}
#endif
MG_INTERNAL void mg_handle_mkcol(struct mg_connection *nc, const char *path,
struct http_message *hm) {
int status_code = 500;
if (hm->body.len != (size_t) ~0 && hm->body.len > 0) {
status_code = 415;
} else if (!mg_mkdir(path, 0755)) {
status_code = 201;
} else if (errno == EEXIST) {
status_code = 405;
} else if (errno == EACCES) {
status_code = 403;
} else if (errno == ENOENT) {
status_code = 409;
} else {
status_code = 500;
}
mg_http_send_error(nc, status_code, NULL);
}
static int mg_remove_directory(const struct mg_serve_http_opts *opts,
const char *dir) {
char path[MAX_PATH_SIZE];
struct dirent *dp;
cs_stat_t st;
DIR *dirp;
if ((dirp = opendir(dir)) == NULL) return 0;
while ((dp = readdir(dirp)) != NULL) {
if (mg_is_file_hidden((const char *) dp->d_name, opts, 1)) {
continue;
}
snprintf(path, sizeof(path), "%s%c%s", dir, '/', dp->d_name);
mg_stat(path, &st);
if (S_ISDIR(st.st_mode)) {
mg_remove_directory(opts, path);
} else {
remove(path);
}
}
closedir(dirp);
rmdir(dir);
return 1;
}
MG_INTERNAL void mg_handle_move(struct mg_connection *c,
const struct mg_serve_http_opts *opts,
const char *path, struct http_message *hm) {
const struct mg_str *dest = mg_get_http_header(hm, "Destination");
if (dest == NULL) {
mg_http_send_error(c, 411, NULL);
} else {
const char *p = (char *) memchr(dest->p, '/', dest->len);
if (p != NULL && p[1] == '/' &&
(p = (char *) memchr(p + 2, '/', dest->p + dest->len - (p + 2))) != NULL) {
char buf[MAX_PATH_SIZE];
snprintf(buf, sizeof(buf), "%s%.*s", opts->dav_document_root,
(int) (dest->p + dest->len - p), p);
if (rename(path, buf) == 0) {
mg_http_send_error(c, 200, NULL);
} else {
mg_http_send_error(c, 418, NULL);
}
} else {
mg_http_send_error(c, 500, NULL);
}
}
}
MG_INTERNAL void mg_handle_delete(struct mg_connection *nc,
const struct mg_serve_http_opts *opts,
const char *path) {
cs_stat_t st;
if (mg_stat(path, &st) != 0) {
mg_http_send_error(nc, 404, NULL);
} else if (S_ISDIR(st.st_mode)) {
mg_remove_directory(opts, path);
mg_http_send_error(nc, 204, NULL);
} else if (remove(path) == 0) {
mg_http_send_error(nc, 204, NULL);
} else {
mg_http_send_error(nc, 423, NULL);
}
}
/* Return -1 on error, 1 on success. */
static int mg_create_intermediate_directories(const char *path) {
const char *s;
/* Create intermediate directories if they do not exist */
for (s = path + 1; *s != '\0'; s++) {
if (*s == '/') {
char buf[MAX_PATH_SIZE];
cs_stat_t st;
snprintf(buf, sizeof(buf), "%.*s", (int) (s - path), path);
buf[sizeof(buf) - 1] = '\0';
if (mg_stat(buf, &st) != 0 && mg_mkdir(buf, 0755) != 0) {
return -1;
}
}
}
return 1;
}
MG_INTERNAL void mg_handle_put(struct mg_connection *nc, const char *path,
struct http_message *hm) {
struct mg_http_proto_data *pd = mg_http_get_proto_data(nc);
cs_stat_t st;
const struct mg_str *cl_hdr = mg_get_http_header(hm, "Content-Length");
int rc, status_code = mg_stat(path, &st) == 0 ? 200 : 201;
mg_http_free_proto_data_file(&pd->file);
if ((rc = mg_create_intermediate_directories(path)) == 0) {
mg_printf(nc, "HTTP/1.1 %d OK\r\nContent-Length: 0\r\n\r\n", status_code);
} else if (rc == -1) {
mg_http_send_error(nc, 500, NULL);
} else if (cl_hdr == NULL) {
mg_http_send_error(nc, 411, NULL);
} else if ((pd->file.fp = mg_fopen(path, "w+b")) == NULL) {
mg_http_send_error(nc, 500, NULL);
} else {
const struct mg_str *range_hdr = mg_get_http_header(hm, "Content-Range");
int64_t r1 = 0, r2 = 0;
pd->file.type = DATA_PUT;
mg_set_close_on_exec((sock_t) fileno(pd->file.fp));
pd->file.cl = to64(cl_hdr->p);
if (range_hdr != NULL &&
mg_http_parse_range_header(range_hdr, &r1, &r2) > 0) {
status_code = 206;
fseeko(pd->file.fp, r1, SEEK_SET);
pd->file.cl = r2 > r1 ? r2 - r1 + 1 : pd->file.cl - r1;
}
mg_printf(nc, "HTTP/1.1 %d OK\r\nContent-Length: 0\r\n\r\n", status_code);
/* Remove HTTP request from the mbuf, leave only payload */
mbuf_remove(&nc->recv_mbuf, hm->message.len - hm->body.len);
mg_http_transfer_file_data(nc);
}
}
#endif /* MG_ENABLE_HTTP && MG_ENABLE_HTTP_WEBDAV */
#ifdef MG_MODULE_LINES
#line 1 "mongoose/src/http_websocket.c"
#endif
/*
* Copyright (c) 2014 Cesanta Software Limited
* All rights reserved
*/
#if MG_ENABLE_HTTP && MG_ENABLE_HTTP_WEBSOCKET
#ifndef MG_WEBSOCKET_PING_INTERVAL_SECONDS
#define MG_WEBSOCKET_PING_INTERVAL_SECONDS 5
#endif
#define MG_WS_NO_HOST_HEADER_MAGIC ((char *) 0x1)
static int mg_is_ws_fragment(unsigned char flags) {
return (flags & 0x80) == 0 || (flags & 0x0f) == 0;
}
static int mg_is_ws_first_fragment(unsigned char flags) {
return (flags & 0x80) == 0 && (flags & 0x0f) != 0;
}
static void mg_handle_incoming_websocket_frame(struct mg_connection *nc,
struct websocket_message *wsm) {
if (wsm->flags & 0x8) {
mg_call(nc, nc->handler, nc->user_data, MG_EV_WEBSOCKET_CONTROL_FRAME, wsm);
} else {
mg_call(nc, nc->handler, nc->user_data, MG_EV_WEBSOCKET_FRAME, wsm);
}
}
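/*
 * Parse one WebSocket frame out of recv_mbuf, unmask it if needed,
 * optionally reassemble fragmented messages, and deliver the result to
 * the user handler. Returns non-zero if a complete frame was consumed.
 */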
static int mg_deliver_websocket_data(struct mg_connection *nc) {
/* Use unsigned char * because of the integer arithmetic below */
uint64_t i, data_len = 0, frame_len = 0, buf_len = nc->recv_mbuf.len, len,
mask_len = 0, header_len = 0;
unsigned char *p = (unsigned char *) nc->recv_mbuf.buf, *buf = p,
*e = p + buf_len;
unsigned *sizep = (unsigned *) &p[1]; /* Size ptr for defragmented frames */
int ok, reass = buf_len > 0 && mg_is_ws_fragment(p[0]) &&
!(nc->flags & MG_F_WEBSOCKET_NO_DEFRAG);
/* If that's a continuation frame that must be reassembled, handle it */
if (reass && !mg_is_ws_first_fragment(p[0]) &&
buf_len >= 1 + sizeof(*sizep) && buf_len >= 1 + sizeof(*sizep) + *sizep) {
buf += 1 + sizeof(*sizep) + *sizep;
buf_len -= 1 + sizeof(*sizep) + *sizep;
}
if (buf_len >= 2) {
len = buf[1] & 127;
mask_len = buf[1] & 128 ? 4 : 0;
if (len < 126 && buf_len >= mask_len) {
data_len = len;
header_len = 2 + mask_len;
} else if (len == 126 && buf_len >= 4 + mask_len) {
header_len = 4 + mask_len;
data_len = ntohs(*(uint16_t *) &buf[2]);
} else if (buf_len >= 10 + mask_len) {
header_len = 10 + mask_len;
data_len = (((uint64_t) ntohl(*(uint32_t *) &buf[2])) << 32) +
ntohl(*(uint32_t *) &buf[6]);
}
}
frame_len = header_len + data_len;
ok = frame_len > 0 && frame_len <= buf_len;
if (ok) {
struct websocket_message wsm;
wsm.size = (size_t) data_len;
wsm.data = buf + header_len;
wsm.flags = buf[0];
/* Apply mask if necessary */
if (mask_len > 0) {
for (i = 0; i < data_len; i++) {
buf[i + header_len] ^= (buf + header_len - mask_len)[i % 4];
}
}
if (reass) {
/* On first fragmented frame, nullify size */
if (mg_is_ws_first_fragment(wsm.flags)) {
mbuf_resize(&nc->recv_mbuf, nc->recv_mbuf.size + sizeof(*sizep));
p[0] &= ~0x0f; /* Next frames will be treated as continuation */
buf = p + 1 + sizeof(*sizep);
*sizep = 0; /* TODO(lsm): fix. this can stomp over frame data */
}
/* Append this frame to the reassembled buffer */
memmove(buf, wsm.data, e - wsm.data);
(*sizep) += wsm.size;
nc->recv_mbuf.len -= wsm.data - buf;
/* On last fragmented frame - call user handler and remove data */
if (wsm.flags & 0x80) {
wsm.data = p + 1 + sizeof(*sizep);
wsm.size = *sizep;
mg_handle_incoming_websocket_frame(nc, &wsm);
mbuf_remove(&nc->recv_mbuf, 1 + sizeof(*sizep) + *sizep);
}
} else {
/* TODO(lsm): properly handle OOB control frames during defragmentation */
mg_handle_incoming_websocket_frame(nc, &wsm);
mbuf_remove(&nc->recv_mbuf, (size_t) frame_len); /* Cleanup frame */
}
/* If not reassembling and the client sent a close frame, close too */
if (!reass && (buf[0] & 0x0f) == WEBSOCKET_OP_CLOSE) {
nc->flags |= MG_F_SEND_AND_CLOSE;
}
}
return ok;
}
struct ws_mask_ctx {
size_t pos; /* zero means unmasked */
uint32_t mask;
};
static uint32_t mg_ws_random_mask(void) {
uint32_t mask;
/*
 * The spec requires WS clients to generate hard-to-guess mask keys.
 * From RFC 6455, Section 5.3:
 *
 * The unpredictability of the masking key is essential to prevent
 * authors of malicious applications from selecting the bytes that appear
 * on the wire.
 *
 * This matters mainly when the actual end user of this API is untrusted
 * code that would not have access to a lower-level net API anyway (e.g.
 * web browsers). It is therefore low priority for most Mongoose use
 * cases and can be disabled, e.g. when porting to a platform that lacks
 * rand().
 */
#if MG_DISABLE_WS_RANDOM_MASK
mask = 0xefbeadde; /* generated with a random number generator, I swear */
#else
if (sizeof(long) >= 4) {
mask = (uint32_t) rand();
} else if (sizeof(long) == 2) {
mask = (uint32_t) rand() << 16 | (uint32_t) rand();
}
#endif
return mask;
}
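/*
 * Write the RFC 6455 frame header: one FIN/opcode byte, then a 7-bit,
 * 16-bit or 64-bit payload length, then (for client connections) a
 * 4-byte mask. ctx->pos records where masking of the payload must start.
 */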
static void mg_send_ws_header(struct mg_connection *nc, int op, size_t len,
struct ws_mask_ctx *ctx) {
int header_len;
unsigned char header[10];
header[0] = (op & WEBSOCKET_DONT_FIN ? 0x0 : 0x80) + (op & 0x0f);
if (len < 126) {
header[1] = (unsigned char) len;
header_len = 2;
} else if (len <= 0xffff) { /* RFC 6455 mandates the minimal encoding */
uint16_t tmp = htons((uint16_t) len);
header[1] = 126;
memcpy(&header[2], &tmp, sizeof(tmp));
header_len = 4;
} else {
uint32_t tmp;
header[1] = 127;
tmp = htonl((uint32_t)((uint64_t) len >> 32));
memcpy(&header[2], &tmp, sizeof(tmp));
tmp = htonl((uint32_t)(len & 0xffffffff));
memcpy(&header[6], &tmp, sizeof(tmp));
header_len = 10;
}
/* client connections enable masking */
if (nc->listener == NULL) {
header[1] |= 1 << 7; /* set masking flag */
mg_send(nc, header, header_len);
ctx->mask = mg_ws_random_mask();
mg_send(nc, &ctx->mask, sizeof(ctx->mask));
ctx->pos = nc->send_mbuf.len;
} else {
mg_send(nc, header, header_len);
ctx->pos = 0;
}
}
static void mg_ws_mask_frame(struct mbuf *mbuf, struct ws_mask_ctx *ctx) {
size_t i;
if (ctx->pos == 0) return;
for (i = 0; i < (mbuf->len - ctx->pos); i++) {
mbuf->buf[ctx->pos + i] ^= ((char *) &ctx->mask)[i % 4];
}
}
void mg_send_websocket_frame(struct mg_connection *nc, int op, const void *data,
size_t len) {
struct ws_mask_ctx ctx;
DBG(("%p %d %d", nc, op, (int) len));
mg_send_ws_header(nc, op, len, &ctx);
mg_send(nc, data, len);
mg_ws_mask_frame(&nc->send_mbuf, &ctx);
if (op == WEBSOCKET_OP_CLOSE) {
nc->flags |= MG_F_SEND_AND_CLOSE;
}
}
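/*
 * Usage sketch (assuming an established WebSocket connection `nc`):
 *
 *   mg_send_websocket_frame(nc, WEBSOCKET_OP_TEXT, "hello", 5);
 */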
void mg_send_websocket_framev(struct mg_connection *nc, int op,
const struct mg_str *strv, int strvcnt) {
struct ws_mask_ctx ctx;
int i;
int len = 0;
for (i = 0; i < strvcnt; i++) {
len += strv[i].len;
}
mg_send_ws_header(nc, op, len, &ctx);
for (i = 0; i < strvcnt; i++) {
mg_send(nc, strv[i].p, strv[i].len);
}
mg_ws_mask_frame(&nc->send_mbuf, &ctx);
if (op == WEBSOCKET_OP_CLOSE) {
nc->flags |= MG_F_SEND_AND_CLOSE;
}
}
void mg_printf_websocket_frame(struct mg_connection *nc, int op,
const char *fmt, ...) {
char mem[MG_VPRINTF_BUFFER_SIZE], *buf = mem;
va_list ap;
int len;
va_start(ap, fmt);
if ((len = mg_avprintf(&buf, sizeof(mem), fmt, ap)) > 0) {
mg_send_websocket_frame(nc, op, buf, len);
}
va_end(ap);
if (buf != mem && buf != NULL) {
MG_FREE(buf);
}
}
MG_INTERNAL void mg_ws_handler(struct mg_connection *nc, int ev,
void *ev_data MG_UD_ARG(void *user_data)) {
mg_call(nc, nc->handler, nc->user_data, ev, ev_data);
switch (ev) {
case MG_EV_RECV:
do {
} while (mg_deliver_websocket_data(nc));
break;
case MG_EV_POLL:
/* Ping idle websocket connections */
{
time_t now = *(time_t *) ev_data;
if (nc->flags & MG_F_IS_WEBSOCKET &&
now > nc->last_io_time + MG_WEBSOCKET_PING_INTERVAL_SECONDS) {
mg_send_websocket_frame(nc, WEBSOCKET_OP_PING, "", 0);
}
}
break;
default:
break;
}
#if MG_ENABLE_CALLBACK_USERDATA
(void) user_data;
#endif
}
#ifndef MG_EXT_SHA1
static void mg_hash_sha1_v(size_t num_msgs, const uint8_t *msgs[],
const size_t *msg_lens, uint8_t *digest) {
size_t i;
cs_sha1_ctx sha_ctx;
cs_sha1_init(&sha_ctx);
for (i = 0; i < num_msgs; i++) {
cs_sha1_update(&sha_ctx, msgs[i], msg_lens[i]);
}
cs_sha1_final(digest, &sha_ctx);
}
#else
extern void mg_hash_sha1_v(size_t num_msgs, const uint8_t *msgs[],
const size_t *msg_lens, uint8_t *digest);
#endif
MG_INTERNAL void mg_ws_handshake(struct mg_connection *nc,
const struct mg_str *key) {
static const char *magic = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11";
const uint8_t *msgs[2] = {(const uint8_t *) key->p, (const uint8_t *) magic};
const size_t msg_lens[2] = {key->len, 36};
unsigned char sha[20];
char b64_sha[30];
mg_hash_sha1_v(2, msgs, msg_lens, sha);
mg_base64_encode(sha, sizeof(sha), b64_sha);
mg_printf(nc, "%s%s%s",
"HTTP/1.1 101 Switching Protocols\r\n"
"Upgrade: websocket\r\n"
"Connection: Upgrade\r\n"
"Sec-WebSocket-Accept: ",
b64_sha, "\r\n\r\n");
DBG(("%p %.*s %s", nc, (int) key->len, key->p, b64_sha));
}
void mg_send_websocket_handshake2(struct mg_connection *nc, const char *path,
const char *host, const char *protocol,
const char *extra_headers) {
mg_send_websocket_handshake3(nc, path, host, protocol, extra_headers, NULL,
NULL);
}
void mg_send_websocket_handshake3(struct mg_connection *nc, const char *path,
const char *host, const char *protocol,
const char *extra_headers, const char *user,
const char *pass) {
struct mbuf auth;
char key[25];
uint32_t nonce[4];
nonce[0] = mg_ws_random_mask();
nonce[1] = mg_ws_random_mask();
nonce[2] = mg_ws_random_mask();
nonce[3] = mg_ws_random_mask();
mg_base64_encode((unsigned char *) &nonce, sizeof(nonce), key);
mbuf_init(&auth, 0);
if (user != NULL) {
mg_basic_auth_header(user, pass, &auth);
}
/*
 * NOTE: the (auth.buf == NULL ? "" : auth.buf) is because the cc3200
 * libc is broken: it doesn't like a zero length passed to %.*s, e.g.
 * sprintf("f%.*so", (int) 0, NULL) yields `f\0o`, because it handles
 * NULL specially (and incorrectly).
 */
mg_printf(nc,
"GET %s HTTP/1.1\r\n"
"Upgrade: websocket\r\n"
"Connection: Upgrade\r\n"
"%.*s"
"Sec-WebSocket-Version: 13\r\n"
"Sec-WebSocket-Key: %s\r\n",
path, (int) auth.len, (auth.buf == NULL ? "" : auth.buf), key);
/* TODO(mkm): take default hostname from http proto data if host == NULL */
if (host != MG_WS_NO_HOST_HEADER_MAGIC) {
mg_printf(nc, "Host: %s\r\n", host);
}
if (protocol != NULL) {
mg_printf(nc, "Sec-WebSocket-Protocol: %s\r\n", protocol);
}
if (extra_headers != NULL) {
mg_printf(nc, "%s", extra_headers);
}
mg_printf(nc, "\r\n");
mbuf_free(&auth);
}
void mg_send_websocket_handshake(struct mg_connection *nc, const char *path,
const char *extra_headers) {
mg_send_websocket_handshake2(nc, path, MG_WS_NO_HOST_HEADER_MAGIC, NULL,
extra_headers);
}
struct mg_connection *mg_connect_ws_opt(
struct mg_mgr *mgr, MG_CB(mg_event_handler_t ev_handler, void *user_data),
struct mg_connect_opts opts, const char *url, const char *protocol,
const char *extra_headers) {
char *user = NULL, *pass = NULL, *addr = NULL;
const char *path = NULL;
struct mg_connection *nc =
mg_connect_http_base(mgr, MG_CB(ev_handler, user_data), opts, "ws://",
"wss://", url, &path, &user, &pass, &addr);
if (nc != NULL) {
mg_send_websocket_handshake3(nc, path, addr, protocol, extra_headers, user,
pass);
}
MG_FREE(addr);
MG_FREE(user);
MG_FREE(pass);
return nc;
}
struct mg_connection *mg_connect_ws(
struct mg_mgr *mgr, MG_CB(mg_event_handler_t ev_handler, void *user_data),
const char *url, const char *protocol, const char *extra_headers) {
struct mg_connect_opts opts;
memset(&opts, 0, sizeof(opts));
return mg_connect_ws_opt(mgr, MG_CB(ev_handler, user_data), opts, url,
protocol, extra_headers);
}
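/*
 * Usage sketch (hypothetical handler `ev_handler` and manager `mgr`):
 *
 *   struct mg_connection *c = mg_connect_ws(
 *       &mgr, MG_CB(ev_handler, NULL), "ws://example.org/ws", NULL, NULL);
 */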
#endif /* MG_ENABLE_HTTP && MG_ENABLE_HTTP_WEBSOCKET */
#ifdef MG_MODULE_LINES
#line 1 "mongoose/src/util.c"
#endif
/*
* Copyright (c) 2014 Cesanta Software Limited
* All rights reserved
*/
/* Amalgamated: #include "common/base64.h" */
/* Amalgamated: #include "mongoose/src/internal.h" */
/* Amalgamated: #include "mongoose/src/util.h" */
/* For platforms with limited libc */
#ifndef MAX
#define MAX(a, b) ((a) > (b) ? (a) : (b))
#endif
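/*
 * Store in `v` the token starting at `s` and delimited by any character
 * in `delims`, then return a pointer past the trailing delimiters.
 */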
const char *mg_skip(const char *s, const char *end, const char *delims,
struct mg_str *v) {
v->p = s;
while (s < end && strchr(delims, *(unsigned char *) s) == NULL) s++;
v->len = s - v->p;
while (s < end && strchr(delims, *(unsigned char *) s) != NULL) s++;
return s;
}
static int lowercase(const char *s) {
return tolower(*(const unsigned char *) s);
}
#if MG_ENABLE_FILESYSTEM && !defined(MG_USER_FILE_FUNCTIONS)
int mg_stat(const char *path, cs_stat_t *st) {
#ifdef _WIN32
wchar_t wpath[MAX_PATH_SIZE];
to_wchar(path, wpath, ARRAY_SIZE(wpath));
DBG(("[%ls] -> %d", wpath, _wstati64(wpath, st)));
return _wstati64(wpath, st);
#else
return stat(path, st);
#endif
}
FILE *mg_fopen(const char *path, const char *mode) {
#ifdef _WIN32
wchar_t wpath[MAX_PATH_SIZE], wmode[10];
to_wchar(path, wpath, ARRAY_SIZE(wpath));
to_wchar(mode, wmode, ARRAY_SIZE(wmode));
return _wfopen(wpath, wmode);
#else
return fopen(path, mode);
#endif
}
int mg_open(const char *path, int flag, int mode) { /* LCOV_EXCL_LINE */
#if defined(_WIN32) && !defined(WINCE)
wchar_t wpath[MAX_PATH_SIZE];
to_wchar(path, wpath, ARRAY_SIZE(wpath));
return _wopen(wpath, flag, mode);
#else
return open(path, flag, mode); /* LCOV_EXCL_LINE */
#endif
}
size_t mg_fread(void *ptr, size_t size, size_t count, FILE *f) {
return fread(ptr, size, count, f);
}
size_t mg_fwrite(const void *ptr, size_t size, size_t count, FILE *f) {
return fwrite(ptr, size, count, f);
}
#endif
void mg_base64_encode(const unsigned char *src, int src_len, char *dst) {
cs_base64_encode(src, src_len, dst);
}
int mg_base64_decode(const unsigned char *s, int len, char *dst) {
return cs_base64_decode(s, len, dst, NULL);
}
#if MG_ENABLE_THREADS
void *mg_start_thread(void *(*f)(void *), void *p) {
#ifdef WINCE
return (void *) CreateThread(NULL, 0, (LPTHREAD_START_ROUTINE) f, p, 0, NULL);
#elif defined(_WIN32)
return (void *) _beginthread((void(__cdecl *) (void *) ) f, 0, p);
#else
pthread_t thread_id = (pthread_t) 0;
pthread_attr_t attr;
(void) pthread_attr_init(&attr);
(void) pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
#if defined(MG_STACK_SIZE) && MG_STACK_SIZE > 1
(void) pthread_attr_setstacksize(&attr, MG_STACK_SIZE);
#endif
pthread_create(&thread_id, &attr, f, p);
pthread_attr_destroy(&attr);
return (void *) thread_id;
#endif
}
#endif /* MG_ENABLE_THREADS */
/* Set close-on-exec bit for a given socket. */
void mg_set_close_on_exec(sock_t sock) {
#if defined(_WIN32) && !defined(WINCE)
(void) SetHandleInformation((HANDLE) sock, HANDLE_FLAG_INHERIT, 0);
#elif defined(__unix__)
fcntl(sock, F_SETFD, FD_CLOEXEC);
#else
(void) sock;
#endif
}
void mg_sock_addr_to_str(const union socket_address *sa, char *buf, size_t len,
int flags) {
int is_v6;
if (buf == NULL || len == 0) return;
memset(buf, 0, len);
#if MG_ENABLE_IPV6
is_v6 = sa->sa.sa_family == AF_INET6;
#else
is_v6 = 0;
#endif
if (flags & MG_SOCK_STRINGIFY_IP) {
#if MG_ENABLE_IPV6
const void *addr = NULL;
char *start = buf;
socklen_t capacity = len;
if (!is_v6) {
addr = &sa->sin.sin_addr;
} else {
addr = (void *) &sa->sin6.sin6_addr;
if (flags & MG_SOCK_STRINGIFY_PORT) {
*buf = '[';
start++;
capacity--;
}
}
if (inet_ntop(sa->sa.sa_family, addr, start, capacity) == NULL) {
goto cleanup;
}
#elif defined(_WIN32) || MG_LWIP || (MG_NET_IF == MG_NET_IF_PIC32)
/* Only Windows Vista (and newer) has inet_ntop() */
char *addr_str = inet_ntoa(sa->sin.sin_addr);
if (addr_str != NULL) {
strncpy(buf, addr_str, len - 1);
} else {
goto cleanup;
}
#else
if (inet_ntop(AF_INET, (void *) &sa->sin.sin_addr, buf, len - 1) == NULL) {
goto cleanup;
}
#endif
}
if (flags & MG_SOCK_STRINGIFY_PORT) {
int port = ntohs(sa->sin.sin_port);
if (flags & MG_SOCK_STRINGIFY_IP) {
int buf_len = strlen(buf);
snprintf(buf + buf_len, len - (buf_len + 1), "%s:%d", (is_v6 ? "]" : ""),
port);
} else {
snprintf(buf, len, "%d", port);
}
}
return;
cleanup:
*buf = '\0';
}
void mg_conn_addr_to_str(struct mg_connection *nc, char *buf, size_t len,
int flags) {
union socket_address sa;
memset(&sa, 0, sizeof(sa));
mg_if_get_conn_addr(nc, flags & MG_SOCK_STRINGIFY_REMOTE, &sa);
mg_sock_addr_to_str(&sa, buf, len, flags);
}
#if MG_ENABLE_HEXDUMP
static int mg_hexdump_n(const void *buf, int len, char *dst, int dst_len,
int offset) {
const unsigned char *p = (const unsigned char *) buf;
char ascii[17] = "";
int i, idx, n = 0;
for (i = 0; i < len; i++) {
idx = i % 16;
if (idx == 0) {
if (i > 0) n += snprintf(dst + n, MAX(dst_len - n, 0), " %s\n", ascii);
n += snprintf(dst + n, MAX(dst_len - n, 0), "%04x ", i + offset);
}
if (dst_len - n < 0) {
return n;
}
n += snprintf(dst + n, MAX(dst_len - n, 0), " %02x", p[i]);
ascii[idx] = p[i] < 0x20 || p[i] > 0x7e ? '.' : p[i];
ascii[idx + 1] = '\0';
}
while (i++ % 16) n += snprintf(dst + n, MAX(dst_len - n, 0), "%s", " ");
n += snprintf(dst + n, MAX(dst_len - n, 0), " %s\n", ascii);
return n;
}
int mg_hexdump(const void *buf, int len, char *dst, int dst_len) {
return mg_hexdump_n(buf, len, dst, dst_len, 0);
}
void mg_hexdumpf(FILE *fp, const void *buf, int len) {
char tmp[80];
int offset = 0, n;
while (len > 0) {
n = (len < 16 ? len : 16);
mg_hexdump_n(((const char *) buf) + offset, n, tmp, sizeof(tmp), offset);
fputs(tmp, fp);
offset += n;
len -= n;
}
}
void mg_hexdump_connection(struct mg_connection *nc, const char *path,
const void *buf, int num_bytes, int ev) {
FILE *fp = NULL;
char *hexbuf, src[60], dst[60];
int buf_size = num_bytes * 5 + 100;
if (strcmp(path, "-") == 0) {
fp = stdout;
} else if (strcmp(path, "--") == 0) {
fp = stderr;
#if MG_ENABLE_FILESYSTEM
} else {
fp = mg_fopen(path, "a");
#endif
}
if (fp == NULL) return;
mg_conn_addr_to_str(nc, src, sizeof(src),
MG_SOCK_STRINGIFY_IP | MG_SOCK_STRINGIFY_PORT);
mg_conn_addr_to_str(nc, dst, sizeof(dst), MG_SOCK_STRINGIFY_IP |
MG_SOCK_STRINGIFY_PORT |
MG_SOCK_STRINGIFY_REMOTE);
fprintf(
fp, "%lu %p %s %s %s %d\n", (unsigned long) mg_time(), (void *) nc, src,
ev == MG_EV_RECV ? "<-" : ev == MG_EV_SEND
? "->"
: ev == MG_EV_ACCEPT
? "<A"
: ev == MG_EV_CONNECT ? "C>" : "XX",
dst, num_bytes);
if (num_bytes > 0 && (hexbuf = (char *) MG_MALLOC(buf_size)) != NULL) {
mg_hexdump(buf, num_bytes, hexbuf, buf_size);
fprintf(fp, "%s", hexbuf);
MG_FREE(hexbuf);
}
if (fp != stdout && fp != stderr) fclose(fp); /* do not close std streams */
}
#endif
int mg_is_big_endian(void) {
static const int n = 1;
/* TODO(mkm) use a compile-time check with a 4-byte char literal */
return ((char *) &n)[0] == 0;
}
const char *mg_next_comma_list_entry(const char *list, struct mg_str *val,
struct mg_str *eq_val) {
if (list == NULL || *list == '\0') {
/* End of the list */
list = NULL;
} else {
val->p = list;
if ((list = strchr(val->p, ',')) != NULL) {
/* Comma found. Store length and shift the list ptr */
val->len = list - val->p;
list++;
} else {
/* This value is the last one */
list = val->p + strlen(val->p);
val->len = list - val->p;
}
if (eq_val != NULL) {
/* Value has form "x=y", adjust pointers and lengths */
/* so that val points to "x", and eq_val points to "y". */
eq_val->len = 0;
eq_val->p = (const char *) memchr(val->p, '=', val->len);
if (eq_val->p != NULL) {
eq_val->p++; /* Skip over '=' character */
eq_val->len = val->p + val->len - eq_val->p;
val->len = (eq_val->p - val->p) - 1;
}
}
}
return list;
}
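/*
 * Usage sketch: iterate over a comma-separated "key=value" list:
 *
 *   struct mg_str k, v;
 *   const char *list = "a=1,b=2";
 *   while ((list = mg_next_comma_list_entry(list, &k, &v)) != NULL) {
 *     // first pass: k = "a", v = "1"; second pass: k = "b", v = "2"
 *   }
 */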
int mg_match_prefix_n(const struct mg_str pattern, const struct mg_str str) {
const char *or_str;
size_t len, i = 0, j = 0;
int res;
if ((or_str = (const char *) memchr(pattern.p, '|', pattern.len)) != NULL) {
struct mg_str pstr = {pattern.p, (size_t)(or_str - pattern.p)};
res = mg_match_prefix_n(pstr, str);
if (res > 0) return res;
pstr.p = or_str + 1;
pstr.len = (pattern.p + pattern.len) - (or_str + 1);
return mg_match_prefix_n(pstr, str);
}
for (; i < pattern.len; i++, j++) {
if (pattern.p[i] == '?' && j != str.len) {
continue;
} else if (pattern.p[i] == '$') {
return j == str.len ? (int) j : -1;
} else if (pattern.p[i] == '*') {
i++;
if (i < pattern.len && pattern.p[i] == '*') {
i++;
len = str.len - j;
} else {
len = 0;
while (j + len != str.len && str.p[j + len] != '/') {
len++;
}
}
if (i == pattern.len) {
return j + len;
}
do {
const struct mg_str pstr = {pattern.p + i, pattern.len - i};
const struct mg_str sstr = {str.p + j + len, str.len - j - len};
res = mg_match_prefix_n(pstr, sstr);
} while (res == -1 && len-- > 0);
return res == -1 ? -1 : (int) (j + res + len);
} else if (lowercase(&pattern.p[i]) != lowercase(&str.p[j])) {
return -1;
}
}
return j;
}
int mg_match_prefix(const char *pattern, int pattern_len, const char *str) {
const struct mg_str pstr = {pattern, (size_t) pattern_len};
return mg_match_prefix_n(pstr, mg_mk_str(str));
}
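/*
 * Pattern semantics (as derived from the matcher above): '?' matches any
 * single character, '*' matches up to the next '/', '**' matches across
 * '/', '$' anchors the end, and '|' separates alternatives. Matching is
 * case-insensitive; the length of the matched prefix is returned.
 */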
DO_NOT_WARN_UNUSED MG_INTERNAL int mg_get_errno(void) {
#ifndef WINCE
return errno;
#else
/* TODO(alashkin): translate error codes? */
return GetLastError();
#endif
}
void mg_mbuf_append_base64_putc(char ch, void *user_data) {
struct mbuf *mbuf = (struct mbuf *) user_data;
mbuf_append(mbuf, &ch, sizeof(ch));
}
void mg_mbuf_append_base64(struct mbuf *mbuf, const void *data, size_t len) {
struct cs_base64_ctx ctx;
cs_base64_init(&ctx, mg_mbuf_append_base64_putc, mbuf);
cs_base64_update(&ctx, (const char *) data, len);
cs_base64_finish(&ctx);
}
void mg_basic_auth_header(const char *user, const char *pass,
struct mbuf *buf) {
const char *header_prefix = "Authorization: Basic ";
const char *header_suffix = "\r\n";
struct cs_base64_ctx ctx;
cs_base64_init(&ctx, mg_mbuf_append_base64_putc, buf);
mbuf_append(buf, header_prefix, strlen(header_prefix));
cs_base64_update(&ctx, user, strlen(user));
if (pass != NULL) {
cs_base64_update(&ctx, ":", 1);
cs_base64_update(&ctx, pass, strlen(pass));
}
cs_base64_finish(&ctx);
mbuf_append(buf, header_suffix, strlen(header_suffix));
}
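/*
 * Usage sketch: mg_basic_auth_header("joe", "secret", &buf) appends
 * "Authorization: Basic am9lOnNlY3JldA==\r\n" to `buf`.
 */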
#ifdef MG_MODULE_LINES
#line 1 "mongoose/src/mqtt.c"
#endif
/*
* Copyright (c) 2014 Cesanta Software Limited
* All rights reserved
*/
#if MG_ENABLE_MQTT
#include <string.h>
/* Amalgamated: #include "mongoose/src/internal.h" */
/* Amalgamated: #include "mongoose/src/mqtt.h" */
static uint16_t getu16(const char *p) {
const uint8_t *up = (const uint8_t *) p;
return (up[0] << 8) + up[1];
}
static const char *scanto(const char *p, struct mg_str *s) {
s->len = getu16(p);
s->p = p + 2;
return s->p + s->len;
}
MG_INTERNAL int parse_mqtt(struct mbuf *io, struct mg_mqtt_message *mm) {
uint8_t header;
size_t len = 0;
int cmd;
const char *p = &io->buf[1], *end;
if (io->len < 2) return -1;
header = io->buf[0];
cmd = header >> 4;
/* decode mqtt variable length */
do {
len += (*p & 127) << 7 * (p - &io->buf[1]);
} while ((*p++ & 128) != 0 && ((size_t)(p - io->buf) < io->len));
end = p + len;
if (end > io->buf + io->len) {
return -1;
}
mm->cmd = cmd;
mm->qos = MG_MQTT_GET_QOS(header);
switch (cmd) {
case MG_MQTT_CMD_CONNECT: {
p = scanto(p, &mm->protocol_name);
mm->protocol_version = *(uint8_t *) p++;
mm->connect_flags = *(uint8_t *) p++;
mm->keep_alive_timer = getu16(p);
p += 2;
if (p < end) p = scanto(p, &mm->client_id);
if (p < end && (mm->connect_flags & MG_MQTT_HAS_WILL))
p = scanto(p, &mm->will_topic);
if (p < end && (mm->connect_flags & MG_MQTT_HAS_WILL))
p = scanto(p, &mm->will_message);
if (p < end && (mm->connect_flags & MG_MQTT_HAS_USER_NAME))
p = scanto(p, &mm->user_name);
if (p < end && (mm->connect_flags & MG_MQTT_HAS_PASSWORD))
p = scanto(p, &mm->password);
LOG(LL_DEBUG,
("%d %2x %d proto [%.*s] client_id [%.*s] will_topic [%.*s] "
"will_msg [%.*s] user_name [%.*s] password [%.*s]",
len, (int) mm->connect_flags, (int) mm->keep_alive_timer,
(int) mm->protocol_name.len, mm->protocol_name.p,
(int) mm->client_id.len, mm->client_id.p, (int) mm->will_topic.len,
mm->will_topic.p, (int) mm->will_message.len, mm->will_message.p,
(int) mm->user_name.len, mm->user_name.p, (int) mm->password.len,
mm->password.p));
break;
}
case MG_MQTT_CMD_CONNACK:
mm->connack_ret_code = p[1];
break;
case MG_MQTT_CMD_PUBACK:
case MG_MQTT_CMD_PUBREC:
case MG_MQTT_CMD_PUBREL:
case MG_MQTT_CMD_PUBCOMP:
case MG_MQTT_CMD_SUBACK:
mm->message_id = getu16(p);
break;
case MG_MQTT_CMD_PUBLISH: {
p = scanto(p, &mm->topic);
if (mm->qos > 0) {
mm->message_id = getu16(p);
p += 2;
}
mm->payload.p = p;
mm->payload.len = end - p;
break;
}
case MG_MQTT_CMD_SUBSCRIBE:
mm->message_id = getu16(p);
p += 2;
/*
* topic expressions are left in the payload and can be parsed with
* `mg_mqtt_next_subscribe_topic`
*/
mm->payload.p = p;
mm->payload.len = end - p;
break;
default:
/* Unhandled command */
break;
}
return end - io->buf;
}
static void mqtt_handler(struct mg_connection *nc, int ev,
void *ev_data MG_UD_ARG(void *user_data)) {
int len;
struct mbuf *io = &nc->recv_mbuf;
struct mg_mqtt_message mm;
memset(&mm, 0, sizeof(mm));
nc->handler(nc, ev, ev_data MG_UD_ARG(user_data));
switch (ev) {
case MG_EV_RECV:
len = parse_mqtt(io, &mm);
if (len == -1) break; /* not fully buffered */
nc->handler(nc, MG_MQTT_EVENT_BASE + mm.cmd, &mm MG_UD_ARG(user_data));
mbuf_remove(io, len);
break;
}
}
static void mg_mqtt_proto_data_destructor(void *proto_data) {
MG_FREE(proto_data);
}
int mg_mqtt_match_topic_expression(struct mg_str exp, struct mg_str topic) {
/* TODO(mkm): implement real matching */
if (memchr(exp.p, '#', exp.len)) {
/* exp `foo/#` will become `foo/` */
exp.len -= 1;
/*
* topic should be longer than the expression: e.g. topic `foo/bar` does
* match `foo/#`, but neither `foo` nor `foo/` do.
*/
if (topic.len <= exp.len) {
return 0;
}
/* Truncate topic so that it'll pass the next length check */
topic.len = exp.len;
}
if (topic.len != exp.len) {
return 0;
}
return strncmp(topic.p, exp.p, exp.len) == 0;
}
int mg_mqtt_vmatch_topic_expression(const char *exp, struct mg_str topic) {
return mg_mqtt_match_topic_expression(mg_mk_str(exp), topic);
}
void mg_set_protocol_mqtt(struct mg_connection *nc) {
nc->proto_handler = mqtt_handler;
nc->proto_data = MG_CALLOC(1, sizeof(struct mg_mqtt_proto_data));
nc->proto_data_destructor = mg_mqtt_proto_data_destructor;
}
void mg_send_mqtt_handshake(struct mg_connection *nc, const char *client_id) {
static struct mg_send_mqtt_handshake_opts opts;
mg_send_mqtt_handshake_opt(nc, client_id, opts);
}
void mg_send_mqtt_handshake_opt(struct mg_connection *nc, const char *client_id,
struct mg_send_mqtt_handshake_opts opts) {
uint8_t header = MG_MQTT_CMD_CONNECT << 4;
uint8_t rem_len;
uint16_t keep_alive;
uint16_t len;
struct mg_mqtt_proto_data *pd = (struct mg_mqtt_proto_data *) nc->proto_data;
/*
 * 9: version header (len, magic string, version number), 1: flags,
 * 2: keep-alive timer, 2: client identifier length, n: client id
 */
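/*
 * NOTE: rem_len below is sent as a single byte, so the encoded remaining
 * length must stay <= 127; longer client ids, wills or credentials would
 * be mis-encoded.
 */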
rem_len = 9 + 1 + 2 + 2 + (uint8_t) strlen(client_id);
if (opts.user_name != NULL) {
opts.flags |= MG_MQTT_HAS_USER_NAME;
rem_len += (uint8_t) strlen(opts.user_name) + 2;
}
if (opts.password != NULL) {
opts.flags |= MG_MQTT_HAS_PASSWORD;
rem_len += (uint8_t) strlen(opts.password) + 2;
}
if (opts.will_topic != NULL && opts.will_message != NULL) {
opts.flags |= MG_MQTT_HAS_WILL;
rem_len += (uint8_t) strlen(opts.will_topic) + 2;
rem_len += (uint8_t) strlen(opts.will_message) + 2;
}
mg_send(nc, &header, 1);
mg_send(nc, &rem_len, 1);
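/* Protocol name "MQIsdp" (length 6) and protocol version 3, i.e. MQTT 3.1 */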
mg_send(nc, "\00\06MQIsdp\03", 9);
mg_send(nc, &opts.flags, 1);
if (opts.keep_alive == 0) {
opts.keep_alive = 60;
}
keep_alive = htons(opts.keep_alive);
mg_send(nc, &keep_alive, 2);
len = htons((uint16_t) strlen(client_id));
mg_send(nc, &len, 2);
mg_send(nc, client_id, strlen(client_id));
if (opts.flags & MG_MQTT_HAS_WILL) {
len = htons((uint16_t) strlen(opts.will_topic));
mg_send(nc, &len, 2);
mg_send(nc, opts.will_topic, strlen(opts.will_topic));
len = htons((uint16_t) strlen(opts.will_message));
mg_send(nc, &len, 2);
mg_send(nc, opts.will_message, strlen(opts.will_message));
}
if (opts.flags & MG_MQTT_HAS_USER_NAME) {
len = htons((uint16_t) strlen(opts.user_name));
mg_send(nc, &len, 2);
mg_send(nc, opts.user_name, strlen(opts.user_name));
}
if (opts.flags & MG_MQTT_HAS_PASSWORD) {
len = htons((uint16_t) strlen(opts.password));
mg_send(nc, &len, 2);
mg_send(nc, opts.password, strlen(opts.password));
}
if (pd != NULL) {
pd->keep_alive = opts.keep_alive;
}
}
static void mg_mqtt_prepend_header(struct mg_connection *nc, uint8_t cmd,
uint8_t flags, size_t len) {
size_t off = nc->send_mbuf.len - len;
uint8_t header = cmd << 4 | (uint8_t) flags;
uint8_t buf[1 + sizeof(size_t)];
uint8_t *vlen = &buf[1];
assert(nc->send_mbuf.len >= len);
buf[0] = header;
/* mqtt variable length encoding */
do {
*vlen = len % 0x80;
len /= 0x80;
if (len > 0) *vlen |= 0x80;
vlen++;
} while (len > 0);
mbuf_insert(&nc->send_mbuf, off, buf, vlen - buf);
}
void mg_mqtt_publish(struct mg_connection *nc, const char *topic,
uint16_t message_id, int flags, const void *data,
size_t len) {
size_t old_len = nc->send_mbuf.len;
uint16_t topic_len = htons((uint16_t) strlen(topic));
uint16_t message_id_net = htons(message_id);
mg_send(nc, &topic_len, 2);
mg_send(nc, topic, strlen(topic));
if (MG_MQTT_GET_QOS(flags) > 0) {
mg_send(nc, &message_id_net, 2);
}
mg_send(nc, data, len);
mg_mqtt_prepend_header(nc, MG_MQTT_CMD_PUBLISH, flags,
nc->send_mbuf.len - old_len);
}
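/*
 * Usage sketch: publish two bytes to "foo/bar" with QoS 0:
 *
 *   mg_mqtt_publish(nc, "foo/bar", 65, MG_MQTT_QOS(0), "hi", 2);
 */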
void mg_mqtt_subscribe(struct mg_connection *nc,
const struct mg_mqtt_topic_expression *topics,
size_t topics_len, uint16_t message_id) {
size_t old_len = nc->send_mbuf.len;
uint16_t message_id_n = htons(message_id);
size_t i;
mg_send(nc, (char *) &message_id_n, 2);
for (i = 0; i < topics_len; i++) {
uint16_t topic_len_n = htons((uint16_t) strlen(topics[i].topic));
mg_send(nc, &topic_len_n, 2);
mg_send(nc, topics[i].topic, strlen(topics[i].topic));
mg_send(nc, &topics[i].qos, 1);
}
mg_mqtt_prepend_header(nc, MG_MQTT_CMD_SUBSCRIBE, MG_MQTT_QOS(1),
nc->send_mbuf.len - old_len);
}
int mg_mqtt_next_subscribe_topic(struct mg_mqtt_message *msg,
struct mg_str *topic, uint8_t *qos, int pos) {
unsigned char *buf = (unsigned char *) msg->payload.p + pos;
if ((size_t) pos >= msg->payload.len) {
return -1;
}
topic->len = buf[0] << 8 | buf[1];
topic->p = (char *) buf + 2;
*qos = buf[2 + topic->len];
return pos + 2 + topic->len + 1;
}
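/*
 * Usage sketch (this is how the broker below walks a SUBSCRIBE message):
 *
 *   struct mg_str topic; uint8_t qos; int pos;
 *   for (pos = 0;
 *        (pos = mg_mqtt_next_subscribe_topic(msg, &topic, &qos, pos)) != -1;) {
 *     ...
 *   }
 */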
void mg_mqtt_unsubscribe(struct mg_connection *nc, char **topics,
size_t topics_len, uint16_t message_id) {
size_t old_len = nc->send_mbuf.len;
uint16_t message_id_n = htons(message_id);
size_t i;
mg_send(nc, (char *) &message_id_n, 2);
for (i = 0; i < topics_len; i++) {
uint16_t topic_len_n = htons((uint16_t) strlen(topics[i]));
mg_send(nc, &topic_len_n, 2);
mg_send(nc, topics[i], strlen(topics[i]));
}
mg_mqtt_prepend_header(nc, MG_MQTT_CMD_UNSUBSCRIBE, MG_MQTT_QOS(1),
nc->send_mbuf.len - old_len);
}
void mg_mqtt_connack(struct mg_connection *nc, uint8_t return_code) {
uint8_t unused = 0;
mg_send(nc, &unused, 1);
mg_send(nc, &return_code, 1);
mg_mqtt_prepend_header(nc, MG_MQTT_CMD_CONNACK, 0, 2);
}
/*
 * Helper: send a command which contains only a `message_id`.
 * PUBREL must be sent with fixed-header flags 0x02 per the MQTT spec.
 */
static void mg_send_mqtt_short_command(struct mg_connection *nc, uint8_t cmd,
uint16_t message_id) {
uint16_t message_id_net = htons(message_id);
uint8_t flags = (cmd == MG_MQTT_CMD_PUBREL ? 2 : 0);
mg_send(nc, &message_id_net, 2);
mg_mqtt_prepend_header(nc, cmd, flags, 2 /* len */);
}
void mg_mqtt_puback(struct mg_connection *nc, uint16_t message_id) {
mg_send_mqtt_short_command(nc, MG_MQTT_CMD_PUBACK, message_id);
}
void mg_mqtt_pubrec(struct mg_connection *nc, uint16_t message_id) {
mg_send_mqtt_short_command(nc, MG_MQTT_CMD_PUBREC, message_id);
}
void mg_mqtt_pubrel(struct mg_connection *nc, uint16_t message_id) {
mg_send_mqtt_short_command(nc, MG_MQTT_CMD_PUBREL, message_id);
}
void mg_mqtt_pubcomp(struct mg_connection *nc, uint16_t message_id) {
mg_send_mqtt_short_command(nc, MG_MQTT_CMD_PUBCOMP, message_id);
}
void mg_mqtt_suback(struct mg_connection *nc, uint8_t *qoss, size_t qoss_len,
uint16_t message_id) {
size_t i;
uint16_t message_id_net = htons(message_id);
mg_send(nc, &message_id_net, 2);
for (i = 0; i < qoss_len; i++) {
mg_send(nc, &qoss[i], 1);
}
mg_mqtt_prepend_header(nc, MG_MQTT_CMD_SUBACK, MG_MQTT_QOS(1), 2 + qoss_len);
}
void mg_mqtt_unsuback(struct mg_connection *nc, uint16_t message_id) {
mg_send_mqtt_short_command(nc, MG_MQTT_CMD_UNSUBACK, message_id);
}
void mg_mqtt_ping(struct mg_connection *nc) {
mg_mqtt_prepend_header(nc, MG_MQTT_CMD_PINGREQ, 0, 0);
}
void mg_mqtt_pong(struct mg_connection *nc) {
mg_mqtt_prepend_header(nc, MG_MQTT_CMD_PINGRESP, 0, 0);
}
void mg_mqtt_disconnect(struct mg_connection *nc) {
mg_mqtt_prepend_header(nc, MG_MQTT_CMD_DISCONNECT, 0, 0);
}
#endif /* MG_ENABLE_MQTT */
#ifdef MG_MODULE_LINES
#line 1 "mongoose/src/mqtt_server.c"
#endif
/*
* Copyright (c) 2014 Cesanta Software Limited
* All rights reserved
*/
/* Amalgamated: #include "mongoose/src/internal.h" */
/* Amalgamated: #include "mongoose/src/mqtt-server.h" */
#if MG_ENABLE_MQTT_BROKER
static void mg_mqtt_session_init(struct mg_mqtt_broker *brk,
struct mg_mqtt_session *s,
struct mg_connection *nc) {
s->brk = brk;
s->subscriptions = NULL;
s->num_subscriptions = 0;
s->nc = nc;
}
static void mg_mqtt_add_session(struct mg_mqtt_session *s) {
LIST_INSERT_HEAD(&s->brk->sessions, s, link);
}
static void mg_mqtt_remove_session(struct mg_mqtt_session *s) {
LIST_REMOVE(s, link);
}
static void mg_mqtt_destroy_session(struct mg_mqtt_session *s) {
size_t i;
for (i = 0; i < s->num_subscriptions; i++) {
MG_FREE((void *) s->subscriptions[i].topic);
}
MG_FREE(s->subscriptions);
MG_FREE(s);
}
static void mg_mqtt_close_session(struct mg_mqtt_session *s) {
mg_mqtt_remove_session(s);
mg_mqtt_destroy_session(s);
}
void mg_mqtt_broker_init(struct mg_mqtt_broker *brk, void *user_data) {
LIST_INIT(&brk->sessions);
brk->user_data = user_data;
}
static void mg_mqtt_broker_handle_connect(struct mg_mqtt_broker *brk,
struct mg_connection *nc) {
struct mg_mqtt_session *s =
(struct mg_mqtt_session *) MG_CALLOC(1, sizeof *s);
if (s == NULL) {
/* LCOV_EXCL_START */
mg_mqtt_connack(nc, MG_EV_MQTT_CONNACK_SERVER_UNAVAILABLE);
return;
/* LCOV_EXCL_STOP */
}
/* TODO(mkm): check header (magic and version) */
mg_mqtt_session_init(brk, s, nc);
s->user_data = nc->user_data;
nc->user_data = s;
mg_mqtt_add_session(s);
mg_mqtt_connack(nc, MG_EV_MQTT_CONNACK_ACCEPTED);
}
static void mg_mqtt_broker_handle_subscribe(struct mg_connection *nc,
struct mg_mqtt_message *msg) {
struct mg_mqtt_session *ss = (struct mg_mqtt_session *) nc->user_data;
uint8_t qoss[512];
size_t qoss_len = 0;
struct mg_str topic;
uint8_t qos;
int pos;
struct mg_mqtt_topic_expression *te;
/* Cap the number of topics at the size of the qoss array */
for (pos = 0;
(pos = mg_mqtt_next_subscribe_topic(msg, &topic, &qos, pos)) != -1 &&
qoss_len < sizeof(qoss);) {
qoss[qoss_len++] = qos;
}
ss->subscriptions = (struct mg_mqtt_topic_expression *) MG_REALLOC(
ss->subscriptions, sizeof(*ss->subscriptions) * qoss_len);
for (pos = 0;
(pos = mg_mqtt_next_subscribe_topic(msg, &topic, &qos, pos)) != -1 &&
ss->num_subscriptions < qoss_len;
ss->num_subscriptions++) {
te = &ss->subscriptions[ss->num_subscriptions];
te->topic = (char *) MG_MALLOC(topic.len + 1);
te->qos = qos;
/* Copy and NUL-terminate explicitly: topic.p is not NUL-terminated */
memcpy((char *) te->topic, topic.p, topic.len);
((char *) te->topic)[topic.len] = '\0';
}
mg_mqtt_suback(nc, qoss, qoss_len, msg->message_id);
}
static void mg_mqtt_broker_handle_publish(struct mg_mqtt_broker *brk,
struct mg_mqtt_message *msg) {
struct mg_mqtt_session *s;
size_t i;
for (s = mg_mqtt_next(brk, NULL); s != NULL; s = mg_mqtt_next(brk, s)) {
for (i = 0; i < s->num_subscriptions; i++) {
if (mg_mqtt_vmatch_topic_expression(s->subscriptions[i].topic,
msg->topic)) {
char buf[100], *p = buf;
mg_asprintf(&p, sizeof(buf), "%.*s", (int) msg->topic.len,
msg->topic.p);
if (p == NULL) {
return;
}
mg_mqtt_publish(s->nc, p, 0, 0, msg->payload.p, msg->payload.len);
if (p != buf) {
MG_FREE(p);
}
break;
}
}
}
}
void mg_mqtt_broker(struct mg_connection *nc, int ev, void *data) {
struct mg_mqtt_message *msg = (struct mg_mqtt_message *) data;
struct mg_mqtt_broker *brk;
if (nc->listener) {
brk = (struct mg_mqtt_broker *) nc->listener->user_data;
} else {
brk = (struct mg_mqtt_broker *) nc->user_data;
}
switch (ev) {
case MG_EV_ACCEPT:
mg_set_protocol_mqtt(nc);
nc->user_data = NULL; /* Clear up the inherited pointer to broker */
break;
case MG_EV_MQTT_CONNECT:
mg_mqtt_broker_handle_connect(brk, nc);
break;
case MG_EV_MQTT_SUBSCRIBE:
mg_mqtt_broker_handle_subscribe(nc, msg);
break;
case MG_EV_MQTT_PUBLISH:
mg_mqtt_broker_handle_publish(brk, msg);
break;
case MG_EV_CLOSE:
if (nc->listener && nc->user_data != NULL) {
mg_mqtt_close_session((struct mg_mqtt_session *) nc->user_data);
}
break;
}
}
struct mg_mqtt_session *mg_mqtt_next(struct mg_mqtt_broker *brk,
struct mg_mqtt_session *s) {
return s == NULL ? LIST_FIRST(&brk->sessions) : LIST_NEXT(s, link);
}
#endif /* MG_ENABLE_MQTT_BROKER */
#ifdef MG_MODULE_LINES
#line 1 "mongoose/src/dns.c"
#endif
/*
* Copyright (c) 2014 Cesanta Software Limited
* All rights reserved
*/
#if MG_ENABLE_DNS
/* Amalgamated: #include "mongoose/src/internal.h" */
/* Amalgamated: #include "mongoose/src/dns.h" */
static int mg_dns_tid = 0xa0;
struct mg_dns_header {
uint16_t transaction_id;
uint16_t flags;
uint16_t num_questions;
uint16_t num_answers;
uint16_t num_authority_prs;
uint16_t num_other_prs;
};
struct mg_dns_resource_record *mg_dns_next_record(
struct mg_dns_message *msg, int query,
struct mg_dns_resource_record *prev) {
struct mg_dns_resource_record *rr;
for (rr = (prev == NULL ? msg->answers : prev + 1);
rr - msg->answers < msg->num_answers; rr++) {
if (rr->rtype == query) {
return rr;
}
}
return NULL;
}
int mg_dns_parse_record_data(struct mg_dns_message *msg,
struct mg_dns_resource_record *rr, void *data,
size_t data_len) {
switch (rr->rtype) {
case MG_DNS_A_RECORD:
if (data_len < sizeof(struct in_addr)) {
return -1;
}
if (rr->rdata.p + data_len > msg->pkt.p + msg->pkt.len) {
return -1;
}
memcpy(data, rr->rdata.p, data_len);
return 0;
#if MG_ENABLE_IPV6
case MG_DNS_AAAA_RECORD:
if (data_len < sizeof(struct in6_addr)) {
return -1; /* LCOV_EXCL_LINE */
}
memcpy(data, rr->rdata.p, data_len);
return 0;
#endif
case MG_DNS_CNAME_RECORD:
mg_dns_uncompress_name(msg, &rr->rdata, (char *) data, data_len);
return 0;
}
return -1;
}
int mg_dns_insert_header(struct mbuf *io, size_t pos,
struct mg_dns_message *msg) {
struct mg_dns_header header;
memset(&header, 0, sizeof(header));
header.transaction_id = msg->transaction_id;
header.flags = htons(msg->flags);
header.num_questions = htons(msg->num_questions);
header.num_answers = htons(msg->num_answers);
return mbuf_insert(io, pos, &header, sizeof(header));
}
int mg_dns_copy_questions(struct mbuf *io, struct mg_dns_message *msg) {
unsigned char *begin, *end;
struct mg_dns_resource_record *last_q;
if (msg->num_questions <= 0) return 0;
begin = (unsigned char *) msg->pkt.p + sizeof(struct mg_dns_header);
last_q = &msg->questions[msg->num_questions - 1];
end = (unsigned char *) last_q->name.p + last_q->name.len + 4;
return mbuf_append(io, begin, end - begin);
}
int mg_dns_encode_name(struct mbuf *io, const char *name, size_t len) {
const char *s;
unsigned char n;
size_t pos = io->len;
do {
if ((s = strchr(name, '.')) == NULL) {
s = name + len;
}
if (s - name > 127) {
return -1; /* TODO(mkm) cover */
}
n = s - name; /* chunk length */
mbuf_append(io, &n, 1); /* send length */
mbuf_append(io, name, n);
if (*s == '.') {
n++;
}
name += n;
len -= n;
} while (*s != '\0');
mbuf_append(io, "\0", 1); /* Mark end of host name */
return io->len - pos;
}
int mg_dns_encode_record(struct mbuf *io, struct mg_dns_resource_record *rr,
const char *name, size_t nlen, const void *rdata,
size_t rlen) {
size_t pos = io->len;
uint16_t u16;
uint32_t u32;
if (rr->kind == MG_DNS_INVALID_RECORD) {
return -1; /* LCOV_EXCL_LINE */
}
if (mg_dns_encode_name(io, name, nlen) == -1) {
return -1;
}
u16 = htons(rr->rtype);
mbuf_append(io, &u16, 2);
u16 = htons(rr->rclass);
mbuf_append(io, &u16, 2);
if (rr->kind == MG_DNS_ANSWER) {
u32 = htonl(rr->ttl);
mbuf_append(io, &u32, 4);
if (rr->rtype == MG_DNS_CNAME_RECORD) {
int clen;
/* fill size after encoding */
size_t off = io->len;
mbuf_append(io, &u16, 2);
if ((clen = mg_dns_encode_name(io, (const char *) rdata, rlen)) == -1) {
return -1;
}
u16 = clen;
io->buf[off] = u16 >> 8;
io->buf[off + 1] = u16 & 0xff;
} else {
u16 = htons((uint16_t) rlen);
mbuf_append(io, &u16, 2);
mbuf_append(io, rdata, rlen);
}
}
return io->len - pos;
}
void mg_send_dns_query(struct mg_connection *nc, const char *name,
int query_type) {
struct mg_dns_message *msg =
(struct mg_dns_message *) MG_CALLOC(1, sizeof(*msg));
struct mbuf pkt;
struct mg_dns_resource_record *rr = &msg->questions[0];
DBG(("%s %d", name, query_type));
mbuf_init(&pkt, 64 /* Start small, it'll grow as needed. */);
msg->transaction_id = ++mg_dns_tid;
msg->flags = 0x100;
msg->num_questions = 1;
mg_dns_insert_header(&pkt, 0, msg);
rr->rtype = query_type;
rr->rclass = 1; /* Class: inet */
rr->kind = MG_DNS_QUESTION;
if (mg_dns_encode_record(&pkt, rr, name, strlen(name), NULL, 0) == -1) {
/* TODO(mkm): return an error code */
goto cleanup; /* LCOV_EXCL_LINE */
}
/* TCP DNS requires messages to be prefixed with len */
if (!(nc->flags & MG_F_UDP)) {
uint16_t len = htons((uint16_t) pkt.len);
mbuf_insert(&pkt, 0, &len, 2);
}
mg_send(nc, pkt.buf, pkt.len);
mbuf_free(&pkt);
cleanup:
MG_FREE(msg);
}
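/*
 * Usage sketch: mg_send_dns_query(nc, "example.org", MG_DNS_A_RECORD);
 * For TCP connections the message is automatically length-prefixed.
 */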
static unsigned char *mg_parse_dns_resource_record(
unsigned char *data, unsigned char *end, struct mg_dns_resource_record *rr,
int reply) {
unsigned char *name = data;
int chunk_len, data_len;
while (data < end && (chunk_len = *data)) {
if (((unsigned char *) data)[0] & 0xc0) {
data += 1;
break;
}
data += chunk_len + 1;
}
if (data > end - 5) {
return NULL;
}
rr->name.p = (char *) name;
rr->name.len = data - name + 1;
data++;
rr->rtype = data[0] << 8 | data[1];
data += 2;
rr->rclass = data[0] << 8 | data[1];
data += 2;
rr->kind = reply ? MG_DNS_ANSWER : MG_DNS_QUESTION;
if (reply) {
if (data >= end - 6) {
return NULL;
}
rr->ttl = (uint32_t) data[0] << 24 | (uint32_t) data[1] << 16 |
data[2] << 8 | data[3];
data += 4;
data_len = *data << 8 | *(data + 1);
data += 2;
rr->rdata.p = (char *) data;
rr->rdata.len = data_len;
data += data_len;
}
return data;
}
int mg_parse_dns(const char *buf, int len, struct mg_dns_message *msg) {
struct mg_dns_header *header = (struct mg_dns_header *) buf;
unsigned char *data = (unsigned char *) buf + sizeof(*header);
unsigned char *end = (unsigned char *) buf + len;
int i;
memset(msg, 0, sizeof(*msg));
msg->pkt.p = buf;
msg->pkt.len = len;
if (len < (int) sizeof(*header)) return -1;
msg->transaction_id = header->transaction_id;
msg->flags = ntohs(header->flags);
msg->num_questions = ntohs(header->num_questions);
if (msg->num_questions > (int) ARRAY_SIZE(msg->questions)) {
msg->num_questions = (int) ARRAY_SIZE(msg->questions);
}
msg->num_answers = ntohs(header->num_answers);
if (msg->num_answers > (int) ARRAY_SIZE(msg->answers)) {
msg->num_answers = (int) ARRAY_SIZE(msg->answers);
}
for (i = 0; i < msg->num_questions; i++) {
data = mg_parse_dns_resource_record(data, end, &msg->questions[i], 0);
if (data == NULL) return -1;
}
for (i = 0; i < msg->num_answers; i++) {
data = mg_parse_dns_resource_record(data, end, &msg->answers[i], 1);
if (data == NULL) return -1;
}
return 0;
}
size_t mg_dns_uncompress_name(struct mg_dns_message *msg, struct mg_str *name,
char *dst, int dst_len) {
int chunk_len;
char *old_dst = dst;
const unsigned char *data = (unsigned char *) name->p;
const unsigned char *end = (unsigned char *) msg->pkt.p + msg->pkt.len;
if (data >= end) {
return 0;
}
while ((chunk_len = *data++)) {
int leeway = dst_len - (dst - old_dst);
if (data >= end) {
return 0;
}
if (chunk_len & 0xc0) {
uint16_t off = (data[-1] & (~0xc0)) << 8 | data[0];
if (off >= msg->pkt.len) {
return 0;
}
data = (unsigned char *) msg->pkt.p + off;
continue;
}
if (chunk_len > leeway) {
chunk_len = leeway;
}
if (data + chunk_len >= end) {
return 0;
}
memcpy(dst, data, chunk_len);
data += chunk_len;
dst += chunk_len;
leeway -= chunk_len;
if (leeway == 0) {
return dst - old_dst;
}
*dst++ = '.';
}
if (dst != old_dst) {
*--dst = 0;
}
return dst - old_dst;
}
static void dns_handler(struct mg_connection *nc, int ev,
void *ev_data MG_UD_ARG(void *user_data)) {
struct mbuf *io = &nc->recv_mbuf;
struct mg_dns_message msg;
/* Pass low-level events to the user handler */
nc->handler(nc, ev, ev_data MG_UD_ARG(user_data));
switch (ev) {
    case MG_EV_RECV:
      if (!(nc->flags & MG_F_UDP)) {
        /* TCP DNS is length-prefixed; wait until the 2-byte prefix arrives. */
        if (io->len < 2) break;
        mbuf_remove(io, 2);
      }
if (mg_parse_dns(nc->recv_mbuf.buf, nc->recv_mbuf.len, &msg) == -1) {
/* reply + recursion allowed + format error */
memset(&msg, 0, sizeof(msg));
msg.flags = 0x8081;
mg_dns_insert_header(io, 0, &msg);
if (!(nc->flags & MG_F_UDP)) {
uint16_t len = htons((uint16_t) io->len);
mbuf_insert(io, 0, &len, 2);
}
mg_send(nc, io->buf, io->len);
} else {
/* Call user handler with parsed message */
nc->handler(nc, MG_DNS_MESSAGE, &msg MG_UD_ARG(user_data));
}
mbuf_remove(io, io->len);
break;
}
}
void mg_set_protocol_dns(struct mg_connection *nc) {
nc->proto_handler = dns_handler;
}
#endif /* MG_ENABLE_DNS */
#ifdef MG_MODULE_LINES
#line 1 "mongoose/src/dns_server.c"
#endif
/*
* Copyright (c) 2014 Cesanta Software Limited
* All rights reserved
*/
#if MG_ENABLE_DNS_SERVER
/* Amalgamated: #include "mongoose/src/internal.h" */
/* Amalgamated: #include "mongoose/src/dns-server.h" */
struct mg_dns_reply mg_dns_create_reply(struct mbuf *io,
struct mg_dns_message *msg) {
struct mg_dns_reply rep;
rep.msg = msg;
rep.io = io;
rep.start = io->len;
/* reply + recursion allowed */
msg->flags |= 0x8080;
mg_dns_copy_questions(io, msg);
msg->num_answers = 0;
return rep;
}
void mg_dns_send_reply(struct mg_connection *nc, struct mg_dns_reply *r) {
size_t sent = r->io->len - r->start;
mg_dns_insert_header(r->io, r->start, r->msg);
if (!(nc->flags & MG_F_UDP)) {
uint16_t len = htons((uint16_t) sent);
mbuf_insert(r->io, r->start, &len, 2);
}
if (&nc->send_mbuf != r->io) {
mg_send(nc, r->io->buf + r->start, r->io->len - r->start);
r->io->len = r->start;
}
}
int mg_dns_reply_record(struct mg_dns_reply *reply,
struct mg_dns_resource_record *question,
const char *name, int rtype, int ttl, const void *rdata,
size_t rdata_len) {
struct mg_dns_message *msg = (struct mg_dns_message *) reply->msg;
char rname[512];
struct mg_dns_resource_record *ans = &msg->answers[msg->num_answers];
if (msg->num_answers >= MG_MAX_DNS_ANSWERS) {
return -1; /* LCOV_EXCL_LINE */
}
if (name == NULL) {
name = rname;
rname[511] = 0;
mg_dns_uncompress_name(msg, &question->name, rname, sizeof(rname) - 1);
}
*ans = *question;
ans->kind = MG_DNS_ANSWER;
ans->rtype = rtype;
ans->ttl = ttl;
if (mg_dns_encode_record(reply->io, ans, name, strlen(name), rdata,
rdata_len) == -1) {
return -1; /* LCOV_EXCL_LINE */
  }
msg->num_answers++;
return 0;
}
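/*
 * Usage sketch for the reply API above (illustrative, not part of this
 * file): inside an event handler, answer A queries with a fixed address
 * and a TTL of 10 seconds. `nc` and `ev_data` come from the handler's
 * arguments.
 *
 *   case MG_DNS_MESSAGE: {
 *     struct mg_dns_message *msg = (struct mg_dns_message *) ev_data;
 *     struct mg_dns_reply reply = mg_dns_create_reply(&nc->send_mbuf, msg);
 *     in_addr_t addr = inet_addr("127.0.0.1");
 *     int i;
 *     for (i = 0; i < msg->num_questions; i++) {
 *       struct mg_dns_resource_record *rr = &msg->questions[i];
 *       if (rr->rtype == MG_DNS_A_RECORD) {
 *         mg_dns_reply_record(&reply, rr, NULL, rr->rtype, 10, &addr,
 *                             sizeof(addr));
 *       }
 *     }
 *     mg_dns_send_reply(nc, &reply);
 *     break;
 *   }
 */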
#endif /* MG_ENABLE_DNS_SERVER */
#ifdef MG_MODULE_LINES
#line 1 "mongoose/src/resolv.c"
#endif
/*
* Copyright (c) 2014 Cesanta Software Limited
* All rights reserved
*/
#if MG_ENABLE_ASYNC_RESOLVER
/* Amalgamated: #include "mongoose/src/internal.h" */
/* Amalgamated: #include "mongoose/src/resolv.h" */
#ifndef MG_DEFAULT_NAMESERVER
#define MG_DEFAULT_NAMESERVER "8.8.8.8"
#endif
struct mg_resolve_async_request {
char name[1024];
int query;
mg_resolve_callback_t callback;
void *data;
time_t timeout;
int max_retries;
enum mg_resolve_err err;
/* state */
time_t last_time;
int retries;
};
/*
* Find what nameserver to use.
*
* Return 0 if OK, -1 if error
*/
static int mg_get_ip_address_of_nameserver(char *name, size_t name_len) {
int ret = -1;
#ifdef _WIN32
int i;
LONG err;
HKEY hKey, hSub;
wchar_t subkey[512], value[128],
*key = L"SYSTEM\\ControlSet001\\Services\\Tcpip\\Parameters\\Interfaces";
if ((err = RegOpenKeyExW(HKEY_LOCAL_MACHINE, key, 0, KEY_READ, &hKey)) !=
ERROR_SUCCESS) {
fprintf(stderr, "cannot open reg key %S: %ld\n", key, err);
ret = -1;
} else {
for (ret = -1, i = 0; 1; i++) {
      /* RegEnumKeyExW takes the buffer size in characters, not bytes. */
      DWORD subkey_size = sizeof(subkey) / sizeof(subkey[0]), type,
            len = sizeof(value);
if (RegEnumKeyExW(hKey, i, subkey, &subkey_size, NULL, NULL, NULL,
NULL) != ERROR_SUCCESS) {
break;
}
if (RegOpenKeyExW(hKey, subkey, 0, KEY_READ, &hSub) == ERROR_SUCCESS &&
(RegQueryValueExW(hSub, L"NameServer", 0, &type, (void *) value,
&len) == ERROR_SUCCESS ||
RegQueryValueExW(hSub, L"DhcpNameServer", 0, &type, (void *) value,
&len) == ERROR_SUCCESS)) {
/*
* See https://github.com/cesanta/mongoose/issues/176
* The value taken from the registry can be empty, a single
* IP address, or multiple IP addresses separated by comma.
* If it's empty, check the next interface.
* If it's multiple IP addresses, take the first one.
*/
wchar_t *comma = wcschr(value, ',');
if (value[0] == '\0') {
continue;
}
if (comma != NULL) {
*comma = '\0';
}
/* %S will convert wchar_t -> char */
snprintf(name, name_len, "%S", value);
ret = 0;
RegCloseKey(hSub);
break;
}
}
RegCloseKey(hKey);
}
#elif MG_ENABLE_FILESYSTEM
FILE *fp;
char line[512];
if ((fp = mg_fopen("/etc/resolv.conf", "r")) == NULL) {
ret = -1;
} else {
/* Try to figure out what nameserver to use */
for (ret = -1; fgets(line, sizeof(line), fp) != NULL;) {
unsigned int a, b, c, d;
if (sscanf(line, "nameserver %u.%u.%u.%u", &a, &b, &c, &d) == 4) {
snprintf(name, name_len, "%u.%u.%u.%u", a, b, c, d);
ret = 0;
break;
}
}
(void) fclose(fp);
}
#else
snprintf(name, name_len, "%s", MG_DEFAULT_NAMESERVER);
#endif /* _WIN32 */
return ret;
}
int mg_resolve_from_hosts_file(const char *name, union socket_address *usa) {
#if MG_ENABLE_FILESYSTEM
/* TODO(mkm) cache /etc/hosts */
FILE *fp;
char line[1024];
char *p;
char alias[256];
unsigned int a, b, c, d;
int len = 0;
if ((fp = mg_fopen("/etc/hosts", "r")) == NULL) {
return -1;
}
for (; fgets(line, sizeof(line), fp) != NULL;) {
if (line[0] == '#') continue;
if (sscanf(line, "%u.%u.%u.%u%n", &a, &b, &c, &d, &len) == 0) {
/* TODO(mkm): handle ipv6 */
continue;
}
for (p = line + len; sscanf(p, "%s%n", alias, &len) == 1; p += len) {
if (strcmp(alias, name) == 0) {
usa->sin.sin_addr.s_addr = htonl(a << 24 | b << 16 | c << 8 | d);
fclose(fp);
return 0;
}
}
}
fclose(fp);
#else
(void) name;
(void) usa;
#endif
return -1;
}
static void mg_resolve_async_eh(struct mg_connection *nc, int ev,
void *data MG_UD_ARG(void *user_data)) {
time_t now = (time_t) mg_time();
struct mg_resolve_async_request *req;
struct mg_dns_message *msg;
int first = 0;
#if !MG_ENABLE_CALLBACK_USERDATA
void *user_data = nc->user_data;
#endif
if (ev != MG_EV_POLL) DBG(("ev=%d user_data=%p", ev, user_data));
req = (struct mg_resolve_async_request *) user_data;
if (req == NULL) {
return;
}
switch (ev) {
case MG_EV_CONNECT:
/* don't depend on timer not being at epoch for sending out first req */
first = 1;
/* fallthrough */
case MG_EV_POLL:
if (req->retries > req->max_retries) {
req->err = MG_RESOLVE_EXCEEDED_RETRY_COUNT;
nc->flags |= MG_F_CLOSE_IMMEDIATELY;
break;
}
if (first || now - req->last_time >= req->timeout) {
mg_send_dns_query(nc, req->name, req->query);
req->last_time = now;
req->retries++;
}
break;
case MG_EV_RECV:
      msg = (struct mg_dns_message *) MG_MALLOC(sizeof(*msg));
      if (msg != NULL &&
          mg_parse_dns(nc->recv_mbuf.buf, *(int *) data, msg) == 0 &&
          msg->num_answers > 0) {
req->callback(msg, req->data, MG_RESOLVE_OK);
nc->user_data = NULL;
MG_FREE(req);
} else {
req->err = MG_RESOLVE_NO_ANSWERS;
}
MG_FREE(msg);
nc->flags |= MG_F_CLOSE_IMMEDIATELY;
break;
case MG_EV_SEND:
/*
* If a send error occurs, prevent closing of the connection by the core.
* We will retry after timeout.
*/
nc->flags &= ~MG_F_CLOSE_IMMEDIATELY;
mbuf_remove(&nc->send_mbuf, nc->send_mbuf.len);
break;
case MG_EV_TIMER:
req->err = MG_RESOLVE_TIMEOUT;
nc->flags |= MG_F_CLOSE_IMMEDIATELY;
break;
case MG_EV_CLOSE:
/* If we got here with request still not done, fire an error callback. */
if (req != NULL) {
req->callback(NULL, req->data, req->err);
nc->user_data = NULL;
MG_FREE(req);
}
break;
}
}
int mg_resolve_async(struct mg_mgr *mgr, const char *name, int query,
mg_resolve_callback_t cb, void *data) {
struct mg_resolve_async_opts opts;
memset(&opts, 0, sizeof(opts));
return mg_resolve_async_opt(mgr, name, query, cb, data, opts);
}
int mg_resolve_async_opt(struct mg_mgr *mgr, const char *name, int query,
mg_resolve_callback_t cb, void *data,
struct mg_resolve_async_opts opts) {
struct mg_resolve_async_request *req;
struct mg_connection *dns_nc;
const char *nameserver = opts.nameserver;
char dns_server_buff[17], nameserver_url[26];
if (nameserver == NULL) {
nameserver = mgr->nameserver;
}
DBG(("%s %d %p", name, query, opts.dns_conn));
/* resolve with DNS */
req = (struct mg_resolve_async_request *) MG_CALLOC(1, sizeof(*req));
if (req == NULL) {
return -1;
}
  strncpy(req->name, name, sizeof(req->name) - 1);
  req->name[sizeof(req->name) - 1] = '\0'; /* strncpy may not NUL-terminate */
req->query = query;
req->callback = cb;
req->data = data;
/* TODO(mkm): parse defaults out of resolve.conf */
req->max_retries = opts.max_retries ? opts.max_retries : 2;
req->timeout = opts.timeout ? opts.timeout : 5;
/* Lazily initialize dns server */
if (nameserver == NULL) {
if (mg_get_ip_address_of_nameserver(dns_server_buff,
sizeof(dns_server_buff)) != -1) {
nameserver = dns_server_buff;
} else {
nameserver = MG_DEFAULT_NAMESERVER;
}
}
snprintf(nameserver_url, sizeof(nameserver_url), "udp://%s:53", nameserver);
dns_nc = mg_connect(mgr, nameserver_url, MG_CB(mg_resolve_async_eh, NULL));
if (dns_nc == NULL) {
MG_FREE(req);
return -1;
}
dns_nc->user_data = req;
if (opts.dns_conn != NULL) {
*opts.dns_conn = dns_nc;
}
return 0;
}
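/*
 * Usage sketch (illustrative): resolve a name and extract the first A
 * record in the callback via mg_dns_parse_record_data(). `mgr` is assumed
 * to be an initialized mg_mgr.
 *
 *   static void resolve_cb(struct mg_dns_message *msg, void *data,
 *                          enum mg_resolve_err e) {
 *     struct in_addr ina;
 *     if (msg != NULL && e == MG_RESOLVE_OK &&
 *         mg_dns_parse_record_data(msg, &msg->answers[0], &ina,
 *                                  sizeof(ina)) == 0) {
 *       printf("resolved to %s\n", inet_ntoa(ina));
 *     }
 *   }
 *   ...
 *   mg_resolve_async(&mgr, "cesanta.com", MG_DNS_A_RECORD, resolve_cb, NULL);
 */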
void mg_set_nameserver(struct mg_mgr *mgr, const char *nameserver) {
MG_FREE((char *) mgr->nameserver);
if (nameserver != NULL) {
mgr->nameserver = strdup(nameserver);
}
}
#endif /* MG_ENABLE_ASYNC_RESOLVER */
#ifdef MG_MODULE_LINES
#line 1 "mongoose/src/coap.c"
#endif
/*
* Copyright (c) 2015 Cesanta Software Limited
* All rights reserved
* This software is dual-licensed: you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation. For the terms of this
* license, see <http://www.gnu.org/licenses/>.
*
* You are free to use this software under the terms of the GNU General
* Public License, but WITHOUT ANY WARRANTY; without even the implied
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* Alternatively, you can license this software under a commercial
* license, as set out in <https://www.cesanta.com/license>.
*/
/* Amalgamated: #include "mongoose/src/internal.h" */
/* Amalgamated: #include "mongoose/src/coap.h" */
#if MG_ENABLE_COAP
void mg_coap_free_options(struct mg_coap_message *cm) {
while (cm->options != NULL) {
struct mg_coap_option *next = cm->options->next;
MG_FREE(cm->options);
cm->options = next;
}
}
struct mg_coap_option *mg_coap_add_option(struct mg_coap_message *cm,
uint32_t number, char *value,
size_t len) {
  struct mg_coap_option *new_option =
      (struct mg_coap_option *) MG_CALLOC(1, sizeof(*new_option));
  if (new_option == NULL) {
    return NULL; /* out of memory */
  }
  new_option->number = number;
  new_option->value.p = value;
  new_option->value.len = len;
if (cm->options == NULL) {
cm->options = cm->optiomg_tail = new_option;
} else {
    /*
     * A very simple hint to help clients compose options:
     * CoAP wants to see options in ascending order.
     * Could be changed to a sort in coap_compose.
     */
if (cm->optiomg_tail->number <= new_option->number) {
/* if option is already ordered just add it */
cm->optiomg_tail = cm->optiomg_tail->next = new_option;
} else {
/* looking for appropriate position */
struct mg_coap_option *current_opt = cm->options;
struct mg_coap_option *prev_opt = 0;
while (current_opt != NULL) {
if (current_opt->number > new_option->number) {
break;
}
prev_opt = current_opt;
current_opt = current_opt->next;
}
if (prev_opt != NULL) {
prev_opt->next = new_option;
new_option->next = current_opt;
} else {
/* insert new_option to the beginning */
new_option->next = cm->options;
cm->options = new_option;
}
}
}
return new_option;
}
/*
* Fills CoAP header in mg_coap_message.
*
* Helper function.
*/
static char *coap_parse_header(char *ptr, struct mbuf *io,
struct mg_coap_message *cm) {
if (io->len < sizeof(uint32_t)) {
cm->flags |= MG_COAP_NOT_ENOUGH_DATA;
return NULL;
}
/*
* Version (Ver): 2-bit unsigned integer. Indicates the CoAP version
* number. Implementations of this specification MUST set this field
* to 1 (01 binary). Other values are reserved for future versions.
* Messages with unknown version numbers MUST be silently ignored.
*/
if (((uint8_t) *ptr >> 6) != 1) {
cm->flags |= MG_COAP_IGNORE;
return NULL;
}
/*
* Type (T): 2-bit unsigned integer. Indicates if this message is of
* type Confirmable (0), Non-confirmable (1), Acknowledgement (2), or
* Reset (3).
*/
cm->msg_type = ((uint8_t) *ptr & 0x30) >> 4;
cm->flags |= MG_COAP_MSG_TYPE_FIELD;
/*
* Token Length (TKL): 4-bit unsigned integer. Indicates the length of
* the variable-length Token field (0-8 bytes). Lengths 9-15 are
* reserved, MUST NOT be sent, and MUST be processed as a message
* format error.
*/
cm->token.len = *ptr & 0x0F;
if (cm->token.len > 8) {
cm->flags |= MG_COAP_FORMAT_ERROR;
return NULL;
}
ptr++;
/*
* Code: 8-bit unsigned integer, split into a 3-bit class (most
* significant bits) and a 5-bit detail (least significant bits)
*/
cm->code_class = (uint8_t) *ptr >> 5;
cm->code_detail = *ptr & 0x1F;
cm->flags |= (MG_COAP_CODE_CLASS_FIELD | MG_COAP_CODE_DETAIL_FIELD);
ptr++;
/* Message ID: 16-bit unsigned integer in network byte order. */
cm->msg_id = (uint8_t) *ptr << 8 | (uint8_t) * (ptr + 1);
cm->flags |= MG_COAP_MSG_ID_FIELD;
ptr += 2;
return ptr;
}
/*
* Fills token information in mg_coap_message.
*
* Helper function.
*/
static char *coap_get_token(char *ptr, struct mbuf *io,
struct mg_coap_message *cm) {
if (cm->token.len != 0) {
if (ptr + cm->token.len > io->buf + io->len) {
cm->flags |= MG_COAP_NOT_ENOUGH_DATA;
return NULL;
} else {
cm->token.p = ptr;
ptr += cm->token.len;
cm->flags |= MG_COAP_TOKEN_FIELD;
}
}
return ptr;
}
/*
* Returns Option Delta or Length.
*
* Helper function.
*/
static int coap_get_ext_opt(char *ptr, struct mbuf *io, uint16_t *opt_info) {
int ret = 0;
if (*opt_info == 13) {
/*
* 13: An 8-bit unsigned integer follows the initial byte and
* indicates the Option Delta/Length minus 13.
*/
if (ptr < io->buf + io->len) {
*opt_info = (uint8_t) *ptr + 13;
ret = sizeof(uint8_t);
} else {
ret = -1; /* LCOV_EXCL_LINE */
}
} else if (*opt_info == 14) {
/*
* 14: A 16-bit unsigned integer in network byte order follows the
* initial byte and indicates the Option Delta/Length minus 269.
*/
if (ptr + sizeof(uint8_t) < io->buf + io->len) {
*opt_info = ((uint8_t) *ptr << 8 | (uint8_t) * (ptr + 1)) + 269;
ret = sizeof(uint16_t);
} else {
ret = -1; /* LCOV_EXCL_LINE */
}
}
return ret;
}
/*
* Fills options in mg_coap_message.
*
* Helper function.
*
* General options format:
* +---------------+---------------+
* | Option Delta | Option Length | 1 byte
* +---------------+---------------+
* \ Option Delta (extended) \ 0-2 bytes
* +-------------------------------+
* / Option Length (extended) \ 0-2 bytes
* +-------------------------------+
* \ Option Value \ 0 or more bytes
* +-------------------------------+
*/
static char *coap_get_options(char *ptr, struct mbuf *io,
struct mg_coap_message *cm) {
uint16_t prev_opt = 0;
if (ptr == io->buf + io->len) {
/* end of packet, ok */
return NULL;
}
/* 0xFF is payload marker */
while (ptr < io->buf + io->len && (uint8_t) *ptr != 0xFF) {
    uint16_t option_delta, option_length;
int optinfo_len;
/* Option Delta: 4-bit unsigned integer */
option_delta = ((uint8_t) *ptr & 0xF0) >> 4;
/* Option Length: 4-bit unsigned integer */
    option_length = *ptr & 0x0F;
    if (option_delta == 15 || option_length == 15) {
/*
* 15: Reserved for future use. If the field is set to this value,
* it MUST be processed as a message format error
*/
cm->flags |= MG_COAP_FORMAT_ERROR;
break;
}
ptr++;
/* check for extended option delta */
optinfo_len = coap_get_ext_opt(ptr, io, &option_delta);
if (optinfo_len == -1) {
cm->flags |= MG_COAP_NOT_ENOUGH_DATA; /* LCOV_EXCL_LINE */
break; /* LCOV_EXCL_LINE */
}
ptr += optinfo_len;
    /* check for extended option length */
    optinfo_len = coap_get_ext_opt(ptr, io, &option_length);
if (optinfo_len == -1) {
cm->flags |= MG_COAP_NOT_ENOUGH_DATA; /* LCOV_EXCL_LINE */
break; /* LCOV_EXCL_LINE */
}
ptr += optinfo_len;
/*
* Instead of specifying the Option Number directly, the instances MUST
* appear in order of their Option Numbers and a delta encoding is used
* between them.
*/
option_delta += prev_opt;
    mg_coap_add_option(cm, option_delta, ptr, option_length);
prev_opt = option_delta;
    if (ptr + option_length > io->buf + io->len) {
cm->flags |= MG_COAP_NOT_ENOUGH_DATA; /* LCOV_EXCL_LINE */
break; /* LCOV_EXCL_LINE */
}
    ptr += option_length;
}
if ((cm->flags & MG_COAP_ERROR) != 0) {
mg_coap_free_options(cm);
return NULL;
}
cm->flags |= MG_COAP_OPTIOMG_FIELD;
if (ptr == io->buf + io->len) {
/* end of packet, ok */
return NULL;
}
ptr++;
return ptr;
}
uint32_t mg_coap_parse(struct mbuf *io, struct mg_coap_message *cm) {
char *ptr;
memset(cm, 0, sizeof(*cm));
if ((ptr = coap_parse_header(io->buf, io, cm)) == NULL) {
return cm->flags;
}
if ((ptr = coap_get_token(ptr, io, cm)) == NULL) {
return cm->flags;
}
if ((ptr = coap_get_options(ptr, io, cm)) == NULL) {
return cm->flags;
}
/* the rest is payload */
cm->payload.len = io->len - (ptr - io->buf);
if (cm->payload.len != 0) {
cm->payload.p = ptr;
cm->flags |= MG_COAP_PAYLOAD_FIELD;
}
return cm->flags;
}
/*
* Calculates extended size of given Opt Number/Length in coap message.
*
* Helper function.
*/
static size_t coap_get_ext_opt_size(uint32_t value) {
int ret = 0;
if (value >= 13 && value <= 0xFF + 13) {
ret = sizeof(uint8_t);
} else if (value > 0xFF + 13 && value <= 0xFFFF + 269) {
ret = sizeof(uint16_t);
}
return ret;
}
/*
* Splits given Opt Number/Length into base and ext values.
*
* Helper function.
*/
static int coap_split_opt(uint32_t value, uint8_t *base, uint16_t *ext) {
int ret = 0;
if (value < 13) {
*base = value;
} else if (value >= 13 && value <= 0xFF + 13) {
*base = 13;
*ext = value - 13;
ret = sizeof(uint8_t);
} else if (value > 0xFF + 13 && value <= 0xFFFF + 269) {
*base = 14;
*ext = value - 269;
ret = sizeof(uint16_t);
}
return ret;
}
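/*
 * Worked example for the two helpers above: for value = 300,
 * 300 > 0xFF + 13, so coap_split_opt() yields base = 14 and
 * ext = 300 - 269 = 31, and coap_get_ext_opt_size() reports that
 * sizeof(uint16_t) extra bytes are needed on the wire.
 */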
/*
* Puts uint16_t (in network order) into given char stream.
*
* Helper function.
*/
static char *coap_add_uint16(char *ptr, uint16_t val) {
*ptr = val >> 8;
ptr++;
*ptr = val & 0x00FF;
ptr++;
return ptr;
}
/*
* Puts extended value of Opt Number/Length into given char stream.
*
* Helper function.
*/
static char *coap_add_opt_info(char *ptr, uint16_t val, size_t len) {
if (len == sizeof(uint8_t)) {
*ptr = (char) val;
ptr++;
} else if (len == sizeof(uint16_t)) {
ptr = coap_add_uint16(ptr, val);
}
return ptr;
}
/*
* Verifies given mg_coap_message and calculates message size for it.
*
* Helper function.
*/
static uint32_t coap_calculate_packet_size(struct mg_coap_message *cm,
size_t *len) {
struct mg_coap_option *opt;
uint32_t prev_opt_number;
*len = 4; /* header */
if (cm->msg_type > MG_COAP_MSG_MAX) {
return MG_COAP_ERROR | MG_COAP_MSG_TYPE_FIELD;
}
if (cm->token.len > 8) {
return MG_COAP_ERROR | MG_COAP_TOKEN_FIELD;
}
if (cm->code_class > 7) {
return MG_COAP_ERROR | MG_COAP_CODE_CLASS_FIELD;
}
if (cm->code_detail > 31) {
return MG_COAP_ERROR | MG_COAP_CODE_DETAIL_FIELD;
}
*len += cm->token.len;
if (cm->payload.len != 0) {
*len += cm->payload.len + 1; /* ... + 1; add payload marker */
}
opt = cm->options;
prev_opt_number = 0;
while (opt != NULL) {
*len += 1; /* basic delta/length */
*len += coap_get_ext_opt_size(opt->number - prev_opt_number);
*len += coap_get_ext_opt_size((uint32_t) opt->value.len);
/*
* Current implementation performs check if
* option_number > previous option_number and produces an error
* TODO(alashkin): write design doc with limitations
* May be resorting is more suitable solution.
*/
if ((opt->next != NULL && opt->number > opt->next->number) ||
opt->value.len > 0xFFFF + 269 ||
opt->number - prev_opt_number > 0xFFFF + 269) {
return MG_COAP_ERROR | MG_COAP_OPTIOMG_FIELD;
}
*len += opt->value.len;
prev_opt_number = opt->number;
opt = opt->next;
}
return 0;
}
uint32_t mg_coap_compose(struct mg_coap_message *cm, struct mbuf *io) {
struct mg_coap_option *opt;
uint32_t res, prev_opt_number;
size_t prev_io_len, packet_size;
char *ptr;
res = coap_calculate_packet_size(cm, &packet_size);
if (res != 0) {
return res;
}
  /* save the previous length to handle a non-empty mbuf */
prev_io_len = io->len;
mbuf_append(io, NULL, packet_size);
ptr = io->buf + prev_io_len;
/*
* since cm is verified, it is possible to use bits shift operator
* without additional zeroing of unused bits
*/
/* ver: 2 bits, msg_type: 2 bits, toklen: 4 bits */
*ptr = (1 << 6) | (cm->msg_type << 4) | (uint8_t)(cm->token.len);
ptr++;
/* code class: 3 bits, code detail: 5 bits */
*ptr = (cm->code_class << 5) | (cm->code_detail);
ptr++;
ptr = coap_add_uint16(ptr, cm->msg_id);
if (cm->token.len != 0) {
memcpy(ptr, cm->token.p, cm->token.len);
ptr += cm->token.len;
}
opt = cm->options;
prev_opt_number = 0;
while (opt != NULL) {
uint8_t delta_base = 0, length_base = 0;
uint16_t delta_ext = 0, length_ext = 0;
size_t opt_delta_len =
coap_split_opt(opt->number - prev_opt_number, &delta_base, &delta_ext);
    size_t opt_length_len =
        coap_split_opt((uint32_t) opt->value.len, &length_base, &length_ext);
*ptr = (delta_base << 4) | length_base;
ptr++;
ptr = coap_add_opt_info(ptr, delta_ext, opt_delta_len);
    ptr = coap_add_opt_info(ptr, length_ext, opt_length_len);
if (opt->value.len != 0) {
memcpy(ptr, opt->value.p, opt->value.len);
ptr += opt->value.len;
}
prev_opt_number = opt->number;
opt = opt->next;
}
if (cm->payload.len != 0) {
*ptr = (char) -1;
ptr++;
memcpy(ptr, cm->payload.p, cm->payload.len);
}
return 0;
}
uint32_t mg_coap_send_message(struct mg_connection *nc,
struct mg_coap_message *cm) {
struct mbuf packet_out;
uint32_t compose_res;
mbuf_init(&packet_out, 0);
compose_res = mg_coap_compose(cm, &packet_out);
if (compose_res != 0) {
return compose_res; /* LCOV_EXCL_LINE */
}
mg_send(nc, packet_out.buf, (int) packet_out.len);
mbuf_free(&packet_out);
return 0;
}
uint32_t mg_coap_send_ack(struct mg_connection *nc, uint16_t msg_id) {
struct mg_coap_message cm;
memset(&cm, 0, sizeof(cm));
cm.msg_type = MG_COAP_MSG_ACK;
cm.msg_id = msg_id;
return mg_coap_send_message(nc, &cm);
}
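/*
 * Usage sketch (illustrative): a server handler acknowledging a
 * confirmable request. `nc` and `ev_data` come from the handler's
 * arguments.
 *
 *   case MG_COAP_EVENT_BASE + MG_COAP_MSG_CON: {
 *     struct mg_coap_message *cm = (struct mg_coap_message *) ev_data;
 *     mg_coap_send_ack(nc, cm->msg_id);
 *     break;
 *   }
 */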
static void coap_handler(struct mg_connection *nc, int ev,
void *ev_data MG_UD_ARG(void *user_data)) {
struct mbuf *io = &nc->recv_mbuf;
struct mg_coap_message cm;
uint32_t parse_res;
memset(&cm, 0, sizeof(cm));
nc->handler(nc, ev, ev_data MG_UD_ARG(user_data));
switch (ev) {
case MG_EV_RECV:
parse_res = mg_coap_parse(io, &cm);
if ((parse_res & MG_COAP_IGNORE) == 0) {
if ((cm.flags & MG_COAP_NOT_ENOUGH_DATA) != 0) {
/*
* Since we support UDP only
* MG_COAP_NOT_ENOUGH_DATA == MG_COAP_FORMAT_ERROR
*/
cm.flags |= MG_COAP_FORMAT_ERROR; /* LCOV_EXCL_LINE */
} /* LCOV_EXCL_LINE */
nc->handler(nc, MG_COAP_EVENT_BASE + cm.msg_type,
&cm MG_UD_ARG(user_data));
}
mg_coap_free_options(&cm);
mbuf_remove(io, io->len);
break;
}
}
/*
* Attach built-in CoAP event handler to the given connection.
*
 * The user-defined event handler will receive the following extra events:
*
* - MG_EV_COAP_CON
* - MG_EV_COAP_NOC
* - MG_EV_COAP_ACK
* - MG_EV_COAP_RST
*/
int mg_set_protocol_coap(struct mg_connection *nc) {
/* supports UDP only */
if ((nc->flags & MG_F_UDP) == 0) {
return -1;
}
nc->proto_handler = coap_handler;
return 0;
}
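/*
 * Usage sketch (illustrative; `mgr` and `coap_handler_fn` are assumed to
 * exist): CoAP runs over UDP, so connect with a udp:// address before
 * attaching the protocol handler.
 *
 *   struct mg_connection *nc =
 *       mg_connect(&mgr, "udp://coap.me:5683", MG_CB(coap_handler_fn, NULL));
 *   if (nc != NULL) mg_set_protocol_coap(nc);
 */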
#endif /* MG_ENABLE_COAP */
#ifdef MG_MODULE_LINES
#line 1 "mongoose/src/tun.c"
#endif
/*
* Copyright (c) 2014 Cesanta Software Limited
* All rights reserved
*/
#if MG_ENABLE_TUN
/* Amalgamated: #include "common/cs_dbg.h" */
/* Amalgamated: #include "mongoose/src/http.h" */
/* Amalgamated: #include "mongoose/src/internal.h" */
/* Amalgamated: #include "mongoose/src/net.h" */
/* Amalgamated: #include "mongoose/src/net_if_tun.h" */
/* Amalgamated: #include "mongoose/src/tun.h" */
/* Amalgamated: #include "mongoose/src/util.h" */
static void mg_tun_reconnect(struct mg_tun_client *client, int timeout);
static void mg_tun_init_client(struct mg_tun_client *client, struct mg_mgr *mgr,
struct mg_iface *iface, const char *dispatcher,
struct mg_tun_ssl_opts ssl) {
client->mgr = mgr;
client->iface = iface;
client->disp_url = dispatcher;
client->last_stream_id = 0;
client->ssl = ssl;
client->disp = NULL; /* will be set by mg_tun_reconnect */
client->listener = NULL; /* will be set by mg_do_bind */
client->reconnect = NULL; /* will be set by mg_tun_reconnect */
}
void mg_tun_log_frame(struct mg_tun_frame *frame) {
LOG(LL_DEBUG, ("Got TUN frame: type=0x%x, flags=0x%x stream_id=0x%lx, "
"len=%zu",
frame->type, frame->flags, frame->stream_id, frame->body.len));
#if MG_ENABLE_HEXDUMP
{
char hex[512];
mg_hexdump(frame->body.p, frame->body.len, hex, sizeof(hex) - 1);
hex[sizeof(hex) - 1] = '\0';
LOG(LL_DEBUG, ("body:\n%s", hex));
}
#else
LOG(LL_DEBUG, ("body: '%.*s'", (int) frame->body.len, frame->body.p));
#endif
}
static void mg_tun_close_all(struct mg_tun_client *client) {
struct mg_connection *nc;
for (nc = client->mgr->active_connections; nc != NULL; nc = nc->next) {
if (nc->iface == client->iface && !(nc->flags & MG_F_LISTENING)) {
LOG(LL_DEBUG, ("Closing tunneled connection %p", nc));
nc->flags |= MG_F_CLOSE_IMMEDIATELY;
/* mg_close_conn(nc); */
}
}
}
static void mg_tun_client_handler(struct mg_connection *nc, int ev,
void *ev_data MG_UD_ARG(void *user_data)) {
#if !MG_ENABLE_CALLBACK_USERDATA
void *user_data = nc->user_data;
#else
(void) nc;
#endif
struct mg_tun_client *client = (struct mg_tun_client *) user_data;
switch (ev) {
case MG_EV_CONNECT: {
int err = *(int *) ev_data;
if (err) {
LOG(LL_ERROR, ("Cannot connect to the tunnel dispatcher: %d", err));
} else {
LOG(LL_INFO, ("Connected to the tunnel dispatcher"));
}
break;
}
case MG_EV_HTTP_REPLY: {
struct http_message *hm = (struct http_message *) ev_data;
if (hm->resp_code != 200) {
LOG(LL_ERROR,
("Tunnel dispatcher reply non-OK status code %d", hm->resp_code));
}
break;
}
case MG_EV_WEBSOCKET_HANDSHAKE_DONE: {
LOG(LL_INFO, ("Tunnel dispatcher handshake done"));
break;
}
case MG_EV_WEBSOCKET_FRAME: {
struct websocket_message *wm = (struct websocket_message *) ev_data;
struct mg_connection *tc;
struct mg_tun_frame frame;
if (mg_tun_parse_frame(wm->data, wm->size, &frame) == -1) {
LOG(LL_ERROR, ("Got invalid tun frame dropping", wm->size));
break;
}
mg_tun_log_frame(&frame);
tc = mg_tun_if_find_conn(client, frame.stream_id);
if (tc == NULL) {
if (frame.body.len > 0) {
LOG(LL_DEBUG, ("Got frame after receiving end has been closed"));
}
break;
}
if (frame.body.len > 0) {
mg_if_recv_tcp_cb(tc, (void *) frame.body.p, frame.body.len,
0 /* own */);
}
if (frame.flags & MG_TUN_F_END_STREAM) {
LOG(LL_DEBUG, ("Closing tunneled connection because got end of stream "
"from other end"));
tc->flags |= MG_F_CLOSE_IMMEDIATELY;
mg_close_conn(tc);
}
break;
}
case MG_EV_CLOSE: {
LOG(LL_DEBUG, ("Closing all tunneled connections"));
/*
* The client might have been already freed when the listening socket is
* closed.
*/
if (client != NULL) {
mg_tun_close_all(client);
client->disp = NULL;
LOG(LL_INFO, ("Dispatcher connection is no more, reconnecting"));
/* TODO(mkm): implement exp back off */
mg_tun_reconnect(client, MG_TUN_RECONNECT_INTERVAL);
}
break;
}
default:
break;
}
}
static void mg_tun_do_reconnect(struct mg_tun_client *client) {
struct mg_connection *dc;
struct mg_connect_opts opts;
memset(&opts, 0, sizeof(opts));
#if MG_ENABLE_SSL
opts.ssl_cert = client->ssl.ssl_cert;
opts.ssl_key = client->ssl.ssl_key;
opts.ssl_ca_cert = client->ssl.ssl_ca_cert;
#endif
/* HTTP/Websocket listener */
if ((dc = mg_connect_ws_opt(client->mgr, MG_CB(mg_tun_client_handler, client),
opts, client->disp_url, MG_TUN_PROTO_NAME,
NULL)) == NULL) {
LOG(LL_ERROR,
("Cannot connect to WS server on addr [%s]\n", client->disp_url));
return;
}
client->disp = dc;
#if !MG_ENABLE_CALLBACK_USERDATA
dc->user_data = client;
#endif
}
void mg_tun_reconnect_ev_handler(struct mg_connection *nc, int ev,
void *ev_data MG_UD_ARG(void *user_data)) {
#if !MG_ENABLE_CALLBACK_USERDATA
void *user_data = nc->user_data;
#else
(void) nc;
#endif
struct mg_tun_client *client = (struct mg_tun_client *) user_data;
(void) ev_data;
switch (ev) {
case MG_EV_TIMER:
if (!(client->listener->flags & MG_F_TUN_DO_NOT_RECONNECT)) {
mg_tun_do_reconnect(client);
} else {
/* Reconnecting is suppressed, we'll check again at the next poll */
mg_tun_reconnect(client, 0);
}
break;
}
}
static void mg_tun_reconnect(struct mg_tun_client *client, int timeout) {
if (client->reconnect == NULL) {
client->reconnect = mg_add_sock(client->mgr, INVALID_SOCKET,
MG_CB(mg_tun_reconnect_ev_handler, client));
#if !MG_ENABLE_CALLBACK_USERDATA
client->reconnect->user_data = client;
#endif
}
client->reconnect->ev_timer_time = mg_time() + timeout;
}
static struct mg_tun_client *mg_tun_create_client(struct mg_mgr *mgr,
const char *dispatcher,
struct mg_tun_ssl_opts ssl) {
struct mg_tun_client *client = NULL;
struct mg_iface *iface = mg_find_iface(mgr, &mg_tun_iface_vtable, NULL);
if (iface == NULL) {
LOG(LL_ERROR, ("The tun feature requires the manager to have a tun "
"interface enabled"));
return NULL;
}
  client = (struct mg_tun_client *) MG_MALLOC(sizeof(*client));
  if (client == NULL) {
    LOG(LL_ERROR, ("Out of memory"));
    return NULL;
  }
  mg_tun_init_client(client, mgr, iface, dispatcher, ssl);
iface->data = client;
/*
* We need to give application a chance to set MG_F_TUN_DO_NOT_RECONNECT on a
* listening connection right after mg_tun_bind_opt() returned it, so we
* should use mg_tun_reconnect() here, instead of mg_tun_do_reconnect()
*/
mg_tun_reconnect(client, 0);
return client;
}
void mg_tun_destroy_client(struct mg_tun_client *client) {
  /*
   * NOTE:
   * `client` is NULL in case of OOM
   * `client->disp` is NULL if the connection failed
   * `client->iface` is NULL if `mg_find_iface` failed
   */
if (client != NULL && client->disp != NULL) {
/* the dispatcher connection handler will in turn close all tunnels */
client->disp->flags |= MG_F_CLOSE_IMMEDIATELY;
/* this is used as a signal to other tun handlers that the party is over */
client->disp->user_data = NULL;
}
if (client != NULL && client->reconnect != NULL) {
client->reconnect->flags |= MG_F_CLOSE_IMMEDIATELY;
}
if (client != NULL && client->iface != NULL) {
client->iface->data = NULL;
}
MG_FREE(client);
}
static struct mg_connection *mg_tun_do_bind(struct mg_tun_client *client,
MG_CB(mg_event_handler_t handler,
void *user_data),
struct mg_bind_opts opts) {
struct mg_connection *lc;
opts.iface = client->iface;
lc = mg_bind_opt(client->mgr, ":1234" /* dummy port */,
MG_CB(handler, user_data), opts);
client->listener = lc;
return lc;
}
struct mg_connection *mg_tun_bind_opt(struct mg_mgr *mgr,
const char *dispatcher,
MG_CB(mg_event_handler_t handler,
void *user_data),
struct mg_bind_opts opts) {
#if MG_ENABLE_SSL
struct mg_tun_ssl_opts ssl = {opts.ssl_cert, opts.ssl_key, opts.ssl_ca_cert};
#else
struct mg_tun_ssl_opts ssl = {0};
#endif
struct mg_tun_client *client = mg_tun_create_client(mgr, dispatcher, ssl);
if (client == NULL) {
return NULL;
}
#if MG_ENABLE_SSL
/* these options don't make sense in the local mouth of the tunnel */
opts.ssl_cert = NULL;
opts.ssl_key = NULL;
opts.ssl_ca_cert = NULL;
#endif
return mg_tun_do_bind(client, MG_CB(handler, user_data), opts);
}
int mg_tun_parse_frame(void *data, size_t len, struct mg_tun_frame *frame) {
const size_t header_size = sizeof(uint32_t) + sizeof(uint8_t) * 2;
if (len < header_size) {
return -1;
}
frame->type = *(uint8_t *) (data);
frame->flags = *(uint8_t *) ((char *) data + 1);
memcpy(&frame->stream_id, (char *) data + 2, sizeof(uint32_t));
frame->stream_id = ntohl(frame->stream_id);
frame->body.p = (char *) data + header_size;
frame->body.len = len - header_size;
return 0;
}
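/*
 * Wire format parsed above: a 1-byte type, a 1-byte flags field, a 4-byte
 * big-endian stream id, then the body. For example, the 6-byte frame
 * "\x01\x00\x00\x00\x00\x2a" is type 1, flags 0, stream_id 42, empty body.
 */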
void mg_tun_send_frame(struct mg_connection *ws, uint32_t stream_id,
uint8_t type, uint8_t flags, struct mg_str msg) {
stream_id = htonl(stream_id);
{
struct mg_str parts[] = {
{(char *) &type, sizeof(type)},
{(char *) &flags, sizeof(flags)},
{(char *) &stream_id, sizeof(stream_id)},
{msg.p, msg.len} /* vc6 doesn't like just `msg` here */};
mg_send_websocket_framev(ws, WEBSOCKET_OP_BINARY, parts,
sizeof(parts) / sizeof(parts[0]));
}
}
#endif /* MG_ENABLE_TUN */
#ifdef MG_MODULE_LINES
#line 1 "mongoose/src/sntp.c"
#endif
/*
* Copyright (c) 2016 Cesanta Software Limited
* All rights reserved
*/
/* Amalgamated: #include "mongoose/src/internal.h" */
/* Amalgamated: #include "mongoose/src/sntp.h" */
/* Amalgamated: #include "mongoose/src/util.h" */
#if MG_ENABLE_SNTP
#define SNTP_TIME_OFFSET 2208988800
#ifndef SNTP_TIMEOUT
#define SNTP_TIMEOUT 10
#endif
#ifndef SNTP_ATTEMPTS
#define SNTP_ATTEMPTS 3
#endif
static uint64_t mg_get_sec(uint64_t val) {
return (val & 0xFFFFFFFF00000000) >> 32;
}
static uint64_t mg_get_usec(uint64_t val) {
uint64_t tmp = (val & 0x00000000FFFFFFFF);
tmp *= 1000000;
tmp >>= 32;
return tmp;
}
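/*
 * Example of the fixed-point split above: NTP timestamps carry seconds in
 * the high 32 bits and a binary fraction in the low 32 bits, so for
 * val = 0x0000000180000000 (1.5 s), mg_get_sec() returns 1 and
 * mg_get_usec() returns 500000.
 */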
static void mg_ntp_to_tv(uint64_t val, struct timeval *tv) {
uint64_t tmp;
tmp = mg_get_sec(val);
tmp -= SNTP_TIME_OFFSET;
tv->tv_sec = tmp;
tv->tv_usec = mg_get_usec(val);
}
static void mg_get_ntp_ts(const char *ntp, uint64_t *val) {
uint32_t tmp;
memcpy(&tmp, ntp, sizeof(tmp));
tmp = ntohl(tmp);
*val = (uint64_t) tmp << 32;
memcpy(&tmp, ntp + 4, sizeof(tmp));
tmp = ntohl(tmp);
*val |= tmp;
}
void mg_sntp_send_request(struct mg_connection *c) {
char buf[48] = {0};
/*
* header - 8 bit:
* LI (2 bit) - 3 (not in sync), VN (3 bit) - 4 (version),
* mode (3 bit) - 3 (client)
*/
buf[0] = (3 << 6) | (4 << 3) | 3;
/*
* Next fields should be empty in client request
* stratum, 8 bit
* poll interval, 8 bit
   * precision, 8 bit
* root delay, 32 bit
* root dispersion, 32 bit
* ref id, 32 bit
* ref timestamp, 64 bit
* originate Timestamp, 64 bit
* receive Timestamp, 64 bit
*/
/*
* convert time to sntp format (sntp starts from 00:00:00 01.01.1900)
* according to rfc868 it is 2208988800L sec
* this information is used to correct roundtrip delay
* but if local clock is absolutely broken (and doesn't work even
* as simple timer), it is better to disable it
*/
#ifndef MG_SNMP_NO_DELAY_CORRECTION
uint32_t sec;
sec = htonl((uint32_t)(mg_time() + SNTP_TIME_OFFSET));
memcpy(&buf[40], &sec, sizeof(sec));
#endif
mg_send(c, buf, sizeof(buf));
}
#ifndef MG_SNMP_NO_DELAY_CORRECTION
static uint64_t mg_calculate_delay(uint64_t t1, uint64_t t2, uint64_t t3) {
  /* round-trip delay = (T4 - T1) - (T3 - T2) */
uint64_t d1 = ((mg_time() + SNTP_TIME_OFFSET) * 1000000) -
(mg_get_sec(t1) * 1000000 + mg_get_usec(t1));
uint64_t d2 = (mg_get_sec(t3) * 1000000 + mg_get_usec(t3)) -
(mg_get_sec(t2) * 1000000 + mg_get_usec(t2));
return (d1 > d2) ? d1 - d2 : 0;
}
#endif
MG_INTERNAL int mg_sntp_parse_reply(const char *buf, int len,
struct mg_sntp_message *msg) {
uint8_t hdr;
uint64_t orig_ts_T1, recv_ts_T2, trsm_ts_T3, delay = 0;
int mode;
struct timeval tv;
(void) orig_ts_T1;
(void) recv_ts_T2;
if (len < 48) {
return -1;
}
hdr = buf[0];
if ((hdr & 0x38) >> 3 != 4) {
/* Wrong version */
return -1;
}
mode = hdr & 0x7;
if (mode != 4 && mode != 5) {
/* Not a server reply */
return -1;
}
memset(msg, 0, sizeof(*msg));
msg->kiss_of_death = (buf[1] == 0); /* Server asks to not send requests */
mg_get_ntp_ts(&buf[40], &trsm_ts_T3);
#ifndef MG_SNMP_NO_DELAY_CORRECTION
mg_get_ntp_ts(&buf[24], &orig_ts_T1);
mg_get_ntp_ts(&buf[32], &recv_ts_T2);
delay = mg_calculate_delay(orig_ts_T1, recv_ts_T2, trsm_ts_T3);
#endif
mg_ntp_to_tv(trsm_ts_T3, &tv);
msg->time = (double) tv.tv_sec + (((double) tv.tv_usec + delay) / 1000000.0);
return 0;
}
static void mg_sntp_handler(struct mg_connection *c, int ev,
void *ev_data MG_UD_ARG(void *user_data)) {
struct mbuf *io = &c->recv_mbuf;
struct mg_sntp_message msg;
c->handler(c, ev, ev_data MG_UD_ARG(user_data));
switch (ev) {
case MG_EV_RECV: {
if (mg_sntp_parse_reply(io->buf, io->len, &msg) < 0) {
DBG(("Invalid SNTP packet received (%d)", (int) io->len));
c->handler(c, MG_SNTP_MALFORMED_REPLY, NULL MG_UD_ARG(user_data));
} else {
c->handler(c, MG_SNTP_REPLY, (void *) &msg MG_UD_ARG(user_data));
}
mbuf_remove(io, io->len);
break;
}
}
}
int mg_set_protocol_sntp(struct mg_connection *c) {
if ((c->flags & MG_F_UDP) == 0) {
return -1;
}
c->proto_handler = mg_sntp_handler;
return 0;
}
struct mg_connection *mg_sntp_connect(struct mg_mgr *mgr,
MG_CB(mg_event_handler_t event_handler,
void *user_data),
const char *sntp_server_name) {
struct mg_connection *c = NULL;
char url[100], *p_url = url;
const char *proto = "", *port = "", *tmp;
/* If port is not specified, use default (123) */
tmp = strchr(sntp_server_name, ':');
if (tmp != NULL && *(tmp + 1) == '/') {
tmp = strchr(tmp + 1, ':');
}
if (tmp == NULL) {
port = ":123";
}
/* Add udp:// if needed */
if (strncmp(sntp_server_name, "udp://", 6) != 0) {
proto = "udp://";
}
mg_asprintf(&p_url, sizeof(url), "%s%s%s", proto, sntp_server_name, port);
c = mg_connect(mgr, p_url, event_handler MG_UD_ARG(user_data));
if (c == NULL) {
goto cleanup;
}
mg_set_protocol_sntp(c);
cleanup:
if (p_url != url) {
MG_FREE(p_url);
}
return c;
}
struct sntp_data {
  mg_event_handler_t handler;
int count;
};
static void mg_sntp_util_ev_handler(struct mg_connection *c, int ev,
void *ev_data MG_UD_ARG(void *user_data)) {
#if !MG_ENABLE_CALLBACK_USERDATA
void *user_data = c->user_data;
#endif
struct sntp_data *sd = (struct sntp_data *) user_data;
switch (ev) {
case MG_EV_CONNECT:
if (*(int *) ev_data != 0) {
        mg_call(c, sd->handler, c->user_data, MG_SNTP_FAILED, NULL);
break;
}
/* fallthrough */
case MG_EV_TIMER:
if (sd->count <= SNTP_ATTEMPTS) {
mg_sntp_send_request(c);
        mg_set_timer(c, mg_time() + SNTP_TIMEOUT);
sd->count++;
} else {
        mg_call(c, sd->handler, c->user_data, MG_SNTP_FAILED, NULL);
c->flags |= MG_F_CLOSE_IMMEDIATELY;
}
break;
case MG_SNTP_MALFORMED_REPLY:
      mg_call(c, sd->handler, c->user_data, MG_SNTP_FAILED, NULL);
c->flags |= MG_F_CLOSE_IMMEDIATELY;
break;
case MG_SNTP_REPLY:
      mg_call(c, sd->handler, c->user_data, MG_SNTP_REPLY, ev_data);
c->flags |= MG_F_CLOSE_IMMEDIATELY;
break;
case MG_EV_CLOSE:
MG_FREE(user_data);
c->user_data = NULL;
break;
}
}
struct mg_connection *mg_sntp_get_time(struct mg_mgr *mgr,
mg_event_handler_t event_handler,
const char *sntp_server_name) {
struct mg_connection *c;
struct sntp_data *sd = (struct sntp_data *) MG_CALLOC(1, sizeof(*sd));
if (sd == NULL) {
return NULL;
}
c = mg_sntp_connect(mgr, MG_CB(mg_sntp_util_ev_handler, sd),
sntp_server_name);
if (c == NULL) {
MG_FREE(sd);
return NULL;
}
  sd->handler = event_handler;
#if !MG_ENABLE_CALLBACK_USERDATA
c->user_data = sd;
#endif
return c;
}
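/*
 * Usage sketch (illustrative; the exact handler signature depends on
 * MG_ENABLE_CALLBACK_USERDATA): fetch the time once and read it from the
 * MG_SNTP_REPLY event.
 *
 *   static void time_cb(struct mg_connection *c, int ev, void *ev_data) {
 *     if (ev == MG_SNTP_REPLY) {
 *       struct mg_sntp_message *m = (struct mg_sntp_message *) ev_data;
 *       printf("time: %lf\n", m->time);
 *     }
 *   }
 *   ...
 *   mg_sntp_get_time(&mgr, time_cb, "pool.ntp.org");
 */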
#endif /* MG_ENABLE_SNTP */
#ifdef MG_MODULE_LINES
#line 1 "common/platforms/cc3200/cc3200_libc.c"
#endif
/*
* Copyright (c) 2014-2016 Cesanta Software Limited
* All rights reserved
*/
#if CS_PLATFORM == CS_P_CC3200
/* Amalgamated: #include "common/mg_mem.h" */
#include <stdio.h>
#include <string.h>
#ifndef __TI_COMPILER_VERSION__
#include <reent.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <unistd.h>
#endif
#include <inc/hw_types.h>
#include <inc/hw_memmap.h>
#include <driverlib/prcm.h>
#include <driverlib/rom.h>
#include <driverlib/rom_map.h>
#include <driverlib/uart.h>
#include <driverlib/utils.h>
#define CONSOLE_UART UARTA0_BASE
#ifdef __TI_COMPILER_VERSION__
int asprintf(char **strp, const char *fmt, ...) {
va_list ap;
int len;
*strp = MG_MALLOC(BUFSIZ);
if (*strp == NULL) return -1;
va_start(ap, fmt);
len = vsnprintf(*strp, BUFSIZ, fmt, ap);
va_end(ap);
  if (len > 0) {
    char *p = (char *) MG_REALLOC(*strp, len + 1);
    if (p == NULL) {
      MG_FREE(*strp); /* don't leak the original buffer on realloc failure */
      *strp = NULL;
      return -1;
    }
    *strp = p;
  }
if (len >= BUFSIZ) {
va_start(ap, fmt);
len = vsnprintf(*strp, len + 1, fmt, ap);
va_end(ap);
}
return len;
}
#if MG_TI_NO_HOST_INTERFACE
time_t HOSTtime() {
struct timeval tp;
gettimeofday(&tp, NULL);
return tp.tv_sec;
}
#endif
#endif /* __TI_COMPILER_VERSION__ */
#ifndef __TI_COMPILER_VERSION__
int _gettimeofday_r(struct _reent *r, struct timeval *tp, void *tzp) {
#else
int gettimeofday(struct timeval *tp, void *tzp) {
#endif
unsigned long long r1 = 0, r2;
/* Achieve two consecutive reads of the same value. */
do {
r2 = r1;
r1 = PRCMSlowClkCtrFastGet();
} while (r1 != r2);
/* This is a 32768 Hz counter. */
tp->tv_sec = (r1 >> 15);
/* 1/32768-th of a second is 30.517578125 microseconds, approx. 31,
* but we round down so it doesn't overflow at 32767 */
tp->tv_usec = (r1 & 0x7FFF) * 30;
return 0;
}
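/*
 * Example of the conversion above: r1 = 98304 (3 * 32768 ticks) gives
 * tv_sec = 3 and tv_usec = 0; the worst case (r1 & 0x7FFF) = 32767 gives
 * tv_usec = 983010, safely below 1000000.
 */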
void fprint_str(FILE *fp, const char *str) {
while (*str != '\0') {
if (*str == '\n') MAP_UARTCharPut(CONSOLE_UART, '\r');
MAP_UARTCharPut(CONSOLE_UART, *str++);
}
}
void _exit(int status) {
fprint_str(stderr, "_exit\n");
/* cause an unaligned access exception, that will drop you into gdb */
*(int *) 1 = status;
while (1)
; /* avoid gcc warning because stdlib abort() has noreturn attribute */
}
void _not_implemented(const char *what) {
fprint_str(stderr, what);
fprint_str(stderr, " is not implemented\n");
_exit(42);
}
int _kill(int pid, int sig) {
(void) pid;
(void) sig;
_not_implemented("_kill");
return -1;
}
int _getpid() {
fprint_str(stderr, "_getpid is not implemented\n");
return 42;
}
int _isatty(int fd) {
/* 0, 1 and 2 are TTYs. */
  return fd <= 2;
}
#endif /* CS_PLATFORM == CS_P_CC3200 */
#ifdef MG_MODULE_LINES
#line 1 "common/platforms/msp432/msp432_libc.c"
#endif
/*
* Copyright (c) 2014-2016 Cesanta Software Limited
* All rights reserved
*/
#if CS_PLATFORM == CS_P_MSP432
#include <ti/sysbios/BIOS.h>
#include <ti/sysbios/knl/Clock.h>
int gettimeofday(struct timeval *tp, void *tzp) {
uint32_t ticks = Clock_getTicks();
tp->tv_sec = ticks / 1000;
tp->tv_usec = (ticks % 1000) * 1000;
return 0;
}
#endif /* CS_PLATFORM == CS_P_MSP432 */
#ifdef MG_MODULE_LINES
#line 1 "common/platforms/nrf5/nrf5_libc.c"
#endif
/*
* Copyright (c) 2014-2016 Cesanta Software Limited
* All rights reserved
*/
#if (CS_PLATFORM == CS_P_NRF51 || CS_PLATFORM == CS_P_NRF52) && \
defined(__ARMCC_VERSION)
int gettimeofday(struct timeval *tp, void *tzp) {
/* TODO */
tp->tv_sec = 0;
tp->tv_usec = 0;
return 0;
}
#endif
#ifdef MG_MODULE_LINES
#line 1 "common/platforms/simplelink/sl_fs_slfs.h"
#endif
/*
* Copyright (c) 2014-2016 Cesanta Software Limited
* All rights reserved
*/
#ifndef CS_COMMON_PLATFORMS_SIMPLELINK_SL_FS_SLFS_H_
#define CS_COMMON_PLATFORMS_SIMPLELINK_SL_FS_SLFS_H_
#if defined(MG_FS_SLFS)
#include <stdio.h>
#ifndef __TI_COMPILER_VERSION__
#include <unistd.h>
#include <sys/stat.h>
#endif
#define MAX_OPEN_SLFS_FILES 8
/* Indirect libc interface - same functions, different names. */
int fs_slfs_open(const char *pathname, int flags, mode_t mode);
int fs_slfs_close(int fd);
ssize_t fs_slfs_read(int fd, void *buf, size_t count);
ssize_t fs_slfs_write(int fd, const void *buf, size_t count);
int fs_slfs_stat(const char *pathname, struct stat *s);
int fs_slfs_fstat(int fd, struct stat *s);
off_t fs_slfs_lseek(int fd, off_t offset, int whence);
int fs_slfs_unlink(const char *filename);
int fs_slfs_rename(const char *from, const char *to);
void fs_slfs_set_new_file_size(const char *name, size_t size);
#endif /* defined(MG_FS_SLFS) */
#endif /* CS_COMMON_PLATFORMS_SIMPLELINK_SL_FS_SLFS_H_ */
#ifdef MG_MODULE_LINES
#line 1 "common/platforms/simplelink/sl_fs_slfs.c"
#endif
/*
* Copyright (c) 2014-2016 Cesanta Software Limited
* All rights reserved
*/
/* Standard libc interface to TI SimpleLink FS. */
#if defined(MG_FS_SLFS) || defined(CC3200_FS_SLFS)
/* Amalgamated: #include "common/platforms/simplelink/sl_fs_slfs.h" */
#include <errno.h>
#if CS_PLATFORM == CS_P_CC3200
#include <inc/hw_types.h>
#endif
#include <simplelink/include/simplelink.h>
#include <simplelink/include/fs.h>
/* Amalgamated: #include "common/cs_dbg.h" */
/* Amalgamated: #include "common/mg_mem.h" */
/* From sl_fs.c */
extern int set_errno(int e);
static const char *drop_dir(const char *fname, bool *is_slfs);
/*
* With SLFS, you have to pre-declare max file size. Yes. Really.
* 64K should be enough for everyone. Right?
*/
#ifndef FS_SLFS_MAX_FILE_SIZE
#define FS_SLFS_MAX_FILE_SIZE (64 * 1024)
#endif
struct sl_file_size_hint {
char *name;
size_t size;
};
struct sl_fd_info {
_i32 fh;
_off_t pos;
size_t size;
};
static struct sl_fd_info s_sl_fds[MAX_OPEN_SLFS_FILES];
static struct sl_file_size_hint s_sl_file_size_hints[MAX_OPEN_SLFS_FILES];
static int sl_fs_to_errno(_i32 r) {
DBG(("SL error: %d", (int) r));
switch (r) {
case SL_FS_OK:
return 0;
case SL_FS_FILE_NAME_EXIST:
return EEXIST;
case SL_FS_WRONG_FILE_NAME:
return EINVAL;
case SL_FS_ERR_NO_AVAILABLE_NV_INDEX:
case SL_FS_ERR_NO_AVAILABLE_BLOCKS:
return ENOSPC;
case SL_FS_ERR_FAILED_TO_ALLOCATE_MEM:
return ENOMEM;
case SL_FS_ERR_FILE_NOT_EXISTS:
return ENOENT;
case SL_FS_ERR_NOT_SUPPORTED:
return ENOTSUP;
}
return ENXIO;
}
int fs_slfs_open(const char *pathname, int flags, mode_t mode) {
int fd;
for (fd = 0; fd < MAX_OPEN_SLFS_FILES; fd++) {
if (s_sl_fds[fd].fh <= 0) break;
}
if (fd >= MAX_OPEN_SLFS_FILES) return set_errno(ENOMEM);
struct sl_fd_info *fi = &s_sl_fds[fd];
/*
* Apply path manipulations again, in case we got here directly
* (via TI libc's "add_device").
*/
pathname = drop_dir(pathname, NULL);
_u32 am = 0;
fi->size = (size_t) -1;
int rw = (flags & 3);
if (rw == O_RDONLY) {
SlFsFileInfo_t sl_fi;
_i32 r = sl_FsGetInfo((const _u8 *) pathname, 0, &sl_fi);
if (r == SL_FS_OK) {
fi->size = sl_fi.FileLen;
}
am = FS_MODE_OPEN_READ;
} else {
if (!(flags & O_TRUNC) || (flags & O_APPEND)) {
// FailFS files cannot be opened for append and will be truncated
// when opened for write.
return set_errno(ENOTSUP);
}
if (flags & O_CREAT) {
size_t i, size = FS_SLFS_MAX_FILE_SIZE;
for (i = 0; i < MAX_OPEN_SLFS_FILES; i++) {
if (s_sl_file_size_hints[i].name != NULL &&
strcmp(s_sl_file_size_hints[i].name, pathname) == 0) {
size = s_sl_file_size_hints[i].size;
MG_FREE(s_sl_file_size_hints[i].name);
s_sl_file_size_hints[i].name = NULL;
break;
}
}
DBG(("creating %s with max size %d", pathname, (int) size));
am = FS_MODE_OPEN_CREATE(size, 0);
} else {
am = FS_MODE_OPEN_WRITE;
}
}
_i32 r = sl_FsOpen((_u8 *) pathname, am, NULL, &fi->fh);
DBG(("sl_FsOpen(%s, 0x%x) = %d, %d", pathname, (int) am, (int) r,
(int) fi->fh));
if (r == SL_FS_OK) {
fi->pos = 0;
r = fd;
} else {
fi->fh = -1;
r = set_errno(sl_fs_to_errno(r));
}
return r;
}
int fs_slfs_close(int fd) {
struct sl_fd_info *fi = &s_sl_fds[fd];
if (fi->fh <= 0) return set_errno(EBADF);
_i32 r = sl_FsClose(fi->fh, NULL, NULL, 0);
DBG(("sl_FsClose(%d) = %d", (int) fi->fh, (int) r));
s_sl_fds[fd].fh = -1;
return set_errno(sl_fs_to_errno(r));
}
ssize_t fs_slfs_read(int fd, void *buf, size_t count) {
struct sl_fd_info *fi = &s_sl_fds[fd];
if (fi->fh <= 0) return set_errno(EBADF);
  /*
   * Simulate EOF: sl_FsRead at file_size returns
   * SL_FS_ERR_OFFSET_OUT_OF_RANGE.
   */
if (fi->pos == fi->size) return 0;
_i32 r = sl_FsRead(fi->fh, fi->pos, buf, count);
DBG(("sl_FsRead(%d, %d, %d) = %d", (int) fi->fh, (int) fi->pos, (int) count,
(int) r));
if (r >= 0) {
fi->pos += r;
return r;
}
return set_errno(sl_fs_to_errno(r));
}
ssize_t fs_slfs_write(int fd, const void *buf, size_t count) {
struct sl_fd_info *fi = &s_sl_fds[fd];
if (fi->fh <= 0) return set_errno(EBADF);
_i32 r = sl_FsWrite(fi->fh, fi->pos, (_u8 *) buf, count);
DBG(("sl_FsWrite(%d, %d, %d) = %d", (int) fi->fh, (int) fi->pos, (int) count,
(int) r));
if (r >= 0) {
fi->pos += r;
return r;
}
return set_errno(sl_fs_to_errno(r));
}
int fs_slfs_stat(const char *pathname, struct stat *s) {
SlFsFileInfo_t sl_fi;
/*
* Apply path manipulations again, in case we got here directly
* (via TI libc's "add_device").
*/
pathname = drop_dir(pathname, NULL);
_i32 r = sl_FsGetInfo((const _u8 *) pathname, 0, &sl_fi);
if (r == SL_FS_OK) {
s->st_mode = S_IFREG | 0666;
s->st_nlink = 1;
s->st_size = sl_fi.FileLen;
return 0;
}
return set_errno(sl_fs_to_errno(r));
}
int fs_slfs_fstat(int fd, struct stat *s) {
struct sl_fd_info *fi = &s_sl_fds[fd];
if (fi->fh <= 0) return set_errno(EBADF);
  s->st_mode = S_IFREG | 0666;
s->st_nlink = 1;
s->st_size = fi->size;
return 0;
}
off_t fs_slfs_lseek(int fd, off_t offset, int whence) {
if (s_sl_fds[fd].fh <= 0) return set_errno(EBADF);
switch (whence) {
case SEEK_SET:
s_sl_fds[fd].pos = offset;
break;
case SEEK_CUR:
s_sl_fds[fd].pos += offset;
break;
case SEEK_END:
return set_errno(ENOTSUP);
}
return 0;
}
int fs_slfs_unlink(const char *pathname) {
/*
* Apply path manipulations again, in case we got here directly
* (via TI libc's "add_device").
*/
pathname = drop_dir(pathname, NULL);
return set_errno(sl_fs_to_errno(sl_FsDel((const _u8 *) pathname, 0)));
}
int fs_slfs_rename(const char *from, const char *to) {
return set_errno(ENOTSUP);
}
void fs_slfs_set_new_file_size(const char *name, size_t size) {
int i;
for (i = 0; i < MAX_OPEN_SLFS_FILES; i++) {
if (s_sl_file_size_hints[i].name == NULL) {
DBG(("File size hint: %s %d", name, (int) size));
s_sl_file_size_hints[i].name = strdup(name);
s_sl_file_size_hints[i].size = size;
break;
}
}
}
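/*
 * Usage sketch (illustrative; "data.bin" is a hypothetical file): register
 * the size hint under the name fs_slfs_open() will see, i.e. after
 * drop_dir() has stripped the "SL:" prefix, then create the file:
 *
 *   fs_slfs_set_new_file_size("data.bin", 12345);
 *   int fd = open("SL:data.bin", O_WRONLY | O_CREAT | O_TRUNC, 0);
 */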
#endif /* defined(MG_FS_SLFS) || defined(CC3200_FS_SLFS) */
#ifdef MG_MODULE_LINES
#line 1 "common/platforms/simplelink/sl_fs.c"
#endif
/*
* Copyright (c) 2014-2016 Cesanta Software Limited
* All rights reserved
*/
#if MG_NET_IF == MG_NET_IF_SIMPLELINK && \
(defined(MG_FS_SLFS) || defined(MG_FS_SPIFFS))
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef __TI_COMPILER_VERSION__
#include <file.h>
#endif
/* Amalgamated: #include "common/cs_dbg.h" */
/* Amalgamated: #include "common/platform.h" */
#ifdef CC3200_FS_SPIFFS
/* Amalgamated: #include "cc3200_fs_spiffs.h" */
#endif
#ifdef MG_FS_SLFS
/* Amalgamated: #include "sl_fs_slfs.h" */
#endif
#define NUM_SYS_FDS 3
#define SPIFFS_FD_BASE 10
#define SLFS_FD_BASE 100
#ifndef MG_UART_CHAR_PUT
#if CS_PLATFORM == CS_P_CC3200
#include <inc/hw_types.h>
#include <inc/hw_memmap.h>
#include <driverlib/rom.h>
#include <driverlib/rom_map.h>
#include <driverlib/uart.h>
#define MG_UART_CHAR_PUT(fd, c) MAP_UARTCharPut(UARTA0_BASE, c);
#else
#define MG_UART_CHAR_PUT(fd, c)
#endif /* CS_PLATFORM == CS_P_CC3200 */
#endif /* !MG_UART_CHAR_PUT */
int set_errno(int e) {
errno = e;
return (e == 0 ? 0 : -1);
}
static const char *drop_dir(const char *fname, bool *is_slfs) {
if (is_slfs != NULL) {
*is_slfs = (strncmp(fname, "SL:", 3) == 0);
if (*is_slfs) fname += 3;
}
/* Drop "./", if any */
if (fname[0] == '.' && fname[1] == '/') {
fname += 2;
}
/*
* Drop / if it is the only one in the path.
* This allows use of /pretend/directories but serves /file.txt as normal.
*/
if (fname[0] == '/' && strchr(fname + 1, '/') == NULL) {
fname++;
}
return fname;
}
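/*
 * Worked examples: drop_dir("SL:/data.bin", &is) yields "data.bin" with
 * is == true; drop_dir("./index.html", &is) yields "index.html" with
 * is == false; drop_dir("/a/b.txt", &is) keeps the leading slash because
 * the path contains more than one '/'.
 */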
enum fd_type {
FD_INVALID,
FD_SYS,
#ifdef CC3200_FS_SPIFFS
FD_SPIFFS,
#endif
#ifdef MG_FS_SLFS
FD_SLFS
#endif
};
static int fd_type(int fd) {
if (fd >= 0 && fd < NUM_SYS_FDS) return FD_SYS;
#ifdef CC3200_FS_SPIFFS
if (fd >= SPIFFS_FD_BASE && fd < SPIFFS_FD_BASE + MAX_OPEN_SPIFFS_FILES) {
return FD_SPIFFS;
}
#endif
#ifdef MG_FS_SLFS
if (fd >= SLFS_FD_BASE && fd < SLFS_FD_BASE + MAX_OPEN_SLFS_FILES) {
return FD_SLFS;
}
#endif
return FD_INVALID;
}
#if MG_TI_NO_HOST_INTERFACE
int open(const char *pathname, unsigned flags, int mode) {
#else
int _open(const char *pathname, int flags, mode_t mode) {
#endif
int fd = -1;
bool is_sl;
const char *fname = drop_dir(pathname, &is_sl);
if (is_sl) {
#ifdef MG_FS_SLFS
fd = fs_slfs_open(fname, flags, mode);
if (fd >= 0) fd += SLFS_FD_BASE;
#endif
} else {
#ifdef CC3200_FS_SPIFFS
fd = fs_spiffs_open(fname, flags, mode);
if (fd >= 0) fd += SPIFFS_FD_BASE;
#endif
}
LOG(LL_DEBUG,
("open(%s, 0x%x) = %d, fname = %s", pathname, flags, fd, fname));
return fd;
}
int _stat(const char *pathname, struct stat *st) {
int res = -1;
bool is_sl;
const char *fname = drop_dir(pathname, &is_sl);
memset(st, 0, sizeof(*st));
/* Simulate statting the root directory. */
if (fname[0] == '\0' || strcmp(fname, ".") == 0) {
st->st_ino = 0;
st->st_mode = S_IFDIR | 0777;
st->st_nlink = 1;
st->st_size = 0;
return 0;
}
if (is_sl) {
#ifdef MG_FS_SLFS
res = fs_slfs_stat(fname, st);
#endif
} else {
#ifdef CC3200_FS_SPIFFS
res = fs_spiffs_stat(fname, st);
#endif
}
LOG(LL_DEBUG, ("stat(%s) = %d; fname = %s", pathname, res, fname));
return res;
}
#if MG_TI_NO_HOST_INTERFACE
int close(int fd) {
#else
int _close(int fd) {
#endif
int r = -1;
switch (fd_type(fd)) {
case FD_INVALID:
r = set_errno(EBADF);
break;
case FD_SYS:
r = set_errno(EACCES);
break;
#ifdef CC3200_FS_SPIFFS
case FD_SPIFFS:
r = fs_spiffs_close(fd - SPIFFS_FD_BASE);
break;
#endif
#ifdef MG_FS_SLFS
case FD_SLFS:
r = fs_slfs_close(fd - SLFS_FD_BASE);
break;
#endif
}
DBG(("close(%d) = %d", fd, r));
return r;
}
#if MG_TI_NO_HOST_INTERFACE
off_t lseek(int fd, off_t offset, int whence) {
#else
off_t _lseek(int fd, off_t offset, int whence) {
#endif
int r = -1;
switch (fd_type(fd)) {
case FD_INVALID:
r = set_errno(EBADF);
break;
case FD_SYS:
r = set_errno(ESPIPE);
break;
#ifdef CC3200_FS_SPIFFS
case FD_SPIFFS:
r = fs_spiffs_lseek(fd - SPIFFS_FD_BASE, offset, whence);
break;
#endif
#ifdef MG_FS_SLFS
case FD_SLFS:
r = fs_slfs_lseek(fd - SLFS_FD_BASE, offset, whence);
break;
#endif
}
DBG(("lseek(%d, %d, %d) = %d", fd, (int) offset, whence, r));
return r;
}
int _fstat(int fd, struct stat *s) {
int r = -1;
memset(s, 0, sizeof(*s));
switch (fd_type(fd)) {
case FD_INVALID:
r = set_errno(EBADF);
break;
case FD_SYS: {
/* Create barely passable stats for STD{IN,OUT,ERR}. */
memset(s, 0, sizeof(*s));
s->st_ino = fd;
s->st_mode = S_IFCHR | 0666;
r = 0;
break;
}
#ifdef CC3200_FS_SPIFFS
case FD_SPIFFS:
r = fs_spiffs_fstat(fd - SPIFFS_FD_BASE, s);
break;
#endif
#ifdef MG_FS_SLFS
case FD_SLFS:
r = fs_slfs_fstat(fd - SLFS_FD_BASE, s);
break;
#endif
}
DBG(("fstat(%d) = %d", fd, r));
return r;
}
#if MG_TI_NO_HOST_INTERFACE
int read(int fd, char *buf, unsigned count) {
#else
ssize_t _read(int fd, void *buf, size_t count) {
#endif
int r = -1;
switch (fd_type(fd)) {
case FD_INVALID:
r = set_errno(EBADF);
break;
case FD_SYS: {
if (fd != 0) {
r = set_errno(EACCES);
break;
}
/* Should we allow reading from stdin = uart? */
r = set_errno(ENOTSUP);
break;
}
#ifdef CC3200_FS_SPIFFS
case FD_SPIFFS:
r = fs_spiffs_read(fd - SPIFFS_FD_BASE, buf, count);
break;
#endif
#ifdef MG_FS_SLFS
case FD_SLFS:
r = fs_slfs_read(fd - SLFS_FD_BASE, buf, count);
break;
#endif
}
DBG(("read(%d, %u) = %d", fd, count, r));
return r;
}
#if MG_TI_NO_HOST_INTERFACE
int write(int fd, const char *buf, unsigned count) {
#else
ssize_t _write(int fd, const void *buf, size_t count) {
#endif
int r = -1;
size_t i = 0;
switch (fd_type(fd)) {
case FD_INVALID:
r = set_errno(EBADF);
break;
case FD_SYS: {
if (fd == 0) {
r = set_errno(EACCES);
break;
}
for (i = 0; i < count; i++) {
const char c = ((const char *) buf)[i];
if (c == '\n') MG_UART_CHAR_PUT(fd, '\r');
MG_UART_CHAR_PUT(fd, c);
}
r = count;
break;
}
#ifdef CC3200_FS_SPIFFS
case FD_SPIFFS:
r = fs_spiffs_write(fd - SPIFFS_FD_BASE, buf, count);
break;
#endif
#ifdef MG_FS_SLFS
case FD_SLFS:
r = fs_slfs_write(fd - SLFS_FD_BASE, buf, count);
break;
#endif
}
return r;
}
/*
* On Newlib we override rename directly too, because the default
* implementation using _link and _unlink doesn't work for us.
*/
#if MG_TI_NO_HOST_INTERFACE || defined(_NEWLIB_VERSION)
int rename(const char *frompath, const char *topath) {
int r = -1;
bool is_sl_from, is_sl_to;
const char *from = drop_dir(frompath, &is_sl_from);
const char *to = drop_dir(topath, &is_sl_to);
if (is_sl_from || is_sl_to) {
set_errno(ENOTSUP);
} else {
#ifdef CC3200_FS_SPIFFS
r = fs_spiffs_rename(from, to);
#endif
}
DBG(("rename(%s, %s) = %d", from, to, r));
return r;
}
#endif /* MG_TI_NO_HOST_INTERFACE || defined(_NEWLIB_VERSION) */
#if MG_TI_NO_HOST_INTERFACE
int unlink(const char *pathname) {
#else
int _unlink(const char *pathname) {
#endif
int r = -1;
bool is_sl;
const char *fname = drop_dir(pathname, &is_sl);
if (is_sl) {
#ifdef MG_FS_SLFS
r = fs_slfs_unlink(fname);
#endif
} else {
#ifdef CC3200_FS_SPIFFS
r = fs_spiffs_unlink(fname);
#endif
}
DBG(("unlink(%s) = %d, fname = %s", pathname, r, fname));
return r;
}
#ifdef CC3200_FS_SPIFFS /* FailFS does not support listing files. */
DIR *opendir(const char *dir_name) {
DIR *r = NULL;
bool is_sl;
drop_dir(dir_name, &is_sl);
if (is_sl) {
r = NULL;
set_errno(ENOTSUP);
} else {
r = fs_spiffs_opendir(dir_name);
}
DBG(("opendir(%s) = %p", dir_name, r));
return r;
}
struct dirent *readdir(DIR *dir) {
struct dirent *res = fs_spiffs_readdir(dir);
DBG(("readdir(%p) = %p", dir, res));
return res;
}
int closedir(DIR *dir) {
int res = fs_spiffs_closedir(dir);
DBG(("closedir(%p) = %d", dir, res));
return res;
}
int rmdir(const char *path) {
return fs_spiffs_rmdir(path);
}
int mkdir(const char *path, mode_t mode) {
(void) path;
(void) mode;
  /* SPIFFS supports only the root dir, which comes from Mongoose as '.'. */
return (strlen(path) == 1 && *path == '.') ? 0 : ENOTDIR;
}
#endif
int sl_fs_init(void) {
int ret = 1;
#ifdef __TI_COMPILER_VERSION__
#ifdef MG_FS_SLFS
#pragma diag_push
#pragma diag_suppress 169 /* Nothing we can do about the prototype mismatch. \
*/
ret = (add_device("SL", _MSA, fs_slfs_open, fs_slfs_close, fs_slfs_read,
fs_slfs_write, fs_slfs_lseek, fs_slfs_unlink,
fs_slfs_rename) == 0);
#pragma diag_pop
#endif
#endif
return ret;
}
#endif /* MG_NET_IF == MG_NET_IF_SIMPLELINK && (defined(MG_FS_SLFS) || \
defined(MG_FS_SPIFFS)) */
#ifdef MG_MODULE_LINES
#line 1 "common/platforms/simplelink/sl_socket.c"
#endif
/*
* Copyright (c) 2014-2016 Cesanta Software Limited
* All rights reserved
*/
#if MG_NET_IF == MG_NET_IF_SIMPLELINK
#include <errno.h>
#include <stdio.h>
/* Amalgamated: #include "common/platform.h" */
const char *inet_ntop(int af, const void *src, char *dst, socklen_t size) {
int res;
struct in_addr *in = (struct in_addr *) src;
if (af != AF_INET) {
errno = EAFNOSUPPORT;
return NULL;
}
res = snprintf(dst, size, "%lu.%lu.%lu.%lu", SL_IPV4_BYTE(in->s_addr, 0),
SL_IPV4_BYTE(in->s_addr, 1), SL_IPV4_BYTE(in->s_addr, 2),
SL_IPV4_BYTE(in->s_addr, 3));
return res > 0 ? dst : NULL;
}
char *inet_ntoa(struct in_addr n) {
static char a[16];
return (char *) inet_ntop(AF_INET, &n, a, sizeof(a));
}
int inet_pton(int af, const char *src, void *dst) {
uint32_t a0, a1, a2, a3;
uint8_t *db = (uint8_t *) dst;
if (af != AF_INET) {
errno = EAFNOSUPPORT;
return 0;
}
if (sscanf(src, "%lu.%lu.%lu.%lu", &a0, &a1, &a2, &a3) != 4) {
return 0;
}
*db = a3;
*(db + 1) = a2;
*(db + 2) = a1;
*(db + 3) = a0;
return 1;
}
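/*
 * Note: the octets are stored in reverse order ("1.2.3.4" yields bytes
 * {4, 3, 2, 1}); presumably this matches the representation the SimpleLink
 * host driver expects.
 */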
#endif /* MG_NET_IF == MG_NET_IF_SIMPLELINK */
#ifdef MG_MODULE_LINES
#line 1 "common/platforms/simplelink/sl_mg_task.c"
#endif
#if MG_NET_IF == MG_NET_IF_SIMPLELINK && !defined(MG_SIMPLELINK_NO_OSI)
/* Amalgamated: #include "mg_task.h" */
#include <oslib/osi.h>
enum mg_q_msg_type {
MG_Q_MSG_CB,
};
struct mg_q_msg {
enum mg_q_msg_type type;
void (*cb)(struct mg_mgr *mgr, void *arg);
void *arg;
};
static OsiMsgQ_t s_mg_q;
static void mg_task(void *arg);
bool mg_start_task(int priority, int stack_size, mg_init_cb mg_init) {
if (osi_MsgQCreate(&s_mg_q, "MG", sizeof(struct mg_q_msg), 16) != OSI_OK) {
return false;
}
if (osi_TaskCreate(mg_task, (const signed char *) "MG", stack_size,
(void *) mg_init, priority, NULL) != OSI_OK) {
return false;
}
return true;
}
static void mg_task(void *arg) {
struct mg_mgr mgr;
mg_init_cb mg_init = (mg_init_cb) arg;
mg_mgr_init(&mgr, NULL);
mg_init(&mgr);
while (1) {
struct mg_q_msg msg;
mg_mgr_poll(&mgr, 1);
if (osi_MsgQRead(&s_mg_q, &msg, 1) != OSI_OK) continue;
switch (msg.type) {
case MG_Q_MSG_CB: {
msg.cb(&mgr, msg.arg);
}
}
}
}
void mg_run_in_task(void (*cb)(struct mg_mgr *mgr, void *arg), void *cb_arg) {
struct mg_q_msg msg = {MG_Q_MSG_CB, cb, cb_arg};
osi_MsgQWrite(&s_mg_q, &msg, OSI_NO_WAIT);
}
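/*
 * Usage sketch (illustrative only, not compiled): post a callback from
 * another RTOS task; it runs on the Mongoose task between polls.
 */
#if 0
static void my_cb(struct mg_mgr *mgr, void *arg) {
  (void) mgr;
  (void) arg;
  /* Runs on the MG task, so it is safe to touch connections here. */
}
/* From elsewhere: mg_run_in_task(my_cb, NULL); */
#endif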
#endif /* MG_NET_IF == MG_NET_IF_SIMPLELINK && !defined(MG_SIMPLELINK_NO_OSI) \
*/
#ifdef MG_MODULE_LINES
#line 1 "common/platforms/simplelink/sl_net_if.h"
#endif
/*
* Copyright (c) 2014-2016 Cesanta Software Limited
* All rights reserved
*/
#ifndef CS_COMMON_PLATFORMS_SIMPLELINK_SL_NET_IF_H_
#define CS_COMMON_PLATFORMS_SIMPLELINK_SL_NET_IF_H_
/* Amalgamated: #include "mongoose/src/net_if.h" */
#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */
#ifndef MG_ENABLE_NET_IF_SIMPLELINK
#define MG_ENABLE_NET_IF_SIMPLELINK MG_NET_IF == MG_NET_IF_SIMPLELINK
#endif
extern const struct mg_iface_vtable mg_simplelink_iface_vtable;
#ifdef __cplusplus
}
#endif /* __cplusplus */
#endif /* CS_COMMON_PLATFORMS_SIMPLELINK_SL_NET_IF_H_ */
#ifdef MG_MODULE_LINES
#line 1 "common/platforms/simplelink/sl_net_if.c"
#endif
/*
* Copyright (c) 2014-2016 Cesanta Software Limited
* All rights reserved
*/
/* Amalgamated: #include "common/platforms/simplelink/sl_net_if.h" */
#if MG_ENABLE_NET_IF_SIMPLELINK
/* Amalgamated: #include "mongoose/src/internal.h" */
/* Amalgamated: #include "mongoose/src/util.h" */
#define MG_TCP_RECV_BUFFER_SIZE 1024
#define MG_UDP_RECV_BUFFER_SIZE 1500
static sock_t mg_open_listening_socket(union socket_address *sa, int type,
int proto);
int sl_set_ssl_opts(struct mg_connection *nc);
void mg_set_non_blocking_mode(sock_t sock) {
SlSockNonblocking_t opt;
opt.NonblockingEnabled = 1;
sl_SetSockOpt(sock, SL_SOL_SOCKET, SL_SO_NONBLOCKING, &opt, sizeof(opt));
}
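/* SL_EALREADY / SL_EAGAIN mean "operation in progress", not a fatal error. */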
static int mg_is_error(int n) {
return (n < 0 && n != SL_EALREADY && n != SL_EAGAIN);
}
void mg_sl_if_connect_tcp(struct mg_connection *nc,
const union socket_address *sa) {
int proto = 0;
if (nc->flags & MG_F_SSL) proto = SL_SEC_SOCKET;
sock_t sock = sl_Socket(AF_INET, SOCK_STREAM, proto);
if (sock < 0) {
nc->err = sock;
goto out;
}
mg_sock_set(nc, sock);
#if MG_ENABLE_SSL
nc->err = sl_set_ssl_opts(nc);
if (nc->err != 0) goto out;
#endif
nc->err = sl_Connect(sock, &sa->sa, sizeof(sa->sin));
out:
DBG(("%p to %s:%d sock %d %d err %d", nc, inet_ntoa(sa->sin.sin_addr),
ntohs(sa->sin.sin_port), nc->sock, proto, nc->err));
}
void mg_sl_if_connect_udp(struct mg_connection *nc) {
sock_t sock = sl_Socket(AF_INET, SOCK_DGRAM, 0);
if (sock < 0) {
nc->err = sock;
return;
}
mg_sock_set(nc, sock);
nc->err = 0;
}
int mg_sl_if_listen_tcp(struct mg_connection *nc, union socket_address *sa) {
int proto = 0;
if (nc->flags & MG_F_SSL) proto = SL_SEC_SOCKET;
sock_t sock = mg_open_listening_socket(sa, SOCK_STREAM, proto);
if (sock < 0) return sock;
mg_sock_set(nc, sock);
#if MG_ENABLE_SSL
return sl_set_ssl_opts(nc);
#else
return 0;
#endif
}
int mg_sl_if_listen_udp(struct mg_connection *nc, union socket_address *sa) {
sock_t sock = mg_open_listening_socket(sa, SOCK_DGRAM, 0);
if (sock == INVALID_SOCKET) return (errno ? errno : 1);
mg_sock_set(nc, sock);
return 0;
}
void mg_sl_if_tcp_send(struct mg_connection *nc, const void *buf, size_t len) {
mbuf_append(&nc->send_mbuf, buf, len);
}
void mg_sl_if_udp_send(struct mg_connection *nc, const void *buf, size_t len) {
mbuf_append(&nc->send_mbuf, buf, len);
}
void mg_sl_if_recved(struct mg_connection *nc, size_t len) {
(void) nc;
(void) len;
}
int mg_sl_if_create_conn(struct mg_connection *nc) {
(void) nc;
return 1;
}
void mg_sl_if_destroy_conn(struct mg_connection *nc) {
if (nc->sock == INVALID_SOCKET) return;
  /* TCP sockets are always closed; for UDP, accepted connections share the
   * listener's socket, so close only outgoing sockets and listeners. */
if (!(nc->flags & MG_F_UDP) || nc->listener == NULL) {
sl_Close(nc->sock);
}
nc->sock = INVALID_SOCKET;
}
static int mg_accept_conn(struct mg_connection *lc) {
struct mg_connection *nc;
union socket_address sa;
socklen_t sa_len = sizeof(sa);
sock_t sock = sl_Accept(lc->sock, &sa.sa, &sa_len);
if (sock < 0) {
DBG(("%p: failed to accept: %d", lc, sock));
return 0;
}
nc = mg_if_accept_new_conn(lc);
if (nc == NULL) {
sl_Close(sock);
return 0;
}
DBG(("%p conn from %s:%d", nc, inet_ntoa(sa.sin.sin_addr),
ntohs(sa.sin.sin_port)));
mg_sock_set(nc, sock);
if (nc->flags & MG_F_SSL) nc->flags |= MG_F_SSL_HANDSHAKE_DONE;
mg_if_accept_tcp_cb(nc, &sa, sa_len);
return 1;
}
/* 'sa' must be an initialized address to bind to */
static sock_t mg_open_listening_socket(union socket_address *sa, int type,
int proto) {
int r;
socklen_t sa_len =
(sa->sa.sa_family == AF_INET) ? sizeof(sa->sin) : sizeof(sa->sin6);
sock_t sock = sl_Socket(sa->sa.sa_family, type, proto);
if (sock < 0) return sock;
if ((r = sl_Bind(sock, &sa->sa, sa_len)) < 0) {
sl_Close(sock);
return r;
}
if (type != SOCK_DGRAM && (r = sl_Listen(sock, SOMAXCONN)) < 0) {
sl_Close(sock);
return r;
}
mg_set_non_blocking_mode(sock);
return sock;
}
static void mg_write_to_socket(struct mg_connection *nc) {
struct mbuf *io = &nc->send_mbuf;
int n = 0;
if (nc->flags & MG_F_UDP) {
n = sl_SendTo(nc->sock, io->buf, io->len, 0, &nc->sa.sa,
sizeof(nc->sa.sin));
DBG(("%p %d %d %d %s:%hu", nc, nc->sock, n, errno,
inet_ntoa(nc->sa.sin.sin_addr), ntohs(nc->sa.sin.sin_port)));
} else {
n = (int) sl_Send(nc->sock, io->buf, io->len, 0);
DBG(("%p %d bytes -> %d", nc, n, nc->sock));
}
if (n > 0) {
mbuf_remove(io, n);
mg_if_sent_cb(nc, n);
} else if (n < 0 && mg_is_error(n)) {
/* Something went wrong, drop the connection. */
nc->flags |= MG_F_CLOSE_IMMEDIATELY;
}
}
MG_INTERNAL size_t recv_avail_size(struct mg_connection *conn, size_t max) {
size_t avail;
if (conn->recv_mbuf_limit < conn->recv_mbuf.len) return 0;
avail = conn->recv_mbuf_limit - conn->recv_mbuf.len;
return avail > max ? max : avail;
}
static void mg_handle_tcp_read(struct mg_connection *conn) {
int n = 0;
char *buf = (char *) MG_MALLOC(MG_TCP_RECV_BUFFER_SIZE);
if (buf == NULL) {
DBG(("OOM"));
return;
}
n = (int) sl_Recv(conn->sock, buf,
recv_avail_size(conn, MG_TCP_RECV_BUFFER_SIZE), 0);
DBG(("%p %d bytes <- %d", conn, n, conn->sock));
if (n > 0) {
mg_if_recv_tcp_cb(conn, buf, n, 1 /* own */);
} else {
MG_FREE(buf);
}
if (n == 0) {
/* Orderly shutdown of the socket, try flushing output. */
conn->flags |= MG_F_SEND_AND_CLOSE;
} else if (mg_is_error(n)) {
conn->flags |= MG_F_CLOSE_IMMEDIATELY;
}
}
static void mg_handle_udp_read(struct mg_connection *nc) {
char *buf = (char *) MG_MALLOC(MG_UDP_RECV_BUFFER_SIZE);
if (buf == NULL) return;
union socket_address sa;
socklen_t sa_len = sizeof(sa);
int n = sl_RecvFrom(nc->sock, buf, MG_UDP_RECV_BUFFER_SIZE, 0,
(SlSockAddr_t *) &sa, &sa_len);
DBG(("%p %d bytes from %s:%d", nc, n, inet_ntoa(nc->sa.sin.sin_addr),
ntohs(nc->sa.sin.sin_port)));
if (n > 0) {
mg_if_recv_udp_cb(nc, buf, n, &sa, sa_len);
} else {
MG_FREE(buf);
}
}
#define _MG_F_FD_CAN_READ 1
#define _MG_F_FD_CAN_WRITE (1 << 1)
#define _MG_F_FD_ERROR (1 << 2)
void mg_mgr_handle_conn(struct mg_connection *nc, int fd_flags, double now) {
DBG(("%p fd=%d fd_flags=%d nc_flags=%lu rmbl=%d smbl=%d", nc, nc->sock,
fd_flags, nc->flags, (int) nc->recv_mbuf.len, (int) nc->send_mbuf.len));
if (nc->flags & MG_F_CONNECTING) {
if (nc->flags & MG_F_UDP || nc->err != SL_EALREADY) {
mg_if_connect_cb(nc, nc->err);
} else {
      /* In SimpleLink, to get the status of a non-blocking connect() we have
       * to wait until the socket is writable and repeat the sl_Connect call,
       * which will then return the real status. */
if (fd_flags & _MG_F_FD_CAN_WRITE) {
nc->err = sl_Connect(nc->sock, &nc->sa.sa, sizeof(nc->sa.sin));
DBG(("%p conn res=%d", nc, nc->err));
if (nc->err == SL_ESECSNOVERIFY ||
/* TODO(rojer): Provide API to set the date for verification. */
nc->err == SL_ESECDATEERROR) {
nc->err = 0;
}
if (nc->flags & MG_F_SSL && nc->err == 0) {
nc->flags |= MG_F_SSL_HANDSHAKE_DONE;
}
mg_if_connect_cb(nc, nc->err);
}
}
/* Ignore read/write in further processing, we've handled it. */
fd_flags &= ~(_MG_F_FD_CAN_READ | _MG_F_FD_CAN_WRITE);
}
if (fd_flags & _MG_F_FD_CAN_READ) {
if (nc->flags & MG_F_UDP) {
mg_handle_udp_read(nc);
} else {
if (nc->flags & MG_F_LISTENING) {
mg_accept_conn(nc);
} else {
mg_handle_tcp_read(nc);
}
}
}
if (!(nc->flags & MG_F_CLOSE_IMMEDIATELY)) {
if ((fd_flags & _MG_F_FD_CAN_WRITE) && nc->send_mbuf.len > 0) {
mg_write_to_socket(nc);
}
if (!(fd_flags & (_MG_F_FD_CAN_READ | _MG_F_FD_CAN_WRITE))) {
mg_if_poll(nc, now);
}
mg_if_timer(nc, now);
}
DBG(("%p after fd=%d nc_flags=%lu rmbl=%d smbl=%d", nc, nc->sock, nc->flags,
(int) nc->recv_mbuf.len, (int) nc->send_mbuf.len));
}
/* Associate a socket to a connection. */
void mg_sl_if_sock_set(struct mg_connection *nc, sock_t sock) {
mg_set_non_blocking_mode(sock);
nc->sock = sock;
DBG(("%p %d", nc, sock));
}
void mg_sl_if_init(struct mg_iface *iface) {
(void) iface;
DBG(("%p using sl_Select()", iface->mgr));
}
void mg_sl_if_free(struct mg_iface *iface) {
(void) iface;
}
void mg_sl_if_add_conn(struct mg_connection *nc) {
(void) nc;
}
void mg_sl_if_remove_conn(struct mg_connection *nc) {
(void) nc;
}
time_t mg_sl_if_poll(struct mg_iface *iface, int timeout_ms) {
struct mg_mgr *mgr = iface->mgr;
double now = mg_time();
double min_timer;
struct mg_connection *nc, *tmp;
struct SlTimeval_t tv;
SlFdSet_t read_set, write_set, err_set;
sock_t max_fd = INVALID_SOCKET;
int num_fds, num_ev = 0, num_timers = 0;
SL_FD_ZERO(&read_set);
SL_FD_ZERO(&write_set);
SL_FD_ZERO(&err_set);
/*
* Note: it is ok to have connections with sock == INVALID_SOCKET in the list,
* e.g. timer-only "connections".
*/
min_timer = 0;
for (nc = mgr->active_connections, num_fds = 0; nc != NULL; nc = tmp) {
tmp = nc->next;
if (nc->sock != INVALID_SOCKET) {
num_fds++;
if (!(nc->flags & MG_F_WANT_WRITE) &&
nc->recv_mbuf.len < nc->recv_mbuf_limit &&
(!(nc->flags & MG_F_UDP) || nc->listener == NULL)) {
SL_FD_SET(nc->sock, &read_set);
if (max_fd == INVALID_SOCKET || nc->sock > max_fd) max_fd = nc->sock;
}
if (((nc->flags & MG_F_CONNECTING) && !(nc->flags & MG_F_WANT_READ)) ||
(nc->send_mbuf.len > 0 && !(nc->flags & MG_F_CONNECTING))) {
SL_FD_SET(nc->sock, &write_set);
SL_FD_SET(nc->sock, &err_set);
if (max_fd == INVALID_SOCKET || nc->sock > max_fd) max_fd = nc->sock;
}
}
if (nc->ev_timer_time > 0) {
if (num_timers == 0 || nc->ev_timer_time < min_timer) {
min_timer = nc->ev_timer_time;
}
num_timers++;
}
}
/*
* If there is a timer to be fired earlier than the requested timeout,
* adjust the timeout.
*/
if (num_timers > 0) {
double timer_timeout_ms = (min_timer - mg_time()) * 1000 + 1 /* rounding */;
if (timer_timeout_ms < timeout_ms) {
timeout_ms = timer_timeout_ms;
}
}
if (timeout_ms < 0) timeout_ms = 0;
tv.tv_sec = timeout_ms / 1000;
tv.tv_usec = (timeout_ms % 1000) * 1000;
if (num_fds > 0) {
num_ev = sl_Select((int) max_fd + 1, &read_set, &write_set, &err_set, &tv);
}
now = mg_time();
DBG(("sl_Select @ %ld num_ev=%d of %d, timeout=%d", (long) now, num_ev,
num_fds, timeout_ms));
for (nc = mgr->active_connections; nc != NULL; nc = tmp) {
int fd_flags = 0;
if (nc->sock != INVALID_SOCKET) {
if (num_ev > 0) {
fd_flags =
(SL_FD_ISSET(nc->sock, &read_set) &&
(!(nc->flags & MG_F_UDP) || nc->listener == NULL)
? _MG_F_FD_CAN_READ
: 0) |
(SL_FD_ISSET(nc->sock, &write_set) ? _MG_F_FD_CAN_WRITE : 0) |
(SL_FD_ISSET(nc->sock, &err_set) ? _MG_F_FD_ERROR : 0);
}
/* SimpleLink does not report UDP sockets as writable. */
if (nc->flags & MG_F_UDP && nc->send_mbuf.len > 0) {
fd_flags |= _MG_F_FD_CAN_WRITE;
}
}
tmp = nc->next;
mg_mgr_handle_conn(nc, fd_flags, now);
}
for (nc = mgr->active_connections; nc != NULL; nc = tmp) {
tmp = nc->next;
if ((nc->flags & MG_F_CLOSE_IMMEDIATELY) ||
(nc->send_mbuf.len == 0 && (nc->flags & MG_F_SEND_AND_CLOSE))) {
mg_close_conn(nc);
}
}
return now;
}
void mg_sl_if_get_conn_addr(struct mg_connection *nc, int remote,
union socket_address *sa) {
  /* SimpleLink does not provide a way to get a socket's peer address after
   * accept or connect. The address should have been preserved in the
   * connection, so we do our best here by using it. */
if (remote) memcpy(sa, &nc->sa, sizeof(*sa));
}
void sl_restart_cb(struct mg_mgr *mgr) {
/*
* SimpleLink has been restarted, meaning all sockets have been invalidated.
* We try our best - we'll restart the listeners, but for outgoing
* connections we have no option but to terminate.
*/
struct mg_connection *nc;
for (nc = mg_next(mgr, NULL); nc != NULL; nc = mg_next(mgr, nc)) {
if (nc->sock == INVALID_SOCKET) continue; /* Could be a timer */
if (nc->flags & MG_F_LISTENING) {
DBG(("restarting %p %s:%d", nc, inet_ntoa(nc->sa.sin.sin_addr),
ntohs(nc->sa.sin.sin_port)));
int res = (nc->flags & MG_F_UDP ? mg_sl_if_listen_udp(nc, &nc->sa)
: mg_sl_if_listen_tcp(nc, &nc->sa));
if (res == 0) continue;
/* Well, we tried and failed. Fall through to closing. */
}
nc->sock = INVALID_SOCKET;
DBG(("terminating %p %s:%d", nc, inet_ntoa(nc->sa.sin.sin_addr),
ntohs(nc->sa.sin.sin_port)));
/* TODO(rojer): Outgoing UDP? */
nc->flags |= MG_F_CLOSE_IMMEDIATELY;
}
}
/* clang-format off */
#define MG_SL_IFACE_VTABLE \
{ \
mg_sl_if_init, \
mg_sl_if_free, \
mg_sl_if_add_conn, \
mg_sl_if_remove_conn, \
mg_sl_if_poll, \
mg_sl_if_listen_tcp, \
mg_sl_if_listen_udp, \
mg_sl_if_connect_tcp, \
mg_sl_if_connect_udp, \
mg_sl_if_tcp_send, \
mg_sl_if_udp_send, \
mg_sl_if_recved, \
mg_sl_if_create_conn, \
mg_sl_if_destroy_conn, \
mg_sl_if_sock_set, \
mg_sl_if_get_conn_addr, \
}
/* clang-format on */
const struct mg_iface_vtable mg_simplelink_iface_vtable = MG_SL_IFACE_VTABLE;
#if MG_NET_IF == MG_NET_IF_SIMPLELINK
const struct mg_iface_vtable mg_default_iface_vtable = MG_SL_IFACE_VTABLE;
#endif
#endif /* MG_ENABLE_NET_IF_SIMPLELINK */
#ifdef MG_MODULE_LINES
#line 1 "common/platforms/simplelink/sl_ssl_if.c"
#endif
/*
* Copyright (c) 2014-2016 Cesanta Software Limited
* All rights reserved
*/
#if MG_ENABLE_SSL && MG_SSL_IF == MG_SSL_IF_SIMPLELINK
/* Amalgamated: #include "common/mg_mem.h" */
struct mg_ssl_if_ctx {
char *ssl_cert;
char *ssl_key;
char *ssl_ca_cert;
char *ssl_server_name;
};
void mg_ssl_if_init() {
}
enum mg_ssl_if_result mg_ssl_if_conn_init(
struct mg_connection *nc, const struct mg_ssl_if_conn_params *params,
const char **err_msg) {
struct mg_ssl_if_ctx *ctx =
(struct mg_ssl_if_ctx *) MG_CALLOC(1, sizeof(*ctx));
if (ctx == NULL) {
MG_SET_PTRPTR(err_msg, "Out of memory");
return MG_SSL_ERROR;
}
nc->ssl_if_data = ctx;
if (params->cert != NULL || params->key != NULL) {
if (params->cert != NULL && params->key != NULL) {
ctx->ssl_cert = strdup(params->cert);
ctx->ssl_key = strdup(params->key);
} else {
MG_SET_PTRPTR(err_msg, "Both cert and key are required.");
return MG_SSL_ERROR;
}
}
if (params->ca_cert != NULL && strcmp(params->ca_cert, "*") != 0) {
ctx->ssl_ca_cert = strdup(params->ca_cert);
}
/* TODO(rojer): cipher_suites. */
if (params->server_name != NULL) {
ctx->ssl_server_name = strdup(params->server_name);
}
return MG_SSL_OK;
}
void mg_ssl_if_conn_close_notify(struct mg_connection *nc) {
/* Nothing to do */
(void) nc;
}
void mg_ssl_if_conn_free(struct mg_connection *nc) {
struct mg_ssl_if_ctx *ctx = (struct mg_ssl_if_ctx *) nc->ssl_if_data;
if (ctx == NULL) return;
nc->ssl_if_data = NULL;
MG_FREE(ctx->ssl_cert);
MG_FREE(ctx->ssl_key);
MG_FREE(ctx->ssl_ca_cert);
MG_FREE(ctx->ssl_server_name);
memset(ctx, 0, sizeof(*ctx));
MG_FREE(ctx);
}
bool pem_to_der(const char *pem_file, const char *der_file) {
bool ret = false;
FILE *pf = NULL, *df = NULL;
bool writing = false;
pf = fopen(pem_file, "r");
if (pf == NULL) goto clean;
remove(der_file);
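  /* Skip the "SL:" prefix: fs_slfs_* functions expect the bare SLFS name. */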
fs_slfs_set_new_file_size(der_file + 3, 2048);
df = fopen(der_file, "w");
if (df == NULL) goto clean;
while (1) {
char pem_buf[70];
char der_buf[48];
if (!fgets(pem_buf, sizeof(pem_buf), pf)) break;
if (writing) {
if (strstr(pem_buf, "-----END ") != NULL) {
ret = true;
break;
}
int l = 0;
      while (!isspace((unsigned char) pem_buf[l])) l++;
int der_len = 0;
cs_base64_decode((const unsigned char *) pem_buf, sizeof(pem_buf),
der_buf, &der_len);
if (der_len <= 0) break;
      if (fwrite(der_buf, 1, der_len, df) != (size_t) der_len) break;
} else if (strstr(pem_buf, "-----BEGIN ") != NULL) {
writing = true;
}
}
clean:
if (pf != NULL) fclose(pf);
if (df != NULL) {
fclose(df);
if (!ret) remove(der_file);
}
return ret;
}
#if MG_ENABLE_FILESYSTEM && defined(MG_FS_SLFS)
/* If the file's extension is .pem, convert it to DER format and put on SLFS. */
static char *sl_pem2der(const char *pem_file) {
const char *pem_ext = strstr(pem_file, ".pem");
if (pem_ext == NULL || *(pem_ext + 4) != '\0') {
return strdup(pem_file);
}
char *der_file = NULL;
/* DER file must be located on SLFS, add prefix. */
int l = mg_asprintf(&der_file, 0, "SL:%.*s.der", (int) (pem_ext - pem_file),
pem_file);
if (der_file == NULL) return NULL;
bool result = false;
cs_stat_t st;
if (mg_stat(der_file, &st) != 0) {
result = pem_to_der(pem_file, der_file);
LOG(LL_DEBUG, ("%s -> %s = %d", pem_file, der_file, result));
} else {
/* File exists, assume it's already been converted. */
result = true;
}
if (result) {
/* Strip the SL: prefix we added since NWP does not expect it. */
memmove(der_file, der_file + 3, l - 2 /* including \0 */);
} else {
MG_FREE(der_file);
der_file = NULL;
}
return der_file;
}
#else
static char *sl_pem2der(const char *pem_file) {
return strdup(pem_file);
}
#endif
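/*
 * Illustrative flow (comment only, not compiled): a "*.pem" file named in
 * the SSL params is converted once, and the NWP is then handed the DER name:
 *
 *   char *der = sl_pem2der("ca.pem");   -> "ca.der" on SLFS
 *   ...passed to sl_SetSockOpt() below, then MG_FREE()d.
 */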
int sl_set_ssl_opts(struct mg_connection *nc) {
int err;
struct mg_ssl_if_ctx *ctx = (struct mg_ssl_if_ctx *) nc->ssl_if_data;
DBG(("%p ssl ctx: %p", nc, ctx));
if (ctx != NULL) {
DBG(("%p %s,%s,%s,%s", nc, (ctx->ssl_cert ? ctx->ssl_cert : "-"),
         (ctx->ssl_key ? ctx->ssl_key : "-"),
(ctx->ssl_ca_cert ? ctx->ssl_ca_cert : "-"),
(ctx->ssl_server_name ? ctx->ssl_server_name : "-")));
if (ctx->ssl_cert != NULL && ctx->ssl_key != NULL) {
char *ssl_cert = sl_pem2der(ctx->ssl_cert);
char *ssl_key = sl_pem2der(ctx->ssl_key);
if (ssl_cert != NULL && ssl_key != NULL) {
err = sl_SetSockOpt(nc->sock, SL_SOL_SOCKET,
SL_SO_SECURE_FILES_CERTIFICATE_FILE_NAME, ssl_cert,
strlen(ssl_cert));
LOG(LL_INFO, ("CERTIFICATE_FILE_NAME %s -> %d", ssl_cert, err));
err = sl_SetSockOpt(nc->sock, SL_SOL_SOCKET,
SL_SO_SECURE_FILES_PRIVATE_KEY_FILE_NAME, ssl_key,
strlen(ssl_key));
LOG(LL_INFO, ("PRIVATE_KEY_FILE_NAME %s -> %d", ssl_key, err));
} else {
err = -1;
}
MG_FREE(ssl_cert);
MG_FREE(ssl_key);
if (err != 0) return err;
}
if (ctx->ssl_ca_cert != NULL) {
if (ctx->ssl_ca_cert[0] != '\0') {
char *ssl_ca_cert = sl_pem2der(ctx->ssl_ca_cert);
if (ssl_ca_cert != NULL) {
err = sl_SetSockOpt(nc->sock, SL_SOL_SOCKET,
SL_SO_SECURE_FILES_CA_FILE_NAME, ssl_ca_cert,
strlen(ssl_ca_cert));
LOG(LL_INFO, ("CA_FILE_NAME %s -> %d", ssl_ca_cert, err));
} else {
err = -1;
}
MG_FREE(ssl_ca_cert);
if (err != 0) return err;
}
}
if (ctx->ssl_server_name != NULL) {
err = sl_SetSockOpt(nc->sock, SL_SOL_SOCKET,
SO_SECURE_DOMAIN_NAME_VERIFICATION,
ctx->ssl_server_name, strlen(ctx->ssl_server_name));
DBG(("DOMAIN_NAME_VERIFICATION %s -> %d", ctx->ssl_server_name, err));
      /* Domain name verification was added in a NWP service pack; older
       * versions return SL_ENOPROTOOPT. There isn't much we can do about it,
       * so we ignore the error. */
if (err != 0 && err != SL_ENOPROTOOPT) return err;
}
}
return 0;
}
#endif /* MG_ENABLE_SSL && MG_SSL_IF == MG_SSL_IF_SIMPLELINK */
#ifdef MG_MODULE_LINES
#line 1 "common/platforms/lwip/mg_lwip_net_if.h"
#endif
/*
* Copyright (c) 2014-2016 Cesanta Software Limited
* All rights reserved
*/
#ifndef CS_COMMON_PLATFORMS_LWIP_MG_NET_IF_LWIP_H_
#define CS_COMMON_PLATFORMS_LWIP_MG_NET_IF_LWIP_H_
#ifndef MG_ENABLE_NET_IF_LWIP_LOW_LEVEL
#define MG_ENABLE_NET_IF_LWIP_LOW_LEVEL MG_NET_IF == MG_NET_IF_LWIP_LOW_LEVEL
#endif
#if MG_ENABLE_NET_IF_LWIP_LOW_LEVEL
#include <stdint.h>
extern const struct mg_iface_vtable mg_lwip_iface_vtable;
struct mg_lwip_conn_state {
struct mg_connection *nc;
struct mg_connection *lc;
union {
struct tcp_pcb *tcp;
struct udp_pcb *udp;
} pcb;
err_t err;
size_t num_sent; /* Number of acknowledged bytes to be reported to the core */
struct pbuf *rx_chain; /* Chain of incoming data segments. */
size_t rx_offset; /* Offset within the first pbuf (if partially consumed) */
/* Last SSL write size, for retries. */
int last_ssl_write_size;
/* Whether MG_SIG_RECV is already pending for this connection */
int recv_pending;
};
enum mg_sig_type {
MG_SIG_CONNECT_RESULT = 1,
MG_SIG_RECV = 2,
MG_SIG_SENT_CB = 3,
MG_SIG_CLOSE_CONN = 4,
MG_SIG_TOMBSTONE = 5,
MG_SIG_ACCEPT = 6,
};
void mg_lwip_post_signal(enum mg_sig_type sig, struct mg_connection *nc);
/* To be implemented by the platform. */
void mg_lwip_mgr_schedule_poll(struct mg_mgr *mgr);
#endif /* MG_ENABLE_NET_IF_LWIP_LOW_LEVEL */
#endif /* CS_COMMON_PLATFORMS_LWIP_MG_NET_IF_LWIP_H_ */
#ifdef MG_MODULE_LINES
#line 1 "common/platforms/lwip/mg_lwip_net_if.c"
#endif
/*
* Copyright (c) 2014-2016 Cesanta Software Limited
* All rights reserved
*/
#if MG_ENABLE_NET_IF_LWIP_LOW_LEVEL
/* Amalgamated: #include "common/mg_mem.h" */
#include <lwip/pbuf.h>
#include <lwip/tcp.h>
#if CS_PLATFORM != CS_P_STM32
#include <lwip/tcp_impl.h>
#endif
#include <lwip/udp.h>
/* Amalgamated: #include "common/cs_dbg.h" */
/*
 * Depending on whether Mongoose is compiled with IPv6 support, use the
 * right lwip functions.
 */
#if MG_ENABLE_IPV6
#define TCP_NEW tcp_new_ip6
#define TCP_BIND tcp_bind_ip6
#define UDP_BIND udp_bind_ip6
#define IPADDR_NTOA(x) ip6addr_ntoa((const ip6_addr_t *)(x))
#define SET_ADDR(dst, src) \
memcpy((dst)->sin6.sin6_addr.s6_addr, (src)->ip6.addr, \
sizeof((dst)->sin6.sin6_addr.s6_addr))
#else
#define TCP_NEW tcp_new
#define TCP_BIND tcp_bind
#define UDP_BIND udp_bind
#define IPADDR_NTOA ipaddr_ntoa
#define SET_ADDR(dst, src) (dst)->sin.sin_addr.s_addr = GET_IPV4(src)
#endif
/*
 * If lwip is compiled with IPv6 support, the API changes even for IPv4.
 */
#if !defined(LWIP_IPV6) || !LWIP_IPV6
#define GET_IPV4(ipX_addr) ((ipX_addr)->addr)
#else
#define GET_IPV4(ipX_addr) ((ipX_addr)->ip4.addr)
#endif
void mg_lwip_ssl_do_hs(struct mg_connection *nc);
void mg_lwip_ssl_send(struct mg_connection *nc);
void mg_lwip_ssl_recv(struct mg_connection *nc);
void mg_lwip_if_init(struct mg_iface *iface);
void mg_lwip_if_free(struct mg_iface *iface);
void mg_lwip_if_add_conn(struct mg_connection *nc);
void mg_lwip_if_remove_conn(struct mg_connection *nc);
time_t mg_lwip_if_poll(struct mg_iface *iface, int timeout_ms);
#ifdef RTOS_SDK
extern void mgos_lock();
extern void mgos_unlock();
#else
#define mgos_lock()
#define mgos_unlock()
#endif
static void mg_lwip_recv_common(struct mg_connection *nc, struct pbuf *p);
#if LWIP_TCP_KEEPALIVE
void mg_lwip_set_keepalive_params(struct mg_connection *nc, int idle,
int interval, int count) {
if (nc->sock == INVALID_SOCKET || nc->flags & MG_F_UDP) {
return;
}
struct mg_lwip_conn_state *cs = (struct mg_lwip_conn_state *) nc->sock;
struct tcp_pcb *tpcb = cs->pcb.tcp;
if (idle > 0 && interval > 0 && count > 0) {
tpcb->keep_idle = idle * 1000;
tpcb->keep_intvl = interval * 1000;
tpcb->keep_cnt = count;
tpcb->so_options |= SOF_KEEPALIVE;
} else {
tpcb->so_options &= ~SOF_KEEPALIVE;
}
}
#elif !defined(MG_NO_LWIP_TCP_KEEPALIVE)
#warning LWIP TCP keepalive is disabled. Please consider enabling it.
#endif /* LWIP_TCP_KEEPALIVE */
static err_t mg_lwip_tcp_conn_cb(void *arg, struct tcp_pcb *tpcb, err_t err) {
struct mg_connection *nc = (struct mg_connection *) arg;
DBG(("%p connect to %s:%u = %d", nc, IPADDR_NTOA(ipX_2_ip(&tpcb->remote_ip)),
tpcb->remote_port, err));
if (nc == NULL) {
tcp_abort(tpcb);
return ERR_ARG;
}
struct mg_lwip_conn_state *cs = (struct mg_lwip_conn_state *) nc->sock;
cs->err = err;
#if LWIP_TCP_KEEPALIVE
if (err == 0) mg_lwip_set_keepalive_params(nc, 60, 10, 6);
#endif
mg_lwip_post_signal(MG_SIG_CONNECT_RESULT, nc);
return ERR_OK;
}
static void mg_lwip_tcp_error_cb(void *arg, err_t err) {
struct mg_connection *nc = (struct mg_connection *) arg;
DBG(("%p conn error %d", nc, err));
if (nc == NULL) return;
struct mg_lwip_conn_state *cs = (struct mg_lwip_conn_state *) nc->sock;
cs->pcb.tcp = NULL; /* Has already been deallocated */
if (nc->flags & MG_F_CONNECTING) {
cs->err = err;
mg_lwip_post_signal(MG_SIG_CONNECT_RESULT, nc);
} else {
mg_lwip_post_signal(MG_SIG_CLOSE_CONN, nc);
}
}
static err_t mg_lwip_tcp_recv_cb(void *arg, struct tcp_pcb *tpcb,
struct pbuf *p, err_t err) {
struct mg_connection *nc = (struct mg_connection *) arg;
DBG(("%p %p %u %d", nc, tpcb, (p != NULL ? p->tot_len : 0), err));
if (p == NULL) {
if (nc != NULL) {
mg_lwip_post_signal(MG_SIG_CLOSE_CONN, nc);
} else {
/* Tombstoned connection, do nothing. */
}
return ERR_OK;
} else if (nc == NULL) {
tcp_abort(tpcb);
return ERR_ARG;
}
struct mg_lwip_conn_state *cs = (struct mg_lwip_conn_state *) nc->sock;
/*
* If we get a chain of more than one segment at once, we need to bump
* refcount on the subsequent bufs to make them independent.
*/
if (p->next != NULL) {
struct pbuf *q = p->next;
for (; q != NULL; q = q->next) pbuf_ref(q);
}
if (cs->rx_chain == NULL) {
cs->rx_offset = 0;
} else if (pbuf_clen(cs->rx_chain) >= 4) {
    /* ESP SDK has a limited pool of 5 pbufs. We must not hog them all or RX
     * will be completely blocked. We already have at least 4 in the chain and
     * this one would make 5, so make a copy and release the original. */
struct pbuf *np = pbuf_alloc(PBUF_RAW, p->tot_len, PBUF_RAM);
if (np != NULL) {
pbuf_copy(np, p);
pbuf_free(p);
p = np;
}
}
mg_lwip_recv_common(nc, p);
return ERR_OK;
}
static void mg_lwip_handle_recv_tcp(struct mg_connection *nc) {
struct mg_lwip_conn_state *cs = (struct mg_lwip_conn_state *) nc->sock;
#if MG_ENABLE_SSL
if (nc->flags & MG_F_SSL) {
if (nc->flags & MG_F_SSL_HANDSHAKE_DONE) {
mg_lwip_ssl_recv(nc);
} else {
mg_lwip_ssl_do_hs(nc);
}
return;
}
#endif
mgos_lock();
while (cs->rx_chain != NULL) {
struct pbuf *seg = cs->rx_chain;
size_t len = (seg->len - cs->rx_offset);
char *data = (char *) MG_MALLOC(len);
if (data == NULL) {
mgos_unlock();
DBG(("OOM"));
return;
}
pbuf_copy_partial(seg, data, len, cs->rx_offset);
cs->rx_offset += len;
if (cs->rx_offset == cs->rx_chain->len) {
cs->rx_chain = pbuf_dechain(cs->rx_chain);
pbuf_free(seg);
cs->rx_offset = 0;
}
mgos_unlock();
mg_if_recv_tcp_cb(nc, data, len, 1 /* own */);
mgos_lock();
}
mgos_unlock();
if (nc->send_mbuf.len > 0) {
mg_lwip_mgr_schedule_poll(nc->mgr);
}
}
static err_t mg_lwip_tcp_sent_cb(void *arg, struct tcp_pcb *tpcb,
u16_t num_sent) {
struct mg_connection *nc = (struct mg_connection *) arg;
DBG(("%p %p %u", nc, tpcb, num_sent));
if (nc == NULL) {
tcp_abort(tpcb);
return ERR_ABRT;
}
struct mg_lwip_conn_state *cs = (struct mg_lwip_conn_state *) nc->sock;
cs->num_sent += num_sent;
mg_lwip_post_signal(MG_SIG_SENT_CB, nc);
return ERR_OK;
}
void mg_lwip_if_connect_tcp(struct mg_connection *nc,
const union socket_address *sa) {
struct mg_lwip_conn_state *cs = (struct mg_lwip_conn_state *) nc->sock;
struct tcp_pcb *tpcb = TCP_NEW();
cs->pcb.tcp = tpcb;
ip_addr_t *ip = (ip_addr_t *) &sa->sin.sin_addr.s_addr;
u16_t port = ntohs(sa->sin.sin_port);
tcp_arg(tpcb, nc);
tcp_err(tpcb, mg_lwip_tcp_error_cb);
tcp_sent(tpcb, mg_lwip_tcp_sent_cb);
tcp_recv(tpcb, mg_lwip_tcp_recv_cb);
cs->err = TCP_BIND(tpcb, IP_ADDR_ANY, 0 /* any port */);
DBG(("%p tcp_bind = %d", nc, cs->err));
if (cs->err != ERR_OK) {
mg_lwip_post_signal(MG_SIG_CONNECT_RESULT, nc);
return;
}
cs->err = tcp_connect(tpcb, ip, port, mg_lwip_tcp_conn_cb);
DBG(("%p tcp_connect %p = %d", nc, tpcb, cs->err));
if (cs->err != ERR_OK) {
mg_lwip_post_signal(MG_SIG_CONNECT_RESULT, nc);
return;
}
}
/*
 * The lwip bundled with the SDKs for nRF5x chips (and for STM32) uses a
 * different type for the `udp_recv()` callback.
 */
#if CS_PLATFORM == CS_P_NRF51 || CS_PLATFORM == CS_P_NRF52 || \
CS_PLATFORM == CS_P_STM32
static void mg_lwip_udp_recv_cb(void *arg, struct udp_pcb *pcb, struct pbuf *p,
const ip_addr_t *addr, u16_t port)
#else
static void mg_lwip_udp_recv_cb(void *arg, struct udp_pcb *pcb, struct pbuf *p,
ip_addr_t *addr, u16_t port)
#endif
{
struct mg_connection *nc = (struct mg_connection *) arg;
DBG(("%p %s:%u %p %u %u", nc, IPADDR_NTOA(addr), port, p, p->ref, p->len));
/* Put address in a separate pbuf and tack it onto the packet. */
struct pbuf *sap =
pbuf_alloc(PBUF_RAW, sizeof(union socket_address), PBUF_RAM);
if (sap == NULL) {
pbuf_free(p);
return;
}
union socket_address *sa = (union socket_address *) sap->payload;
sa->sin.sin_addr.s_addr = addr->addr;
sa->sin.sin_port = htons(port);
/* Logic in the recv handler requires that there be exactly one data pbuf. */
p = pbuf_coalesce(p, PBUF_RAW);
pbuf_chain(sap, p);
mg_lwip_recv_common(nc, sap);
(void) pcb;
}
static void mg_lwip_recv_common(struct mg_connection *nc, struct pbuf *p) {
struct mg_lwip_conn_state *cs = (struct mg_lwip_conn_state *) nc->sock;
mgos_lock();
if (cs->rx_chain == NULL) {
cs->rx_chain = p;
} else {
pbuf_chain(cs->rx_chain, p);
}
if (!cs->recv_pending) {
cs->recv_pending = 1;
mg_lwip_post_signal(MG_SIG_RECV, nc);
}
mgos_unlock();
}
static void mg_lwip_handle_recv_udp(struct mg_connection *nc) {
struct mg_lwip_conn_state *cs = (struct mg_lwip_conn_state *) nc->sock;
/*
* For UDP, RX chain consists of interleaved address and packet bufs:
* Address pbuf followed by exactly one data pbuf (recv_cb took care of that).
*/
while (cs->rx_chain != NULL) {
struct pbuf *sap = cs->rx_chain;
struct pbuf *p = sap->next;
cs->rx_chain = pbuf_dechain(p);
size_t data_len = p->len;
char *data = (char *) MG_MALLOC(data_len);
if (data != NULL) {
pbuf_copy_partial(p, data, data_len, 0);
pbuf_free(p);
mg_if_recv_udp_cb(nc, data, data_len,
(union socket_address *) sap->payload, sap->len);
pbuf_free(sap);
} else {
pbuf_free(p);
pbuf_free(sap);
}
}
}
void mg_lwip_if_connect_udp(struct mg_connection *nc) {
struct mg_lwip_conn_state *cs = (struct mg_lwip_conn_state *) nc->sock;
struct udp_pcb *upcb = udp_new();
cs->err = UDP_BIND(upcb, IP_ADDR_ANY, 0 /* any port */);
DBG(("%p udp_bind %p = %d", nc, upcb, cs->err));
if (cs->err == ERR_OK) {
udp_recv(upcb, mg_lwip_udp_recv_cb, nc);
cs->pcb.udp = upcb;
} else {
udp_remove(upcb);
}
mg_lwip_post_signal(MG_SIG_CONNECT_RESULT, nc);
}
void mg_lwip_accept_conn(struct mg_connection *nc, struct tcp_pcb *tpcb) {
union socket_address sa;
SET_ADDR(&sa, &tpcb->remote_ip);
sa.sin.sin_port = htons(tpcb->remote_port);
mg_if_accept_tcp_cb(nc, &sa, sizeof(sa.sin));
}
void mg_lwip_handle_accept(struct mg_connection *nc) {
struct mg_lwip_conn_state *cs = (struct mg_lwip_conn_state *) nc->sock;
#if MG_ENABLE_SSL
if (cs->lc->flags & MG_F_SSL) {
if (mg_ssl_if_conn_accept(nc, cs->lc) != MG_SSL_OK) {
LOG(LL_ERROR, ("SSL error"));
tcp_close(cs->pcb.tcp);
}
} else
#endif
{
mg_lwip_accept_conn(nc, cs->pcb.tcp);
}
}
static err_t mg_lwip_accept_cb(void *arg, struct tcp_pcb *newtpcb, err_t err) {
struct mg_connection *lc = (struct mg_connection *) arg;
DBG(("%p conn %p from %s:%u", lc, newtpcb,
IPADDR_NTOA(ipX_2_ip(&newtpcb->remote_ip)), newtpcb->remote_port));
struct mg_connection *nc = mg_if_accept_new_conn(lc);
if (nc == NULL) {
tcp_abort(newtpcb);
return ERR_ABRT;
}
struct mg_lwip_conn_state *cs = (struct mg_lwip_conn_state *) nc->sock;
cs->lc = lc;
cs->pcb.tcp = newtpcb;
/* We need to set up callbacks before returning because data may start
* arriving immediately. */
tcp_arg(newtpcb, nc);
tcp_err(newtpcb, mg_lwip_tcp_error_cb);
tcp_sent(newtpcb, mg_lwip_tcp_sent_cb);
tcp_recv(newtpcb, mg_lwip_tcp_recv_cb);
#if LWIP_TCP_KEEPALIVE
mg_lwip_set_keepalive_params(nc, 60, 10, 6);
#endif
mg_lwip_post_signal(MG_SIG_ACCEPT, nc);
(void) err;
return ERR_OK;
}
int mg_lwip_if_listen_tcp(struct mg_connection *nc, union socket_address *sa) {
struct mg_lwip_conn_state *cs = (struct mg_lwip_conn_state *) nc->sock;
struct tcp_pcb *tpcb = TCP_NEW();
ip_addr_t *ip = (ip_addr_t *) &sa->sin.sin_addr.s_addr;
u16_t port = ntohs(sa->sin.sin_port);
cs->err = TCP_BIND(tpcb, ip, port);
DBG(("%p tcp_bind(%s:%u) = %d", nc, IPADDR_NTOA(ip), port, cs->err));
if (cs->err != ERR_OK) {
tcp_close(tpcb);
return -1;
}
tcp_arg(tpcb, nc);
tpcb = tcp_listen(tpcb);
cs->pcb.tcp = tpcb;
tcp_accept(tpcb, mg_lwip_accept_cb);
return 0;
}
int mg_lwip_if_listen_udp(struct mg_connection *nc, union socket_address *sa) {
struct mg_lwip_conn_state *cs = (struct mg_lwip_conn_state *) nc->sock;
struct udp_pcb *upcb = udp_new();
ip_addr_t *ip = (ip_addr_t *) &sa->sin.sin_addr.s_addr;
u16_t port = ntohs(sa->sin.sin_port);
cs->err = UDP_BIND(upcb, ip, port);
DBG(("%p udb_bind(%s:%u) = %d", nc, IPADDR_NTOA(ip), port, cs->err));
if (cs->err != ERR_OK) {
udp_remove(upcb);
return -1;
}
udp_recv(upcb, mg_lwip_udp_recv_cb, nc);
cs->pcb.udp = upcb;
return 0;
}
int mg_lwip_tcp_write(struct mg_connection *nc, const void *data,
uint16_t len) {
struct mg_lwip_conn_state *cs = (struct mg_lwip_conn_state *) nc->sock;
struct tcp_pcb *tpcb = cs->pcb.tcp;
if (tpcb == NULL) return -1;
len = MIN(tpcb->mss, MIN(len, tpcb->snd_buf));
if (len == 0) {
DBG(("%p no buf avail %u %u %u %p %p", tpcb, tpcb->acked, tpcb->snd_buf,
tpcb->snd_queuelen, tpcb->unsent, tpcb->unacked));
tcp_output(tpcb);
return 0;
}
/*
* On ESP8266 we only allow one TCP segment in flight at any given time.
* This may increase latency and reduce efficiency of tcp windowing,
* but memory is scarce and precious on that platform so we do this to
* reduce footprint.
*/
#if CS_PLATFORM == CS_P_ESP8266
if (tpcb->unacked != NULL) {
return 0;
}
if (tpcb->unsent != NULL) {
len = MIN(len, (TCP_MSS - tpcb->unsent->len));
}
#endif
err_t err = tcp_write(tpcb, data, len, TCP_WRITE_FLAG_COPY);
DBG(("%p tcp_write %u = %d", tpcb, len, err));
if (err != ERR_OK) {
/*
* We ignore ERR_MEM because memory will be freed up when the data is sent
* and we'll retry.
*/
return (err == ERR_MEM ? 0 : -1);
}
return len;
}
static void mg_lwip_send_more(struct mg_connection *nc) {
struct mg_lwip_conn_state *cs = (struct mg_lwip_conn_state *) nc->sock;
if (nc->sock == INVALID_SOCKET || cs->pcb.tcp == NULL) {
DBG(("%p invalid socket", nc));
return;
}
int num_written = mg_lwip_tcp_write(nc, nc->send_mbuf.buf, nc->send_mbuf.len);
DBG(("%p mg_lwip_tcp_write %u = %d", nc, nc->send_mbuf.len, num_written));
if (num_written == 0) return;
if (num_written < 0) {
mg_lwip_post_signal(MG_SIG_CLOSE_CONN, nc);
}
mbuf_remove(&nc->send_mbuf, num_written);
mbuf_trim(&nc->send_mbuf);
}
void mg_lwip_if_tcp_send(struct mg_connection *nc, const void *buf,
size_t len) {
mbuf_append(&nc->send_mbuf, buf, len);
mg_lwip_mgr_schedule_poll(nc->mgr);
}
void mg_lwip_if_udp_send(struct mg_connection *nc, const void *buf,
size_t len) {
struct mg_lwip_conn_state *cs = (struct mg_lwip_conn_state *) nc->sock;
if (nc->sock == INVALID_SOCKET || cs->pcb.udp == NULL) {
    /*
     * For UDP this usually means that an async DNS resolve is still in
     * progress and the connection is not ready yet.
     */
DBG(("%p socket is not connected", nc));
return;
}
struct udp_pcb *upcb = cs->pcb.udp;
struct pbuf *p = pbuf_alloc(PBUF_TRANSPORT, len, PBUF_RAM);
ip_addr_t *ip = (ip_addr_t *) &nc->sa.sin.sin_addr.s_addr;
u16_t port = ntohs(nc->sa.sin.sin_port);
if (p == NULL) {
DBG(("OOM"));
return;
}
memcpy(p->payload, buf, len);
cs->err = udp_sendto(upcb, p, (ip_addr_t *) ip, port);
DBG(("%p udp_sendto = %d", nc, cs->err));
pbuf_free(p);
if (cs->err != ERR_OK) {
mg_lwip_post_signal(MG_SIG_CLOSE_CONN, nc);
} else {
cs->num_sent += len;
mg_lwip_post_signal(MG_SIG_SENT_CB, nc);
}
}
void mg_lwip_if_recved(struct mg_connection *nc, size_t len) {
if (nc->flags & MG_F_UDP) return;
struct mg_lwip_conn_state *cs = (struct mg_lwip_conn_state *) nc->sock;
if (nc->sock == INVALID_SOCKET || cs->pcb.tcp == NULL) {
DBG(("%p invalid socket", nc));
return;
}
DBG(("%p %p %u", nc, cs->pcb.tcp, len));
/* Currently SSL acknowledges data immediately.
* TODO(rojer): Find a way to propagate mg_lwip_if_recved. */
#if MG_ENABLE_SSL
if (!(nc->flags & MG_F_SSL)) {
tcp_recved(cs->pcb.tcp, len);
}
#else
tcp_recved(cs->pcb.tcp, len);
#endif
mbuf_trim(&nc->recv_mbuf);
}
int mg_lwip_if_create_conn(struct mg_connection *nc) {
struct mg_lwip_conn_state *cs =
(struct mg_lwip_conn_state *) MG_CALLOC(1, sizeof(*cs));
if (cs == NULL) return 0;
cs->nc = nc;
nc->sock = (intptr_t) cs;
return 1;
}
void mg_lwip_if_destroy_conn(struct mg_connection *nc) {
if (nc->sock == INVALID_SOCKET) return;
struct mg_lwip_conn_state *cs = (struct mg_lwip_conn_state *) nc->sock;
if (!(nc->flags & MG_F_UDP)) {
struct tcp_pcb *tpcb = cs->pcb.tcp;
if (tpcb != NULL) {
      tcp_arg(tpcb, NULL);
      DBG(("%p tcp_close %p", nc, tpcb));
      tcp_close(tpcb);
}
while (cs->rx_chain != NULL) {
struct pbuf *seg = cs->rx_chain;
cs->rx_chain = pbuf_dechain(cs->rx_chain);
pbuf_free(seg);
}
memset(cs, 0, sizeof(*cs));
MG_FREE(cs);
} else if (nc->listener == NULL) {
/* Only close outgoing UDP pcb or listeners. */
struct udp_pcb *upcb = cs->pcb.udp;
if (upcb != NULL) {
DBG(("%p udp_remove %p", nc, upcb));
udp_remove(upcb);
}
memset(cs, 0, sizeof(*cs));
MG_FREE(cs);
}
nc->sock = INVALID_SOCKET;
}
void mg_lwip_if_get_conn_addr(struct mg_connection *nc, int remote,
union socket_address *sa) {
memset(sa, 0, sizeof(*sa));
if (nc->sock == INVALID_SOCKET) return;
struct mg_lwip_conn_state *cs = (struct mg_lwip_conn_state *) nc->sock;
if (nc->flags & MG_F_UDP) {
struct udp_pcb *upcb = cs->pcb.udp;
if (remote) {
memcpy(sa, &nc->sa, sizeof(*sa));
} else {
sa->sin.sin_port = htons(upcb->local_port);
SET_ADDR(sa, &upcb->local_ip);
}
} else {
struct tcp_pcb *tpcb = cs->pcb.tcp;
if (remote) {
sa->sin.sin_port = htons(tpcb->remote_port);
SET_ADDR(sa, &tpcb->remote_ip);
} else {
sa->sin.sin_port = htons(tpcb->local_port);
SET_ADDR(sa, &tpcb->local_ip);
}
}
}
void mg_lwip_if_sock_set(struct mg_connection *nc, sock_t sock) {
nc->sock = sock;
}
/* clang-format off */
#define MG_LWIP_IFACE_VTABLE \
{ \
mg_lwip_if_init, \
mg_lwip_if_free, \
mg_lwip_if_add_conn, \
mg_lwip_if_remove_conn, \
mg_lwip_if_poll, \
mg_lwip_if_listen_tcp, \
mg_lwip_if_listen_udp, \
mg_lwip_if_connect_tcp, \
mg_lwip_if_connect_udp, \
mg_lwip_if_tcp_send, \
mg_lwip_if_udp_send, \
mg_lwip_if_recved, \
mg_lwip_if_create_conn, \
mg_lwip_if_destroy_conn, \
mg_lwip_if_sock_set, \
mg_lwip_if_get_conn_addr, \
}
/* clang-format on */
const struct mg_iface_vtable mg_lwip_iface_vtable = MG_LWIP_IFACE_VTABLE;
#if MG_NET_IF == MG_NET_IF_LWIP_LOW_LEVEL
const struct mg_iface_vtable mg_default_iface_vtable = MG_LWIP_IFACE_VTABLE;
#endif
#endif /* MG_ENABLE_NET_IF_LWIP_LOW_LEVEL */
#ifdef MG_MODULE_LINES
#line 1 "common/platforms/lwip/mg_lwip_ev_mgr.c"
#endif
/*
* Copyright (c) 2014-2016 Cesanta Software Limited
* All rights reserved
*/
#if MG_NET_IF == MG_NET_IF_LWIP_LOW_LEVEL
#ifndef MG_SIG_QUEUE_LEN
#define MG_SIG_QUEUE_LEN 32
#endif
struct mg_ev_mgr_lwip_signal {
int sig;
struct mg_connection *nc;
};
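/*
 * Fixed-size ring buffer of pending signals: start_index is the read
 * position; writes go to (start_index + sig_queue_len) % MG_SIG_QUEUE_LEN.
 */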
struct mg_ev_mgr_lwip_data {
struct mg_ev_mgr_lwip_signal sig_queue[MG_SIG_QUEUE_LEN];
int sig_queue_len;
int start_index;
};
void mg_lwip_post_signal(enum mg_sig_type sig, struct mg_connection *nc) {
struct mg_ev_mgr_lwip_data *md =
(struct mg_ev_mgr_lwip_data *) nc->iface->data;
mgos_lock();
if (md->sig_queue_len >= MG_SIG_QUEUE_LEN) {
mgos_unlock();
return;
}
int end_index = (md->start_index + md->sig_queue_len) % MG_SIG_QUEUE_LEN;
md->sig_queue[end_index].sig = sig;
md->sig_queue[end_index].nc = nc;
md->sig_queue_len++;
mg_lwip_mgr_schedule_poll(nc->mgr);
mgos_unlock();
}
void mg_ev_mgr_lwip_process_signals(struct mg_mgr *mgr) {
struct mg_ev_mgr_lwip_data *md =
(struct mg_ev_mgr_lwip_data *) mgr->ifaces[MG_MAIN_IFACE]->data;
while (md->sig_queue_len > 0) {
mgos_lock();
int sig = md->sig_queue[md->start_index].sig;
struct mg_connection *nc = md->sig_queue[md->start_index].nc;
struct mg_lwip_conn_state *cs = (struct mg_lwip_conn_state *) nc->sock;
md->start_index = (md->start_index + 1) % MG_SIG_QUEUE_LEN;
md->sig_queue_len--;
mgos_unlock();
if (nc->iface == NULL || nc->mgr == NULL) continue;
switch (sig) {
case MG_SIG_CONNECT_RESULT: {
#if MG_ENABLE_SSL
if (cs->err == 0 && (nc->flags & MG_F_SSL) &&
!(nc->flags & MG_F_SSL_HANDSHAKE_DONE)) {
mg_lwip_ssl_do_hs(nc);
} else
#endif
{
mg_if_connect_cb(nc, cs->err);
}
break;
}
case MG_SIG_CLOSE_CONN: {
nc->flags |= MG_F_CLOSE_IMMEDIATELY;
mg_close_conn(nc);
break;
}
case MG_SIG_RECV: {
cs->recv_pending = 0;
if (nc->flags & MG_F_UDP) {
mg_lwip_handle_recv_udp(nc);
} else {
mg_lwip_handle_recv_tcp(nc);
}
break;
}
case MG_SIG_SENT_CB: {
if (cs->num_sent > 0) mg_if_sent_cb(nc, cs->num_sent);
cs->num_sent = 0;
if (nc->send_mbuf.len == 0 && (nc->flags & MG_F_SEND_AND_CLOSE) &&
!(nc->flags & MG_F_WANT_WRITE)) {
mg_close_conn(nc);
}
break;
}
case MG_SIG_TOMBSTONE: {
break;
}
case MG_SIG_ACCEPT: {
mg_lwip_handle_accept(nc);
break;
}
}
}
}
void mg_lwip_if_init(struct mg_iface *iface) {
LOG(LL_INFO, ("%p Mongoose init"));
iface->data = MG_CALLOC(1, sizeof(struct mg_ev_mgr_lwip_data));
}
void mg_lwip_if_free(struct mg_iface *iface) {
MG_FREE(iface->data);
iface->data = NULL;
}
void mg_lwip_if_add_conn(struct mg_connection *nc) {
(void) nc;
}
void mg_lwip_if_remove_conn(struct mg_connection *nc) {
struct mg_ev_mgr_lwip_data *md =
(struct mg_ev_mgr_lwip_data *) nc->iface->data;
/* Walk the queue and null-out further signals for this conn. */
for (int i = 0; i < MG_SIG_QUEUE_LEN; i++) {
if (md->sig_queue[i].nc == nc) {
md->sig_queue[i].sig = MG_SIG_TOMBSTONE;
}
}
}
time_t mg_lwip_if_poll(struct mg_iface *iface, int timeout_ms) {
struct mg_mgr *mgr = iface->mgr;
int n = 0;
double now = mg_time();
struct mg_connection *nc, *tmp;
double min_timer = 0;
int num_timers = 0;
#if 0
DBG(("begin poll @%u", (unsigned int) (now * 1000)));
#endif
mg_ev_mgr_lwip_process_signals(mgr);
for (nc = mgr->active_connections; nc != NULL; nc = tmp) {
struct mg_lwip_conn_state *cs = (struct mg_lwip_conn_state *) nc->sock;
tmp = nc->next;
n++;
if ((nc->flags & MG_F_CLOSE_IMMEDIATELY) ||
((nc->flags & MG_F_SEND_AND_CLOSE) && (nc->flags & MG_F_UDP) &&
(nc->send_mbuf.len == 0))) {
mg_close_conn(nc);
continue;
}
mg_if_poll(nc, now);
mg_if_timer(nc, now);
#if MG_ENABLE_SSL
if ((nc->flags & MG_F_SSL) && cs != NULL && cs->pcb.tcp != NULL &&
cs->pcb.tcp->state == ESTABLISHED) {
if (((nc->flags & MG_F_WANT_WRITE) ||
((nc->send_mbuf.len > 0) &&
(nc->flags & MG_F_SSL_HANDSHAKE_DONE))) &&
cs->pcb.tcp->snd_buf > 0) {
/* Can write more. */
if (nc->flags & MG_F_SSL_HANDSHAKE_DONE) {
if (!(nc->flags & MG_F_CONNECTING)) mg_lwip_ssl_send(nc);
} else {
mg_lwip_ssl_do_hs(nc);
}
}
if (cs->rx_chain != NULL || (nc->flags & MG_F_WANT_READ)) {
if (nc->flags & MG_F_SSL_HANDSHAKE_DONE) {
if (!(nc->flags & MG_F_CONNECTING)) mg_lwip_ssl_recv(nc);
} else {
mg_lwip_ssl_do_hs(nc);
}
}
} else
#endif /* MG_ENABLE_SSL */
{
if (!(nc->flags & (MG_F_CONNECTING | MG_F_UDP))) {
if (nc->send_mbuf.len > 0) mg_lwip_send_more(nc);
}
}
if (nc->sock != INVALID_SOCKET &&
!(nc->flags & (MG_F_UDP | MG_F_LISTENING)) && cs->pcb.tcp != NULL &&
cs->pcb.tcp->unsent != NULL) {
tcp_output(cs->pcb.tcp);
}
if (nc->ev_timer_time > 0) {
if (num_timers == 0 || nc->ev_timer_time < min_timer) {
min_timer = nc->ev_timer_time;
}
num_timers++;
}
}
#if 0
DBG(("end poll @%u, %d conns, %d timers (min %u), next in %d ms",
(unsigned int) (now * 1000), n, num_timers,
(unsigned int) (min_timer * 1000), timeout_ms));
#endif
(void) timeout_ms;
return now;
}
uint32_t mg_lwip_get_poll_delay_ms(struct mg_mgr *mgr) {
struct mg_connection *nc;
double now = mg_time();
double min_timer = 0;
int num_timers = 0;
mg_ev_mgr_lwip_process_signals(mgr);
for (nc = mg_next(mgr, NULL); nc != NULL; nc = mg_next(mgr, nc)) {
struct mg_lwip_conn_state *cs = (struct mg_lwip_conn_state *) nc->sock;
if (nc->ev_timer_time > 0) {
if (num_timers == 0 || nc->ev_timer_time < min_timer) {
min_timer = nc->ev_timer_time;
}
num_timers++;
}
if (nc->send_mbuf.len > 0) {
int can_send = 0;
/* We have stuff to send, but can we? */
if (nc->flags & MG_F_UDP) {
/* UDP is always ready for sending. */
can_send = (cs->pcb.udp != NULL);
} else {
can_send = (cs->pcb.tcp != NULL && cs->pcb.tcp->snd_buf > 0);
}
/* We want and can send, request a poll immediately. */
if (can_send) return 0;
}
}
uint32_t timeout_ms = ~0;
if (num_timers > 0) {
double timer_timeout_ms = (min_timer - now) * 1000 + 1 /* rounding */;
if (timer_timeout_ms < timeout_ms) {
timeout_ms = timer_timeout_ms;
}
}
return timeout_ms;
}
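/*
 * Sketch of how a platform's poll loop might use the functions above
 * (illustrative only, not compiled; vTaskDelay/pdMS_TO_TICKS assume FreeRTOS):
 */
#if 0
for (;;) {
  mg_mgr_poll(&mgr, 0);
  uint32_t delay_ms = mg_lwip_get_poll_delay_ms(&mgr);
  if (delay_ms > 100) delay_ms = 100; /* Cap as a safety margin. */
  vTaskDelay(pdMS_TO_TICKS(delay_ms));
}
#endif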
#endif /* MG_NET_IF == MG_NET_IF_LWIP_LOW_LEVEL */
#ifdef MG_MODULE_LINES
#line 1 "common/platforms/lwip/mg_lwip_ssl_if.c"
#endif
/*
* Copyright (c) 2014-2016 Cesanta Software Limited
* All rights reserved
*/
#if MG_ENABLE_SSL && MG_NET_IF == MG_NET_IF_LWIP_LOW_LEVEL
/* Amalgamated: #include "common/mg_mem.h" */
/* Amalgamated: #include "common/cs_dbg.h" */
#include <lwip/pbuf.h>
#include <lwip/tcp.h>
#ifndef MG_LWIP_SSL_IO_SIZE
#define MG_LWIP_SSL_IO_SIZE 1024
#endif
/*
* Stop processing incoming SSL traffic when recv_mbuf.size is this big.
 * It's a quick solution for SSL recv pushback.
*/
#ifndef MG_LWIP_SSL_RECV_MBUF_LIMIT
#define MG_LWIP_SSL_RECV_MBUF_LIMIT 3072
#endif
#ifndef MIN
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#endif
void mg_lwip_ssl_do_hs(struct mg_connection *nc) {
struct mg_lwip_conn_state *cs = (struct mg_lwip_conn_state *) nc->sock;
int server_side = (nc->listener != NULL);
enum mg_ssl_if_result res;
if (nc->flags & MG_F_CLOSE_IMMEDIATELY) return;
res = mg_ssl_if_handshake(nc);
DBG(("%p %d %d %d", nc, nc->flags, server_side, res));
if (res != MG_SSL_OK) {
if (res == MG_SSL_WANT_WRITE) {
nc->flags |= MG_F_WANT_WRITE;
cs->err = 0;
} else if (res == MG_SSL_WANT_READ) {
/*
* Nothing to do in particular, we are callback-driven.
* What we definitely do not need anymore is SSL reading (nothing left).
*/
nc->flags &= ~MG_F_WANT_READ;
cs->err = 0;
} else {
cs->err = res;
if (server_side) {
mg_lwip_post_signal(MG_SIG_CLOSE_CONN, nc);
} else {
mg_lwip_post_signal(MG_SIG_CONNECT_RESULT, nc);
}
}
} else {
cs->err = 0;
nc->flags &= ~MG_F_WANT_WRITE;
/*
* Handshake is done. Schedule a read immediately to consume app data
* which may already be waiting.
*/
nc->flags |= (MG_F_SSL_HANDSHAKE_DONE | MG_F_WANT_READ);
if (server_side) {
mg_lwip_accept_conn(nc, cs->pcb.tcp);
} else {
mg_lwip_post_signal(MG_SIG_CONNECT_RESULT, nc);
}
}
}
void mg_lwip_ssl_send(struct mg_connection *nc) {
if (nc->sock == INVALID_SOCKET) {
DBG(("%p invalid socket", nc));
return;
}
struct mg_lwip_conn_state *cs = (struct mg_lwip_conn_state *) nc->sock;
/* It's ok if the buffer is empty. Return value of 0 may also be valid. */
int len = cs->last_ssl_write_size;
if (len == 0) {
len = MIN(MG_LWIP_SSL_IO_SIZE, nc->send_mbuf.len);
}
int ret = mg_ssl_if_write(nc, nc->send_mbuf.buf, len);
DBG(("%p SSL_write %u = %d, %d", nc, len, ret));
if (ret > 0) {
mbuf_remove(&nc->send_mbuf, ret);
mbuf_trim(&nc->send_mbuf);
cs->last_ssl_write_size = 0;
} else if (ret < 0) {
/* This is tricky. We must remember the exact data we were sending to retry
* exactly the same send next time. */
cs->last_ssl_write_size = len;
}
if (ret == len) {
nc->flags &= ~MG_F_WANT_WRITE;
} else if (ret == MG_SSL_WANT_WRITE) {
nc->flags |= MG_F_WANT_WRITE;
} else {
mg_lwip_post_signal(MG_SIG_CLOSE_CONN, nc);
}
}
void mg_lwip_ssl_recv(struct mg_connection *nc) {
struct mg_lwip_conn_state *cs = (struct mg_lwip_conn_state *) nc->sock;
/* Don't deliver data before connect callback */
if (nc->flags & MG_F_CONNECTING) return;
while (nc->recv_mbuf.len < MG_LWIP_SSL_RECV_MBUF_LIMIT) {
char *buf = (char *) MG_MALLOC(MG_LWIP_SSL_IO_SIZE);
if (buf == NULL) return;
int ret = mg_ssl_if_read(nc, buf, MG_LWIP_SSL_IO_SIZE);
DBG(("%p %p SSL_read %u = %d", nc, cs->rx_chain, MG_LWIP_SSL_IO_SIZE, ret));
if (ret <= 0) {
MG_FREE(buf);
if (ret == MG_SSL_WANT_WRITE) {
nc->flags |= MG_F_WANT_WRITE;
return;
} else if (ret == MG_SSL_WANT_READ) {
/*
* Nothing to do in particular, we are callback-driven.
* What we definitely do not need anymore is SSL reading (nothing left).
*/
nc->flags &= ~MG_F_WANT_READ;
cs->err = 0;
return;
} else {
mg_lwip_post_signal(MG_SIG_CLOSE_CONN, nc);
return;
}
} else {
mg_if_recv_tcp_cb(nc, buf, ret, 1 /* own */);
}
}
}
#ifdef KR_VERSION
ssize_t kr_send(int fd, const void *buf, size_t len) {
struct mg_lwip_conn_state *cs = (struct mg_lwip_conn_state *) fd;
int ret = mg_lwip_tcp_write(cs->nc, buf, len);
DBG(("%p mg_lwip_tcp_write %u = %d", cs->nc, len, ret));
if (ret == 0) ret = KR_IO_WOULDBLOCK;
return ret;
}
ssize_t kr_recv(int fd, void *buf, size_t len) {
struct mg_lwip_conn_state *cs = (struct mg_lwip_conn_state *) fd;
struct pbuf *seg = cs->rx_chain;
if (seg == NULL) {
DBG(("%u - nothing to read", len));
return KR_IO_WOULDBLOCK;
}
size_t seg_len = (seg->len - cs->rx_offset);
DBG(("%u %u %u %u", len, cs->rx_chain->len, seg_len, cs->rx_chain->tot_len));
len = MIN(len, seg_len);
pbuf_copy_partial(seg, buf, len, cs->rx_offset);
cs->rx_offset += len;
tcp_recved(cs->pcb.tcp, len);
if (cs->rx_offset == cs->rx_chain->len) {
cs->rx_chain = pbuf_dechain(cs->rx_chain);
pbuf_free(seg);
cs->rx_offset = 0;
}
return len;
}
#elif MG_SSL_IF == MG_SSL_IF_MBEDTLS
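/*
 * mbedTLS BIO callbacks (the signatures match mbedtls_ssl_set_bio());
 * presumably wired up by the mbedTLS glue with the connection as ctx.
 */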
int ssl_socket_send(void *ctx, const unsigned char *buf, size_t len) {
struct mg_connection *nc = (struct mg_connection *) ctx;
struct mg_lwip_conn_state *cs = (struct mg_lwip_conn_state *) nc->sock;
int ret = mg_lwip_tcp_write(cs->nc, buf, len);
LOG(LL_DEBUG, ("%p %d -> %d", nc, len, ret));
if (ret == 0) ret = MBEDTLS_ERR_SSL_WANT_WRITE;
return ret;
}
int ssl_socket_recv(void *ctx, unsigned char *buf, size_t len) {
struct mg_connection *nc = (struct mg_connection *) ctx;
struct mg_lwip_conn_state *cs = (struct mg_lwip_conn_state *) nc->sock;
struct pbuf *seg = cs->rx_chain;
if (seg == NULL) {
DBG(("%u - nothing to read", len));
return MBEDTLS_ERR_SSL_WANT_READ;
}
size_t seg_len = (seg->len - cs->rx_offset);
DBG(("%u %u %u %u", len, cs->rx_chain->len, seg_len, cs->rx_chain->tot_len));
len = MIN(len, seg_len);
pbuf_copy_partial(seg, buf, len, cs->rx_offset);
cs->rx_offset += len;
/* TCP PCB may be NULL if connection has already been closed
* but we still have data to deliver to SSL. */
if (cs->pcb.tcp != NULL) tcp_recved(cs->pcb.tcp, len);
if (cs->rx_offset == cs->rx_chain->len) {
cs->rx_chain = pbuf_dechain(cs->rx_chain);
pbuf_free(seg);
cs->rx_offset = 0;
}
LOG(LL_DEBUG, ("%p <- %d", nc, (int) len));
return len;
}
#endif
#endif /* MG_ENABLE_SSL && MG_NET_IF == MG_NET_IF_LWIP_LOW_LEVEL */
#ifdef MG_MODULE_LINES
#line 1 "common/platforms/wince/wince_libc.c"
#endif
/*
* Copyright (c) 2016 Cesanta Software Limited
* All rights reserved
*/
#ifdef WINCE
const char *strerror(int err) {
/*
* TODO(alashkin): there is no strerror on WinCE;
* look for similar wce_xxxx function
*/
static char buf[10];
snprintf(buf, sizeof(buf), "%d", err);
return buf;
}
int open(const char *filename, int oflag, int pmode) {
  /*
   * TODO(alashkin): the mg_open function is not used in Mongoose itself but
   * is documented as a utility function.
   * Shall we delete it altogether or implement it for WinCE as well?
   */
DebugBreak();
return 0; /* for compiler */
}
int _wstati64(const wchar_t *path, cs_stat_t *st) {
DWORD fa = GetFileAttributesW(path);
if (fa == INVALID_FILE_ATTRIBUTES) {
return -1;
}
memset(st, 0, sizeof(*st));
if ((fa & FILE_ATTRIBUTE_DIRECTORY) == 0) {
HANDLE h;
FILETIME ftime;
st->st_mode |= _S_IFREG;
h = CreateFileW(path, GENERIC_READ, 0, NULL, OPEN_EXISTING,
FILE_ATTRIBUTE_NORMAL, NULL);
if (h == INVALID_HANDLE_VALUE) {
return -1;
}
st->st_size = GetFileSize(h, NULL);
GetFileTime(h, NULL, NULL, &ftime);
st->st_mtime = (uint32_t)((((uint64_t) ftime.dwLowDateTime +
((uint64_t) ftime.dwHighDateTime << 32)) /
10000000.0) -
11644473600);
CloseHandle(h);
} else {
st->st_mode |= _S_IFDIR;
}
return 0;
}
/* Windows CE has neither gmtime nor strftime */
static void mg_gmt_time_string(char *buf, size_t buf_len, time_t *t) {
FILETIME ft;
SYSTEMTIME systime;
if (t != NULL) {
uint64_t filetime = (*t + 11644473600) * 10000000;
ft.dwLowDateTime = filetime & 0xFFFFFFFF;
ft.dwHighDateTime = (filetime & 0xFFFFFFFF00000000) >> 32;
FileTimeToSystemTime(&ft, &systime);
} else {
GetSystemTime(&systime);
}
/* There is no PRIu16 in WinCE SDK */
snprintf(buf, buf_len, "%d.%d.%d %d:%d:%d GMT", (int) systime.wYear,
(int) systime.wMonth, (int) systime.wDay, (int) systime.wHour,
(int) systime.wMinute, (int) systime.wSecond);
}
#endif
#ifdef MG_MODULE_LINES
#line 1 "common/platforms/pic32/pic32_net_if.h"
#endif
/*
* Copyright (c) 2014-2016 Cesanta Software Limited
* All rights reserved
*/
#ifndef CS_COMMON_PLATFORMS_PIC32_NET_IF_H_
#define CS_COMMON_PLATFORMS_PIC32_NET_IF_H_
/* Amalgamated: #include "mongoose/src/net_if.h" */
#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */
#ifndef MG_ENABLE_NET_IF_PIC32
#define MG_ENABLE_NET_IF_PIC32 MG_NET_IF == MG_NET_IF_PIC32
#endif
extern const struct mg_iface_vtable mg_pic32_iface_vtable;
#ifdef __cplusplus
}
#endif /* __cplusplus */
#endif /* CS_COMMON_PLATFORMS_PIC32_NET_IF_H_ */
#ifdef MG_MODULE_LINES
#line 1 "common/platforms/pic32/pic32_net_if.c"
#endif
/*
* Copyright (c) 2014-2016 Cesanta Software Limited
* All rights reserved
*/
#if MG_ENABLE_NET_IF_PIC32
int mg_pic32_if_create_conn(struct mg_connection *nc) {
(void) nc;
return 1;
}
void mg_pic32_if_recved(struct mg_connection *nc, size_t len) {
(void) nc;
(void) len;
}
void mg_pic32_if_add_conn(struct mg_connection *nc) {
(void) nc;
}
void mg_pic32_if_init(struct mg_iface *iface) {
(void) iface;
(void) mg_get_errno(); /* Silence "unused" compiler warning */
}
void mg_pic32_if_free(struct mg_iface *iface) {
(void) iface;
}
void mg_pic32_if_remove_conn(struct mg_connection *nc) {
(void) nc;
}
void mg_pic32_if_destroy_conn(struct mg_connection *nc) {
if (nc->sock == INVALID_SOCKET) return;
/* For UDP, only close outgoing sockets or listeners. */
if (!(nc->flags & MG_F_UDP)) {
/* Close TCP */
TCPIP_TCP_Close((TCP_SOCKET) nc->sock);
} else if (nc->listener == NULL) {
/* Only close outgoing UDP or listeners. */
TCPIP_UDP_Close((UDP_SOCKET) nc->sock);
}
nc->sock = INVALID_SOCKET;
}
int mg_pic32_if_listen_udp(struct mg_connection *nc, union socket_address *sa) {
nc->sock = TCPIP_UDP_ServerOpen(
sa->sin.sin_family == AF_INET ? IP_ADDRESS_TYPE_IPV4
: IP_ADDRESS_TYPE_IPV6,
ntohs(sa->sin.sin_port),
sa->sin.sin_addr.s_addr == 0 ? 0 : (IP_MULTI_ADDRESS *) &sa->sin);
if (nc->sock == INVALID_SOCKET) {
return -1;
}
return 0;
}
void mg_pic32_if_udp_send(struct mg_connection *nc, const void *buf,
size_t len) {
mbuf_append(&nc->send_mbuf, buf, len);
}
void mg_pic32_if_tcp_send(struct mg_connection *nc, const void *buf,
size_t len) {
mbuf_append(&nc->send_mbuf, buf, len);
}
int mg_pic32_if_listen_tcp(struct mg_connection *nc, union socket_address *sa) {
nc->sock = TCPIP_TCP_ServerOpen(
sa->sin.sin_family == AF_INET ? IP_ADDRESS_TYPE_IPV4
: IP_ADDRESS_TYPE_IPV6,
ntohs(sa->sin.sin_port),
sa->sin.sin_addr.s_addr == 0 ? 0 : (IP_MULTI_ADDRESS *) &sa->sin);
memcpy(&nc->sa, sa, sizeof(*sa));
if (nc->sock == INVALID_SOCKET) {
return -1;
}
return 0;
}
static int mg_accept_conn(struct mg_connection *lc) {
struct mg_connection *nc;
TCP_SOCKET_INFO si;
union socket_address sa;
nc = mg_if_accept_new_conn(lc);
if (nc == NULL) {
return 0;
}
nc->sock = lc->sock;
nc->flags &= ~MG_F_LISTENING;
if (!TCPIP_TCP_SocketInfoGet((TCP_SOCKET) nc->sock, &si)) {
return 0;
}
if (si.addressType == IP_ADDRESS_TYPE_IPV4) {
sa.sin.sin_family = AF_INET;
sa.sin.sin_port = htons(si.remotePort);
sa.sin.sin_addr.s_addr = si.remoteIPaddress.v4Add.Val;
} else {
/* TODO(alashkin): do something with _potential_ IPv6 */
memset(&sa, 0, sizeof(sa));
}
mg_if_accept_tcp_cb(nc, (union socket_address *) &sa, sizeof(sa));
return mg_pic32_if_listen_tcp(lc, &lc->sa) >= 0;
}
char *inet_ntoa(struct in_addr in) {
static char addr[17];
snprintf(addr, sizeof(addr), "%d.%d.%d.%d", (int) in.S_un.S_un_b.s_b1,
(int) in.S_un.S_un_b.s_b2, (int) in.S_un.S_un_b.s_b3,
(int) in.S_un.S_un_b.s_b4);
return addr;
}
static void mg_handle_send(struct mg_connection *nc) {
uint16_t bytes_written = 0;
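/*
 * UDP payloads are sent as whole datagrams: bind to the peer first, then
 * write only if the TX buffer can take the entire pending payload. TCP
 * writes as much as the socket's TX FIFO currently accepts; either way,
 * whatever was written is removed from send_mbuf and reported upward.
 */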
if (nc->flags & MG_F_UDP) {
if (!TCPIP_UDP_RemoteBind(
(UDP_SOCKET) nc->sock,
nc->sa.sin.sin_family == AF_INET ? IP_ADDRESS_TYPE_IPV4
: IP_ADDRESS_TYPE_IPV6,
ntohs(nc->sa.sin.sin_port), (IP_MULTI_ADDRESS *) &nc->sa.sin)) {
nc->flags |= MG_F_CLOSE_IMMEDIATELY;
return;
}
bytes_written = TCPIP_UDP_TxPutIsReady((UDP_SOCKET) nc->sock, 0);
if (bytes_written >= nc->send_mbuf.len) {
if (TCPIP_UDP_ArrayPut((UDP_SOCKET) nc->sock,
(uint8_t *) nc->send_mbuf.buf,
nc->send_mbuf.len) != nc->send_mbuf.len) {
nc->flags |= MG_F_CLOSE_IMMEDIATELY;
bytes_written = 0;
}
}
} else {
bytes_written = TCPIP_TCP_FifoTxFreeGet((TCP_SOCKET) nc->sock);
if (bytes_written != 0) {
if (bytes_written > nc->send_mbuf.len) {
bytes_written = nc->send_mbuf.len;
}
if (TCPIP_TCP_ArrayPut((TCP_SOCKET) nc->sock,
(uint8_t *) nc->send_mbuf.buf,
bytes_written) != bytes_written) {
nc->flags |= MG_F_CLOSE_IMMEDIATELY;
bytes_written = 0;
}
}
}
if (bytes_written != 0) {
mbuf_remove(&nc->send_mbuf, bytes_written);
mg_if_sent_cb(nc, bytes_written);
}
}
static void mg_handle_recv(struct mg_connection *nc) {
uint16_t bytes_read = 0;
uint8_t *buf = NULL;
if (nc->flags & MG_F_UDP) {
bytes_read = TCPIP_UDP_GetIsReady((UDP_SOCKET) nc->sock);
if (bytes_read != 0 &&
(nc->recv_mbuf_limit == -1 ||
nc->recv_mbuf.len + bytes_read < nc->recv_mbuf_limit)) {
buf = (uint8_t *) MG_MALLOC(bytes_read);
if (TCPIP_UDP_ArrayGet((UDP_SOCKET) nc->sock, buf, bytes_read) !=
bytes_read) {
nc->flags |= MG_F_CLOSE_IMMEDIATELY;
bytes_read = 0;
MG_FREE(buf);
}
}
} else {
bytes_read = TCPIP_TCP_GetIsReady((TCP_SOCKET) nc->sock);
if (bytes_read != 0) {
if (nc->recv_mbuf_limit != -1 &&
nc->recv_mbuf_limit - nc->recv_mbuf.len < bytes_read) {
bytes_read = nc->recv_mbuf_limit - nc->recv_mbuf.len;
}
buf = (uint8_t *) MG_MALLOC(bytes_read);
if (TCPIP_TCP_ArrayGet((TCP_SOCKET) nc->sock, buf, bytes_read) !=
bytes_read) {
nc->flags |= MG_F_CLOSE_IMMEDIATELY;
MG_FREE(buf);
bytes_read = 0;
}
}
}
if (bytes_read != 0) {
mg_if_recv_tcp_cb(nc, buf, bytes_read, 1 /* own */);
}
}
time_t mg_pic32_if_poll(struct mg_iface *iface, int timeout_ms) {
struct mg_mgr *mgr = iface->mgr;
double now = mg_time();
struct mg_connection *nc, *tmp;
for (nc = mgr->active_connections; nc != NULL; nc = tmp) {
tmp = nc->next;
if (nc->flags & MG_F_CONNECTING) {
/* processing connections */
if (nc->flags & MG_F_UDP ||
TCPIP_TCP_IsConnected((TCP_SOCKET) nc->sock)) {
mg_if_connect_cb(nc, 0);
}
} else if (nc->flags & MG_F_LISTENING) {
if (TCPIP_TCP_IsConnected((TCP_SOCKET) nc->sock)) {
/* accept new connections */
mg_accept_conn(nc);
}
} else {
if (nc->send_mbuf.len != 0) {
mg_handle_send(nc);
}
if (nc->recv_mbuf_limit == -1 ||
nc->recv_mbuf.len < nc->recv_mbuf_limit) {
mg_handle_recv(nc);
}
}
}
for (nc = mgr->active_connections; nc != NULL; nc = tmp) {
tmp = nc->next;
if ((nc->flags & MG_F_CLOSE_IMMEDIATELY) ||
(nc->send_mbuf.len == 0 && (nc->flags & MG_F_SEND_AND_CLOSE))) {
mg_close_conn(nc);
}
}
return now;
}
void mg_pic32_if_sock_set(struct mg_connection *nc, sock_t sock) {
nc->sock = sock;
}
void mg_pic32_if_get_conn_addr(struct mg_connection *nc, int remote,
union socket_address *sa) {
/* TODO(alaskin): not implemented yet */
}
void mg_pic32_if_connect_tcp(struct mg_connection *nc,
const union socket_address *sa) {
nc->sock = TCPIP_TCP_ClientOpen(
sa->sin.sin_family == AF_INET ? IP_ADDRESS_TYPE_IPV4
: IP_ADDRESS_TYPE_IPV6,
ntohs(sa->sin.sin_port), (IP_MULTI_ADDRESS *) &sa->sin);
nc->err = (nc->sock == INVALID_SOCKET) ? -1 : 0;
}
void mg_pic32_if_connect_udp(struct mg_connection *nc) {
nc->sock = TCPIP_UDP_ClientOpen(IP_ADDRESS_TYPE_ANY, 0, NULL);
nc->err = (nc->sock == INVALID_SOCKET) ? -1 : 0;
}
/* clang-format off */
#define MG_PIC32_IFACE_VTABLE \
{ \
mg_pic32_if_init, \
mg_pic32_if_free, \
mg_pic32_if_add_conn, \
mg_pic32_if_remove_conn, \
mg_pic32_if_poll, \
mg_pic32_if_listen_tcp, \
mg_pic32_if_listen_udp, \
mg_pic32_if_connect_tcp, \
mg_pic32_if_connect_udp, \
mg_pic32_if_tcp_send, \
mg_pic32_if_udp_send, \
mg_pic32_if_recved, \
mg_pic32_if_create_conn, \
mg_pic32_if_destroy_conn, \
mg_pic32_if_sock_set, \
mg_pic32_if_get_conn_addr, \
}
/* clang-format on */
const struct mg_iface_vtable mg_pic32_iface_vtable = MG_PIC32_IFACE_VTABLE;
#if MG_NET_IF == MG_NET_IF_PIC32
const struct mg_iface_vtable mg_default_iface_vtable = MG_PIC32_IFACE_VTABLE;
#endif
#endif /* MG_ENABLE_NET_IF_PIC32 */
| ./CrossVul/dataset_final_sorted/CWE-416/c/bad_3243_0 |
crossvul-cpp_data_good_903_2 | /*
irc-servers-setup.c : irssi
Copyright (C) 1999-2000 Timo Sirainen
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include "module.h"
#include <irssi/src/core/signals.h>
#include <irssi/src/core/network.h>
#include <irssi/src/core/servers-setup.h>
#include <irssi/src/lib-config/iconfig.h>
#include <irssi/src/core/settings.h>
#include <irssi/src/irc/core/irc-chatnets.h>
#include <irssi/src/irc/core/irc-servers-setup.h>
#include <irssi/src/irc/core/irc-servers.h>
#include <irssi/src/irc/core/sasl.h>
/* Fill information to connection from server setup record */
static void sig_server_setup_fill_reconn(IRC_SERVER_CONNECT_REC *conn,
IRC_SERVER_SETUP_REC *sserver)
{
if (!IS_IRC_SERVER_CONNECT(conn) ||
!IS_IRC_SERVER_SETUP(sserver))
return;
if (sserver->cmd_queue_speed > 0)
conn->cmd_queue_speed = sserver->cmd_queue_speed;
if (sserver->max_cmds_at_once > 0)
conn->max_cmds_at_once = sserver->max_cmds_at_once;
if (sserver->max_query_chans > 0)
conn->max_query_chans = sserver->max_query_chans;
}
static void sig_server_setup_fill_connect(IRC_SERVER_CONNECT_REC *conn)
{
const char *value;
if (!IS_IRC_SERVER_CONNECT(conn))
return;
value = settings_get_str("alternate_nick");
conn->alternate_nick = (value != NULL && *value != '\0') ?
g_strdup(value) : NULL;
value = settings_get_str("usermode");
conn->usermode = (value != NULL && *value != '\0') ?
g_strdup(value) : NULL;
}
static void sig_server_setup_fill_chatnet(IRC_SERVER_CONNECT_REC *conn,
IRC_CHATNET_REC *ircnet)
{
if (!IS_IRC_SERVER_CONNECT(conn))
return;
g_return_if_fail(IS_IRCNET(ircnet));
if (ircnet->alternate_nick != NULL) {
g_free_and_null(conn->alternate_nick);
conn->alternate_nick = g_strdup(ircnet->alternate_nick);
}
if (ircnet->usermode != NULL) {
g_free_and_null(conn->usermode);
conn->usermode = g_strdup(ircnet->usermode);
}
if (ircnet->max_kicks > 0) conn->max_kicks = ircnet->max_kicks;
if (ircnet->max_msgs > 0) conn->max_msgs = ircnet->max_msgs;
if (ircnet->max_modes > 0) conn->max_modes = ircnet->max_modes;
if (ircnet->max_whois > 0) conn->max_whois = ircnet->max_whois;
if (ircnet->max_cmds_at_once > 0)
conn->max_cmds_at_once = ircnet->max_cmds_at_once;
if (ircnet->cmd_queue_speed > 0)
conn->cmd_queue_speed = ircnet->cmd_queue_speed;
if (ircnet->max_query_chans > 0)
conn->max_query_chans = ircnet->max_query_chans;
/* Validate the SASL parameters filled by sig_chatnet_read() or cmd_network_add */
conn->sasl_mechanism = SASL_MECHANISM_NONE;
conn->sasl_username = NULL;
conn->sasl_password = NULL;
if (ircnet->sasl_mechanism != NULL) {
if (!g_ascii_strcasecmp(ircnet->sasl_mechanism, "plain")) {
/* The PLAIN method needs both the username and the password */
conn->sasl_mechanism = SASL_MECHANISM_PLAIN;
if (ircnet->sasl_username != NULL && *ircnet->sasl_username &&
ircnet->sasl_password != NULL && *ircnet->sasl_password) {
conn->sasl_username = g_strdup(ircnet->sasl_username);
conn->sasl_password = g_strdup(ircnet->sasl_password);
} else
g_warning("The fields sasl_username and sasl_password are either missing or empty");
}
else if (!g_ascii_strcasecmp(ircnet->sasl_mechanism, "external")) {
conn->sasl_mechanism = SASL_MECHANISM_EXTERNAL;
}
else
g_warning("Unsupported SASL mechanism \"%s\" selected", ircnet->sasl_mechanism);
}
}
static void init_userinfo(void)
{
unsigned int changed;
const char *set, *nick, *user_name, *str;
changed = 0;
/* Check if nick/username/realname weren't read from setup... */
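/* Each field falls back first to its IRC* environment variable (IRCNAME,
 * IRCUSER, IRCNICK, IRCHOST) and then, where one exists, to the system
 * account information. */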
set = settings_get_str("real_name");
if (set == NULL || *set == '\0') {
str = g_getenv("IRCNAME");
settings_set_str("real_name",
str != NULL ? str : g_get_real_name());
changed |= USER_SETTINGS_REAL_NAME;
}
/* username */
user_name = settings_get_str("user_name");
if (user_name == NULL || *user_name == '\0') {
str = g_getenv("IRCUSER");
settings_set_str("user_name",
str != NULL ? str : g_get_user_name());
user_name = settings_get_str("user_name");
changed |= USER_SETTINGS_USER_NAME;
}
/* nick */
nick = settings_get_str("nick");
if (nick == NULL || *nick == '\0') {
str = g_getenv("IRCNICK");
settings_set_str("nick", str != NULL ? str : user_name);
nick = settings_get_str("nick");
changed |= USER_SETTINGS_NICK;
}
/* host name */
set = settings_get_str("hostname");
if (set == NULL || *set == '\0') {
str = g_getenv("IRCHOST");
if (str != NULL) {
settings_set_str("hostname", str);
changed |= USER_SETTINGS_HOSTNAME;
}
}
signal_emit("irssi init userinfo changed", 1, GUINT_TO_POINTER(changed));
}
static void sig_server_setup_read(IRC_SERVER_SETUP_REC *rec, CONFIG_NODE *node)
{
g_return_if_fail(rec != NULL);
g_return_if_fail(node != NULL);
if (!IS_IRC_SERVER_SETUP(rec))
return;
rec->max_cmds_at_once = config_node_get_int(node, "cmds_max_at_once", 0);
rec->cmd_queue_speed = config_node_get_int(node, "cmd_queue_speed", 0);
rec->max_query_chans = config_node_get_int(node, "max_query_chans", 0);
}
static void sig_server_setup_saved(IRC_SERVER_SETUP_REC *rec,
CONFIG_NODE *node)
{
if (!IS_IRC_SERVER_SETUP(rec))
return;
if (rec->max_cmds_at_once > 0)
iconfig_node_set_int(node, "cmds_max_at_once", rec->max_cmds_at_once);
if (rec->cmd_queue_speed > 0)
iconfig_node_set_int(node, "cmd_queue_speed", rec->cmd_queue_speed);
if (rec->max_query_chans > 0)
iconfig_node_set_int(node, "max_query_chans", rec->max_query_chans);
}
void irc_servers_setup_init(void)
{
settings_add_bool("server", "skip_motd", FALSE);
settings_add_str("server", "alternate_nick", "");
init_userinfo();
signal_add("server setup fill reconn", (SIGNAL_FUNC) sig_server_setup_fill_reconn);
signal_add("server setup fill connect", (SIGNAL_FUNC) sig_server_setup_fill_connect);
signal_add("server setup fill chatnet", (SIGNAL_FUNC) sig_server_setup_fill_chatnet);
signal_add("server setup read", (SIGNAL_FUNC) sig_server_setup_read);
signal_add("server setup saved", (SIGNAL_FUNC) sig_server_setup_saved);
}
void irc_servers_setup_deinit(void)
{
signal_remove("server setup fill reconn", (SIGNAL_FUNC) sig_server_setup_fill_reconn);
signal_remove("server setup fill connect", (SIGNAL_FUNC) sig_server_setup_fill_connect);
signal_remove("server setup fill chatnet", (SIGNAL_FUNC) sig_server_setup_fill_chatnet);
signal_remove("server setup read", (SIGNAL_FUNC) sig_server_setup_read);
signal_remove("server setup saved", (SIGNAL_FUNC) sig_server_setup_saved);
}
| ./CrossVul/dataset_final_sorted/CWE-416/c/good_903_2 |
crossvul-cpp_data_bad_60_0 | /*-
* Copyright (c) 2003-2007 Tim Kientzle
* Copyright (c) 2011 Andres Mejia
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR(S) ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR(S) BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "archive_platform.h"
#ifdef HAVE_ERRNO_H
#include <errno.h>
#endif
#include <time.h>
#include <limits.h>
#ifdef HAVE_ZLIB_H
#include <zlib.h> /* crc32 */
#endif
#include "archive.h"
#ifndef HAVE_ZLIB_H
#include "archive_crc32.h"
#endif
#include "archive_endian.h"
#include "archive_entry.h"
#include "archive_entry_locale.h"
#include "archive_ppmd7_private.h"
#include "archive_private.h"
#include "archive_read_private.h"
/* RAR signature, also known as the mark header */
#define RAR_SIGNATURE "\x52\x61\x72\x21\x1A\x07\x00"
/* Header types */
#define MARK_HEAD 0x72
#define MAIN_HEAD 0x73
#define FILE_HEAD 0x74
#define COMM_HEAD 0x75
#define AV_HEAD 0x76
#define SUB_HEAD 0x77
#define PROTECT_HEAD 0x78
#define SIGN_HEAD 0x79
#define NEWSUB_HEAD 0x7a
#define ENDARC_HEAD 0x7b
/* Main Header Flags */
#define MHD_VOLUME 0x0001
#define MHD_COMMENT 0x0002
#define MHD_LOCK 0x0004
#define MHD_SOLID 0x0008
#define MHD_NEWNUMBERING 0x0010
#define MHD_AV 0x0020
#define MHD_PROTECT 0x0040
#define MHD_PASSWORD 0x0080
#define MHD_FIRSTVOLUME 0x0100
#define MHD_ENCRYPTVER 0x0200
/* Flags common to all headers */
#define HD_MARKDELETION 0x4000
#define HD_ADD_SIZE_PRESENT 0x8000
/* File Header Flags */
#define FHD_SPLIT_BEFORE 0x0001
#define FHD_SPLIT_AFTER 0x0002
#define FHD_PASSWORD 0x0004
#define FHD_COMMENT 0x0008
#define FHD_SOLID 0x0010
#define FHD_LARGE 0x0100
#define FHD_UNICODE 0x0200
#define FHD_SALT 0x0400
#define FHD_VERSION 0x0800
#define FHD_EXTTIME 0x1000
#define FHD_EXTFLAGS 0x2000
/* File dictionary sizes */
#define DICTIONARY_SIZE_64 0x00
#define DICTIONARY_SIZE_128 0x20
#define DICTIONARY_SIZE_256 0x40
#define DICTIONARY_SIZE_512 0x60
#define DICTIONARY_SIZE_1024 0x80
#define DICTIONARY_SIZE_2048 0xA0
#define DICTIONARY_SIZE_4096 0xC0
#define FILE_IS_DIRECTORY 0xE0
#define DICTIONARY_MASK FILE_IS_DIRECTORY
/* OS Flags */
#define OS_MSDOS 0
#define OS_OS2 1
#define OS_WIN32 2
#define OS_UNIX 3
#define OS_MAC_OS 4
#define OS_BEOS 5
/* Compression Methods */
#define COMPRESS_METHOD_STORE 0x30
/* LZSS */
#define COMPRESS_METHOD_FASTEST 0x31
#define COMPRESS_METHOD_FAST 0x32
#define COMPRESS_METHOD_NORMAL 0x33
/* PPMd Variant H */
#define COMPRESS_METHOD_GOOD 0x34
#define COMPRESS_METHOD_BEST 0x35
#define CRC_POLYNOMIAL 0xEDB88320
#define NS_UNIT 10000000
#define DICTIONARY_MAX_SIZE 0x400000
#define MAINCODE_SIZE 299
#define OFFSETCODE_SIZE 60
#define LOWOFFSETCODE_SIZE 17
#define LENGTHCODE_SIZE 28
#define HUFFMAN_TABLE_SIZE \
  (MAINCODE_SIZE + OFFSETCODE_SIZE + LOWOFFSETCODE_SIZE + LENGTHCODE_SIZE)
#define MAX_SYMBOL_LENGTH 0xF
#define MAX_SYMBOLS 20
/*
 * Considering L1/L2 cache misses and the cost of a write() system call,
 * the best size for the output (uncompressed) buffer is 128K. If the
 * structure of the extraction process changes, this value should be
 * re-evaluated.
 */
#define UNP_BUFFER_SIZE (128 * 1024)
/* Define this here for non-Windows platforms */
#if !((defined(__WIN32__) || defined(_WIN32) || defined(__WIN32)) && !defined(__CYGWIN__))
#define FILE_ATTRIBUTE_DIRECTORY 0x10
#endif
/* Fields common to all headers */
struct rar_header
{
char crc[2];
char type;
char flags[2];
char size[2];
};
/* Fields common to all file headers */
struct rar_file_header
{
char pack_size[4];
char unp_size[4];
char host_os;
char file_crc[4];
char file_time[4];
char unp_ver;
char method;
char name_size[2];
char file_attr[4];
};
struct huffman_tree_node
{
int branches[2];
};
struct huffman_table_entry
{
unsigned int length;
int value;
};
struct huffman_code
{
struct huffman_tree_node *tree;
int numentries;
int numallocatedentries;
int minlength;
int maxlength;
int tablesize;
struct huffman_table_entry *table;
};
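/*
 * Each Huffman code is kept in two forms: a binary `tree` walked bit by
 * bit for long codes, and a flat lookup `table` indexed by the next
 * `tablesize` input bits that resolves short codes in a single step.
 */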
struct lzss
{
unsigned char *window;
int mask;
int64_t position;
};
struct data_block_offsets
{
int64_t header_size;
int64_t start_offset;
int64_t end_offset;
};
struct rar
{
/* Entries from main RAR header */
unsigned main_flags;
unsigned long file_crc;
char reserved1[2];
char reserved2[4];
char encryptver;
/* File header entries */
char compression_method;
unsigned file_flags;
int64_t packed_size;
int64_t unp_size;
time_t mtime;
long mnsec;
mode_t mode;
char *filename;
char *filename_save;
size_t filename_save_size;
size_t filename_allocated;
/* File header optional entries */
char salt[8];
time_t atime;
long ansec;
time_t ctime;
long cnsec;
time_t arctime;
long arcnsec;
/* Fields to help with tracking decompression of files. */
int64_t bytes_unconsumed;
int64_t bytes_remaining;
int64_t bytes_uncopied;
int64_t offset;
int64_t offset_outgoing;
int64_t offset_seek;
char valid;
unsigned int unp_offset;
unsigned int unp_buffer_size;
unsigned char *unp_buffer;
unsigned int dictionary_size;
char start_new_block;
char entry_eof;
unsigned long crc_calculated;
int found_first_header;
char has_endarc_header;
struct data_block_offsets *dbo;
unsigned int cursor;
unsigned int nodes;
/* LZSS members */
struct huffman_code maincode;
struct huffman_code offsetcode;
struct huffman_code lowoffsetcode;
struct huffman_code lengthcode;
unsigned char lengthtable[HUFFMAN_TABLE_SIZE];
struct lzss lzss;
char output_last_match;
unsigned int lastlength;
unsigned int lastoffset;
unsigned int oldoffset[4];
unsigned int lastlowoffset;
unsigned int numlowoffsetrepeats;
int64_t filterstart;
char start_new_table;
/* PPMd Variant H members */
char ppmd_valid;
char ppmd_eod;
char is_ppmd_block;
int ppmd_escape;
CPpmd7 ppmd7_context;
CPpmd7z_RangeDec range_dec;
IByteIn bytein;
/*
* String conversion object.
*/
int init_default_conversion;
struct archive_string_conv *sconv_default;
struct archive_string_conv *opt_sconv;
struct archive_string_conv *sconv_utf8;
struct archive_string_conv *sconv_utf16be;
/*
* Bit stream reader.
*/
struct rar_br {
#define CACHE_TYPE uint64_t
#define CACHE_BITS (8 * sizeof(CACHE_TYPE))
/* Cache buffer. */
CACHE_TYPE cache_buffer;
/* Indicates how many bits avail in cache_buffer. */
int cache_avail;
ssize_t avail_in;
const unsigned char *next_in;
} br;
/*
* Custom field to denote that this archive contains encrypted entries
*/
int has_encrypted_entries;
};
static int archive_read_support_format_rar_capabilities(struct archive_read *);
static int archive_read_format_rar_has_encrypted_entries(struct archive_read *);
static int archive_read_format_rar_bid(struct archive_read *, int);
static int archive_read_format_rar_options(struct archive_read *,
const char *, const char *);
static int archive_read_format_rar_read_header(struct archive_read *,
struct archive_entry *);
static int archive_read_format_rar_read_data(struct archive_read *,
const void **, size_t *, int64_t *);
static int archive_read_format_rar_read_data_skip(struct archive_read *a);
static int64_t archive_read_format_rar_seek_data(struct archive_read *, int64_t,
int);
static int archive_read_format_rar_cleanup(struct archive_read *);
/* Support functions */
static int read_header(struct archive_read *, struct archive_entry *, char);
static time_t get_time(int);
static int read_exttime(const char *, struct rar *, const char *);
static int read_symlink_stored(struct archive_read *, struct archive_entry *,
struct archive_string_conv *);
static int read_data_stored(struct archive_read *, const void **, size_t *,
int64_t *);
static int read_data_compressed(struct archive_read *, const void **, size_t *,
int64_t *);
static int rar_br_preparation(struct archive_read *, struct rar_br *);
static int parse_codes(struct archive_read *);
static void free_codes(struct archive_read *);
static int read_next_symbol(struct archive_read *, struct huffman_code *);
static int create_code(struct archive_read *, struct huffman_code *,
unsigned char *, int, char);
static int add_value(struct archive_read *, struct huffman_code *, int, int,
int);
static int new_node(struct huffman_code *);
static int make_table(struct archive_read *, struct huffman_code *);
static int make_table_recurse(struct archive_read *, struct huffman_code *, int,
struct huffman_table_entry *, int, int);
static int64_t expand(struct archive_read *, int64_t);
static int copy_from_lzss_window(struct archive_read *, const void **,
int64_t, int);
static const void *rar_read_ahead(struct archive_read *, size_t, ssize_t *);
/*
* Bit stream reader.
*/
/* Check that the cache buffer has enough bits. */
#define rar_br_has(br, n) ((br)->cache_avail >= n)
/* Get compressed data by bit. */
#define rar_br_bits(br, n) \
(((uint32_t)((br)->cache_buffer >> \
((br)->cache_avail - (n)))) & cache_masks[n])
#define rar_br_bits_forced(br, n) \
(((uint32_t)((br)->cache_buffer << \
((n) - (br)->cache_avail))) & cache_masks[n])
/* Read ahead to make sure the cache buffer holds enough of the compressed
 * data we are about to use.
 * True : succeeded; the cache buffer has enough bits.
 * False : there is no more data in the stream. */
#define rar_br_read_ahead(a, br, n) \
((rar_br_has(br, (n)) || rar_br_fillup(a, br)) || rar_br_has(br, (n)))
/* Notify how many bits we consumed. */
#define rar_br_consume(br, n) ((br)->cache_avail -= (n))
#define rar_br_consume_unalined_bits(br) ((br)->cache_avail &= ~7)
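/* cache_masks[n] has the low n bits set, so ANDing with it keeps exactly
 * the n bits just shifted down out of the cache buffer. */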
static const uint32_t cache_masks[] = {
0x00000000, 0x00000001, 0x00000003, 0x00000007,
0x0000000F, 0x0000001F, 0x0000003F, 0x0000007F,
0x000000FF, 0x000001FF, 0x000003FF, 0x000007FF,
0x00000FFF, 0x00001FFF, 0x00003FFF, 0x00007FFF,
0x0000FFFF, 0x0001FFFF, 0x0003FFFF, 0x0007FFFF,
0x000FFFFF, 0x001FFFFF, 0x003FFFFF, 0x007FFFFF,
0x00FFFFFF, 0x01FFFFFF, 0x03FFFFFF, 0x07FFFFFF,
0x0FFFFFFF, 0x1FFFFFFF, 0x3FFFFFFF, 0x7FFFFFFF,
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF
};
/*
 * Shift away used bits in the cache data and fill it up with the bits
 * that follow. Call this when the cache buffer does not hold as many
 * bits as you need.
 *
 * Returns 1 if the cache buffer now holds enough data.
 * Returns 0 if it could not be filled; the input stream is exhausted.
 */
static int
rar_br_fillup(struct archive_read *a, struct rar_br *br)
{
struct rar *rar = (struct rar *)(a->format->data);
int n = CACHE_BITS - br->cache_avail;
for (;;) {
switch (n >> 3) {
case 8:
if (br->avail_in >= 8) {
br->cache_buffer =
((uint64_t)br->next_in[0]) << 56 |
((uint64_t)br->next_in[1]) << 48 |
((uint64_t)br->next_in[2]) << 40 |
((uint64_t)br->next_in[3]) << 32 |
((uint32_t)br->next_in[4]) << 24 |
((uint32_t)br->next_in[5]) << 16 |
((uint32_t)br->next_in[6]) << 8 |
(uint32_t)br->next_in[7];
br->next_in += 8;
br->avail_in -= 8;
br->cache_avail += 8 * 8;
rar->bytes_unconsumed += 8;
rar->bytes_remaining -= 8;
return (1);
}
break;
case 7:
if (br->avail_in >= 7) {
br->cache_buffer =
(br->cache_buffer << 56) |
((uint64_t)br->next_in[0]) << 48 |
((uint64_t)br->next_in[1]) << 40 |
((uint64_t)br->next_in[2]) << 32 |
((uint32_t)br->next_in[3]) << 24 |
((uint32_t)br->next_in[4]) << 16 |
((uint32_t)br->next_in[5]) << 8 |
(uint32_t)br->next_in[6];
br->next_in += 7;
br->avail_in -= 7;
br->cache_avail += 7 * 8;
rar->bytes_unconsumed += 7;
rar->bytes_remaining -= 7;
return (1);
}
break;
case 6:
if (br->avail_in >= 6) {
br->cache_buffer =
(br->cache_buffer << 48) |
((uint64_t)br->next_in[0]) << 40 |
((uint64_t)br->next_in[1]) << 32 |
((uint32_t)br->next_in[2]) << 24 |
((uint32_t)br->next_in[3]) << 16 |
((uint32_t)br->next_in[4]) << 8 |
(uint32_t)br->next_in[5];
br->next_in += 6;
br->avail_in -= 6;
br->cache_avail += 6 * 8;
rar->bytes_unconsumed += 6;
rar->bytes_remaining -= 6;
return (1);
}
break;
case 0:
/* We have enough compressed data in
* the cache buffer. */
return (1);
default:
break;
}
if (br->avail_in <= 0) {
if (rar->bytes_unconsumed > 0) {
/* Consume as much as the decompressor
* actually used. */
__archive_read_consume(a, rar->bytes_unconsumed);
rar->bytes_unconsumed = 0;
}
br->next_in = rar_read_ahead(a, 1, &(br->avail_in));
if (br->next_in == NULL)
return (0);
if (br->avail_in == 0)
return (0);
}
br->cache_buffer =
(br->cache_buffer << 8) | *br->next_in++;
br->avail_in--;
br->cache_avail += 8;
n -= 8;
rar->bytes_unconsumed++;
rar->bytes_remaining--;
}
}
static int
rar_br_preparation(struct archive_read *a, struct rar_br *br)
{
struct rar *rar = (struct rar *)(a->format->data);
if (rar->bytes_remaining > 0) {
br->next_in = rar_read_ahead(a, 1, &(br->avail_in));
if (br->next_in == NULL) {
archive_set_error(&a->archive,
ARCHIVE_ERRNO_FILE_FORMAT,
"Truncated RAR file data");
return (ARCHIVE_FATAL);
}
if (br->cache_avail == 0)
(void)rar_br_fillup(a, br);
}
return (ARCHIVE_OK);
}
/* Find last bit set */
static inline int
rar_fls(unsigned int word)
{
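/* Smear the highest set bit into every lower position, then subtract the
 * value shifted right by one so only that highest bit survives, e.g.
 * rar_fls(0x28) == 0x20. */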
word |= (word >> 1);
word |= (word >> 2);
word |= (word >> 4);
word |= (word >> 8);
word |= (word >> 16);
return word - (word >> 1);
}
/* LZSS functions */
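/*
 * The LZSS window is a power-of-two circular buffer: `position` grows
 * monotonically while `position & mask` yields the physical offset, so
 * old data is overwritten automatically as the stream wraps around.
 */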
static inline int64_t
lzss_position(struct lzss *lzss)
{
return lzss->position;
}
static inline int
lzss_mask(struct lzss *lzss)
{
return lzss->mask;
}
static inline int
lzss_size(struct lzss *lzss)
{
return lzss->mask + 1;
}
static inline int
lzss_offset_for_position(struct lzss *lzss, int64_t pos)
{
return (int)(pos & lzss->mask);
}
static inline unsigned char *
lzss_pointer_for_position(struct lzss *lzss, int64_t pos)
{
return &lzss->window[lzss_offset_for_position(lzss, pos)];
}
static inline int
lzss_current_offset(struct lzss *lzss)
{
return lzss_offset_for_position(lzss, lzss->position);
}
static inline uint8_t *
lzss_current_pointer(struct lzss *lzss)
{
return lzss_pointer_for_position(lzss, lzss->position);
}
static inline void
lzss_emit_literal(struct rar *rar, uint8_t literal)
{
*lzss_current_pointer(&rar->lzss) = literal;
rar->lzss.position++;
}
static inline void
lzss_emit_match(struct rar *rar, int offset, int length)
{
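/*
 * Copy `length` bytes starting `offset` bytes behind the current window
 * position. Source and destination may each wrap around the circular
 * window, so the copy proceeds in runs; runs that overlap are copied
 * byte by byte so that RLE-style matches (offset < length) expand
 * correctly.
 */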
int dstoffs = lzss_current_offset(&rar->lzss);
int srcoffs = (dstoffs - offset) & lzss_mask(&rar->lzss);
int l, li, remaining;
unsigned char *d, *s;
remaining = length;
while (remaining > 0) {
l = remaining;
if (dstoffs > srcoffs) {
if (l > lzss_size(&rar->lzss) - dstoffs)
l = lzss_size(&rar->lzss) - dstoffs;
} else {
if (l > lzss_size(&rar->lzss) - srcoffs)
l = lzss_size(&rar->lzss) - srcoffs;
}
d = &(rar->lzss.window[dstoffs]);
s = &(rar->lzss.window[srcoffs]);
if ((dstoffs + l < srcoffs) || (srcoffs + l < dstoffs))
memcpy(d, s, l);
else {
for (li = 0; li < l; li++)
d[li] = s[li];
}
remaining -= l;
dstoffs = (dstoffs + l) & lzss_mask(&(rar->lzss));
srcoffs = (srcoffs + l) & lzss_mask(&(rar->lzss));
}
rar->lzss.position += length;
}
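/*
 * IByteIn callback for the PPMd7 range decoder: it pulls the next whole
 * byte out of the shared bit reader and marks the stream invalid when
 * the input is truncated.
 */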
static Byte
ppmd_read(void *p)
{
struct archive_read *a = ((IByteIn*)p)->a;
struct rar *rar = (struct rar *)(a->format->data);
struct rar_br *br = &(rar->br);
Byte b;
if (!rar_br_read_ahead(a, br, 8))
{
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Truncated RAR file data");
rar->valid = 0;
return 0;
}
b = rar_br_bits(br, 8);
rar_br_consume(br, 8);
return b;
}
int
archive_read_support_format_rar(struct archive *_a)
{
struct archive_read *a = (struct archive_read *)_a;
struct rar *rar;
int r;
archive_check_magic(_a, ARCHIVE_READ_MAGIC, ARCHIVE_STATE_NEW,
"archive_read_support_format_rar");
rar = (struct rar *)calloc(sizeof(*rar), 1);
if (rar == NULL)
{
archive_set_error(&a->archive, ENOMEM, "Can't allocate rar data");
return (ARCHIVE_FATAL);
}
/*
 * Until enough data has been read, we cannot tell whether there
 * are any encrypted entries.
 */
rar->has_encrypted_entries = ARCHIVE_READ_FORMAT_ENCRYPTION_DONT_KNOW;
r = __archive_read_register_format(a,
rar,
"rar",
archive_read_format_rar_bid,
archive_read_format_rar_options,
archive_read_format_rar_read_header,
archive_read_format_rar_read_data,
archive_read_format_rar_read_data_skip,
archive_read_format_rar_seek_data,
archive_read_format_rar_cleanup,
archive_read_support_format_rar_capabilities,
archive_read_format_rar_has_encrypted_entries);
if (r != ARCHIVE_OK)
free(rar);
return (r);
}
static int
archive_read_support_format_rar_capabilities(struct archive_read * a)
{
(void)a; /* UNUSED */
return (ARCHIVE_READ_FORMAT_CAPS_ENCRYPT_DATA
| ARCHIVE_READ_FORMAT_CAPS_ENCRYPT_METADATA);
}
static int
archive_read_format_rar_has_encrypted_entries(struct archive_read *_a)
{
if (_a && _a->format) {
struct rar * rar = (struct rar *)_a->format->data;
if (rar) {
return rar->has_encrypted_entries;
}
}
return ARCHIVE_READ_FORMAT_ENCRYPTION_DONT_KNOW;
}
static int
archive_read_format_rar_bid(struct archive_read *a, int best_bid)
{
const char *p;
/* If there's already a bid > 30, we'll never win. */
if (best_bid > 30)
return (-1);
if ((p = __archive_read_ahead(a, 7, NULL)) == NULL)
return (-1);
if (memcmp(p, RAR_SIGNATURE, 7) == 0)
return (30);
if ((p[0] == 'M' && p[1] == 'Z') || memcmp(p, "\x7F\x45LF", 4) == 0) {
/* This is a PE file */
ssize_t offset = 0x10000;
ssize_t window = 4096;
ssize_t bytes_avail;
while (offset + window <= (1024 * 128)) {
const char *buff = __archive_read_ahead(a, offset + window, &bytes_avail);
if (buff == NULL) {
/* Remaining bytes are less than window. */
window >>= 1;
if (window < 0x40)
return (0);
continue;
}
p = buff + offset;
while (p + 7 < buff + bytes_avail) {
if (memcmp(p, RAR_SIGNATURE, 7) == 0)
return (30);
p += 0x10;
}
offset = p - buff;
}
}
return (0);
}
static int
skip_sfx(struct archive_read *a)
{
const void *h;
const char *p, *q;
size_t skip, total;
ssize_t bytes, window;
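/*
 * A self-extracting archive prepends an executable stub, so scan the
 * first 128K of the file in 16-byte steps for the RAR signature and
 * consume everything that precedes it.
 */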
total = 0;
window = 4096;
while (total + window <= (1024 * 128)) {
h = __archive_read_ahead(a, window, &bytes);
if (h == NULL) {
/* Remaining bytes are less than window. */
window >>= 1;
if (window < 0x40)
goto fatal;
continue;
}
if (bytes < 0x40)
goto fatal;
p = h;
q = p + bytes;
/*
* Scan ahead until we find something that looks
* like the RAR header.
*/
while (p + 7 < q) {
if (memcmp(p, RAR_SIGNATURE, 7) == 0) {
skip = p - (const char *)h;
__archive_read_consume(a, skip);
return (ARCHIVE_OK);
}
p += 0x10;
}
skip = p - (const char *)h;
__archive_read_consume(a, skip);
total += skip;
}
fatal:
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Couldn't find out RAR header");
return (ARCHIVE_FATAL);
}
static int
archive_read_format_rar_options(struct archive_read *a,
const char *key, const char *val)
{
struct rar *rar;
int ret = ARCHIVE_FAILED;
rar = (struct rar *)(a->format->data);
if (strcmp(key, "hdrcharset") == 0) {
if (val == NULL || val[0] == 0)
archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC,
"rar: hdrcharset option needs a character-set name");
else {
rar->opt_sconv =
archive_string_conversion_from_charset(
&a->archive, val, 0);
if (rar->opt_sconv != NULL)
ret = ARCHIVE_OK;
else
ret = ARCHIVE_FATAL;
}
return (ret);
}
/* Note: The "warn" return is just to inform the options
* supervisor that we didn't handle it. It will generate
* a suitable error if no one used this option. */
return (ARCHIVE_WARN);
}
static int
archive_read_format_rar_read_header(struct archive_read *a,
struct archive_entry *entry)
{
const void *h;
const char *p;
struct rar *rar;
size_t skip;
char head_type;
int ret;
unsigned flags;
unsigned long crc32_expected;
a->archive.archive_format = ARCHIVE_FORMAT_RAR;
if (a->archive.archive_format_name == NULL)
a->archive.archive_format_name = "RAR";
rar = (struct rar *)(a->format->data);
/*
* It should be sufficient to call archive_read_next_header() for
* a reader to determine if an entry is encrypted or not. If the
* encryption of an entry is only detectable when calling
* archive_read_data(), so be it. We'll do the same check there
* as well.
*/
if (rar->has_encrypted_entries == ARCHIVE_READ_FORMAT_ENCRYPTION_DONT_KNOW) {
rar->has_encrypted_entries = 0;
}
/* RAR files can be generated without EOF headers, so return ARCHIVE_EOF if
* this fails.
*/
if ((h = __archive_read_ahead(a, 7, NULL)) == NULL)
return (ARCHIVE_EOF);
p = h;
if (rar->found_first_header == 0 &&
((p[0] == 'M' && p[1] == 'Z') || memcmp(p, "\x7F\x45LF", 4) == 0)) {
/* This is an executable ? Must be self-extracting... */
ret = skip_sfx(a);
if (ret < ARCHIVE_WARN)
return (ret);
}
rar->found_first_header = 1;
while (1)
{
unsigned long crc32_val;
if ((h = __archive_read_ahead(a, 7, NULL)) == NULL)
return (ARCHIVE_FATAL);
p = h;
head_type = p[2];
switch(head_type)
{
case MARK_HEAD:
if (memcmp(p, RAR_SIGNATURE, 7) != 0) {
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Invalid marker header");
return (ARCHIVE_FATAL);
}
__archive_read_consume(a, 7);
break;
case MAIN_HEAD:
rar->main_flags = archive_le16dec(p + 3);
skip = archive_le16dec(p + 5);
if (skip < 7 + sizeof(rar->reserved1) + sizeof(rar->reserved2)) {
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Invalid header size");
return (ARCHIVE_FATAL);
}
if ((h = __archive_read_ahead(a, skip, NULL)) == NULL)
return (ARCHIVE_FATAL);
p = h;
memcpy(rar->reserved1, p + 7, sizeof(rar->reserved1));
memcpy(rar->reserved2, p + 7 + sizeof(rar->reserved1),
sizeof(rar->reserved2));
if (rar->main_flags & MHD_ENCRYPTVER) {
if (skip < 7 + sizeof(rar->reserved1) + sizeof(rar->reserved2)+1) {
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Invalid header size");
return (ARCHIVE_FATAL);
}
rar->encryptver = *(p + 7 + sizeof(rar->reserved1) +
sizeof(rar->reserved2));
}
/* Main header is password encrypted, so we cannot read any
file names or any other info about files from the header. */
if (rar->main_flags & MHD_PASSWORD)
{
archive_entry_set_is_metadata_encrypted(entry, 1);
archive_entry_set_is_data_encrypted(entry, 1);
rar->has_encrypted_entries = 1;
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"RAR encryption support unavailable.");
return (ARCHIVE_FATAL);
}
crc32_val = crc32(0, (const unsigned char *)p + 2, (unsigned)skip - 2);
if ((crc32_val & 0xffff) != archive_le16dec(p)) {
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Header CRC error");
return (ARCHIVE_FATAL);
}
__archive_read_consume(a, skip);
break;
case FILE_HEAD:
return read_header(a, entry, head_type);
case COMM_HEAD:
case AV_HEAD:
case SUB_HEAD:
case PROTECT_HEAD:
case SIGN_HEAD:
case ENDARC_HEAD:
flags = archive_le16dec(p + 3);
skip = archive_le16dec(p + 5);
if (skip < 7) {
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Invalid header size too small");
return (ARCHIVE_FATAL);
}
if (flags & HD_ADD_SIZE_PRESENT)
{
if (skip < 7 + 4) {
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Invalid header size too small");
return (ARCHIVE_FATAL);
}
if ((h = __archive_read_ahead(a, skip, NULL)) == NULL)
return (ARCHIVE_FATAL);
p = h;
skip += archive_le32dec(p + 7);
}
/* Skip over the 2-byte CRC at the beginning of the header. */
crc32_expected = archive_le16dec(p);
__archive_read_consume(a, 2);
skip -= 2;
/* Skim the entire header and compute the CRC. */
crc32_val = 0;
while (skip > 0) {
size_t to_read = skip;
ssize_t did_read;
if (to_read > 32 * 1024) {
to_read = 32 * 1024;
}
if ((h = __archive_read_ahead(a, to_read, &did_read)) == NULL) {
return (ARCHIVE_FATAL);
}
p = h;
crc32_val = crc32(crc32_val, (const unsigned char *)p, (unsigned)did_read);
__archive_read_consume(a, did_read);
skip -= did_read;
}
if ((crc32_val & 0xffff) != crc32_expected) {
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Header CRC error");
return (ARCHIVE_FATAL);
}
if (head_type == ENDARC_HEAD)
return (ARCHIVE_EOF);
break;
case NEWSUB_HEAD:
if ((ret = read_header(a, entry, head_type)) < ARCHIVE_WARN)
return ret;
break;
default:
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Bad RAR file");
return (ARCHIVE_FATAL);
}
}
}
static int
archive_read_format_rar_read_data(struct archive_read *a, const void **buff,
size_t *size, int64_t *offset)
{
struct rar *rar = (struct rar *)(a->format->data);
int ret;
if (rar->has_encrypted_entries == ARCHIVE_READ_FORMAT_ENCRYPTION_DONT_KNOW) {
rar->has_encrypted_entries = 0;
}
if (rar->bytes_unconsumed > 0) {
/* Consume as much as the decompressor actually used. */
__archive_read_consume(a, rar->bytes_unconsumed);
rar->bytes_unconsumed = 0;
}
*buff = NULL;
if (rar->entry_eof || rar->offset_seek >= rar->unp_size) {
*size = 0;
*offset = rar->offset;
if (*offset < rar->unp_size)
*offset = rar->unp_size;
return (ARCHIVE_EOF);
}
switch (rar->compression_method)
{
case COMPRESS_METHOD_STORE:
ret = read_data_stored(a, buff, size, offset);
break;
case COMPRESS_METHOD_FASTEST:
case COMPRESS_METHOD_FAST:
case COMPRESS_METHOD_NORMAL:
case COMPRESS_METHOD_GOOD:
case COMPRESS_METHOD_BEST:
ret = read_data_compressed(a, buff, size, offset);
if (ret != ARCHIVE_OK && ret != ARCHIVE_WARN)
__archive_ppmd7_functions.Ppmd7_Free(&rar->ppmd7_context);
break;
default:
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Unsupported compression method for RAR file.");
ret = ARCHIVE_FATAL;
break;
}
return (ret);
}
static int
archive_read_format_rar_read_data_skip(struct archive_read *a)
{
struct rar *rar;
int64_t bytes_skipped;
int ret;
rar = (struct rar *)(a->format->data);
if (rar->bytes_unconsumed > 0) {
/* Consume as much as the decompressor actually used. */
__archive_read_consume(a, rar->bytes_unconsumed);
rar->bytes_unconsumed = 0;
}
if (rar->bytes_remaining > 0) {
bytes_skipped = __archive_read_consume(a, rar->bytes_remaining);
if (bytes_skipped < 0)
return (ARCHIVE_FATAL);
}
/* Compressed data to skip must be read from each header in a multivolume
* archive.
*/
if (rar->main_flags & MHD_VOLUME && rar->file_flags & FHD_SPLIT_AFTER)
{
ret = archive_read_format_rar_read_header(a, a->entry);
if (ret == (ARCHIVE_EOF))
ret = archive_read_format_rar_read_header(a, a->entry);
if (ret != (ARCHIVE_OK))
return ret;
return archive_read_format_rar_read_data_skip(a);
}
return (ARCHIVE_OK);
}
static int64_t
archive_read_format_rar_seek_data(struct archive_read *a, int64_t offset,
int whence)
{
int64_t client_offset, ret;
unsigned int i;
struct rar *rar = (struct rar *)(a->format->data);
if (rar->compression_method == COMPRESS_METHOD_STORE)
{
/* Modify the offset for use with SEEK_SET */
switch (whence)
{
case SEEK_CUR:
client_offset = rar->offset_seek;
break;
case SEEK_END:
client_offset = rar->unp_size;
break;
case SEEK_SET:
default:
client_offset = 0;
}
client_offset += offset;
if (client_offset < 0)
{
/* Can't seek past beginning of data block */
return -1;
}
else if (client_offset > rar->unp_size)
{
/*
* Set the returned offset but only seek to the end of
* the data block.
*/
rar->offset_seek = client_offset;
client_offset = rar->unp_size;
}
client_offset += rar->dbo[0].start_offset;
i = 0;
while (i < rar->cursor)
{
i++;
client_offset += rar->dbo[i].start_offset - rar->dbo[i-1].end_offset;
}
if (rar->main_flags & MHD_VOLUME)
{
/* Find the appropriate offset among the multivolume archive */
while (1)
{
if (client_offset < rar->dbo[rar->cursor].start_offset &&
rar->file_flags & FHD_SPLIT_BEFORE)
{
/* Search backwards for the correct data block */
if (rar->cursor == 0)
{
archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC,
"Attempt to seek past beginning of RAR data block");
return (ARCHIVE_FAILED);
}
rar->cursor--;
client_offset -= rar->dbo[rar->cursor+1].start_offset -
rar->dbo[rar->cursor].end_offset;
if (client_offset < rar->dbo[rar->cursor].start_offset)
continue;
ret = __archive_read_seek(a, rar->dbo[rar->cursor].start_offset -
rar->dbo[rar->cursor].header_size, SEEK_SET);
if (ret < (ARCHIVE_OK))
return ret;
ret = archive_read_format_rar_read_header(a, a->entry);
if (ret != (ARCHIVE_OK))
{
archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC,
"Error during seek of RAR file");
return (ARCHIVE_FAILED);
}
rar->cursor--;
break;
}
else if (client_offset > rar->dbo[rar->cursor].end_offset &&
rar->file_flags & FHD_SPLIT_AFTER)
{
/* Search forward for the correct data block */
rar->cursor++;
if (rar->cursor < rar->nodes &&
client_offset > rar->dbo[rar->cursor].end_offset)
{
client_offset += rar->dbo[rar->cursor].start_offset -
rar->dbo[rar->cursor-1].end_offset;
continue;
}
rar->cursor--;
ret = __archive_read_seek(a, rar->dbo[rar->cursor].end_offset,
SEEK_SET);
if (ret < (ARCHIVE_OK))
return ret;
ret = archive_read_format_rar_read_header(a, a->entry);
if (ret == (ARCHIVE_EOF))
{
rar->has_endarc_header = 1;
ret = archive_read_format_rar_read_header(a, a->entry);
}
if (ret != (ARCHIVE_OK))
{
archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC,
"Error during seek of RAR file");
return (ARCHIVE_FAILED);
}
client_offset += rar->dbo[rar->cursor].start_offset -
rar->dbo[rar->cursor-1].end_offset;
continue;
}
break;
}
}
ret = __archive_read_seek(a, client_offset, SEEK_SET);
if (ret < (ARCHIVE_OK))
return ret;
rar->bytes_remaining = rar->dbo[rar->cursor].end_offset - ret;
i = rar->cursor;
while (i > 0)
{
i--;
ret -= rar->dbo[i+1].start_offset - rar->dbo[i].end_offset;
}
ret -= rar->dbo[0].start_offset;
/* Always restart reading the file after a seek */
__archive_reset_read_data(&a->archive);
rar->bytes_unconsumed = 0;
rar->offset = 0;
/*
* If a seek past the end of file was requested, return the requested
* offset.
*/
if (ret == rar->unp_size && rar->offset_seek > rar->unp_size)
return rar->offset_seek;
/* Return the new offset */
rar->offset_seek = ret;
return rar->offset_seek;
}
else
{
archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC,
"Seeking of compressed RAR files is unsupported");
}
return (ARCHIVE_FAILED);
}
static int
archive_read_format_rar_cleanup(struct archive_read *a)
{
struct rar *rar;
rar = (struct rar *)(a->format->data);
free_codes(a);
free(rar->filename);
free(rar->filename_save);
free(rar->dbo);
free(rar->unp_buffer);
free(rar->lzss.window);
__archive_ppmd7_functions.Ppmd7_Free(&rar->ppmd7_context);
free(rar);
(a->format->data) = NULL;
return (ARCHIVE_OK);
}
static int
read_header(struct archive_read *a, struct archive_entry *entry,
char head_type)
{
const void *h;
const char *p, *endp;
struct rar *rar;
struct rar_header rar_header;
struct rar_file_header file_header;
int64_t header_size;
unsigned filename_size, end;
char *filename;
char *strp;
char packed_size[8];
char unp_size[8];
int ttime;
struct archive_string_conv *sconv, *fn_sconv;
unsigned long crc32_val;
int ret = (ARCHIVE_OK), ret2;
rar = (struct rar *)(a->format->data);
/* Setup a string conversion object for non-rar-unicode filenames. */
sconv = rar->opt_sconv;
if (sconv == NULL) {
if (!rar->init_default_conversion) {
rar->sconv_default =
archive_string_default_conversion_for_read(
&(a->archive));
rar->init_default_conversion = 1;
}
sconv = rar->sconv_default;
}
if ((h = __archive_read_ahead(a, 7, NULL)) == NULL)
return (ARCHIVE_FATAL);
p = h;
memcpy(&rar_header, p, sizeof(rar_header));
rar->file_flags = archive_le16dec(rar_header.flags);
header_size = archive_le16dec(rar_header.size);
if (header_size < (int64_t)sizeof(file_header) + 7) {
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Invalid header size");
return (ARCHIVE_FATAL);
}
crc32_val = crc32(0, (const unsigned char *)p + 2, 7 - 2);
__archive_read_consume(a, 7);
if (!(rar->file_flags & FHD_SOLID))
{
rar->compression_method = 0;
rar->packed_size = 0;
rar->unp_size = 0;
rar->mtime = 0;
rar->ctime = 0;
rar->atime = 0;
rar->arctime = 0;
rar->mode = 0;
memset(&rar->salt, 0, sizeof(rar->salt));
rar->atime = 0;
rar->ansec = 0;
rar->ctime = 0;
rar->cnsec = 0;
rar->mtime = 0;
rar->mnsec = 0;
rar->arctime = 0;
rar->arcnsec = 0;
}
else
{
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"RAR solid archive support unavailable.");
return (ARCHIVE_FATAL);
}
if ((h = __archive_read_ahead(a, (size_t)header_size - 7, NULL)) == NULL)
return (ARCHIVE_FATAL);
/* File Header CRC check. */
crc32_val = crc32(crc32_val, h, (unsigned)(header_size - 7));
if ((crc32_val & 0xffff) != archive_le16dec(rar_header.crc)) {
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Header CRC error");
return (ARCHIVE_FATAL);
}
/* If no CRC error, Go on parsing File Header. */
p = h;
endp = p + header_size - 7;
memcpy(&file_header, p, sizeof(file_header));
p += sizeof(file_header);
rar->compression_method = file_header.method;
ttime = archive_le32dec(file_header.file_time);
rar->mtime = get_time(ttime);
rar->file_crc = archive_le32dec(file_header.file_crc);
if (rar->file_flags & FHD_PASSWORD)
{
archive_entry_set_is_data_encrypted(entry, 1);
rar->has_encrypted_entries = 1;
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"RAR encryption support unavailable.");
/* Since only the data part itself is encrypted, we can at least
   extract information about the currently processed entry and don't
   need to return ARCHIVE_FATAL here. */
/*return (ARCHIVE_FATAL);*/
}
if (rar->file_flags & FHD_LARGE)
{
memcpy(packed_size, file_header.pack_size, 4);
memcpy(packed_size + 4, p, 4); /* High pack size */
p += 4;
memcpy(unp_size, file_header.unp_size, 4);
memcpy(unp_size + 4, p, 4); /* High unpack size */
p += 4;
rar->packed_size = archive_le64dec(&packed_size);
rar->unp_size = archive_le64dec(&unp_size);
}
else
{
rar->packed_size = archive_le32dec(file_header.pack_size);
rar->unp_size = archive_le32dec(file_header.unp_size);
}
if (rar->packed_size < 0 || rar->unp_size < 0)
{
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Invalid sizes specified.");
return (ARCHIVE_FATAL);
}
rar->bytes_remaining = rar->packed_size;
/* TODO: RARv3 subblocks contain comments. For now the complete block is
* consumed at the end.
*/
if (head_type == NEWSUB_HEAD) {
size_t distance = p - (const char *)h;
header_size += rar->packed_size;
/* Make sure we have the extended data. */
if ((h = __archive_read_ahead(a, (size_t)header_size - 7, NULL)) == NULL)
return (ARCHIVE_FATAL);
p = h;
endp = p + header_size - 7;
p += distance;
}
filename_size = archive_le16dec(file_header.name_size);
if (p + filename_size > endp) {
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Invalid filename size");
return (ARCHIVE_FATAL);
}
if (rar->filename_allocated < filename_size * 2 + 2) {
char *newptr;
size_t newsize = filename_size * 2 + 2;
newptr = realloc(rar->filename, newsize);
if (newptr == NULL) {
archive_set_error(&a->archive, ENOMEM,
"Couldn't allocate memory.");
return (ARCHIVE_FATAL);
}
rar->filename = newptr;
rar->filename_allocated = newsize;
}
filename = rar->filename;
memcpy(filename, p, filename_size);
filename[filename_size] = '\0';
if (rar->file_flags & FHD_UNICODE)
{
if (filename_size != strlen(filename))
{
unsigned char highbyte, flagbits, flagbyte;
unsigned fn_end, offset;
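/*
 * RAR's compact unicode name format: the plain 8-bit name, a NUL, one
 * shared high byte, then a stream of 2-bit opcodes, one per UTF-16BE
 * unit produced -- 0: low byte only, 1: shared high byte + low byte,
 * 2: explicit 16-bit pair, 3: a run derived from the plain name,
 * optionally adjusted by a byte delta and the shared high byte.
 */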
end = filename_size;
fn_end = filename_size * 2;
filename_size = 0;
offset = (unsigned)strlen(filename) + 1;
highbyte = *(p + offset++);
flagbits = 0;
flagbyte = 0;
while (offset < end && filename_size < fn_end)
{
if (!flagbits)
{
flagbyte = *(p + offset++);
flagbits = 8;
}
flagbits -= 2;
switch((flagbyte >> flagbits) & 3)
{
case 0:
filename[filename_size++] = '\0';
filename[filename_size++] = *(p + offset++);
break;
case 1:
filename[filename_size++] = highbyte;
filename[filename_size++] = *(p + offset++);
break;
case 2:
filename[filename_size++] = *(p + offset + 1);
filename[filename_size++] = *(p + offset);
offset += 2;
break;
case 3:
{
char extra, high;
uint8_t length = *(p + offset++);
if (length & 0x80) {
extra = *(p + offset++);
high = (char)highbyte;
} else
extra = high = 0;
length = (length & 0x7f) + 2;
while (length && filename_size < fn_end) {
unsigned cp = filename_size >> 1;
filename[filename_size++] = high;
filename[filename_size++] = p[cp] + extra;
length--;
}
}
break;
}
}
if (filename_size > fn_end) {
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Invalid filename");
return (ARCHIVE_FATAL);
}
filename[filename_size++] = '\0';
/*
* Do not increment filename_size here as the computations below
* add the space for the terminating NUL explicitly.
*/
filename[filename_size] = '\0';
/* Decoded unicode form is UTF-16BE, so we have to update a string
* conversion object for it. */
if (rar->sconv_utf16be == NULL) {
rar->sconv_utf16be = archive_string_conversion_from_charset(
&a->archive, "UTF-16BE", 1);
if (rar->sconv_utf16be == NULL)
return (ARCHIVE_FATAL);
}
fn_sconv = rar->sconv_utf16be;
strp = filename;
while (memcmp(strp, "\x00\x00", 2))
{
if (!memcmp(strp, "\x00\\", 2))
*(strp + 1) = '/';
strp += 2;
}
p += offset;
} else {
/*
* If FHD_UNICODE is set but no unicode data, this file name form
* is UTF-8, so we have to update a string conversion object for
* it accordingly.
*/
if (rar->sconv_utf8 == NULL) {
rar->sconv_utf8 = archive_string_conversion_from_charset(
&a->archive, "UTF-8", 1);
if (rar->sconv_utf8 == NULL)
return (ARCHIVE_FATAL);
}
fn_sconv = rar->sconv_utf8;
while ((strp = strchr(filename, '\\')) != NULL)
*strp = '/';
p += filename_size;
}
}
else
{
fn_sconv = sconv;
while ((strp = strchr(filename, '\\')) != NULL)
*strp = '/';
p += filename_size;
}
/* Split file in a multivolume RAR; no further header processing needed. */
if (rar->filename_save &&
filename_size == rar->filename_save_size &&
!memcmp(rar->filename, rar->filename_save, filename_size + 1))
{
__archive_read_consume(a, header_size - 7);
rar->cursor++;
if (rar->cursor >= rar->nodes)
{
rar->nodes++;
if ((rar->dbo =
realloc(rar->dbo, sizeof(*rar->dbo) * rar->nodes)) == NULL)
{
archive_set_error(&a->archive, ENOMEM, "Couldn't allocate memory.");
return (ARCHIVE_FATAL);
}
rar->dbo[rar->cursor].header_size = header_size;
rar->dbo[rar->cursor].start_offset = -1;
rar->dbo[rar->cursor].end_offset = -1;
}
if (rar->dbo[rar->cursor].start_offset < 0)
{
rar->dbo[rar->cursor].start_offset = a->filter->position;
rar->dbo[rar->cursor].end_offset = rar->dbo[rar->cursor].start_offset +
rar->packed_size;
}
return ret;
}
rar->filename_save = (char*)realloc(rar->filename_save,
filename_size + 1);
memcpy(rar->filename_save, rar->filename, filename_size + 1);
rar->filename_save_size = filename_size;
/* Set info for seeking */
free(rar->dbo);
if ((rar->dbo = calloc(1, sizeof(*rar->dbo))) == NULL)
{
archive_set_error(&a->archive, ENOMEM, "Couldn't allocate memory.");
return (ARCHIVE_FATAL);
}
rar->dbo[0].header_size = header_size;
rar->dbo[0].start_offset = -1;
rar->dbo[0].end_offset = -1;
rar->cursor = 0;
rar->nodes = 1;
if (rar->file_flags & FHD_SALT)
{
if (p + 8 > endp) {
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Invalid header size");
return (ARCHIVE_FATAL);
}
memcpy(rar->salt, p, 8);
p += 8;
}
if (rar->file_flags & FHD_EXTTIME) {
if (read_exttime(p, rar, endp) < 0) {
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Invalid header size");
return (ARCHIVE_FATAL);
}
}
__archive_read_consume(a, header_size - 7);
rar->dbo[0].start_offset = a->filter->position;
rar->dbo[0].end_offset = rar->dbo[0].start_offset + rar->packed_size;
switch(file_header.host_os)
{
case OS_MSDOS:
case OS_OS2:
case OS_WIN32:
rar->mode = archive_le32dec(file_header.file_attr);
if (rar->mode & FILE_ATTRIBUTE_DIRECTORY)
rar->mode = AE_IFDIR | S_IXUSR | S_IXGRP | S_IXOTH;
else
rar->mode = AE_IFREG;
rar->mode |= S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH;
break;
case OS_UNIX:
case OS_MAC_OS:
case OS_BEOS:
rar->mode = archive_le32dec(file_header.file_attr);
break;
default:
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Unknown file attributes from RAR file's host OS");
return (ARCHIVE_FATAL);
}
rar->bytes_uncopied = rar->bytes_unconsumed = 0;
rar->lzss.position = rar->offset = 0;
rar->offset_seek = 0;
rar->dictionary_size = 0;
rar->offset_outgoing = 0;
rar->br.cache_avail = 0;
rar->br.avail_in = 0;
rar->crc_calculated = 0;
rar->entry_eof = 0;
rar->valid = 1;
rar->is_ppmd_block = 0;
rar->start_new_table = 1;
free(rar->unp_buffer);
rar->unp_buffer = NULL;
rar->unp_offset = 0;
rar->unp_buffer_size = UNP_BUFFER_SIZE;
memset(rar->lengthtable, 0, sizeof(rar->lengthtable));
__archive_ppmd7_functions.Ppmd7_Free(&rar->ppmd7_context);
rar->ppmd_valid = rar->ppmd_eod = 0;
/* Don't set any archive entries for non-file header types */
if (head_type == NEWSUB_HEAD)
return ret;
archive_entry_set_mtime(entry, rar->mtime, rar->mnsec);
archive_entry_set_ctime(entry, rar->ctime, rar->cnsec);
archive_entry_set_atime(entry, rar->atime, rar->ansec);
archive_entry_set_size(entry, rar->unp_size);
archive_entry_set_mode(entry, rar->mode);
if (archive_entry_copy_pathname_l(entry, filename, filename_size, fn_sconv))
{
if (errno == ENOMEM)
{
archive_set_error(&a->archive, ENOMEM,
"Can't allocate memory for Pathname");
return (ARCHIVE_FATAL);
}
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Pathname cannot be converted from %s to current locale.",
archive_string_conversion_charset_name(fn_sconv));
ret = (ARCHIVE_WARN);
}
if (((rar->mode) & AE_IFMT) == AE_IFLNK)
{
/* Make sure a symbolic-link file does not have its body. */
rar->bytes_remaining = 0;
archive_entry_set_size(entry, 0);
/* Read a symbolic-link name. */
if ((ret2 = read_symlink_stored(a, entry, sconv)) < (ARCHIVE_WARN))
return ret2;
if (ret > ret2)
ret = ret2;
}
if (rar->bytes_remaining == 0)
rar->entry_eof = 1;
return ret;
}
static time_t
get_time(int ttime)
{
struct tm tm;
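/* MS-DOS timestamp layout: bits 0-4 hold seconds/2, 5-10 minutes,
 * 11-15 hours, 16-20 day of month, 21-24 month (1-based) and
 * 25-31 years since 1980. */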
tm.tm_sec = 2 * (ttime & 0x1f);
tm.tm_min = (ttime >> 5) & 0x3f;
tm.tm_hour = (ttime >> 11) & 0x1f;
tm.tm_mday = (ttime >> 16) & 0x1f;
tm.tm_mon = ((ttime >> 21) & 0x0f) - 1;
tm.tm_year = ((ttime >> 25) & 0x7f) + 80;
tm.tm_isdst = -1;
return mktime(&tm);
}
static int
read_exttime(const char *p, struct rar *rar, const char *endp)
{
unsigned rmode, flags, rem, j, count;
int ttime, i;
struct tm *tm;
time_t t;
long nsec;
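/*
 * The extended-time block starts with a 16-bit flag word holding one
 * 4-bit field per timestamp (mtime, ctime, atime, archive time): bit 3
 * marks the timestamp present, bit 2 rounds it up by one second, and
 * bits 0-1 give the number of extra bytes that carry a sub-second
 * remainder in 100ns units.
 */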
if (p + 2 > endp)
return (-1);
flags = archive_le16dec(p);
p += 2;
for (i = 3; i >= 0; i--)
{
t = 0;
if (i == 3)
t = rar->mtime;
rmode = flags >> i * 4;
if (rmode & 8)
{
if (!t)
{
if (p + 4 > endp)
return (-1);
ttime = archive_le32dec(p);
t = get_time(ttime);
p += 4;
}
rem = 0;
count = rmode & 3;
if (p + count > endp)
return (-1);
for (j = 0; j < count; j++)
{
rem = (((unsigned)(unsigned char)*p) << 16) | (rem >> 8);
p++;
}
tm = localtime(&t);
nsec = tm->tm_sec + rem / NS_UNIT;
if (rmode & 4)
{
tm->tm_sec++;
t = mktime(tm);
}
if (i == 3)
{
rar->mtime = t;
rar->mnsec = nsec;
}
else if (i == 2)
{
rar->ctime = t;
rar->cnsec = nsec;
}
else if (i == 1)
{
rar->atime = t;
rar->ansec = nsec;
}
else
{
rar->arctime = t;
rar->arcnsec = nsec;
}
}
}
return (0);
}
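/*
 * Illustrative note (not from the original code): the 16-bit flags
 * word parsed by read_exttime() holds one 4-bit field per timestamp,
 * from bits 12-15 down to bits 0-3: mtime, ctime, atime, arctime.  In
 * each field, bit 3 marks the timestamp as present, bit 2 requests a
 * one-second round-up, and bits 0-1 give the count of extra sub-second
 * precision bytes.  For example, flags == 0x9000 means only mtime is
 * stored, with one precision byte and no round-up:
 */
#if 0
unsigned rmode = 0x9000 >> (3 * 4);   /* 0x9 = 1001b, the mtime field */
int present = (rmode & 8) != 0;       /* 1: mtime is present */
int roundup = (rmode & 4) != 0;       /* 0: no +1 second */
unsigned count = rmode & 3;           /* 1: one precision byte follows */
#endif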
static int
read_symlink_stored(struct archive_read *a, struct archive_entry *entry,
struct archive_string_conv *sconv)
{
const void *h;
const char *p;
struct rar *rar;
int ret = (ARCHIVE_OK);
rar = (struct rar *)(a->format->data);
if ((h = rar_read_ahead(a, (size_t)rar->packed_size, NULL)) == NULL)
return (ARCHIVE_FATAL);
p = h;
if (archive_entry_copy_symlink_l(entry,
p, (size_t)rar->packed_size, sconv))
{
if (errno == ENOMEM)
{
archive_set_error(&a->archive, ENOMEM,
"Can't allocate memory for link");
return (ARCHIVE_FATAL);
}
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"link cannot be converted from %s to current locale.",
archive_string_conversion_charset_name(sconv));
ret = (ARCHIVE_WARN);
}
__archive_read_consume(a, rar->packed_size);
return ret;
}
static int
read_data_stored(struct archive_read *a, const void **buff, size_t *size,
int64_t *offset)
{
struct rar *rar;
ssize_t bytes_avail;
rar = (struct rar *)(a->format->data);
if (rar->bytes_remaining == 0 &&
!(rar->main_flags & MHD_VOLUME && rar->file_flags & FHD_SPLIT_AFTER))
{
*buff = NULL;
*size = 0;
*offset = rar->offset;
if (rar->file_crc != rar->crc_calculated) {
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"File CRC error");
return (ARCHIVE_FATAL);
}
rar->entry_eof = 1;
return (ARCHIVE_EOF);
}
*buff = rar_read_ahead(a, 1, &bytes_avail);
if (bytes_avail <= 0)
{
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Truncated RAR file data");
return (ARCHIVE_FATAL);
}
*size = bytes_avail;
*offset = rar->offset;
rar->offset += bytes_avail;
rar->offset_seek += bytes_avail;
rar->bytes_remaining -= bytes_avail;
rar->bytes_unconsumed = bytes_avail;
/* Calculate File CRC. */
rar->crc_calculated = crc32(rar->crc_calculated, *buff,
(unsigned)bytes_avail);
return (ARCHIVE_OK);
}
static int
read_data_compressed(struct archive_read *a, const void **buff, size_t *size,
int64_t *offset)
{
struct rar *rar;
int64_t start, end, actualend;
size_t bs;
int ret = (ARCHIVE_OK), sym, code, lzss_offset, length, i;
rar = (struct rar *)(a->format->data);
do {
if (!rar->valid)
return (ARCHIVE_FATAL);
if (rar->ppmd_eod ||
(rar->dictionary_size && rar->offset >= rar->unp_size))
{
if (rar->unp_offset > 0) {
/*
 * We have unprocessed extracted data. Write it out.
 */
*buff = rar->unp_buffer;
*size = rar->unp_offset;
*offset = rar->offset_outgoing;
rar->offset_outgoing += *size;
/* Calculate File CRC. */
rar->crc_calculated = crc32(rar->crc_calculated, *buff,
(unsigned)*size);
rar->unp_offset = 0;
return (ARCHIVE_OK);
}
*buff = NULL;
*size = 0;
*offset = rar->offset;
if (rar->file_crc != rar->crc_calculated) {
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"File CRC error");
return (ARCHIVE_FATAL);
}
rar->entry_eof = 1;
return (ARCHIVE_EOF);
}
if (!rar->is_ppmd_block && rar->dictionary_size && rar->bytes_uncopied > 0)
{
if (rar->bytes_uncopied > (rar->unp_buffer_size - rar->unp_offset))
bs = rar->unp_buffer_size - rar->unp_offset;
else
bs = (size_t)rar->bytes_uncopied;
ret = copy_from_lzss_window(a, buff, rar->offset, (int)bs);
if (ret != ARCHIVE_OK)
return (ret);
rar->offset += bs;
rar->bytes_uncopied -= bs;
if (*buff != NULL) {
rar->unp_offset = 0;
*size = rar->unp_buffer_size;
*offset = rar->offset_outgoing;
rar->offset_outgoing += *size;
/* Calculate File CRC. */
rar->crc_calculated = crc32(rar->crc_calculated, *buff,
(unsigned)*size);
return (ret);
}
continue;
}
if (!rar->br.next_in &&
(ret = rar_br_preparation(a, &(rar->br))) < ARCHIVE_WARN)
return (ret);
if (rar->start_new_table && ((ret = parse_codes(a)) < (ARCHIVE_WARN)))
return (ret);
if (rar->is_ppmd_block)
{
if ((sym = __archive_ppmd7_functions.Ppmd7_DecodeSymbol(
&rar->ppmd7_context, &rar->range_dec.p)) < 0)
{
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Invalid symbol");
return (ARCHIVE_FATAL);
}
if(sym != rar->ppmd_escape)
{
lzss_emit_literal(rar, sym);
rar->bytes_uncopied++;
}
else
{
if ((code = __archive_ppmd7_functions.Ppmd7_DecodeSymbol(
&rar->ppmd7_context, &rar->range_dec.p)) < 0)
{
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Invalid symbol");
return (ARCHIVE_FATAL);
}
switch(code)
{
case 0:
rar->start_new_table = 1;
return read_data_compressed(a, buff, size, offset);
case 2:
rar->ppmd_eod = 1; /* End of PPMd data. */
continue;
case 3:
archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC,
"Parsing filters is unsupported.");
return (ARCHIVE_FAILED);
case 4:
lzss_offset = 0;
for (i = 2; i >= 0; i--)
{
if ((code = __archive_ppmd7_functions.Ppmd7_DecodeSymbol(
&rar->ppmd7_context, &rar->range_dec.p)) < 0)
{
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Invalid symbol");
return (ARCHIVE_FATAL);
}
lzss_offset |= code << (i * 8);
}
if ((length = __archive_ppmd7_functions.Ppmd7_DecodeSymbol(
&rar->ppmd7_context, &rar->range_dec.p)) < 0)
{
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Invalid symbol");
return (ARCHIVE_FATAL);
}
lzss_emit_match(rar, lzss_offset + 2, length + 32);
rar->bytes_uncopied += length + 32;
break;
case 5:
if ((length = __archive_ppmd7_functions.Ppmd7_DecodeSymbol(
&rar->ppmd7_context, &rar->range_dec.p)) < 0)
{
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Invalid symbol");
return (ARCHIVE_FATAL);
}
lzss_emit_match(rar, 1, length + 4);
rar->bytes_uncopied += length + 4;
break;
default:
lzss_emit_literal(rar, sym);
rar->bytes_uncopied++;
}
}
}
else
{
start = rar->offset;
end = start + rar->dictionary_size;
rar->filterstart = INT64_MAX;
if ((actualend = expand(a, end)) < 0)
return ((int)actualend);
rar->bytes_uncopied = actualend - start;
if (rar->bytes_uncopied == 0) {
/* Only broken RAR files lead to this case.
 * NOTE: if this case were possible with a well-formed RAR file,
 * we would need to find out what is actually wrong and
 * how to handle it. */
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Internal error extracting RAR file");
return (ARCHIVE_FATAL);
}
}
if (rar->bytes_uncopied > (rar->unp_buffer_size - rar->unp_offset))
bs = rar->unp_buffer_size - rar->unp_offset;
else
bs = (size_t)rar->bytes_uncopied;
ret = copy_from_lzss_window(a, buff, rar->offset, (int)bs);
if (ret != ARCHIVE_OK)
return (ret);
rar->offset += bs;
rar->bytes_uncopied -= bs;
/*
 * If *buff is NULL, unp_buffer is not full yet,
 * so keep extracting from the RAR file.
 */
} while (*buff == NULL);
rar->unp_offset = 0;
*size = rar->unp_buffer_size;
*offset = rar->offset_outgoing;
rar->offset_outgoing += *size;
/* Calculate File CRC. */
rar->crc_calculated = crc32(rar->crc_calculated, *buff, (unsigned)*size);
return ret;
}
static int
parse_codes(struct archive_read *a)
{
int i, j, val, n, r;
unsigned char bitlengths[MAX_SYMBOLS], zerocount, ppmd_flags;
unsigned int maxorder;
struct huffman_code precode;
struct rar *rar = (struct rar *)(a->format->data);
struct rar_br *br = &(rar->br);
free_codes(a);
/* Skip to the next byte */
rar_br_consume_unalined_bits(br);
/* PPMd block flag */
if (!rar_br_read_ahead(a, br, 1))
goto truncated_data;
if ((rar->is_ppmd_block = rar_br_bits(br, 1)) != 0)
{
rar_br_consume(br, 1);
if (!rar_br_read_ahead(a, br, 7))
goto truncated_data;
ppmd_flags = rar_br_bits(br, 7);
rar_br_consume(br, 7);
/* Memory is allocated in MB */
if (ppmd_flags & 0x20)
{
if (!rar_br_read_ahead(a, br, 8))
goto truncated_data;
rar->dictionary_size = (rar_br_bits(br, 8) + 1) << 20;
rar_br_consume(br, 8);
}
if (ppmd_flags & 0x40)
{
if (!rar_br_read_ahead(a, br, 8))
goto truncated_data;
rar->ppmd_escape = rar->ppmd7_context.InitEsc = rar_br_bits(br, 8);
rar_br_consume(br, 8);
}
else
rar->ppmd_escape = 2;
if (ppmd_flags & 0x20)
{
maxorder = (ppmd_flags & 0x1F) + 1;
if(maxorder > 16)
maxorder = 16 + (maxorder - 16) * 3;
if (maxorder == 1)
{
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Truncated RAR file data");
return (ARCHIVE_FATAL);
}
/* Make sure ppmd7_context is freed before Ppmd7_Construct
 * because reading a broken file can cause this abnormal sequence. */
__archive_ppmd7_functions.Ppmd7_Free(&rar->ppmd7_context);
rar->bytein.a = a;
rar->bytein.Read = &ppmd_read;
__archive_ppmd7_functions.PpmdRAR_RangeDec_CreateVTable(&rar->range_dec);
rar->range_dec.Stream = &rar->bytein;
__archive_ppmd7_functions.Ppmd7_Construct(&rar->ppmd7_context);
if (rar->dictionary_size == 0) {
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Invalid zero dictionary size");
return (ARCHIVE_FATAL);
}
if (!__archive_ppmd7_functions.Ppmd7_Alloc(&rar->ppmd7_context,
rar->dictionary_size))
{
archive_set_error(&a->archive, ENOMEM,
"Out of memory");
return (ARCHIVE_FATAL);
}
if (!__archive_ppmd7_functions.PpmdRAR_RangeDec_Init(&rar->range_dec))
{
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Unable to initialize PPMd range decoder");
return (ARCHIVE_FATAL);
}
__archive_ppmd7_functions.Ppmd7_Init(&rar->ppmd7_context, maxorder);
rar->ppmd_valid = 1;
}
else
{
if (!rar->ppmd_valid) {
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Invalid PPMd sequence");
return (ARCHIVE_FATAL);
}
if (!__archive_ppmd7_functions.PpmdRAR_RangeDec_Init(&rar->range_dec))
{
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Unable to initialize PPMd range decoder");
return (ARCHIVE_FATAL);
}
}
}
else
{
rar_br_consume(br, 1);
/* Keep existing table flag */
if (!rar_br_read_ahead(a, br, 1))
goto truncated_data;
if (!rar_br_bits(br, 1))
memset(rar->lengthtable, 0, sizeof(rar->lengthtable));
rar_br_consume(br, 1);
memset(&bitlengths, 0, sizeof(bitlengths));
for (i = 0; i < MAX_SYMBOLS;)
{
if (!rar_br_read_ahead(a, br, 4))
goto truncated_data;
bitlengths[i++] = rar_br_bits(br, 4);
rar_br_consume(br, 4);
if (bitlengths[i-1] == 0xF)
{
if (!rar_br_read_ahead(a, br, 4))
goto truncated_data;
zerocount = rar_br_bits(br, 4);
rar_br_consume(br, 4);
if (zerocount)
{
i--;
for (j = 0; j < zerocount + 2 && i < MAX_SYMBOLS; j++)
bitlengths[i++] = 0;
}
}
}
memset(&precode, 0, sizeof(precode));
r = create_code(a, &precode, bitlengths, MAX_SYMBOLS, MAX_SYMBOL_LENGTH);
if (r != ARCHIVE_OK) {
free(precode.tree);
free(precode.table);
return (r);
}
for (i = 0; i < HUFFMAN_TABLE_SIZE;)
{
if ((val = read_next_symbol(a, &precode)) < 0) {
free(precode.tree);
free(precode.table);
return (ARCHIVE_FATAL);
}
if (val < 16)
{
rar->lengthtable[i] = (rar->lengthtable[i] + val) & 0xF;
i++;
}
else if (val < 18)
{
if (i == 0)
{
free(precode.tree);
free(precode.table);
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Internal error extracting RAR file.");
return (ARCHIVE_FATAL);
}
if(val == 16) {
if (!rar_br_read_ahead(a, br, 3)) {
free(precode.tree);
free(precode.table);
goto truncated_data;
}
n = rar_br_bits(br, 3) + 3;
rar_br_consume(br, 3);
} else {
if (!rar_br_read_ahead(a, br, 7)) {
free(precode.tree);
free(precode.table);
goto truncated_data;
}
n = rar_br_bits(br, 7) + 11;
rar_br_consume(br, 7);
}
for (j = 0; j < n && i < HUFFMAN_TABLE_SIZE; j++)
{
rar->lengthtable[i] = rar->lengthtable[i-1];
i++;
}
}
else
{
if(val == 18) {
if (!rar_br_read_ahead(a, br, 3)) {
free(precode.tree);
free(precode.table);
goto truncated_data;
}
n = rar_br_bits(br, 3) + 3;
rar_br_consume(br, 3);
} else {
if (!rar_br_read_ahead(a, br, 7)) {
free(precode.tree);
free(precode.table);
goto truncated_data;
}
n = rar_br_bits(br, 7) + 11;
rar_br_consume(br, 7);
}
for(j = 0; j < n && i < HUFFMAN_TABLE_SIZE; j++)
rar->lengthtable[i++] = 0;
}
}
free(precode.tree);
free(precode.table);
r = create_code(a, &rar->maincode, &rar->lengthtable[0], MAINCODE_SIZE,
MAX_SYMBOL_LENGTH);
if (r != ARCHIVE_OK)
return (r);
r = create_code(a, &rar->offsetcode, &rar->lengthtable[MAINCODE_SIZE],
OFFSETCODE_SIZE, MAX_SYMBOL_LENGTH);
if (r != ARCHIVE_OK)
return (r);
r = create_code(a, &rar->lowoffsetcode,
&rar->lengthtable[MAINCODE_SIZE + OFFSETCODE_SIZE],
LOWOFFSETCODE_SIZE, MAX_SYMBOL_LENGTH);
if (r != ARCHIVE_OK)
return (r);
r = create_code(a, &rar->lengthcode,
&rar->lengthtable[MAINCODE_SIZE + OFFSETCODE_SIZE +
LOWOFFSETCODE_SIZE], LENGTHCODE_SIZE, MAX_SYMBOL_LENGTH);
if (r != ARCHIVE_OK)
return (r);
}
if (!rar->dictionary_size || !rar->lzss.window)
{
/* Seems as though dictionary sizes are not used. Even so, minimize
* memory usage as much as possible.
*/
void *new_window;
unsigned int new_size;
if (rar->unp_size >= DICTIONARY_MAX_SIZE)
new_size = DICTIONARY_MAX_SIZE;
else
new_size = rar_fls((unsigned int)rar->unp_size) << 1;
if (new_size == 0) {
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Zero window size is invalid.");
return (ARCHIVE_FATAL);
}
new_window = realloc(rar->lzss.window, new_size);
if (new_window == NULL) {
archive_set_error(&a->archive, ENOMEM,
"Unable to allocate memory for uncompressed data.");
return (ARCHIVE_FATAL);
}
rar->lzss.window = (unsigned char *)new_window;
rar->dictionary_size = new_size;
memset(rar->lzss.window, 0, rar->dictionary_size);
rar->lzss.mask = rar->dictionary_size - 1;
}
rar->start_new_table = 0;
return (ARCHIVE_OK);
truncated_data:
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Truncated RAR file data");
rar->valid = 0;
return (ARCHIVE_FATAL);
}
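/*
 * Illustrative note (not from the original code): the length-table
 * loop in parse_codes() above layers a small run-length scheme on top
 * of the precode: values 0-15 are literal code lengths (added to the
 * previous table entry modulo 16), 16 and 17 repeat the previous
 * length 3-10 or 11-138 times, and 18 and 19 emit the same ranges of
 * zero lengths.  A worked example, assuming the precode yields:
 *
 *   5, 16 (+n=3)  ->  lengthtable: 5 5 5 5
 *   18 (+n=3)     ->  followed by: 0 0 0
 */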
static void
free_codes(struct archive_read *a)
{
struct rar *rar = (struct rar *)(a->format->data);
free(rar->maincode.tree);
free(rar->offsetcode.tree);
free(rar->lowoffsetcode.tree);
free(rar->lengthcode.tree);
free(rar->maincode.table);
free(rar->offsetcode.table);
free(rar->lowoffsetcode.table);
free(rar->lengthcode.table);
memset(&rar->maincode, 0, sizeof(rar->maincode));
memset(&rar->offsetcode, 0, sizeof(rar->offsetcode));
memset(&rar->lowoffsetcode, 0, sizeof(rar->lowoffsetcode));
memset(&rar->lengthcode, 0, sizeof(rar->lengthcode));
}
static int
read_next_symbol(struct archive_read *a, struct huffman_code *code)
{
unsigned char bit;
unsigned int bits;
int length, value, node;
struct rar *rar;
struct rar_br *br;
if (!code->table)
{
if (make_table(a, code) != (ARCHIVE_OK))
return -1;
}
rar = (struct rar *)(a->format->data);
br = &(rar->br);
/* Look ahead (peek) at bits */
if (!rar_br_read_ahead(a, br, code->tablesize)) {
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Truncated RAR file data");
rar->valid = 0;
return -1;
}
bits = rar_br_bits(br, code->tablesize);
length = code->table[bits].length;
value = code->table[bits].value;
if (length < 0)
{
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Invalid prefix code in bitstream");
return -1;
}
if (length <= code->tablesize)
{
/* Skip length bits */
rar_br_consume(br, length);
return value;
}
/* Skip tablesize bits */
rar_br_consume(br, code->tablesize);
node = value;
while (!(code->tree[node].branches[0] ==
code->tree[node].branches[1]))
{
if (!rar_br_read_ahead(a, br, 1)) {
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Truncated RAR file data");
rar->valid = 0;
return -1;
}
bit = rar_br_bits(br, 1);
rar_br_consume(br, 1);
if (code->tree[node].branches[bit] < 0)
{
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Invalid prefix code in bitstream");
return -1;
}
node = code->tree[node].branches[bit];
}
return code->tree[node].branches[0];
}
static int
create_code(struct archive_read *a, struct huffman_code *code,
unsigned char *lengths, int numsymbols, char maxlength)
{
int i, j, codebits = 0, symbolsleft = numsymbols;
code->numentries = 0;
code->numallocatedentries = 0;
if (new_node(code) < 0) {
archive_set_error(&a->archive, ENOMEM,
"Unable to allocate memory for node data.");
return (ARCHIVE_FATAL);
}
code->numentries = 1;
code->minlength = INT_MAX;
code->maxlength = INT_MIN;
codebits = 0;
for(i = 1; i <= maxlength; i++)
{
for(j = 0; j < numsymbols; j++)
{
if (lengths[j] != i) continue;
if (add_value(a, code, j, codebits, i) != ARCHIVE_OK)
return (ARCHIVE_FATAL);
codebits++;
if (--symbolsleft <= 0) break;
}
codebits <<= 1;
}
return (ARCHIVE_OK);
}
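/*
 * Illustrative worked example (not from the original code): for
 * lengths[] = {2, 2, 3, 3}, the loop in create_code() above assigns
 * canonical prefix codes by counting up within each length and
 * left-shifting between lengths:
 *
 *   symbol 0: 00   symbol 1: 01
 *   symbol 2: 100  symbol 3: 101
 */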
static int
add_value(struct archive_read *a, struct huffman_code *code, int value,
int codebits, int length)
{
int repeatpos, lastnode, bitpos, bit, repeatnode, nextnode;
free(code->table);
code->table = NULL;
if(length > code->maxlength)
code->maxlength = length;
if(length < code->minlength)
code->minlength = length;
repeatpos = -1; /* repeating codes are unused here, so the
                 * bitpos == repeatpos branches below never trigger */
if (repeatpos == 0 || (repeatpos >= 0
&& (((codebits >> (repeatpos - 1)) & 3) == 0
|| ((codebits >> (repeatpos - 1)) & 3) == 3)))
{
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Invalid repeat position");
return (ARCHIVE_FATAL);
}
lastnode = 0;
for (bitpos = length - 1; bitpos >= 0; bitpos--)
{
bit = (codebits >> bitpos) & 1;
/* Leaf node check */
if (code->tree[lastnode].branches[0] ==
code->tree[lastnode].branches[1])
{
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Prefix found");
return (ARCHIVE_FATAL);
}
if (bitpos == repeatpos)
{
/* Open branch check */
if (!(code->tree[lastnode].branches[bit] < 0))
{
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Invalid repeating code");
return (ARCHIVE_FATAL);
}
if ((repeatnode = new_node(code)) < 0) {
archive_set_error(&a->archive, ENOMEM,
"Unable to allocate memory for node data.");
return (ARCHIVE_FATAL);
}
if ((nextnode = new_node(code)) < 0) {
archive_set_error(&a->archive, ENOMEM,
"Unable to allocate memory for node data.");
return (ARCHIVE_FATAL);
}
/* Set branches */
code->tree[lastnode].branches[bit] = repeatnode;
code->tree[repeatnode].branches[bit] = repeatnode;
code->tree[repeatnode].branches[bit^1] = nextnode;
lastnode = nextnode;
bitpos++; /* terminating bit already handled, skip it */
}
else
{
/* Open branch check */
if (code->tree[lastnode].branches[bit] < 0)
{
if (new_node(code) < 0) {
archive_set_error(&a->archive, ENOMEM,
"Unable to allocate memory for node data.");
return (ARCHIVE_FATAL);
}
code->tree[lastnode].branches[bit] = code->numentries++;
}
/* set to branch */
lastnode = code->tree[lastnode].branches[bit];
}
}
if (!(code->tree[lastnode].branches[0] == -1
&& code->tree[lastnode].branches[1] == -2))
{
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Prefix found");
return (ARCHIVE_FATAL);
}
/* Set leaf value */
code->tree[lastnode].branches[0] = value;
code->tree[lastnode].branches[1] = value;
return (ARCHIVE_OK);
}
static int
new_node(struct huffman_code *code)
{
void *new_tree;
if (code->numallocatedentries == code->numentries) {
int new_num_entries = 256;
if (code->numentries > 0) {
new_num_entries = code->numentries * 2;
}
new_tree = realloc(code->tree, new_num_entries * sizeof(*code->tree));
if (new_tree == NULL)
return (-1);
code->tree = (struct huffman_tree_node *)new_tree;
code->numallocatedentries = new_num_entries;
}
code->tree[code->numentries].branches[0] = -1;
code->tree[code->numentries].branches[1] = -2;
return 1;
}
static int
make_table(struct archive_read *a, struct huffman_code *code)
{
if (code->maxlength < code->minlength || code->maxlength > 10)
code->tablesize = 10;
else
code->tablesize = code->maxlength;
code->table =
    (struct huffman_table_entry *)calloc(1, sizeof(*code->table)
    * ((size_t)1 << code->tablesize));
if (code->table == NULL) {
  archive_set_error(&a->archive, ENOMEM, "Unable to allocate memory.");
  return (ARCHIVE_FATAL);
}
return make_table_recurse(a, code, 0, code->table, 0, code->tablesize);
}
static int
make_table_recurse(struct archive_read *a, struct huffman_code *code, int node,
struct huffman_table_entry *table, int depth,
int maxdepth)
{
int currtablesize, i, ret = (ARCHIVE_OK);
if (!code->tree)
{
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Huffman tree was not created.");
return (ARCHIVE_FATAL);
}
if (node < 0 || node >= code->numentries)
{
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Invalid location to Huffman tree specified.");
return (ARCHIVE_FATAL);
}
currtablesize = 1 << (maxdepth - depth);
if (code->tree[node].branches[0] ==
code->tree[node].branches[1])
{
for(i = 0; i < currtablesize; i++)
{
table[i].length = depth;
table[i].value = code->tree[node].branches[0];
}
}
else if (node < 0)
{
for(i = 0; i < currtablesize; i++)
table[i].length = -1;
}
else
{
if(depth == maxdepth)
{
table[0].length = maxdepth + 1;
table[0].value = node;
}
else
{
ret |= make_table_recurse(a, code, code->tree[node].branches[0], table,
depth + 1, maxdepth);
ret |= make_table_recurse(a, code, code->tree[node].branches[1],
table + currtablesize / 2, depth + 1, maxdepth);
}
}
return ret;
}
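/*
 * Illustrative note (not from the original code): make_table_recurse()
 * builds a flat lookup table of 2^tablesize entries.  A code of length
 * d <= tablesize fills all 2^(tablesize - d) entries whose top d bits
 * match it; e.g. with tablesize == 3, code "01" fills entries 010 and
 * 011 with length 2 and its symbol value.  Longer codes store a node
 * index with length tablesize + 1, and read_next_symbol() falls back
 * to walking the tree from that node.
 */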
static int64_t
expand(struct archive_read *a, int64_t end)
{
static const unsigned char lengthbases[] =
{ 0, 1, 2, 3, 4, 5, 6,
7, 8, 10, 12, 14, 16, 20,
24, 28, 32, 40, 48, 56, 64,
80, 96, 112, 128, 160, 192, 224 };
static const unsigned char lengthbits[] =
{ 0, 0, 0, 0, 0, 0, 0,
0, 1, 1, 1, 1, 2, 2,
2, 2, 3, 3, 3, 3, 4,
4, 4, 4, 5, 5, 5, 5 };
static const unsigned int offsetbases[] =
{ 0, 1, 2, 3, 4, 6,
8, 12, 16, 24, 32, 48,
64, 96, 128, 192, 256, 384,
512, 768, 1024, 1536, 2048, 3072,
4096, 6144, 8192, 12288, 16384, 24576,
32768, 49152, 65536, 98304, 131072, 196608,
262144, 327680, 393216, 458752, 524288, 589824,
655360, 720896, 786432, 851968, 917504, 983040,
1048576, 1310720, 1572864, 1835008, 2097152, 2359296,
2621440, 2883584, 3145728, 3407872, 3670016, 3932160 };
static const unsigned char offsetbits[] =
{ 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4,
5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10,
11, 11, 12, 12, 13, 13, 14, 14, 15, 15, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18 };
static const unsigned char shortbases[] =
{ 0, 4, 8, 16, 32, 64, 128, 192 };
static const unsigned char shortbits[] =
{ 2, 2, 3, 4, 5, 6, 6, 6 };
int symbol, offs, len, offsindex, lensymbol, i, offssymbol, lowoffsetsymbol;
unsigned char newfile;
struct rar *rar = (struct rar *)(a->format->data);
struct rar_br *br = &(rar->br);
if (rar->filterstart < end)
end = rar->filterstart;
while (1)
{
if (rar->output_last_match &&
lzss_position(&rar->lzss) + rar->lastlength <= end)
{
lzss_emit_match(rar, rar->lastoffset, rar->lastlength);
rar->output_last_match = 0;
}
if(rar->is_ppmd_block || rar->output_last_match ||
lzss_position(&rar->lzss) >= end)
return lzss_position(&rar->lzss);
if ((symbol = read_next_symbol(a, &rar->maincode)) < 0)
return (ARCHIVE_FATAL);
rar->output_last_match = 0;
if (symbol < 256)
{
lzss_emit_literal(rar, symbol);
continue;
}
else if (symbol == 256)
{
if (!rar_br_read_ahead(a, br, 1))
goto truncated_data;
newfile = !rar_br_bits(br, 1);
rar_br_consume(br, 1);
if(newfile)
{
rar->start_new_block = 1;
if (!rar_br_read_ahead(a, br, 1))
goto truncated_data;
rar->start_new_table = rar_br_bits(br, 1);
rar_br_consume(br, 1);
return lzss_position(&rar->lzss);
}
else
{
if (parse_codes(a) != ARCHIVE_OK)
return (ARCHIVE_FATAL);
continue;
}
}
else if(symbol==257)
{
archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC,
"Parsing filters is unsupported.");
return (ARCHIVE_FAILED);
}
else if(symbol==258)
{
if(rar->lastlength == 0)
continue;
offs = rar->lastoffset;
len = rar->lastlength;
}
else if (symbol <= 262)
{
offsindex = symbol - 259;
offs = rar->oldoffset[offsindex];
if ((lensymbol = read_next_symbol(a, &rar->lengthcode)) < 0)
goto bad_data;
if (lensymbol >= (int)(sizeof(lengthbases)/sizeof(lengthbases[0])))
  goto bad_data;
if (lensymbol >= (int)(sizeof(lengthbits)/sizeof(lengthbits[0])))
  goto bad_data;
len = lengthbases[lensymbol] + 2;
if (lengthbits[lensymbol] > 0) {
if (!rar_br_read_ahead(a, br, lengthbits[lensymbol]))
goto truncated_data;
len += rar_br_bits(br, lengthbits[lensymbol]);
rar_br_consume(br, lengthbits[lensymbol]);
}
for (i = offsindex; i > 0; i--)
rar->oldoffset[i] = rar->oldoffset[i-1];
rar->oldoffset[0] = offs;
}
else if(symbol<=270)
{
offs = shortbases[symbol-263] + 1;
if(shortbits[symbol-263] > 0) {
if (!rar_br_read_ahead(a, br, shortbits[symbol-263]))
goto truncated_data;
offs += rar_br_bits(br, shortbits[symbol-263]);
rar_br_consume(br, shortbits[symbol-263]);
}
len = 2;
for(i = 3; i > 0; i--)
rar->oldoffset[i] = rar->oldoffset[i-1];
rar->oldoffset[0] = offs;
}
else
{
if (symbol-271 >= (int)(sizeof(lengthbases)/sizeof(lengthbases[0])))
  goto bad_data;
if (symbol-271 >= (int)(sizeof(lengthbits)/sizeof(lengthbits[0])))
  goto bad_data;
len = lengthbases[symbol-271]+3;
if(lengthbits[symbol-271] > 0) {
if (!rar_br_read_ahead(a, br, lengthbits[symbol-271]))
goto truncated_data;
len += rar_br_bits(br, lengthbits[symbol-271]);
rar_br_consume(br, lengthbits[symbol-271]);
}
if ((offssymbol = read_next_symbol(a, &rar->offsetcode)) < 0)
goto bad_data;
if (offssymbol >= (int)(sizeof(offsetbases)/sizeof(offsetbases[0])))
  goto bad_data;
if (offssymbol >= (int)(sizeof(offsetbits)/sizeof(offsetbits[0])))
  goto bad_data;
offs = offsetbases[offssymbol]+1;
if(offsetbits[offssymbol] > 0)
{
if(offssymbol > 9)
{
if(offsetbits[offssymbol] > 4) {
if (!rar_br_read_ahead(a, br, offsetbits[offssymbol] - 4))
goto truncated_data;
offs += rar_br_bits(br, offsetbits[offssymbol] - 4) << 4;
rar_br_consume(br, offsetbits[offssymbol] - 4);
}
if(rar->numlowoffsetrepeats > 0)
{
rar->numlowoffsetrepeats--;
offs += rar->lastlowoffset;
}
else
{
if ((lowoffsetsymbol =
read_next_symbol(a, &rar->lowoffsetcode)) < 0)
return (ARCHIVE_FATAL);
if(lowoffsetsymbol == 16)
{
rar->numlowoffsetrepeats = 15;
offs += rar->lastlowoffset;
}
else
{
offs += lowoffsetsymbol;
rar->lastlowoffset = lowoffsetsymbol;
}
}
}
else {
if (!rar_br_read_ahead(a, br, offsetbits[offssymbol]))
goto truncated_data;
offs += rar_br_bits(br, offsetbits[offssymbol]);
rar_br_consume(br, offsetbits[offssymbol]);
}
}
if (offs >= 0x40000)
len++;
if (offs >= 0x2000)
len++;
for(i = 3; i > 0; i--)
rar->oldoffset[i] = rar->oldoffset[i-1];
rar->oldoffset[0] = offs;
}
rar->lastoffset = offs;
rar->lastlength = len;
rar->output_last_match = 1;
}
truncated_data:
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Truncated RAR file data");
rar->valid = 0;
return (ARCHIVE_FATAL);
bad_data:
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Bad RAR file data");
return (ARCHIVE_FATAL);
}
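/*
 * Illustrative note (not from the original code): main-code symbols
 * decoded by expand() fall into fixed ranges: 0-255 are literals, 256
 * ends the block, 257 introduces a filter, 258 repeats the last match,
 * 259-262 reuse one of the four most recent offsets, 263-270 are short
 * matches with 8 precomputed offsets, and 271 and up take a length
 * from lengthbases[]/lengthbits[] plus a separately coded offset.
 * For instance, symbol 280 indexes entry 9, so the match length is
 * lengthbases[9] + 3 = 13, plus lengthbits[9] = 1 extra bit.
 */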
static int
copy_from_lzss_window(struct archive_read *a, const void **buffer,
int64_t startpos, int length)
{
int windowoffs, firstpart;
struct rar *rar = (struct rar *)(a->format->data);
if (!rar->unp_buffer)
{
if ((rar->unp_buffer = malloc(rar->unp_buffer_size)) == NULL)
{
archive_set_error(&a->archive, ENOMEM,
"Unable to allocate memory for uncompressed data.");
return (ARCHIVE_FATAL);
}
}
windowoffs = lzss_offset_for_position(&rar->lzss, startpos);
if(windowoffs + length <= lzss_size(&rar->lzss)) {
memcpy(&rar->unp_buffer[rar->unp_offset], &rar->lzss.window[windowoffs],
length);
} else if (length <= lzss_size(&rar->lzss)) {
firstpart = lzss_size(&rar->lzss) - windowoffs;
if (firstpart < 0) {
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Bad RAR file data");
return (ARCHIVE_FATAL);
}
if (firstpart < length) {
memcpy(&rar->unp_buffer[rar->unp_offset],
&rar->lzss.window[windowoffs], firstpart);
memcpy(&rar->unp_buffer[rar->unp_offset + firstpart],
&rar->lzss.window[0], length - firstpart);
} else {
memcpy(&rar->unp_buffer[rar->unp_offset],
&rar->lzss.window[windowoffs], length);
}
} else {
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Bad RAR file data");
return (ARCHIVE_FATAL);
}
rar->unp_offset += length;
if (rar->unp_offset >= rar->unp_buffer_size)
*buffer = rar->unp_buffer;
else
*buffer = NULL;
return (ARCHIVE_OK);
}
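/*
 * Illustrative note (not from the original code): the LZSS window is
 * circular, so a copy that starts near the end may wrap.  The
 * firstpart split above handles that; e.g. with a 16-byte window,
 * windowoffs == 14 and length == 5 copy bytes 14-15 first and
 * bytes 0-2 second.
 */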
static const void *
rar_read_ahead(struct archive_read *a, size_t min, ssize_t *avail)
{
struct rar *rar = (struct rar *)(a->format->data);
const void *h = __archive_read_ahead(a, min, avail);
int ret;
if (avail)
{
if (a->archive.read_data_is_posix_read && *avail > (ssize_t)a->archive.read_data_requested)
*avail = a->archive.read_data_requested;
if (*avail > rar->bytes_remaining)
*avail = (ssize_t)rar->bytes_remaining;
if (*avail < 0)
return NULL;
else if (*avail == 0 && rar->main_flags & MHD_VOLUME &&
rar->file_flags & FHD_SPLIT_AFTER)
{
ret = archive_read_format_rar_read_header(a, a->entry);
if (ret == (ARCHIVE_EOF))
{
rar->has_endarc_header = 1;
ret = archive_read_format_rar_read_header(a, a->entry);
}
if (ret != (ARCHIVE_OK))
return NULL;
return rar_read_ahead(a, min, avail);
}
}
return h;
}
| ./CrossVul/dataset_final_sorted/CWE-416/c/bad_60_0 |
crossvul-cpp_data_good_180_0 | /* radare - LGPL - Copyright 2010-2016 - pancake, nibble */
#include <r_anal.h>
#include <r_util.h>
#include <r_list.h>
#include <limits.h>
#define DFLT_NINSTR 3
R_API RAnalBlock *r_anal_bb_new() {
RAnalBlock *bb = R_NEW0 (RAnalBlock);
if (!bb) {
return NULL;
}
bb->addr = UT64_MAX;
bb->jump = UT64_MAX;
bb->fail = UT64_MAX;
bb->switch_op = NULL;
bb->type = R_ANAL_BB_TYPE_NULL;
bb->cond = NULL;
bb->fingerprint = NULL;
bb->diff = NULL; //r_anal_diff_new ();
bb->label = NULL;
bb->op_pos = R_NEWS0 (ut16, DFLT_NINSTR);
bb->op_pos_size = DFLT_NINSTR;
bb->parent_reg_arena = NULL;
bb->stackptr = 0;
bb->parent_stackptr = INT_MAX;
return bb;
}
R_API void r_anal_bb_free(RAnalBlock *bb) {
if (!bb) {
return;
}
r_anal_cond_free (bb->cond);
R_FREE (bb->fingerprint);
r_anal_diff_free (bb->diff);
bb->diff = NULL;
R_FREE (bb->op_bytes);
r_anal_switch_op_free (bb->switch_op);
bb->switch_op = NULL;
bb->fingerprint = NULL;
bb->cond = NULL;
R_FREE (bb->label);
R_FREE (bb->op_pos);
R_FREE (bb->parent_reg_arena);
if (bb->prev) {
if (bb->prev->jumpbb == bb) {
bb->prev->jumpbb = NULL;
}
if (bb->prev->failbb == bb) {
bb->prev->failbb = NULL;
}
bb->prev = NULL;
}
if (bb->jumpbb) {
bb->jumpbb->prev = NULL;
bb->jumpbb = NULL;
}
if (bb->failbb) {
bb->failbb->prev = NULL;
bb->failbb = NULL;
}
if (bb->next) {
// avoid double free
bb->next->prev = NULL;
}
R_FREE (bb); // R_FREE also NULLs the pointer, guarding against double free
}
R_API RList *r_anal_bb_list_new() {
RList *list = r_list_newf ((RListFree)r_anal_bb_free);
if (!list) {
return NULL;
}
return list;
}
R_API int r_anal_bb(RAnal *anal, RAnalBlock *bb, ut64 addr, ut8 *buf, ut64 len, int head) {
RAnalOp *op = NULL;
int oplen, idx = 0;
if (bb->addr == -1) {
bb->addr = addr;
}
len -= 16; // XXX: hack to avoid segfault by x86im
while (idx < len) {
// TODO: too slow object construction
if (!(op = r_anal_op_new ())) {
eprintf ("Error: new (op)\n");
return R_ANAL_RET_ERROR;
}
if ((oplen = r_anal_op (anal, op, addr + idx, buf + idx, len - idx, R_ANAL_OP_MASK_VAL)) == 0) {
r_anal_op_free (op);
op = NULL;
if (idx == 0) {
VERBOSE_ANAL eprintf ("Unknown opcode at 0x%08"PFMT64x"\n", addr+idx);
return R_ANAL_RET_END;
}
break;
}
if (oplen < 1) {
goto beach;
}
r_anal_bb_set_offset (bb, bb->ninstr++, addr + idx - bb->addr);
idx += oplen;
bb->size += oplen;
if (head) {
bb->type = R_ANAL_BB_TYPE_HEAD;
}
switch (op->type) {
case R_ANAL_OP_TYPE_CMP:
r_anal_cond_free (bb->cond);
bb->cond = r_anal_cond_new_from_op (op);
break;
case R_ANAL_OP_TYPE_CJMP:
if (bb->cond) {
// TODO: get values from anal backend
bb->cond->type = R_ANAL_COND_EQ;
} else VERBOSE_ANAL eprintf ("Unknown conditional for block 0x%"PFMT64x"\n", bb->addr);
bb->conditional = 1;
bb->fail = op->fail;
bb->jump = op->jump;
bb->type |= R_ANAL_BB_TYPE_BODY;
goto beach;
case R_ANAL_OP_TYPE_JMP:
bb->jump = op->jump;
bb->type |= R_ANAL_BB_TYPE_BODY;
goto beach;
case R_ANAL_OP_TYPE_UJMP:
case R_ANAL_OP_TYPE_IJMP:
case R_ANAL_OP_TYPE_RJMP:
case R_ANAL_OP_TYPE_IRJMP:
bb->type |= R_ANAL_BB_TYPE_FOOT;
goto beach;
case R_ANAL_OP_TYPE_RET:
bb->type |= R_ANAL_BB_TYPE_LAST;
goto beach;
case R_ANAL_OP_TYPE_LEA:
{
RAnalValue *src = op->src[0];
if (src && src->reg && anal->reg) {
const char *pc = anal->reg->name[R_REG_NAME_PC];
RAnalValue *dst = op->dst;
if (dst && dst->reg && !strcmp (src->reg->name, pc)) {
int memref = anal->bits/8;
ut8 b[8];
ut64 ptr = idx+addr+src->delta;
anal->iob.read_at (anal->iob.io, ptr, b, memref);
r_anal_xrefs_set (anal, addr+idx-op->size, ptr, R_ANAL_REF_TYPE_DATA);
}
}
}
}
r_anal_op_free (op);
}
return bb->size;
beach:
r_anal_op_free (op);
return R_ANAL_RET_END;
}
R_API inline int r_anal_bb_is_in_offset (RAnalBlock *bb, ut64 off) {
return (off >= bb->addr && off < bb->addr + bb->size);
}
R_API RAnalBlock *r_anal_bb_from_offset(RAnal *anal, ut64 off) {
RListIter *iter, *iter2;
RAnalFunction *fcn;
RAnalBlock *bb;
r_list_foreach (anal->fcns, iter, fcn) {
r_list_foreach (fcn->bbs, iter2, bb) {
if (r_anal_bb_is_in_offset (bb, off)) {
return bb;
}
}
}
return NULL;
}
R_API RAnalBlock *r_anal_bb_get_jumpbb(RAnalFunction *fcn, RAnalBlock *bb) {
if (bb->jump == UT64_MAX) {
return NULL;
}
if (bb->jumpbb) {
return bb->jumpbb;
}
RListIter *iter;
RAnalBlock *b;
r_list_foreach (fcn->bbs, iter, b) {
if (b->addr == bb->jump) {
bb->jumpbb = b;
b->prev = bb;
return b;
}
}
return NULL;
}
R_API RAnalBlock *r_anal_bb_get_failbb(RAnalFunction *fcn, RAnalBlock *bb) {
RListIter *iter;
RAnalBlock *b;
if (bb->fail == UT64_MAX) {
return NULL;
}
if (bb->failbb) {
return bb->failbb;
}
r_list_foreach (fcn->bbs, iter, b) {
if (b->addr == bb->fail) {
bb->failbb = b;
b->prev = bb;
return b;
}
}
return NULL;
}
/* return the offset of the i-th instruction in the basic block bb.
 * If the index of the instruction is not valid, it returns UT16_MAX */
R_API ut16 r_anal_bb_offset_inst(RAnalBlock *bb, int i) {
if (i < 0 || i >= bb->ninstr) {
return UT16_MAX;
}
return (i > 0 && (i - 1) < bb->op_pos_size) ? bb->op_pos[i - 1] : 0;
}
/* set the offset of the i-th instruction in the basicblock bb */
R_API bool r_anal_bb_set_offset(RAnalBlock *bb, int i, ut16 v) {
// the offset of instruction 0 is not stored because it is always 0
if (i > 0 && v > 0) {
if (i >= bb->op_pos_size) {
int new_pos_size = i * 2;
ut16 *tmp_op_pos = realloc (bb->op_pos, new_pos_size * sizeof (*bb->op_pos));
if (!tmp_op_pos) {
return false;
}
bb->op_pos_size = new_pos_size;
bb->op_pos = tmp_op_pos;
}
bb->op_pos[i - 1] = v;
return true;
}
return true;
}
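/* Illustrative usage sketch (hypothetical, not from the original code):
 * offsets are stored off-by-one, so instruction 0 needs no slot and the
 * op_pos array grows on demand. */
#if 0
RAnalBlock *bb = r_anal_bb_new ();
r_anal_bb_set_offset (bb, 1, 4); // 2nd instruction starts at +4
bb->ninstr = 2;
ut16 off = r_anal_bb_offset_inst (bb, 1); // returns 4
r_anal_bb_free (bb);
#endif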
/* return the address of the instruction that occupies a given offset.
 * If the offset is not part of the given basic block, UT64_MAX is returned. */
R_API ut64 r_anal_bb_opaddr_at(RAnalBlock *bb, ut64 off) {
ut16 delta, delta_off, last_delta;
int i;
if (!r_anal_bb_is_in_offset (bb, off)) {
return UT64_MAX;
}
last_delta = 0;
delta_off = off - bb->addr;
for (i = 0; i < bb->ninstr; i++) {
delta = r_anal_bb_offset_inst (bb, i);
if (delta > delta_off) {
return bb->addr + last_delta;
}
last_delta = delta;
}
return UT64_MAX;
}
| ./CrossVul/dataset_final_sorted/CWE-416/c/good_180_0 |
crossvul-cpp_data_bad_3045_0 | /******************************************************************************
* emulate.c
*
* Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
*
* Copyright (c) 2005 Keir Fraser
*
* Linux coding style, mod r/m decoder, segment base fixes, real-mode
* privileged instructions:
*
* Copyright (C) 2006 Qumranet
* Copyright 2010 Red Hat, Inc. and/or its affiliates.
*
* Avi Kivity <avi@qumranet.com>
* Yaniv Kamay <yaniv@qumranet.com>
*
* This work is licensed under the terms of the GNU GPL, version 2. See
* the COPYING file in the top-level directory.
*
* From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
*/
#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include <asm/kvm_emulate.h>
#include <linux/stringify.h>
#include <asm/debugreg.h>
#include "x86.h"
#include "tss.h"
/*
* Operand types
*/
#define OpNone 0ull
#define OpImplicit 1ull /* No generic decode */
#define OpReg 2ull /* Register */
#define OpMem 3ull /* Memory */
#define OpAcc 4ull /* Accumulator: AL/AX/EAX/RAX */
#define OpDI 5ull /* ES:DI/EDI/RDI */
#define OpMem64 6ull /* Memory, 64-bit */
#define OpImmUByte 7ull /* Zero-extended 8-bit immediate */
#define OpDX 8ull /* DX register */
#define OpCL 9ull /* CL register (for shifts) */
#define OpImmByte 10ull /* 8-bit sign extended immediate */
#define OpOne 11ull /* Implied 1 */
#define OpImm 12ull /* Sign extended up to 32-bit immediate */
#define OpMem16 13ull /* Memory operand (16-bit). */
#define OpMem32 14ull /* Memory operand (32-bit). */
#define OpImmU 15ull /* Immediate operand, zero extended */
#define OpSI 16ull /* SI/ESI/RSI */
#define OpImmFAddr 17ull /* Immediate far address */
#define OpMemFAddr 18ull /* Far address in memory */
#define OpImmU16 19ull /* Immediate operand, 16 bits, zero extended */
#define OpES 20ull /* ES */
#define OpCS 21ull /* CS */
#define OpSS 22ull /* SS */
#define OpDS 23ull /* DS */
#define OpFS 24ull /* FS */
#define OpGS 25ull /* GS */
#define OpMem8 26ull /* 8-bit zero extended memory operand */
#define OpImm64 27ull /* Sign extended 16/32/64-bit immediate */
#define OpXLat 28ull /* memory at BX/EBX/RBX + zero-extended AL */
#define OpAccLo 29ull /* Low part of extended acc (AX/AX/EAX/RAX) */
#define OpAccHi 30ull /* High part of extended acc (-/DX/EDX/RDX) */
#define OpBits 5 /* Width of operand field */
#define OpMask ((1ull << OpBits) - 1)
/*
* Opcode effective-address decode tables.
* Note that we only emulate instructions that have at least one memory
* operand (excluding implicit stack references). We assume that stack
* references and instruction fetches will never occur in special memory
* areas that require emulation. So, for example, 'mov <imm>,<reg>' need
* not be handled.
*/
/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp (1<<0) /* 8-bit operands. */
/* Destination operand type. */
#define DstShift 1
#define ImplicitOps (OpImplicit << DstShift)
#define DstReg (OpReg << DstShift)
#define DstMem (OpMem << DstShift)
#define DstAcc (OpAcc << DstShift)
#define DstDI (OpDI << DstShift)
#define DstMem64 (OpMem64 << DstShift)
#define DstMem16 (OpMem16 << DstShift)
#define DstImmUByte (OpImmUByte << DstShift)
#define DstDX (OpDX << DstShift)
#define DstAccLo (OpAccLo << DstShift)
#define DstMask (OpMask << DstShift)
/* Source operand type. */
#define SrcShift 6
#define SrcNone (OpNone << SrcShift)
#define SrcReg (OpReg << SrcShift)
#define SrcMem (OpMem << SrcShift)
#define SrcMem16 (OpMem16 << SrcShift)
#define SrcMem32 (OpMem32 << SrcShift)
#define SrcImm (OpImm << SrcShift)
#define SrcImmByte (OpImmByte << SrcShift)
#define SrcOne (OpOne << SrcShift)
#define SrcImmUByte (OpImmUByte << SrcShift)
#define SrcImmU (OpImmU << SrcShift)
#define SrcSI (OpSI << SrcShift)
#define SrcXLat (OpXLat << SrcShift)
#define SrcImmFAddr (OpImmFAddr << SrcShift)
#define SrcMemFAddr (OpMemFAddr << SrcShift)
#define SrcAcc (OpAcc << SrcShift)
#define SrcImmU16 (OpImmU16 << SrcShift)
#define SrcImm64 (OpImm64 << SrcShift)
#define SrcDX (OpDX << SrcShift)
#define SrcMem8 (OpMem8 << SrcShift)
#define SrcAccHi (OpAccHi << SrcShift)
#define SrcMask (OpMask << SrcShift)
#define BitOp (1<<11)
#define MemAbs (1<<12) /* Memory operand is absolute displacement */
#define String (1<<13) /* String instruction (rep capable) */
#define Stack (1<<14) /* Stack instruction (push/pop) */
#define GroupMask (7<<15) /* Opcode uses one of the group mechanisms */
#define Group (1<<15) /* Bits 3:5 of modrm byte extend opcode */
#define GroupDual (2<<15) /* Alternate decoding of mod == 3 */
#define Prefix (3<<15) /* Instruction varies with 66/f2/f3 prefix */
#define RMExt (4<<15) /* Opcode extension in ModRM r/m if mod == 3 */
#define Escape (5<<15) /* Escape to coprocessor instruction */
#define InstrDual (6<<15) /* Alternate instruction decoding of mod == 3 */
#define ModeDual (7<<15) /* Different instruction for 32/64 bit */
#define Sse (1<<18) /* SSE Vector instruction */
/* Generic ModRM decode. */
#define ModRM (1<<19)
/* Destination is only written; never read. */
#define Mov (1<<20)
/* Misc flags */
#define Prot (1<<21) /* instruction generates #UD if not in prot-mode */
#define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */
#define NoAccess (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
#define Op3264 (1<<24) /* Operand is 64b in long mode, 32b otherwise */
#define Undefined (1<<25) /* No Such Instruction */
#define Lock (1<<26) /* lock prefix is allowed for the instruction */
#define Priv (1<<27) /* instruction generates #GP if current CPL != 0 */
#define No64 (1<<28)
#define PageTable (1 << 29) /* instruction used to write page table */
#define NotImpl (1 << 30) /* instruction is not implemented */
/* Source 2 operand type */
#define Src2Shift (31)
#define Src2None (OpNone << Src2Shift)
#define Src2Mem (OpMem << Src2Shift)
#define Src2CL (OpCL << Src2Shift)
#define Src2ImmByte (OpImmByte << Src2Shift)
#define Src2One (OpOne << Src2Shift)
#define Src2Imm (OpImm << Src2Shift)
#define Src2ES (OpES << Src2Shift)
#define Src2CS (OpCS << Src2Shift)
#define Src2SS (OpSS << Src2Shift)
#define Src2DS (OpDS << Src2Shift)
#define Src2FS (OpFS << Src2Shift)
#define Src2GS (OpGS << Src2Shift)
#define Src2Mask (OpMask << Src2Shift)
#define Mmx ((u64)1 << 40) /* MMX Vector instruction */
#define AlignMask ((u64)7 << 41)
#define Aligned ((u64)1 << 41) /* Explicitly aligned (e.g. MOVDQA) */
#define Unaligned ((u64)2 << 41) /* Explicitly unaligned (e.g. MOVDQU) */
#define Avx ((u64)3 << 41) /* Advanced Vector Extensions */
#define Aligned16 ((u64)4 << 41) /* Aligned to 16 byte boundary (e.g. FXSAVE) */
#define Fastop ((u64)1 << 44) /* Use opcode::u.fastop */
#define NoWrite ((u64)1 << 45) /* No writeback */
#define SrcWrite ((u64)1 << 46) /* Write back src operand */
#define NoMod ((u64)1 << 47) /* Mod field is ignored */
#define Intercept ((u64)1 << 48) /* Has valid intercept field */
#define CheckPerm ((u64)1 << 49) /* Has valid check_perm field */
#define PrivUD ((u64)1 << 51) /* #UD instead of #GP on CPL > 0 */
#define NearBranch ((u64)1 << 52) /* Near branches */
#define No16 ((u64)1 << 53) /* No 16 bit operand */
#define IncSP ((u64)1 << 54) /* SP is incremented before ModRM calc */
#define DstXacc (DstAccLo | SrcAccHi | SrcWrite)
#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)
#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
#define FASTOP_SIZE 8
/*
* fastop functions have a special calling convention:
*
* dst: rax (in/out)
* src: rdx (in/out)
* src2: rcx (in)
* flags: rflags (in/out)
* ex: rsi (in:fastop pointer, out:zero if exception)
*
* Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
* different operand sizes can be reached by calculation, rather than a jump
* table (which would be bigger than the code).
*
* fastop functions are declared as taking a never-defined fastop parameter,
* so they can't be called from C directly.
*/
struct fastop;
struct opcode {
u64 flags : 56;
u64 intercept : 8;
union {
int (*execute)(struct x86_emulate_ctxt *ctxt);
const struct opcode *group;
const struct group_dual *gdual;
const struct gprefix *gprefix;
const struct escape *esc;
const struct instr_dual *idual;
const struct mode_dual *mdual;
void (*fastop)(struct fastop *fake);
} u;
int (*check_perm)(struct x86_emulate_ctxt *ctxt);
};
struct group_dual {
struct opcode mod012[8];
struct opcode mod3[8];
};
struct gprefix {
struct opcode pfx_no;
struct opcode pfx_66;
struct opcode pfx_f2;
struct opcode pfx_f3;
};
struct escape {
struct opcode op[8];
struct opcode high[64];
};
struct instr_dual {
struct opcode mod012;
struct opcode mod3;
};
struct mode_dual {
struct opcode mode32;
struct opcode mode64;
};
#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
enum x86_transfer_type {
X86_TRANSFER_NONE,
X86_TRANSFER_CALL_JMP,
X86_TRANSFER_RET,
X86_TRANSFER_TASK_SWITCH,
};
static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
if (!(ctxt->regs_valid & (1 << nr))) {
ctxt->regs_valid |= 1 << nr;
ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
}
return ctxt->_regs[nr];
}
static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
ctxt->regs_valid |= 1 << nr;
ctxt->regs_dirty |= 1 << nr;
return &ctxt->_regs[nr];
}
static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
reg_read(ctxt, nr);
return reg_write(ctxt, nr);
}
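/*
 * Illustrative note (not from the original code): reg_read, reg_write
 * and reg_rmw above implement a lazy GPR cache.  regs_valid marks
 * registers already copied from the vcpu, regs_dirty marks ones to
 * flush back in writeback_registers().  A sketch of the pattern:
 */
#if 0
ulong *rax = reg_rmw(ctxt, VCPU_REGS_RAX); /* fetch once, mark dirty */
*rax += 1;                 /* operate on the cached copy */
writeback_registers(ctxt); /* flush dirty registers to the vcpu */
#endif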
static void writeback_registers(struct x86_emulate_ctxt *ctxt)
{
unsigned reg;
for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
}
static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
{
ctxt->regs_dirty = 0;
ctxt->regs_valid = 0;
}
/*
* These EFLAGS bits are restored from saved value during emulation, and
* any changes are written back to the saved value after emulation.
*/
#define EFLAGS_MASK (X86_EFLAGS_OF|X86_EFLAGS_SF|X86_EFLAGS_ZF|X86_EFLAGS_AF|\
X86_EFLAGS_PF|X86_EFLAGS_CF)
#ifdef CONFIG_X86_64
#define ON64(x) x
#else
#define ON64(x)
#endif
static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));
#define FOP_FUNC(name) \
".align " __stringify(FASTOP_SIZE) " \n\t" \
".type " name ", @function \n\t" \
name ":\n\t"
#define FOP_RET "ret \n\t"
#define FOP_START(op) \
extern void em_##op(struct fastop *fake); \
asm(".pushsection .text, \"ax\" \n\t" \
".global em_" #op " \n\t" \
FOP_FUNC("em_" #op)
#define FOP_END \
".popsection")
#define FOPNOP() \
FOP_FUNC(__stringify(__UNIQUE_ID(nop))) \
FOP_RET
#define FOP1E(op, dst) \
FOP_FUNC(#op "_" #dst) \
"10: " #op " %" #dst " \n\t" FOP_RET
#define FOP1EEX(op, dst) \
FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)
#define FASTOP1(op) \
FOP_START(op) \
FOP1E(op##b, al) \
FOP1E(op##w, ax) \
FOP1E(op##l, eax) \
ON64(FOP1E(op##q, rax)) \
FOP_END
/* 1-operand, using src2 (for MUL/DIV r/m) */
#define FASTOP1SRC2(op, name) \
FOP_START(name) \
FOP1E(op, cl) \
FOP1E(op, cx) \
FOP1E(op, ecx) \
ON64(FOP1E(op, rcx)) \
FOP_END
/* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
#define FASTOP1SRC2EX(op, name) \
FOP_START(name) \
FOP1EEX(op, cl) \
FOP1EEX(op, cx) \
FOP1EEX(op, ecx) \
ON64(FOP1EEX(op, rcx)) \
FOP_END
#define FOP2E(op, dst, src) \
FOP_FUNC(#op "_" #dst "_" #src) \
#op " %" #src ", %" #dst " \n\t" FOP_RET
#define FASTOP2(op) \
FOP_START(op) \
FOP2E(op##b, al, dl) \
FOP2E(op##w, ax, dx) \
FOP2E(op##l, eax, edx) \
ON64(FOP2E(op##q, rax, rdx)) \
FOP_END
/* 2 operand, word only */
#define FASTOP2W(op) \
FOP_START(op) \
FOPNOP() \
FOP2E(op##w, ax, dx) \
FOP2E(op##l, eax, edx) \
ON64(FOP2E(op##q, rax, rdx)) \
FOP_END
/* 2 operand, src is CL */
#define FASTOP2CL(op) \
FOP_START(op) \
FOP2E(op##b, al, cl) \
FOP2E(op##w, ax, cl) \
FOP2E(op##l, eax, cl) \
ON64(FOP2E(op##q, rax, cl)) \
FOP_END
/* 2 operand, src and dest are reversed */
#define FASTOP2R(op, name) \
FOP_START(name) \
FOP2E(op##b, dl, al) \
FOP2E(op##w, dx, ax) \
FOP2E(op##l, edx, eax) \
ON64(FOP2E(op##q, rdx, rax)) \
FOP_END
#define FOP3E(op, dst, src, src2) \
FOP_FUNC(#op "_" #dst "_" #src "_" #src2) \
#op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET
/* 3-operand, word-only, src2=cl */
#define FASTOP3WCL(op) \
FOP_START(op) \
FOPNOP() \
FOP3E(op##w, ax, dx, cl) \
FOP3E(op##l, eax, edx, cl) \
ON64(FOP3E(op##q, rax, rdx, cl)) \
FOP_END
/* Special case for SETcc - 1 instruction per cc */
#define FOP_SETCC(op) \
".align 4 \n\t" \
".type " #op ", @function \n\t" \
#op ": \n\t" \
#op " %al \n\t" \
FOP_RET
asm(".global kvm_fastop_exception \n"
"kvm_fastop_exception: xor %esi, %esi; ret");
FOP_START(setcc)
FOP_SETCC(seto)
FOP_SETCC(setno)
FOP_SETCC(setc)
FOP_SETCC(setnc)
FOP_SETCC(setz)
FOP_SETCC(setnz)
FOP_SETCC(setbe)
FOP_SETCC(setnbe)
FOP_SETCC(sets)
FOP_SETCC(setns)
FOP_SETCC(setp)
FOP_SETCC(setnp)
FOP_SETCC(setl)
FOP_SETCC(setnl)
FOP_SETCC(setle)
FOP_SETCC(setnle)
FOP_END;
FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
FOP_END;
/*
* XXX: inoutclob user must know where the argument is being expanded.
* Relying on CC_HAVE_ASM_GOTO would allow us to remove _fault.
*/
#define asm_safe(insn, inoutclob...) \
({ \
int _fault = 0; \
\
asm volatile("1:" insn "\n" \
"2:\n" \
".pushsection .fixup, \"ax\"\n" \
"3: movl $1, %[_fault]\n" \
" jmp 2b\n" \
".popsection\n" \
_ASM_EXTABLE(1b, 3b) \
: [_fault] "+qm"(_fault) inoutclob ); \
\
_fault ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE; \
})
static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
enum x86_intercept intercept,
enum x86_intercept_stage stage)
{
struct x86_instruction_info info = {
.intercept = intercept,
.rep_prefix = ctxt->rep_prefix,
.modrm_mod = ctxt->modrm_mod,
.modrm_reg = ctxt->modrm_reg,
.modrm_rm = ctxt->modrm_rm,
.src_val = ctxt->src.val64,
.dst_val = ctxt->dst.val64,
.src_bytes = ctxt->src.bytes,
.dst_bytes = ctxt->dst.bytes,
.ad_bytes = ctxt->ad_bytes,
.next_rip = ctxt->eip,
};
return ctxt->ops->intercept(ctxt, &info, stage);
}
static void assign_masked(ulong *dest, ulong src, ulong mask)
{
*dest = (*dest & ~mask) | (src & mask);
}
static void assign_register(unsigned long *reg, u64 val, int bytes)
{
/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
switch (bytes) {
case 1:
*(u8 *)reg = (u8)val;
break;
case 2:
*(u16 *)reg = (u16)val;
break;
case 4:
*reg = (u32)val;
break; /* 64b: zero-extend */
case 8:
*reg = val;
break;
}
}
static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
{
return (1UL << (ctxt->ad_bytes << 3)) - 1;
}
static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
{
u16 sel;
struct desc_struct ss;
if (ctxt->mode == X86EMUL_MODE_PROT64)
return ~0UL;
ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
return ~0U >> ((ss.d ^ 1) * 16); /* d=0: 0xffff; d=1: 0xffffffff */
}
static int stack_size(struct x86_emulate_ctxt *ctxt)
{
return (__fls(stack_mask(ctxt)) + 1) >> 3;
}
/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
if (ctxt->ad_bytes == sizeof(unsigned long))
return reg;
else
return reg & ad_mask(ctxt);
}
static inline unsigned long
register_address(struct x86_emulate_ctxt *ctxt, int reg)
{
return address_mask(ctxt, reg_read(ctxt, reg));
}
static void masked_increment(ulong *reg, ulong mask, int inc)
{
assign_masked(reg, *reg + inc, mask);
}
static inline void
register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
{
ulong *preg = reg_rmw(ctxt, reg);
assign_register(preg, *preg + inc, ctxt->ad_bytes);
}
static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
{
masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
}
static u32 desc_limit_scaled(struct desc_struct *desc)
{
u32 limit = get_desc_limit(desc);
return desc->g ? (limit << 12) | 0xfff : limit;
}
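/*
 * Illustrative worked example (not from the original code): with the
 * granularity bit set, the 20-bit raw limit counts 4 KiB pages, so
 * desc_limit_scaled() turns a raw limit of 0x00001 into
 * (0x1 << 12) | 0xfff = 0x1fff, i.e. an 8 KiB segment.
 */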
static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{
if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
return 0;
return ctxt->ops->get_cached_segment_base(ctxt, seg);
}
static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
u32 error, bool valid)
{
WARN_ON(vec > 0x1f);
ctxt->exception.vector = vec;
ctxt->exception.error_code = error;
ctxt->exception.error_code_valid = valid;
return X86EMUL_PROPAGATE_FAULT;
}
static int emulate_db(struct x86_emulate_ctxt *ctxt)
{
return emulate_exception(ctxt, DB_VECTOR, 0, false);
}
static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
return emulate_exception(ctxt, GP_VECTOR, err, true);
}
static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
{
return emulate_exception(ctxt, SS_VECTOR, err, true);
}
static int emulate_ud(struct x86_emulate_ctxt *ctxt)
{
return emulate_exception(ctxt, UD_VECTOR, 0, false);
}
static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
return emulate_exception(ctxt, TS_VECTOR, err, true);
}
static int emulate_de(struct x86_emulate_ctxt *ctxt)
{
return emulate_exception(ctxt, DE_VECTOR, 0, false);
}
static int emulate_nm(struct x86_emulate_ctxt *ctxt)
{
return emulate_exception(ctxt, NM_VECTOR, 0, false);
}
static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
{
u16 selector;
struct desc_struct desc;
ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
return selector;
}
static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
unsigned seg)
{
u16 dummy;
u32 base3;
struct desc_struct desc;
ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
}
/*
* x86 defines three classes of vector instructions: explicitly
* aligned, explicitly unaligned, and the rest, which change behaviour
* depending on whether they're AVX encoded or not.
*
* Also included is CMPXCHG16B which is not a vector instruction, yet it is
* subject to the same check. FXSAVE and FXRSTOR are checked here too as their
* 512 bytes of data must be aligned to a 16 byte boundary.
*/
static unsigned insn_alignment(struct x86_emulate_ctxt *ctxt, unsigned size)
{
u64 alignment = ctxt->d & AlignMask;
if (likely(size < 16))
return 1;
switch (alignment) {
case Unaligned:
case Avx:
return 1;
case Aligned16:
return 16;
case Aligned:
default:
return size;
}
}
static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
struct segmented_address addr,
unsigned *max_size, unsigned size,
bool write, bool fetch,
enum x86emul_mode mode, ulong *linear)
{
struct desc_struct desc;
bool usable;
ulong la;
u32 lim;
u16 sel;
la = seg_base(ctxt, addr.seg) + addr.ea;
*max_size = 0;
switch (mode) {
case X86EMUL_MODE_PROT64:
*linear = la;
if (is_noncanonical_address(la))
goto bad;
*max_size = min_t(u64, ~0u, (1ull << 48) - la);
if (size > *max_size)
goto bad;
break;
default:
*linear = la = (u32)la;
usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
addr.seg);
if (!usable)
goto bad;
/* code segment in protected mode or read-only data segment */
if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
|| !(desc.type & 2)) && write)
goto bad;
/* unreadable code segment */
if (!fetch && (desc.type & 8) && !(desc.type & 2))
goto bad;
lim = desc_limit_scaled(&desc);
if (!(desc.type & 8) && (desc.type & 4)) {
/* expand-down segment */
if (addr.ea <= lim)
goto bad;
lim = desc.d ? 0xffffffff : 0xffff;
}
if (addr.ea > lim)
goto bad;
if (lim == 0xffffffff)
*max_size = ~0u;
else {
*max_size = (u64)lim + 1 - addr.ea;
if (size > *max_size)
goto bad;
}
break;
}
if (la & (insn_alignment(ctxt, size) - 1))
return emulate_gp(ctxt, 0);
return X86EMUL_CONTINUE;
bad:
if (addr.seg == VCPU_SREG_SS)
return emulate_ss(ctxt, 0);
else
return emulate_gp(ctxt, 0);
}
static int linearize(struct x86_emulate_ctxt *ctxt,
struct segmented_address addr,
unsigned size, bool write,
ulong *linear)
{
unsigned max_size;
return __linearize(ctxt, addr, &max_size, size, write, false,
ctxt->mode, linear);
}
static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
enum x86emul_mode mode)
{
ulong linear;
int rc;
unsigned max_size;
struct segmented_address addr = { .seg = VCPU_SREG_CS,
.ea = dst };
if (ctxt->op_bytes != sizeof(unsigned long))
addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
if (rc == X86EMUL_CONTINUE)
ctxt->_eip = addr.ea;
return rc;
}
static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
{
return assign_eip(ctxt, dst, ctxt->mode);
}
static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
const struct desc_struct *cs_desc)
{
enum x86emul_mode mode = ctxt->mode;
int rc;
#ifdef CONFIG_X86_64
if (ctxt->mode >= X86EMUL_MODE_PROT16) {
if (cs_desc->l) {
u64 efer = 0;
ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
if (efer & EFER_LMA)
mode = X86EMUL_MODE_PROT64;
} else
mode = X86EMUL_MODE_PROT32; /* temporary value */
}
#endif
if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
rc = assign_eip(ctxt, dst, mode);
if (rc == X86EMUL_CONTINUE)
ctxt->mode = mode;
return rc;
}
static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
{
return assign_eip_near(ctxt, ctxt->_eip + rel);
}
static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
struct segmented_address addr,
void *data,
unsigned size)
{
int rc;
ulong linear;
rc = linearize(ctxt, addr, size, false, &linear);
if (rc != X86EMUL_CONTINUE)
return rc;
return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
}
/*
* Prefetch the remaining bytes of the instruction without crossing page
* boundary if they are not in fetch_cache yet.
*/
static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
{
int rc;
unsigned size, max_size;
unsigned long linear;
int cur_size = ctxt->fetch.end - ctxt->fetch.data;
struct segmented_address addr = { .seg = VCPU_SREG_CS,
.ea = ctxt->eip + cur_size };
/*
* We do not know exactly how many bytes will be needed, and
* __linearize is expensive, so fetch as much as possible. We
* just have to avoid going beyond the 15 byte limit, the end
* of the segment, or the end of the page.
*
* __linearize is called with size 0 so that it does not do any
* boundary check itself. Instead, we use max_size to check
* against op_size.
*/
rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
&linear);
if (unlikely(rc != X86EMUL_CONTINUE))
return rc;
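	/*
	 * cur_size is at most 15 (the fetch cache holds one 15-byte
	 * instruction), so "15UL ^ cur_size" is just a branch-free way
	 * of computing 15 - cur_size.
	 */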
size = min_t(unsigned, 15UL ^ cur_size, max_size);
size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));
/*
	 * One instruction can only straddle two pages, and the first
	 * page has already been loaded at the beginning of
	 * x86_decode_insn.  So, if we still do not have enough bytes,
	 * we must have hit the 15-byte limit.
*/
if (unlikely(size < op_size))
return emulate_gp(ctxt, 0);
rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
size, &ctxt->exception);
if (unlikely(rc != X86EMUL_CONTINUE))
return rc;
ctxt->fetch.end += size;
return X86EMUL_CONTINUE;
}
static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
unsigned size)
{
unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;
if (unlikely(done_size < size))
return __do_insn_fetch_bytes(ctxt, size - done_size);
else
return X86EMUL_CONTINUE;
}
/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _ctxt) \
({ _type _x; \
\
rc = do_insn_fetch_bytes(_ctxt, sizeof(_type)); \
if (rc != X86EMUL_CONTINUE) \
goto done; \
ctxt->_eip += sizeof(_type); \
_x = *(_type __aligned(1) *) ctxt->fetch.ptr; \
ctxt->fetch.ptr += sizeof(_type); \
_x; \
})
#define insn_fetch_arr(_arr, _size, _ctxt) \
({ \
rc = do_insn_fetch_bytes(_ctxt, _size); \
if (rc != X86EMUL_CONTINUE) \
goto done; \
ctxt->_eip += (_size); \
memcpy(_arr, ctxt->fetch.ptr, _size); \
ctxt->fetch.ptr += (_size); \
})
/*
* Given the 'reg' portion of a ModRM byte, and a register block, return a
* pointer into the block that addresses the relevant register.
 * When @byteop is set and no REX prefix is present, register numbers
 * 4-7 decode to AH, CH, DH and BH.
*/
static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
int byteop)
{
void *p;
int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;
if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
else
p = reg_rmw(ctxt, modrm_reg);
return p;
}
static int read_descriptor(struct x86_emulate_ctxt *ctxt,
struct segmented_address addr,
u16 *size, unsigned long *address, int op_bytes)
{
int rc;
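	/*
	 * Even with a 16-bit operand size, lgdt/lidt still load a
	 * 24-bit base, so read 3 bytes instead of 2.
	 */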
if (op_bytes == 2)
op_bytes = 3;
*address = 0;
rc = segmented_read_std(ctxt, addr, size, 2);
if (rc != X86EMUL_CONTINUE)
return rc;
addr.ea += 2;
rc = segmented_read_std(ctxt, addr, address, op_bytes);
return rc;
}
FASTOP2(add);
FASTOP2(or);
FASTOP2(adc);
FASTOP2(sbb);
FASTOP2(and);
FASTOP2(sub);
FASTOP2(xor);
FASTOP2(cmp);
FASTOP2(test);
FASTOP1SRC2(mul, mul_ex);
FASTOP1SRC2(imul, imul_ex);
FASTOP1SRC2EX(div, div_ex);
FASTOP1SRC2EX(idiv, idiv_ex);
FASTOP3WCL(shld);
FASTOP3WCL(shrd);
FASTOP2W(imul);
FASTOP1(not);
FASTOP1(neg);
FASTOP1(inc);
FASTOP1(dec);
FASTOP2CL(rol);
FASTOP2CL(ror);
FASTOP2CL(rcl);
FASTOP2CL(rcr);
FASTOP2CL(shl);
FASTOP2CL(shr);
FASTOP2CL(sar);
FASTOP2W(bsf);
FASTOP2W(bsr);
FASTOP2W(bt);
FASTOP2W(bts);
FASTOP2W(btr);
FASTOP2W(btc);
FASTOP2(xadd);
FASTOP2R(cmp, cmp_r);
static int em_bsf_c(struct x86_emulate_ctxt *ctxt)
{
	/* If src is zero, do not write back, but still update the flags */
if (ctxt->src.val == 0)
ctxt->dst.type = OP_NONE;
return fastop(ctxt, em_bsf);
}
static int em_bsr_c(struct x86_emulate_ctxt *ctxt)
{
	/* If src is zero, do not write back, but still update the flags */
if (ctxt->src.val == 0)
ctxt->dst.type = OP_NONE;
return fastop(ctxt, em_bsr);
}
static __always_inline u8 test_cc(unsigned int condition, unsigned long flags)
{
u8 rc;
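	/*
	 * Each setcc stub in the em_setcc fastop table is assumed to be
	 * 4 bytes long, so index the table by 4 * condition code.
	 */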
void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);
flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
asm("push %[flags]; popf; call *%[fastop]"
: "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags));
return rc;
}
static void fetch_register_operand(struct operand *op)
{
switch (op->bytes) {
case 1:
op->val = *(u8 *)op->addr.reg;
break;
case 2:
op->val = *(u16 *)op->addr.reg;
break;
case 4:
op->val = *(u32 *)op->addr.reg;
break;
case 8:
op->val = *(u64 *)op->addr.reg;
break;
}
}
static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
{
ctxt->ops->get_fpu(ctxt);
switch (reg) {
case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
#ifdef CONFIG_X86_64
case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
#endif
default: BUG();
}
ctxt->ops->put_fpu(ctxt);
}
static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
int reg)
{
ctxt->ops->get_fpu(ctxt);
switch (reg) {
case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
#ifdef CONFIG_X86_64
case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
#endif
default: BUG();
}
ctxt->ops->put_fpu(ctxt);
}
static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
ctxt->ops->get_fpu(ctxt);
switch (reg) {
case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
default: BUG();
}
ctxt->ops->put_fpu(ctxt);
}
static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
ctxt->ops->get_fpu(ctxt);
switch (reg) {
case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
default: BUG();
}
ctxt->ops->put_fpu(ctxt);
}
static int em_fninit(struct x86_emulate_ctxt *ctxt)
{
if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
return emulate_nm(ctxt);
ctxt->ops->get_fpu(ctxt);
asm volatile("fninit");
ctxt->ops->put_fpu(ctxt);
return X86EMUL_CONTINUE;
}
static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
{
u16 fcw;
if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
return emulate_nm(ctxt);
ctxt->ops->get_fpu(ctxt);
asm volatile("fnstcw %0": "+m"(fcw));
ctxt->ops->put_fpu(ctxt);
ctxt->dst.val = fcw;
return X86EMUL_CONTINUE;
}
static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
{
u16 fsw;
if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
return emulate_nm(ctxt);
ctxt->ops->get_fpu(ctxt);
asm volatile("fnstsw %0": "+m"(fsw));
ctxt->ops->put_fpu(ctxt);
ctxt->dst.val = fsw;
return X86EMUL_CONTINUE;
}
static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
struct operand *op)
{
unsigned reg = ctxt->modrm_reg;
if (!(ctxt->d & ModRM))
reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);
if (ctxt->d & Sse) {
op->type = OP_XMM;
op->bytes = 16;
op->addr.xmm = reg;
read_sse_reg(ctxt, &op->vec_val, reg);
return;
}
if (ctxt->d & Mmx) {
reg &= 7;
op->type = OP_MM;
op->bytes = 8;
op->addr.mm = reg;
return;
}
op->type = OP_REG;
op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);
fetch_register_operand(op);
op->orig_val = op->val;
}
static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
{
if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
ctxt->modrm_seg = VCPU_SREG_SS;
}
static int decode_modrm(struct x86_emulate_ctxt *ctxt,
struct operand *op)
{
u8 sib;
int index_reg, base_reg, scale;
int rc = X86EMUL_CONTINUE;
ulong modrm_ea = 0;
ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */
ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
ctxt->modrm_seg = VCPU_SREG_DS;
if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
op->type = OP_REG;
op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
ctxt->d & ByteOp);
if (ctxt->d & Sse) {
op->type = OP_XMM;
op->bytes = 16;
op->addr.xmm = ctxt->modrm_rm;
read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
return rc;
}
if (ctxt->d & Mmx) {
op->type = OP_MM;
op->bytes = 8;
op->addr.mm = ctxt->modrm_rm & 7;
return rc;
}
fetch_register_operand(op);
return rc;
}
op->type = OP_MEM;
if (ctxt->ad_bytes == 2) {
unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
unsigned di = reg_read(ctxt, VCPU_REGS_RDI);
/* 16-bit ModR/M decode. */
switch (ctxt->modrm_mod) {
case 0:
if (ctxt->modrm_rm == 6)
modrm_ea += insn_fetch(u16, ctxt);
break;
case 1:
modrm_ea += insn_fetch(s8, ctxt);
break;
case 2:
modrm_ea += insn_fetch(u16, ctxt);
break;
}
switch (ctxt->modrm_rm) {
case 0:
modrm_ea += bx + si;
break;
case 1:
modrm_ea += bx + di;
break;
case 2:
modrm_ea += bp + si;
break;
case 3:
modrm_ea += bp + di;
break;
case 4:
modrm_ea += si;
break;
case 5:
modrm_ea += di;
break;
case 6:
if (ctxt->modrm_mod != 0)
modrm_ea += bp;
break;
case 7:
modrm_ea += bx;
break;
}
if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
(ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
ctxt->modrm_seg = VCPU_SREG_SS;
modrm_ea = (u16)modrm_ea;
} else {
/* 32/64-bit ModR/M decode. */
if ((ctxt->modrm_rm & 7) == 4) {
sib = insn_fetch(u8, ctxt);
index_reg |= (sib >> 3) & 7;
base_reg |= sib & 7;
scale = sib >> 6;
if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
modrm_ea += insn_fetch(s32, ctxt);
else {
modrm_ea += reg_read(ctxt, base_reg);
adjust_modrm_seg(ctxt, base_reg);
/* Increment ESP on POP [ESP] */
if ((ctxt->d & IncSP) &&
base_reg == VCPU_REGS_RSP)
modrm_ea += ctxt->op_bytes;
}
if (index_reg != 4)
modrm_ea += reg_read(ctxt, index_reg) << scale;
} else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
modrm_ea += insn_fetch(s32, ctxt);
if (ctxt->mode == X86EMUL_MODE_PROT64)
ctxt->rip_relative = 1;
} else {
base_reg = ctxt->modrm_rm;
modrm_ea += reg_read(ctxt, base_reg);
adjust_modrm_seg(ctxt, base_reg);
}
switch (ctxt->modrm_mod) {
case 1:
modrm_ea += insn_fetch(s8, ctxt);
break;
case 2:
modrm_ea += insn_fetch(s32, ctxt);
break;
}
}
op->addr.mem.ea = modrm_ea;
if (ctxt->ad_bytes != 8)
ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;
done:
return rc;
}
static int decode_abs(struct x86_emulate_ctxt *ctxt,
struct operand *op)
{
int rc = X86EMUL_CONTINUE;
op->type = OP_MEM;
switch (ctxt->ad_bytes) {
case 2:
op->addr.mem.ea = insn_fetch(u16, ctxt);
break;
case 4:
op->addr.mem.ea = insn_fetch(u32, ctxt);
break;
case 8:
op->addr.mem.ea = insn_fetch(u64, ctxt);
break;
}
done:
return rc;
}
static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
{
long sv = 0, mask;
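	/*
	 * For bt/bts/btr/btc with a register bit offset and a memory
	 * destination, the CPU addresses the word that actually holds
	 * the bit: displace the effective address by the sign-extended
	 * offset rounded down to an operand-size multiple (converted to
	 * bytes), then keep only the in-word bit offset in src.
	 */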
if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
mask = ~((long)ctxt->dst.bytes * 8 - 1);
if (ctxt->src.bytes == 2)
sv = (s16)ctxt->src.val & (s16)mask;
else if (ctxt->src.bytes == 4)
sv = (s32)ctxt->src.val & (s32)mask;
else
sv = (s64)ctxt->src.val & (s64)mask;
ctxt->dst.addr.mem.ea = address_mask(ctxt,
ctxt->dst.addr.mem.ea + (sv >> 3));
}
/* only subword offset */
ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
}
static int read_emulated(struct x86_emulate_ctxt *ctxt,
unsigned long addr, void *dest, unsigned size)
{
int rc;
struct read_cache *mc = &ctxt->mem_read;
if (mc->pos < mc->end)
goto read_cached;
WARN_ON((mc->end + size) >= sizeof(mc->data));
rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
&ctxt->exception);
if (rc != X86EMUL_CONTINUE)
return rc;
mc->end += size;
read_cached:
memcpy(dest, mc->data + mc->pos, size);
mc->pos += size;
return X86EMUL_CONTINUE;
}
static int segmented_read(struct x86_emulate_ctxt *ctxt,
struct segmented_address addr,
void *data,
unsigned size)
{
int rc;
ulong linear;
rc = linearize(ctxt, addr, size, false, &linear);
if (rc != X86EMUL_CONTINUE)
return rc;
return read_emulated(ctxt, linear, data, size);
}
static int segmented_write(struct x86_emulate_ctxt *ctxt,
struct segmented_address addr,
const void *data,
unsigned size)
{
int rc;
ulong linear;
rc = linearize(ctxt, addr, size, true, &linear);
if (rc != X86EMUL_CONTINUE)
return rc;
return ctxt->ops->write_emulated(ctxt, linear, data, size,
&ctxt->exception);
}
static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
struct segmented_address addr,
const void *orig_data, const void *data,
unsigned size)
{
int rc;
ulong linear;
rc = linearize(ctxt, addr, size, true, &linear);
if (rc != X86EMUL_CONTINUE)
return rc;
return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
size, &ctxt->exception);
}
static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
unsigned int size, unsigned short port,
void *dest)
{
struct read_cache *rc = &ctxt->io_read;
if (rc->pos == rc->end) { /* refill pio read ahead */
unsigned int in_page, n;
unsigned int count = ctxt->rep_prefix ?
address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
in_page = (ctxt->eflags & X86_EFLAGS_DF) ?
offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
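		/*
		 * Read ahead as many elements as fit in the cache and in
		 * the rep count, conservatively bounded by the bytes left
		 * in the destination page (downwards when DF is set).
		 */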
n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
if (n == 0)
n = 1;
rc->pos = rc->end = 0;
if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
return 0;
rc->end = n * size;
}
if (ctxt->rep_prefix && (ctxt->d & String) &&
!(ctxt->eflags & X86_EFLAGS_DF)) {
ctxt->dst.data = rc->data + rc->pos;
ctxt->dst.type = OP_MEM_STR;
ctxt->dst.count = (rc->end - rc->pos) / size;
rc->pos = rc->end;
} else {
memcpy(dest, rc->data + rc->pos, size);
rc->pos += size;
}
return 1;
}
static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
u16 index, struct desc_struct *desc)
{
struct desc_ptr dt;
ulong addr;
ctxt->ops->get_idt(ctxt, &dt);
if (dt.size < index * 8 + 7)
return emulate_gp(ctxt, index << 3 | 0x2);
addr = dt.address + index * 8;
return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
&ctxt->exception);
}
static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
u16 selector, struct desc_ptr *dt)
{
const struct x86_emulate_ops *ops = ctxt->ops;
u32 base3 = 0;
if (selector & 1 << 2) {
struct desc_struct desc;
u16 sel;
		memset(dt, 0, sizeof *dt);
if (!ops->get_segment(ctxt, &sel, &desc, &base3,
VCPU_SREG_LDTR))
return;
dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
} else
ops->get_gdt(ctxt, dt);
}
static int get_descriptor_ptr(struct x86_emulate_ctxt *ctxt,
u16 selector, ulong *desc_addr_p)
{
struct desc_ptr dt;
u16 index = selector >> 3;
ulong addr;
get_descriptor_table_ptr(ctxt, selector, &dt);
if (dt.size < index * 8 + 7)
return emulate_gp(ctxt, selector & 0xfffc);
addr = dt.address + index * 8;
#ifdef CONFIG_X86_64
if (addr >> 32 != 0) {
u64 efer = 0;
ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
if (!(efer & EFER_LMA))
addr &= (u32)-1;
}
#endif
*desc_addr_p = addr;
return X86EMUL_CONTINUE;
}
/* Allowed only for 8-byte segment descriptors */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
u16 selector, struct desc_struct *desc,
ulong *desc_addr_p)
{
int rc;
rc = get_descriptor_ptr(ctxt, selector, desc_addr_p);
if (rc != X86EMUL_CONTINUE)
return rc;
return ctxt->ops->read_std(ctxt, *desc_addr_p, desc, sizeof(*desc),
&ctxt->exception);
}
/* Allowed only for 8-byte segment descriptors */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
u16 selector, struct desc_struct *desc)
{
int rc;
ulong addr;
rc = get_descriptor_ptr(ctxt, selector, &addr);
if (rc != X86EMUL_CONTINUE)
return rc;
return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
&ctxt->exception);
}
/* Does not support long mode */
static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
u16 selector, int seg, u8 cpl,
enum x86_transfer_type transfer,
struct desc_struct *desc)
{
struct desc_struct seg_desc, old_desc;
u8 dpl, rpl;
unsigned err_vec = GP_VECTOR;
u32 err_code = 0;
bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
ulong desc_addr;
int ret;
u16 dummy;
u32 base3 = 0;
memset(&seg_desc, 0, sizeof seg_desc);
if (ctxt->mode == X86EMUL_MODE_REAL) {
/* set real mode segment descriptor (keep limit etc. for
* unreal mode) */
ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
set_desc_base(&seg_desc, selector << 4);
goto load;
} else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
/* VM86 needs a clean new segment descriptor */
set_desc_base(&seg_desc, selector << 4);
set_desc_limit(&seg_desc, 0xffff);
seg_desc.type = 3;
seg_desc.p = 1;
seg_desc.s = 1;
seg_desc.dpl = 3;
goto load;
}
rpl = selector & 3;
/* NULL selector is not valid for TR, CS and SS (except for long mode) */
if ((seg == VCPU_SREG_CS
|| (seg == VCPU_SREG_SS
&& (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl))
|| seg == VCPU_SREG_TR)
&& null_selector)
goto exception;
/* TR should be in GDT only */
if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
goto exception;
if (null_selector) /* for NULL selector skip all following checks */
goto load;
ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
if (ret != X86EMUL_CONTINUE)
return ret;
err_code = selector & 0xfffc;
err_vec = (transfer == X86_TRANSFER_TASK_SWITCH) ? TS_VECTOR :
GP_VECTOR;
/* can't load system descriptor into segment selector */
if (seg <= VCPU_SREG_GS && !seg_desc.s) {
if (transfer == X86_TRANSFER_CALL_JMP)
return X86EMUL_UNHANDLEABLE;
goto exception;
}
if (!seg_desc.p) {
err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
goto exception;
}
dpl = seg_desc.dpl;
switch (seg) {
case VCPU_SREG_SS:
/*
		 * segment is not a writable data segment, or the segment
		 * selector's RPL != CPL, or the segment's DPL != CPL
*/
if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
goto exception;
break;
case VCPU_SREG_CS:
if (!(seg_desc.type & 8))
goto exception;
if (seg_desc.type & 4) {
/* conforming */
if (dpl > cpl)
goto exception;
} else {
/* nonconforming */
if (rpl > cpl || dpl != cpl)
goto exception;
}
/* in long-mode d/b must be clear if l is set */
if (seg_desc.d && seg_desc.l) {
u64 efer = 0;
ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
if (efer & EFER_LMA)
goto exception;
}
/* CS(RPL) <- CPL */
selector = (selector & 0xfffc) | cpl;
break;
case VCPU_SREG_TR:
if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
goto exception;
old_desc = seg_desc;
seg_desc.type |= 2; /* busy */
ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
sizeof(seg_desc), &ctxt->exception);
if (ret != X86EMUL_CONTINUE)
return ret;
break;
case VCPU_SREG_LDTR:
if (seg_desc.s || seg_desc.type != 2)
goto exception;
break;
default: /* DS, ES, FS, or GS */
/*
* segment is not a data or readable code segment or
* ((segment is a data or nonconforming code segment)
* and (both RPL and CPL > DPL))
*/
if ((seg_desc.type & 0xa) == 0x8 ||
(((seg_desc.type & 0xc) != 0xc) &&
(rpl > dpl && cpl > dpl)))
goto exception;
break;
}
if (seg_desc.s) {
/* mark segment as accessed */
if (!(seg_desc.type & 1)) {
seg_desc.type |= 1;
ret = write_segment_descriptor(ctxt, selector,
&seg_desc);
if (ret != X86EMUL_CONTINUE)
return ret;
}
} else if (ctxt->mode == X86EMUL_MODE_PROT64) {
ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3,
sizeof(base3), &ctxt->exception);
if (ret != X86EMUL_CONTINUE)
return ret;
if (is_noncanonical_address(get_desc_base(&seg_desc) |
((u64)base3 << 32)))
return emulate_gp(ctxt, 0);
}
load:
ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
if (desc)
*desc = seg_desc;
return X86EMUL_CONTINUE;
exception:
return emulate_exception(ctxt, err_vec, err_code, true);
}
static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
u16 selector, int seg)
{
u8 cpl = ctxt->ops->cpl(ctxt);
return __load_segment_descriptor(ctxt, selector, seg, cpl,
X86_TRANSFER_NONE, NULL);
}
static void write_register_operand(struct operand *op)
{
return assign_register(op->addr.reg, op->val, op->bytes);
}
static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
{
switch (op->type) {
case OP_REG:
write_register_operand(op);
break;
case OP_MEM:
if (ctxt->lock_prefix)
return segmented_cmpxchg(ctxt,
op->addr.mem,
&op->orig_val,
&op->val,
op->bytes);
else
return segmented_write(ctxt,
op->addr.mem,
&op->val,
op->bytes);
break;
case OP_MEM_STR:
return segmented_write(ctxt,
op->addr.mem,
op->data,
op->bytes * op->count);
break;
case OP_XMM:
write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
break;
case OP_MM:
write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
break;
case OP_NONE:
/* no writeback */
break;
default:
break;
}
return X86EMUL_CONTINUE;
}
static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
{
struct segmented_address addr;
rsp_increment(ctxt, -bytes);
addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
addr.seg = VCPU_SREG_SS;
return segmented_write(ctxt, addr, data, bytes);
}
static int em_push(struct x86_emulate_ctxt *ctxt)
{
/* Disable writeback. */
ctxt->dst.type = OP_NONE;
return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
}
static int emulate_pop(struct x86_emulate_ctxt *ctxt,
void *dest, int len)
{
int rc;
struct segmented_address addr;
addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
addr.seg = VCPU_SREG_SS;
rc = segmented_read(ctxt, addr, dest, len);
if (rc != X86EMUL_CONTINUE)
return rc;
rsp_increment(ctxt, len);
return rc;
}
static int em_pop(struct x86_emulate_ctxt *ctxt)
{
return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}
static int emulate_popf(struct x86_emulate_ctxt *ctxt,
void *dest, int len)
{
int rc;
unsigned long val, change_mask;
int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
int cpl = ctxt->ops->cpl(ctxt);
rc = emulate_pop(ctxt, &val, len);
if (rc != X86EMUL_CONTINUE)
return rc;
change_mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF |
X86_EFLAGS_TF | X86_EFLAGS_DF | X86_EFLAGS_NT |
X86_EFLAGS_AC | X86_EFLAGS_ID;
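	/*
	 * In protected mode IOPL is writable only at CPL 0, and IF only
	 * when CPL <= IOPL; VM86 with IOPL < 3 faults instead, and real
	 * mode may change both freely.
	 */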
	switch (ctxt->mode) {
case X86EMUL_MODE_PROT64:
case X86EMUL_MODE_PROT32:
case X86EMUL_MODE_PROT16:
if (cpl == 0)
change_mask |= X86_EFLAGS_IOPL;
if (cpl <= iopl)
change_mask |= X86_EFLAGS_IF;
break;
case X86EMUL_MODE_VM86:
if (iopl < 3)
return emulate_gp(ctxt, 0);
change_mask |= X86_EFLAGS_IF;
break;
default: /* real mode */
change_mask |= (X86_EFLAGS_IOPL | X86_EFLAGS_IF);
break;
}
*(unsigned long *)dest =
(ctxt->eflags & ~change_mask) | (val & change_mask);
return rc;
}
static int em_popf(struct x86_emulate_ctxt *ctxt)
{
ctxt->dst.type = OP_REG;
ctxt->dst.addr.reg = &ctxt->eflags;
ctxt->dst.bytes = ctxt->op_bytes;
return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}
static int em_enter(struct x86_emulate_ctxt *ctxt)
{
int rc;
unsigned frame_size = ctxt->src.val;
unsigned nesting_level = ctxt->src2.val & 31;
ulong rbp;
if (nesting_level)
return X86EMUL_UNHANDLEABLE;
rbp = reg_read(ctxt, VCPU_REGS_RBP);
rc = push(ctxt, &rbp, stack_size(ctxt));
if (rc != X86EMUL_CONTINUE)
return rc;
assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
stack_mask(ctxt));
assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
stack_mask(ctxt));
return X86EMUL_CONTINUE;
}
static int em_leave(struct x86_emulate_ctxt *ctxt)
{
assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
stack_mask(ctxt));
return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
}
static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
{
int seg = ctxt->src2.val;
ctxt->src.val = get_segment_selector(ctxt, seg);
if (ctxt->op_bytes == 4) {
rsp_increment(ctxt, -2);
ctxt->op_bytes = 2;
}
return em_push(ctxt);
}
static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
{
int seg = ctxt->src2.val;
unsigned long selector;
int rc;
rc = emulate_pop(ctxt, &selector, 2);
if (rc != X86EMUL_CONTINUE)
return rc;
if (ctxt->modrm_reg == VCPU_SREG_SS)
ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
if (ctxt->op_bytes > 2)
rsp_increment(ctxt, ctxt->op_bytes - 2);
rc = load_segment_descriptor(ctxt, (u16)selector, seg);
return rc;
}
static int em_pusha(struct x86_emulate_ctxt *ctxt)
{
unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
int rc = X86EMUL_CONTINUE;
int reg = VCPU_REGS_RAX;
while (reg <= VCPU_REGS_RDI) {
		if (reg == VCPU_REGS_RSP)
			ctxt->src.val = old_esp;
		else
			ctxt->src.val = reg_read(ctxt, reg);
rc = em_push(ctxt);
if (rc != X86EMUL_CONTINUE)
return rc;
++reg;
}
return rc;
}
static int em_pushf(struct x86_emulate_ctxt *ctxt)
{
ctxt->src.val = (unsigned long)ctxt->eflags & ~X86_EFLAGS_VM;
return em_push(ctxt);
}
static int em_popa(struct x86_emulate_ctxt *ctxt)
{
int rc = X86EMUL_CONTINUE;
int reg = VCPU_REGS_RDI;
u32 val;
while (reg >= VCPU_REGS_RAX) {
if (reg == VCPU_REGS_RSP) {
rsp_increment(ctxt, ctxt->op_bytes);
--reg;
}
rc = emulate_pop(ctxt, &val, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
break;
assign_register(reg_rmw(ctxt, reg), val, ctxt->op_bytes);
--reg;
}
return rc;
}
static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
const struct x86_emulate_ops *ops = ctxt->ops;
int rc;
struct desc_ptr dt;
gva_t cs_addr;
gva_t eip_addr;
u16 cs, eip;
/* TODO: Add limit checks */
ctxt->src.val = ctxt->eflags;
rc = em_push(ctxt);
if (rc != X86EMUL_CONTINUE)
return rc;
ctxt->eflags &= ~(X86_EFLAGS_IF | X86_EFLAGS_TF | X86_EFLAGS_AC);
ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
rc = em_push(ctxt);
if (rc != X86EMUL_CONTINUE)
return rc;
ctxt->src.val = ctxt->_eip;
rc = em_push(ctxt);
if (rc != X86EMUL_CONTINUE)
return rc;
ops->get_idt(ctxt, &dt);
eip_addr = dt.address + (irq << 2);
cs_addr = dt.address + (irq << 2) + 2;
rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
if (rc != X86EMUL_CONTINUE)
return rc;
rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
if (rc != X86EMUL_CONTINUE)
return rc;
rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
if (rc != X86EMUL_CONTINUE)
return rc;
ctxt->_eip = eip;
return rc;
}
int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
int rc;
invalidate_registers(ctxt);
rc = __emulate_int_real(ctxt, irq);
if (rc == X86EMUL_CONTINUE)
writeback_registers(ctxt);
return rc;
}
static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
{
	switch (ctxt->mode) {
case X86EMUL_MODE_REAL:
return __emulate_int_real(ctxt, irq);
case X86EMUL_MODE_VM86:
case X86EMUL_MODE_PROT16:
case X86EMUL_MODE_PROT32:
case X86EMUL_MODE_PROT64:
default:
		/* Protected-mode interrupts are not implemented yet */
return X86EMUL_UNHANDLEABLE;
}
}
static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
{
int rc = X86EMUL_CONTINUE;
unsigned long temp_eip = 0;
unsigned long temp_eflags = 0;
unsigned long cs = 0;
unsigned long mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_TF |
X86_EFLAGS_IF | X86_EFLAGS_DF | X86_EFLAGS_OF |
X86_EFLAGS_IOPL | X86_EFLAGS_NT | X86_EFLAGS_RF |
X86_EFLAGS_AC | X86_EFLAGS_ID |
X86_EFLAGS_FIXED;
unsigned long vm86_mask = X86_EFLAGS_VM | X86_EFLAGS_VIF |
X86_EFLAGS_VIP;
/* TODO: Add stack limit check */
rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
return rc;
if (temp_eip & ~0xffff)
return emulate_gp(ctxt, 0);
rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
return rc;
rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
return rc;
rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
if (rc != X86EMUL_CONTINUE)
return rc;
ctxt->_eip = temp_eip;
if (ctxt->op_bytes == 4)
ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
else if (ctxt->op_bytes == 2) {
ctxt->eflags &= ~0xffff;
ctxt->eflags |= temp_eflags;
}
ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
ctxt->eflags |= X86_EFLAGS_FIXED;
ctxt->ops->set_nmi_mask(ctxt, false);
return rc;
}
static int em_iret(struct x86_emulate_ctxt *ctxt)
{
	switch (ctxt->mode) {
case X86EMUL_MODE_REAL:
return emulate_iret_real(ctxt);
case X86EMUL_MODE_VM86:
case X86EMUL_MODE_PROT16:
case X86EMUL_MODE_PROT32:
case X86EMUL_MODE_PROT64:
default:
		/* iret from protected mode is not implemented yet */
return X86EMUL_UNHANDLEABLE;
}
}
static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
{
int rc;
unsigned short sel;
struct desc_struct new_desc;
u8 cpl = ctxt->ops->cpl(ctxt);
memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
X86_TRANSFER_CALL_JMP,
&new_desc);
if (rc != X86EMUL_CONTINUE)
return rc;
rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
/* Error handling is not implemented. */
if (rc != X86EMUL_CONTINUE)
return X86EMUL_UNHANDLEABLE;
return rc;
}
static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
{
return assign_eip_near(ctxt, ctxt->src.val);
}
static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
{
int rc;
long int old_eip;
old_eip = ctxt->_eip;
rc = assign_eip_near(ctxt, ctxt->src.val);
if (rc != X86EMUL_CONTINUE)
return rc;
ctxt->src.val = old_eip;
rc = em_push(ctxt);
return rc;
}
static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
{
u64 old = ctxt->dst.orig_val64;
if (ctxt->dst.bytes == 16)
return X86EMUL_UNHANDLEABLE;
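	/*
	 * cmpxchg8b compares EDX:EAX with the 64-bit destination: on a
	 * match ZF is set and ECX:EBX is stored, otherwise ZF is
	 * cleared and the destination is loaded into EDX:EAX.  (A
	 * 16-byte operand would be cmpxchg16b, which is not handled
	 * here.)
	 */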
if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
*reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
*reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
ctxt->eflags &= ~X86_EFLAGS_ZF;
} else {
ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
(u32) reg_read(ctxt, VCPU_REGS_RBX);
ctxt->eflags |= X86_EFLAGS_ZF;
}
return X86EMUL_CONTINUE;
}
static int em_ret(struct x86_emulate_ctxt *ctxt)
{
int rc;
unsigned long eip;
rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
return rc;
return assign_eip_near(ctxt, eip);
}
static int em_ret_far(struct x86_emulate_ctxt *ctxt)
{
int rc;
unsigned long eip, cs;
int cpl = ctxt->ops->cpl(ctxt);
struct desc_struct new_desc;
rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
return rc;
rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
return rc;
/* Outer-privilege level return is not implemented */
if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
return X86EMUL_UNHANDLEABLE;
rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl,
X86_TRANSFER_RET,
&new_desc);
if (rc != X86EMUL_CONTINUE)
return rc;
rc = assign_eip_far(ctxt, eip, &new_desc);
/* Error handling is not implemented. */
if (rc != X86EMUL_CONTINUE)
return X86EMUL_UNHANDLEABLE;
return rc;
}
static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
{
int rc;
rc = em_ret_far(ctxt);
if (rc != X86EMUL_CONTINUE)
return rc;
rsp_increment(ctxt, ctxt->src.val);
return X86EMUL_CONTINUE;
}
static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
{
/* Save real source value, then compare EAX against destination. */
ctxt->dst.orig_val = ctxt->dst.val;
ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
ctxt->src.orig_val = ctxt->src.val;
ctxt->src.val = ctxt->dst.orig_val;
fastop(ctxt, em_cmp);
if (ctxt->eflags & X86_EFLAGS_ZF) {
/* Success: write back to memory; no update of EAX */
ctxt->src.type = OP_NONE;
ctxt->dst.val = ctxt->src.orig_val;
} else {
/* Failure: write the value we saw to EAX. */
ctxt->src.type = OP_REG;
ctxt->src.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
ctxt->src.val = ctxt->dst.orig_val;
/* Create write-cycle to dest by writing the same value */
ctxt->dst.val = ctxt->dst.orig_val;
}
return X86EMUL_CONTINUE;
}
static int em_lseg(struct x86_emulate_ctxt *ctxt)
{
int seg = ctxt->src2.val;
unsigned short sel;
int rc;
memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
rc = load_segment_descriptor(ctxt, sel, seg);
if (rc != X86EMUL_CONTINUE)
return rc;
ctxt->dst.val = ctxt->src.val;
return rc;
}
static int emulator_has_longmode(struct x86_emulate_ctxt *ctxt)
{
u32 eax, ebx, ecx, edx;
eax = 0x80000001;
ecx = 0;
ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
return edx & bit(X86_FEATURE_LM);
}
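/*
 * Statement-expression helper: reads SMRAM at smbase + offset and, on
 * failure, returns X86EMUL_UNHANDLEABLE from the *calling* function.
 */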
#define GET_SMSTATE(type, smbase, offset) \
({ \
type __val; \
int r = ctxt->ops->read_phys(ctxt, smbase + offset, &__val, \
sizeof(__val)); \
if (r != X86EMUL_CONTINUE) \
return X86EMUL_UNHANDLEABLE; \
__val; \
})
static void rsm_set_desc_flags(struct desc_struct *desc, u32 flags)
{
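	/*
	 * The SMRAM segment-state "flags" word mirrors bits 8-23 of a
	 * descriptor's second dword: type in bits 8-11, S in 12, DPL in
	 * 13-14, P in 15, and AVL/L/D/G in 20-23.
	 */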
desc->g = (flags >> 23) & 1;
desc->d = (flags >> 22) & 1;
desc->l = (flags >> 21) & 1;
desc->avl = (flags >> 20) & 1;
desc->p = (flags >> 15) & 1;
desc->dpl = (flags >> 13) & 3;
desc->s = (flags >> 12) & 1;
desc->type = (flags >> 8) & 15;
}
static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
{
struct desc_struct desc;
int offset;
u16 selector;
selector = GET_SMSTATE(u32, smbase, 0x7fa8 + n * 4);
if (n < 3)
offset = 0x7f84 + n * 12;
else
offset = 0x7f2c + (n - 3) * 12;
set_desc_base(&desc, GET_SMSTATE(u32, smbase, offset + 8));
set_desc_limit(&desc, GET_SMSTATE(u32, smbase, offset + 4));
rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, offset));
ctxt->ops->set_segment(ctxt, selector, &desc, 0, n);
return X86EMUL_CONTINUE;
}
static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
{
struct desc_struct desc;
int offset;
u16 selector;
u32 base3;
offset = 0x7e00 + n * 16;
selector = GET_SMSTATE(u16, smbase, offset);
rsm_set_desc_flags(&desc, GET_SMSTATE(u16, smbase, offset + 2) << 8);
set_desc_limit(&desc, GET_SMSTATE(u32, smbase, offset + 4));
set_desc_base(&desc, GET_SMSTATE(u32, smbase, offset + 8));
base3 = GET_SMSTATE(u32, smbase, offset + 12);
ctxt->ops->set_segment(ctxt, selector, &desc, base3, n);
return X86EMUL_CONTINUE;
}
static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
u64 cr0, u64 cr4)
{
int bad;
/*
* First enable PAE, long mode needs it before CR0.PG = 1 is set.
* Then enable protected mode. However, PCID cannot be enabled
* if EFER.LMA=0, so set it separately.
*/
bad = ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
if (bad)
return X86EMUL_UNHANDLEABLE;
bad = ctxt->ops->set_cr(ctxt, 0, cr0);
if (bad)
return X86EMUL_UNHANDLEABLE;
if (cr4 & X86_CR4_PCIDE) {
bad = ctxt->ops->set_cr(ctxt, 4, cr4);
if (bad)
return X86EMUL_UNHANDLEABLE;
}
return X86EMUL_CONTINUE;
}
static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase)
{
struct desc_struct desc;
struct desc_ptr dt;
u16 selector;
u32 val, cr0, cr4;
int i;
cr0 = GET_SMSTATE(u32, smbase, 0x7ffc);
ctxt->ops->set_cr(ctxt, 3, GET_SMSTATE(u32, smbase, 0x7ff8));
ctxt->eflags = GET_SMSTATE(u32, smbase, 0x7ff4) | X86_EFLAGS_FIXED;
ctxt->_eip = GET_SMSTATE(u32, smbase, 0x7ff0);
for (i = 0; i < 8; i++)
*reg_write(ctxt, i) = GET_SMSTATE(u32, smbase, 0x7fd0 + i * 4);
val = GET_SMSTATE(u32, smbase, 0x7fcc);
ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
val = GET_SMSTATE(u32, smbase, 0x7fc8);
ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
selector = GET_SMSTATE(u32, smbase, 0x7fc4);
set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7f64));
set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7f60));
rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7f5c));
ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_TR);
selector = GET_SMSTATE(u32, smbase, 0x7fc0);
set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7f80));
set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7f7c));
rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7f78));
ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_LDTR);
dt.address = GET_SMSTATE(u32, smbase, 0x7f74);
dt.size = GET_SMSTATE(u32, smbase, 0x7f70);
ctxt->ops->set_gdt(ctxt, &dt);
dt.address = GET_SMSTATE(u32, smbase, 0x7f58);
dt.size = GET_SMSTATE(u32, smbase, 0x7f54);
ctxt->ops->set_idt(ctxt, &dt);
for (i = 0; i < 6; i++) {
int r = rsm_load_seg_32(ctxt, smbase, i);
if (r != X86EMUL_CONTINUE)
return r;
}
cr4 = GET_SMSTATE(u32, smbase, 0x7f14);
ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7ef8));
return rsm_enter_protected_mode(ctxt, cr0, cr4);
}
static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
{
struct desc_struct desc;
struct desc_ptr dt;
u64 val, cr0, cr4;
u32 base3;
u16 selector;
int i, r;
for (i = 0; i < 16; i++)
*reg_write(ctxt, i) = GET_SMSTATE(u64, smbase, 0x7ff8 - i * 8);
ctxt->_eip = GET_SMSTATE(u64, smbase, 0x7f78);
ctxt->eflags = GET_SMSTATE(u32, smbase, 0x7f70) | X86_EFLAGS_FIXED;
val = GET_SMSTATE(u32, smbase, 0x7f68);
ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
val = GET_SMSTATE(u32, smbase, 0x7f60);
ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
cr0 = GET_SMSTATE(u64, smbase, 0x7f58);
ctxt->ops->set_cr(ctxt, 3, GET_SMSTATE(u64, smbase, 0x7f50));
cr4 = GET_SMSTATE(u64, smbase, 0x7f48);
ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7f00));
val = GET_SMSTATE(u64, smbase, 0x7ed0);
ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA);
selector = GET_SMSTATE(u32, smbase, 0x7e90);
rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7e92) << 8);
set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7e94));
set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7e98));
base3 = GET_SMSTATE(u32, smbase, 0x7e9c);
ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_TR);
dt.size = GET_SMSTATE(u32, smbase, 0x7e84);
dt.address = GET_SMSTATE(u64, smbase, 0x7e88);
ctxt->ops->set_idt(ctxt, &dt);
selector = GET_SMSTATE(u32, smbase, 0x7e70);
rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7e72) << 8);
set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7e74));
set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7e78));
base3 = GET_SMSTATE(u32, smbase, 0x7e7c);
ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_LDTR);
dt.size = GET_SMSTATE(u32, smbase, 0x7e64);
dt.address = GET_SMSTATE(u64, smbase, 0x7e68);
ctxt->ops->set_gdt(ctxt, &dt);
r = rsm_enter_protected_mode(ctxt, cr0, cr4);
if (r != X86EMUL_CONTINUE)
return r;
for (i = 0; i < 6; i++) {
r = rsm_load_seg_64(ctxt, smbase, i);
if (r != X86EMUL_CONTINUE)
return r;
}
return X86EMUL_CONTINUE;
}
static int em_rsm(struct x86_emulate_ctxt *ctxt)
{
unsigned long cr0, cr4, efer;
u64 smbase;
int ret;
if ((ctxt->emul_flags & X86EMUL_SMM_MASK) == 0)
return emulate_ud(ctxt);
/*
* Get back to real mode, to prepare a safe state in which to load
* CR0/CR3/CR4/EFER. It's all a bit more complicated if the vCPU
* supports long mode.
*/
cr4 = ctxt->ops->get_cr(ctxt, 4);
if (emulator_has_longmode(ctxt)) {
struct desc_struct cs_desc;
/* Zero CR4.PCIDE before CR0.PG. */
if (cr4 & X86_CR4_PCIDE) {
ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
cr4 &= ~X86_CR4_PCIDE;
}
/* A 32-bit code segment is required to clear EFER.LMA. */
memset(&cs_desc, 0, sizeof(cs_desc));
cs_desc.type = 0xb;
cs_desc.s = cs_desc.g = cs_desc.p = 1;
ctxt->ops->set_segment(ctxt, 0, &cs_desc, 0, VCPU_SREG_CS);
}
/* For the 64-bit case, this will clear EFER.LMA. */
cr0 = ctxt->ops->get_cr(ctxt, 0);
if (cr0 & X86_CR0_PE)
ctxt->ops->set_cr(ctxt, 0, cr0 & ~(X86_CR0_PG | X86_CR0_PE));
/* Now clear CR4.PAE (which must be done before clearing EFER.LME). */
if (cr4 & X86_CR4_PAE)
ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);
/* And finally go back to 32-bit mode. */
efer = 0;
ctxt->ops->set_msr(ctxt, MSR_EFER, efer);
smbase = ctxt->ops->get_smbase(ctxt);
if (emulator_has_longmode(ctxt))
ret = rsm_load_state_64(ctxt, smbase + 0x8000);
else
ret = rsm_load_state_32(ctxt, smbase + 0x8000);
if (ret != X86EMUL_CONTINUE) {
/* FIXME: should triple fault */
return X86EMUL_UNHANDLEABLE;
}
if ((ctxt->emul_flags & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
ctxt->ops->set_nmi_mask(ctxt, false);
ctxt->emul_flags &= ~X86EMUL_SMM_INSIDE_NMI_MASK;
ctxt->emul_flags &= ~X86EMUL_SMM_MASK;
return X86EMUL_CONTINUE;
}
static void
setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
struct desc_struct *cs, struct desc_struct *ss)
{
cs->l = 0; /* will be adjusted later */
set_desc_base(cs, 0); /* flat segment */
cs->g = 1; /* 4kb granularity */
set_desc_limit(cs, 0xfffff); /* 4GB limit */
cs->type = 0x0b; /* Read, Execute, Accessed */
cs->s = 1;
cs->dpl = 0; /* will be adjusted later */
cs->p = 1;
cs->d = 1;
cs->avl = 0;
set_desc_base(ss, 0); /* flat segment */
set_desc_limit(ss, 0xfffff); /* 4GB limit */
ss->g = 1; /* 4kb granularity */
ss->s = 1;
ss->type = 0x03; /* Read/Write, Accessed */
ss->d = 1; /* 32bit stack segment */
ss->dpl = 0;
ss->p = 1;
ss->l = 0;
ss->avl = 0;
}
static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
{
u32 eax, ebx, ecx, edx;
eax = ecx = 0;
ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
&& ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
&& edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
}
static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
{
const struct x86_emulate_ops *ops = ctxt->ops;
u32 eax, ebx, ecx, edx;
/*
	 * syscall is always enabled in long mode, so the vendor-specific
	 * (CPUID) check only matters when other modes are active...
*/
if (ctxt->mode == X86EMUL_MODE_PROT64)
return true;
eax = 0x00000000;
ecx = 0x00000000;
ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
/*
* Intel ("GenuineIntel")
* remark: Intel CPUs only support "syscall" in 64bit
* longmode. Also an 64bit guest with a
* 32bit compat-app running will #UD !! While this
* behaviour can be fixed (by emulating) into AMD
* response - CPUs of AMD can't behave like Intel.
*/
if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
return false;
/* AMD ("AuthenticAMD") */
if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
return true;
/* AMD ("AMDisbetter!") */
if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
return true;
/* default: (not Intel, not AMD), apply Intel's stricter rules... */
return false;
}
static int em_syscall(struct x86_emulate_ctxt *ctxt)
{
const struct x86_emulate_ops *ops = ctxt->ops;
struct desc_struct cs, ss;
u64 msr_data;
u16 cs_sel, ss_sel;
u64 efer = 0;
/* syscall is not available in real mode */
if (ctxt->mode == X86EMUL_MODE_REAL ||
ctxt->mode == X86EMUL_MODE_VM86)
return emulate_ud(ctxt);
if (!(em_syscall_is_enabled(ctxt)))
return emulate_ud(ctxt);
ops->get_msr(ctxt, MSR_EFER, &efer);
setup_syscalls_segments(ctxt, &cs, &ss);
if (!(efer & EFER_SCE))
return emulate_ud(ctxt);
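	/*
	 * MSR_STAR layout: bits 47:32 hold the SYSCALL CS selector (SS
	 * is CS + 8), bits 63:48 the SYSRET selectors, and bits 31:0
	 * the legacy-mode SYSCALL target EIP.
	 */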
ops->get_msr(ctxt, MSR_STAR, &msr_data);
msr_data >>= 32;
cs_sel = (u16)(msr_data & 0xfffc);
ss_sel = (u16)(msr_data + 8);
if (efer & EFER_LMA) {
cs.d = 0;
cs.l = 1;
}
ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
*reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
if (efer & EFER_LMA) {
#ifdef CONFIG_X86_64
*reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;
ops->get_msr(ctxt,
ctxt->mode == X86EMUL_MODE_PROT64 ?
MSR_LSTAR : MSR_CSTAR, &msr_data);
ctxt->_eip = msr_data;
ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
ctxt->eflags &= ~msr_data;
ctxt->eflags |= X86_EFLAGS_FIXED;
#endif
} else {
/* legacy mode */
ops->get_msr(ctxt, MSR_STAR, &msr_data);
ctxt->_eip = (u32)msr_data;
ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
}
return X86EMUL_CONTINUE;
}
static int em_sysenter(struct x86_emulate_ctxt *ctxt)
{
const struct x86_emulate_ops *ops = ctxt->ops;
struct desc_struct cs, ss;
u64 msr_data;
u16 cs_sel, ss_sel;
u64 efer = 0;
ops->get_msr(ctxt, MSR_EFER, &efer);
/* inject #GP if in real mode */
if (ctxt->mode == X86EMUL_MODE_REAL)
return emulate_gp(ctxt, 0);
/*
* Not recognized on AMD in compat mode (but is recognized in legacy
* mode).
*/
if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA)
&& !vendor_intel(ctxt))
return emulate_ud(ctxt);
/* sysenter/sysexit have not been tested in 64bit mode. */
if (ctxt->mode == X86EMUL_MODE_PROT64)
return X86EMUL_UNHANDLEABLE;
setup_syscalls_segments(ctxt, &cs, &ss);
ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
if ((msr_data & 0xfffc) == 0x0)
return emulate_gp(ctxt, 0);
ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
cs_sel = (u16)msr_data & ~SEGMENT_RPL_MASK;
ss_sel = cs_sel + 8;
if (efer & EFER_LMA) {
cs.d = 0;
cs.l = 1;
}
ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;
ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
*reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
(u32)msr_data;
return X86EMUL_CONTINUE;
}
static int em_sysexit(struct x86_emulate_ctxt *ctxt)
{
const struct x86_emulate_ops *ops = ctxt->ops;
struct desc_struct cs, ss;
u64 msr_data, rcx, rdx;
int usermode;
u16 cs_sel = 0, ss_sel = 0;
/* inject #GP if in real mode or Virtual 8086 mode */
if (ctxt->mode == X86EMUL_MODE_REAL ||
ctxt->mode == X86EMUL_MODE_VM86)
return emulate_gp(ctxt, 0);
setup_syscalls_segments(ctxt, &cs, &ss);
if ((ctxt->rex_prefix & 0x8) != 0x0)
usermode = X86EMUL_MODE_PROT64;
else
usermode = X86EMUL_MODE_PROT32;
rcx = reg_read(ctxt, VCPU_REGS_RCX);
rdx = reg_read(ctxt, VCPU_REGS_RDX);
cs.dpl = 3;
ss.dpl = 3;
ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
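	/*
	 * SYSEXIT derives the user selectors from SYSENTER_CS: +16/+24
	 * for a 32-bit return, +32 (with SS = CS + 8) for a 64-bit one.
	 */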
switch (usermode) {
case X86EMUL_MODE_PROT32:
cs_sel = (u16)(msr_data + 16);
if ((msr_data & 0xfffc) == 0x0)
return emulate_gp(ctxt, 0);
ss_sel = (u16)(msr_data + 24);
rcx = (u32)rcx;
rdx = (u32)rdx;
break;
case X86EMUL_MODE_PROT64:
cs_sel = (u16)(msr_data + 32);
if (msr_data == 0x0)
return emulate_gp(ctxt, 0);
ss_sel = cs_sel + 8;
cs.d = 0;
cs.l = 1;
if (is_noncanonical_address(rcx) ||
is_noncanonical_address(rdx))
return emulate_gp(ctxt, 0);
break;
}
cs_sel |= SEGMENT_RPL_MASK;
ss_sel |= SEGMENT_RPL_MASK;
ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
ctxt->_eip = rdx;
*reg_write(ctxt, VCPU_REGS_RSP) = rcx;
return X86EMUL_CONTINUE;
}
static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
{
int iopl;
if (ctxt->mode == X86EMUL_MODE_REAL)
return false;
if (ctxt->mode == X86EMUL_MODE_VM86)
return true;
iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
return ctxt->ops->cpl(ctxt) > iopl;
}
static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
u16 port, u16 len)
{
const struct x86_emulate_ops *ops = ctxt->ops;
struct desc_struct tr_seg;
u32 base3;
int r;
u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
unsigned mask = (1 << len) - 1;
unsigned long base;
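	/*
	 * The 32-bit TSS keeps the I/O permission bitmap base at offset
	 * 102 (hence the minimum limit of 103); the bitmap holds one
	 * bit per port, and two bytes are read so that accesses
	 * spanning a byte boundary are covered.
	 */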
ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
if (!tr_seg.p)
return false;
if (desc_limit_scaled(&tr_seg) < 103)
return false;
base = get_desc_base(&tr_seg);
#ifdef CONFIG_X86_64
base |= ((u64)base3) << 32;
#endif
r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
if (r != X86EMUL_CONTINUE)
return false;
if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
return false;
r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
if (r != X86EMUL_CONTINUE)
return false;
if ((perm >> bit_idx) & mask)
return false;
return true;
}
static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
u16 port, u16 len)
{
if (ctxt->perm_ok)
return true;
if (emulator_bad_iopl(ctxt))
if (!emulator_io_port_access_allowed(ctxt, port, len))
return false;
ctxt->perm_ok = true;
return true;
}
static void string_registers_quirk(struct x86_emulate_ctxt *ctxt)
{
/*
	 * Intel CPUs mask the counter and pointers in a rather strange
	 * manner when ECX is zero, due to REP-string optimizations.
*/
#ifdef CONFIG_X86_64
if (ctxt->ad_bytes != 4 || !vendor_intel(ctxt))
return;
*reg_write(ctxt, VCPU_REGS_RCX) = 0;
switch (ctxt->b) {
case 0xa4: /* movsb */
case 0xa5: /* movsd/w */
*reg_rmw(ctxt, VCPU_REGS_RSI) &= (u32)-1;
/* fall through */
case 0xaa: /* stosb */
case 0xab: /* stosd/w */
*reg_rmw(ctxt, VCPU_REGS_RDI) &= (u32)-1;
}
#endif
}
static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
struct tss_segment_16 *tss)
{
tss->ip = ctxt->_eip;
tss->flag = ctxt->eflags;
tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
tss->si = reg_read(ctxt, VCPU_REGS_RSI);
tss->di = reg_read(ctxt, VCPU_REGS_RDI);
tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
}
static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
struct tss_segment_16 *tss)
{
int ret;
u8 cpl;
ctxt->_eip = tss->ip;
ctxt->eflags = tss->flag | 2;
*reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
*reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
*reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
*reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
*reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
*reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
*reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
*reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
/*
* SDM says that segment selectors are loaded before segment
* descriptors
*/
set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
cpl = tss->cs & 3;
/*
	 * Now load the segment descriptors.  If a fault happens at this
	 * stage, it is handled in the context of the new task.
*/
ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
X86_TRANSFER_TASK_SWITCH, NULL);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
X86_TRANSFER_TASK_SWITCH, NULL);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
X86_TRANSFER_TASK_SWITCH, NULL);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
X86_TRANSFER_TASK_SWITCH, NULL);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
X86_TRANSFER_TASK_SWITCH, NULL);
if (ret != X86EMUL_CONTINUE)
return ret;
return X86EMUL_CONTINUE;
}
static int task_switch_16(struct x86_emulate_ctxt *ctxt,
u16 tss_selector, u16 old_tss_sel,
ulong old_tss_base, struct desc_struct *new_desc)
{
const struct x86_emulate_ops *ops = ctxt->ops;
struct tss_segment_16 tss_seg;
int ret;
u32 new_tss_base = get_desc_base(new_desc);
ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
&ctxt->exception);
if (ret != X86EMUL_CONTINUE)
return ret;
save_state_to_tss16(ctxt, &tss_seg);
ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
&ctxt->exception);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
&ctxt->exception);
if (ret != X86EMUL_CONTINUE)
return ret;
if (old_tss_sel != 0xffff) {
tss_seg.prev_task_link = old_tss_sel;
ret = ops->write_std(ctxt, new_tss_base,
&tss_seg.prev_task_link,
sizeof tss_seg.prev_task_link,
&ctxt->exception);
if (ret != X86EMUL_CONTINUE)
return ret;
}
return load_state_from_tss16(ctxt, &tss_seg);
}
static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
struct tss_segment_32 *tss)
{
	/* CR3 and the LDT selector are intentionally not saved */
tss->eip = ctxt->_eip;
tss->eflags = ctxt->eflags;
tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
}
static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
struct tss_segment_32 *tss)
{
int ret;
u8 cpl;
if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
return emulate_gp(ctxt, 0);
ctxt->_eip = tss->eip;
ctxt->eflags = tss->eflags | 2;
/* General purpose registers */
*reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
*reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
*reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
*reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
*reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
*reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
*reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
*reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
/*
* SDM says that segment selectors are loaded before segment
* descriptors. This is important because CPL checks will
* use CS.RPL.
*/
set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
/*
* If we're switching between Protected Mode and VM86, we need to make
* sure to update the mode before loading the segment descriptors so
* that the selectors are interpreted correctly.
*/
if (ctxt->eflags & X86_EFLAGS_VM) {
ctxt->mode = X86EMUL_MODE_VM86;
cpl = 3;
} else {
ctxt->mode = X86EMUL_MODE_PROT32;
cpl = tss->cs & 3;
}
/*
	 * Now load the segment descriptors.  If a fault happens at this
	 * stage, it is handled in the context of the new task.
*/
ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
cpl, X86_TRANSFER_TASK_SWITCH, NULL);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
X86_TRANSFER_TASK_SWITCH, NULL);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
X86_TRANSFER_TASK_SWITCH, NULL);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
X86_TRANSFER_TASK_SWITCH, NULL);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
X86_TRANSFER_TASK_SWITCH, NULL);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
X86_TRANSFER_TASK_SWITCH, NULL);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
X86_TRANSFER_TASK_SWITCH, NULL);
return ret;
}
static int task_switch_32(struct x86_emulate_ctxt *ctxt,
u16 tss_selector, u16 old_tss_sel,
ulong old_tss_base, struct desc_struct *new_desc)
{
const struct x86_emulate_ops *ops = ctxt->ops;
struct tss_segment_32 tss_seg;
int ret;
u32 new_tss_base = get_desc_base(new_desc);
u32 eip_offset = offsetof(struct tss_segment_32, eip);
u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
&ctxt->exception);
if (ret != X86EMUL_CONTINUE)
return ret;
save_state_to_tss32(ctxt, &tss_seg);
/* Only GP registers and segment selectors are saved */
ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
ldt_sel_offset - eip_offset, &ctxt->exception);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
&ctxt->exception);
if (ret != X86EMUL_CONTINUE)
return ret;
if (old_tss_sel != 0xffff) {
tss_seg.prev_task_link = old_tss_sel;
ret = ops->write_std(ctxt, new_tss_base,
&tss_seg.prev_task_link,
sizeof tss_seg.prev_task_link,
&ctxt->exception);
if (ret != X86EMUL_CONTINUE)
return ret;
}
return load_state_from_tss32(ctxt, &tss_seg);
}
static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
u16 tss_selector, int idt_index, int reason,
bool has_error_code, u32 error_code)
{
const struct x86_emulate_ops *ops = ctxt->ops;
struct desc_struct curr_tss_desc, next_tss_desc;
int ret;
u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
ulong old_tss_base =
ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
u32 desc_limit;
ulong desc_addr, dr7;
/* FIXME: old_tss_base == ~0 ? */
ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
if (ret != X86EMUL_CONTINUE)
return ret;
/* FIXME: check that next_tss_desc is tss */
/*
* Check privileges. The three cases are task switch caused by...
*
* 1. jmp/call/int to task gate: Check against DPL of the task gate
* 2. Exception/IRQ/iret: No check is performed
* 3. jmp/call to TSS/task-gate: No check is performed since the
* hardware checks it before exiting.
*/
if (reason == TASK_SWITCH_GATE) {
if (idt_index != -1) {
/* Software interrupts */
struct desc_struct task_gate_desc;
int dpl;
ret = read_interrupt_descriptor(ctxt, idt_index,
&task_gate_desc);
if (ret != X86EMUL_CONTINUE)
return ret;
dpl = task_gate_desc.dpl;
if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
return emulate_gp(ctxt, (idt_index << 3) | 0x2);
}
}
desc_limit = desc_limit_scaled(&next_tss_desc);
if (!next_tss_desc.p ||
((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
desc_limit < 0x2b)) {
return emulate_ts(ctxt, tss_selector & 0xfffc);
}
if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
}
if (reason == TASK_SWITCH_IRET)
ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
/* Set the back link to the previous task only if the NT bit is set
in eflags; note that old_tss_sel is not used after this point. */
if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
old_tss_sel = 0xffff;
if (next_tss_desc.type & 8)
ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
old_tss_base, &next_tss_desc);
else
ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
old_tss_base, &next_tss_desc);
if (ret != X86EMUL_CONTINUE)
return ret;
if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
if (reason != TASK_SWITCH_IRET) {
next_tss_desc.type |= (1 << 1); /* set busy flag */
write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
}
ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
if (has_error_code) {
ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
ctxt->lock_prefix = 0;
ctxt->src.val = (unsigned long) error_code;
ret = em_push(ctxt);
}
ops->get_dr(ctxt, 7, &dr7);
ops->set_dr(ctxt, 7, dr7 & ~(DR_LOCAL_ENABLE_MASK | DR_LOCAL_SLOWDOWN));
return ret;
}
int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
u16 tss_selector, int idt_index, int reason,
bool has_error_code, u32 error_code)
{
int rc;
invalidate_registers(ctxt);
ctxt->_eip = ctxt->eip;
ctxt->dst.type = OP_NONE;
rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
has_error_code, error_code);
if (rc == X86EMUL_CONTINUE) {
ctxt->eip = ctxt->_eip;
writeback_registers(ctxt);
}
return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
}
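/*
* Advance a string operand (e.g. RSI/RDI) by op->count elements,
* moving backwards when EFLAGS.DF is set, and refresh the operand's
* effective address from the register.
*/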
static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
struct operand *op)
{
int df = (ctxt->eflags & X86_EFLAGS_DF) ? -op->count : op->count;
register_address_increment(ctxt, reg, df * op->bytes);
op->addr.mem.ea = register_address(ctxt, reg);
}
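/*
* DAS: decimal adjust AL after subtraction, for packed BCD. If the low
* nibble is > 9 (or AF is set), subtract 6; if AL was > 0x99 (or CF was
* set), subtract 0x60 and set CF. The "al >= 250" test below catches
* the borrow out of the low-nibble adjustment. E.g. 0x23 - 0x08 = 0x1b,
* and DAS then yields 0x15 (23 - 8 = 15 in BCD).
*/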
static int em_das(struct x86_emulate_ctxt *ctxt)
{
u8 al, old_al;
bool af, cf, old_cf;
cf = ctxt->eflags & X86_EFLAGS_CF;
al = ctxt->dst.val;
old_al = al;
old_cf = cf;
cf = false;
af = ctxt->eflags & X86_EFLAGS_AF;
if ((al & 0x0f) > 9 || af) {
al -= 6;
cf = old_cf | (al >= 250);
af = true;
} else {
af = false;
}
if (old_al > 0x99 || old_cf) {
al -= 0x60;
cf = true;
}
ctxt->dst.val = al;
/* Set PF, ZF, SF */
ctxt->src.type = OP_IMM;
ctxt->src.val = 0;
ctxt->src.bytes = 1;
fastop(ctxt, em_or);
ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
if (cf)
ctxt->eflags |= X86_EFLAGS_CF;
if (af)
ctxt->eflags |= X86_EFLAGS_AF;
return X86EMUL_CONTINUE;
}
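/*
* AAM: ASCII adjust AX after multiply. AH = AL / imm8 and
* AL = AL % imm8 (imm8 is 10 in the classic encoding); an immediate
* of 0 raises #DE.
*/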
static int em_aam(struct x86_emulate_ctxt *ctxt)
{
u8 al, ah;
if (ctxt->src.val == 0)
return emulate_de(ctxt);
al = ctxt->dst.val & 0xff;
ah = al / ctxt->src.val;
al %= ctxt->src.val;
ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
/* Set PF, ZF, SF */
ctxt->src.type = OP_IMM;
ctxt->src.val = 0;
ctxt->src.bytes = 1;
fastop(ctxt, em_or);
return X86EMUL_CONTINUE;
}
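/*
* AAD: ASCII adjust AX before division. AL = (AL + AH * imm8) & 0xff
* and AH = 0, folding a two-digit unpacked BCD value into binary.
*/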
static int em_aad(struct x86_emulate_ctxt *ctxt)
{
u8 al = ctxt->dst.val & 0xff;
u8 ah = (ctxt->dst.val >> 8) & 0xff;
al = (al + (ah * ctxt->src.val)) & 0xff;
ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
/* Set PF, ZF, SF */
ctxt->src.type = OP_IMM;
ctxt->src.val = 0;
ctxt->src.bytes = 1;
fastop(ctxt, em_or);
return X86EMUL_CONTINUE;
}
static int em_call(struct x86_emulate_ctxt *ctxt)
{
int rc;
long rel = ctxt->src.val;
ctxt->src.val = (unsigned long)ctxt->_eip;
rc = jmp_rel(ctxt, rel);
if (rc != X86EMUL_CONTINUE)
return rc;
return em_push(ctxt);
}
static int em_call_far(struct x86_emulate_ctxt *ctxt)
{
u16 sel, old_cs;
ulong old_eip;
int rc;
struct desc_struct old_desc, new_desc;
const struct x86_emulate_ops *ops = ctxt->ops;
int cpl = ctxt->ops->cpl(ctxt);
enum x86emul_mode prev_mode = ctxt->mode;
old_eip = ctxt->_eip;
ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
X86_TRANSFER_CALL_JMP, &new_desc);
if (rc != X86EMUL_CONTINUE)
return rc;
rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
if (rc != X86EMUL_CONTINUE)
goto fail;
ctxt->src.val = old_cs;
rc = em_push(ctxt);
if (rc != X86EMUL_CONTINUE)
goto fail;
ctxt->src.val = old_eip;
rc = em_push(ctxt);
/* If we failed, we tainted the memory, but at the very least we
should restore cs */
if (rc != X86EMUL_CONTINUE) {
pr_warn_once("faulting far call emulation tainted memory\n");
goto fail;
}
return rc;
fail:
ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
ctxt->mode = prev_mode;
return rc;
}
static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
{
int rc;
unsigned long eip;
rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
return rc;
rc = assign_eip_near(ctxt, eip);
if (rc != X86EMUL_CONTINUE)
return rc;
rsp_increment(ctxt, ctxt->src.val);
return X86EMUL_CONTINUE;
}
static int em_xchg(struct x86_emulate_ctxt *ctxt)
{
/* Write back the register source. */
ctxt->src.val = ctxt->dst.val;
write_register_operand(&ctxt->src);
/* Write back the memory destination with implicit LOCK prefix. */
ctxt->dst.val = ctxt->src.orig_val;
ctxt->lock_prefix = 1;
return X86EMUL_CONTINUE;
}
static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
{
ctxt->dst.val = ctxt->src2.val;
return fastop(ctxt, em_imul);
}
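/*
* CWD/CDQ/CQO: replicate the sign bit of rAX into rDX. The expression
* ~((src >> (bits - 1)) - 1) evaluates to all-ones when the sign bit
* is set and to zero otherwise.
*/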
static int em_cwd(struct x86_emulate_ctxt *ctxt)
{
ctxt->dst.type = OP_REG;
ctxt->dst.bytes = ctxt->src.bytes;
ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
return X86EMUL_CONTINUE;
}
static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
{
u64 tsc = 0;
ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
*reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
*reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
return X86EMUL_CONTINUE;
}
static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
{
u64 pmc;
if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
return emulate_gp(ctxt, 0);
*reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
*reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
return X86EMUL_CONTINUE;
}
static int em_mov(struct x86_emulate_ctxt *ctxt)
{
memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
return X86EMUL_CONTINUE;
}
#define FFL(x) bit(X86_FEATURE_##x)
static int em_movbe(struct x86_emulate_ctxt *ctxt)
{
u32 ebx, ecx, edx, eax = 1;
u16 tmp;
/*
* Check MOVBE is set in the guest-visible CPUID leaf.
*/
ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
if (!(ecx & FFL(MOVBE)))
return emulate_ud(ctxt);
switch (ctxt->op_bytes) {
case 2:
/*
* From MOVBE definition: "...When the operand size is 16 bits,
* the upper word of the destination register remains unchanged
* ..."
*
* Both casting ->valptr and ->val to u16 breaks strict aliasing
* rules, so we have to do the operation almost by hand.
*/
tmp = (u16)ctxt->src.val;
ctxt->dst.val &= ~0xffffUL;
ctxt->dst.val |= (unsigned long)swab16(tmp);
break;
case 4:
ctxt->dst.val = swab32((u32)ctxt->src.val);
break;
case 8:
ctxt->dst.val = swab64(ctxt->src.val);
break;
default:
BUG();
}
return X86EMUL_CONTINUE;
}
static int em_cr_write(struct x86_emulate_ctxt *ctxt)
{
if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
return emulate_gp(ctxt, 0);
/* Disable writeback. */
ctxt->dst.type = OP_NONE;
return X86EMUL_CONTINUE;
}
static int em_dr_write(struct x86_emulate_ctxt *ctxt)
{
unsigned long val;
if (ctxt->mode == X86EMUL_MODE_PROT64)
val = ctxt->src.val & ~0ULL;
else
val = ctxt->src.val & ~0U;
/* #UD condition is already handled. */
if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
return emulate_gp(ctxt, 0);
/* Disable writeback. */
ctxt->dst.type = OP_NONE;
return X86EMUL_CONTINUE;
}
static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
{
u64 msr_data;
msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
| ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
return emulate_gp(ctxt, 0);
return X86EMUL_CONTINUE;
}
static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
{
u64 msr_data;
if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
return emulate_gp(ctxt, 0);
*reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
*reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
return X86EMUL_CONTINUE;
}
static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
{
if (ctxt->modrm_reg > VCPU_SREG_GS)
return emulate_ud(ctxt);
ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
ctxt->dst.bytes = 2;
return X86EMUL_CONTINUE;
}
static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
{
u16 sel = ctxt->src.val;
if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
return emulate_ud(ctxt);
if (ctxt->modrm_reg == VCPU_SREG_SS)
ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
/* Disable writeback. */
ctxt->dst.type = OP_NONE;
return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
}
static int em_lldt(struct x86_emulate_ctxt *ctxt)
{
u16 sel = ctxt->src.val;
/* Disable writeback. */
ctxt->dst.type = OP_NONE;
return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
}
static int em_ltr(struct x86_emulate_ctxt *ctxt)
{
u16 sel = ctxt->src.val;
/* Disable writeback. */
ctxt->dst.type = OP_NONE;
return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
}
static int em_invlpg(struct x86_emulate_ctxt *ctxt)
{
int rc;
ulong linear;
rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
if (rc == X86EMUL_CONTINUE)
ctxt->ops->invlpg(ctxt, linear);
/* Disable writeback. */
ctxt->dst.type = OP_NONE;
return X86EMUL_CONTINUE;
}
static int em_clts(struct x86_emulate_ctxt *ctxt)
{
ulong cr0;
cr0 = ctxt->ops->get_cr(ctxt, 0);
cr0 &= ~X86_CR0_TS;
ctxt->ops->set_cr(ctxt, 0, cr0);
return X86EMUL_CONTINUE;
}
static int em_hypercall(struct x86_emulate_ctxt *ctxt)
{
int rc = ctxt->ops->fix_hypercall(ctxt);
if (rc != X86EMUL_CONTINUE)
return rc;
/* Let the processor re-execute the fixed hypercall */
ctxt->_eip = ctxt->eip;
/* Disable writeback. */
ctxt->dst.type = OP_NONE;
return X86EMUL_CONTINUE;
}
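/*
* Common helper for SGDT/SIDT: store the 2-byte limit followed by the
* table base. With a 16-bit operand size only the low 24 bits of the
* base are stored, hence the 0x00ffffff mask below.
*/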
static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
void (*get)(struct x86_emulate_ctxt *ctxt,
struct desc_ptr *ptr))
{
struct desc_ptr desc_ptr;
if (ctxt->mode == X86EMUL_MODE_PROT64)
ctxt->op_bytes = 8;
get(ctxt, &desc_ptr);
if (ctxt->op_bytes == 2) {
ctxt->op_bytes = 4;
desc_ptr.address &= 0x00ffffff;
}
/* Disable writeback. */
ctxt->dst.type = OP_NONE;
return segmented_write(ctxt, ctxt->dst.addr.mem,
&desc_ptr, 2 + ctxt->op_bytes);
}
static int em_sgdt(struct x86_emulate_ctxt *ctxt)
{
return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
}
static int em_sidt(struct x86_emulate_ctxt *ctxt)
{
return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
}
static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
{
struct desc_ptr desc_ptr;
int rc;
if (ctxt->mode == X86EMUL_MODE_PROT64)
ctxt->op_bytes = 8;
rc = read_descriptor(ctxt, ctxt->src.addr.mem,
&desc_ptr.size, &desc_ptr.address,
ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
return rc;
if (ctxt->mode == X86EMUL_MODE_PROT64 &&
is_noncanonical_address(desc_ptr.address))
return emulate_gp(ctxt, 0);
if (lgdt)
ctxt->ops->set_gdt(ctxt, &desc_ptr);
else
ctxt->ops->set_idt(ctxt, &desc_ptr);
/* Disable writeback. */
ctxt->dst.type = OP_NONE;
return X86EMUL_CONTINUE;
}
static int em_lgdt(struct x86_emulate_ctxt *ctxt)
{
return em_lgdt_lidt(ctxt, true);
}
static int em_lidt(struct x86_emulate_ctxt *ctxt)
{
return em_lgdt_lidt(ctxt, false);
}
static int em_smsw(struct x86_emulate_ctxt *ctxt)
{
if (ctxt->dst.type == OP_MEM)
ctxt->dst.bytes = 2;
ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
return X86EMUL_CONTINUE;
}
static int em_lmsw(struct x86_emulate_ctxt *ctxt)
{
ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
| (ctxt->src.val & 0x0f));
ctxt->dst.type = OP_NONE;
return X86EMUL_CONTINUE;
}
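/*
* LOOP/LOOPE/LOOPNE (0xe2/0xe1/0xe0): decrement rCX and branch while
* it is non-zero. XOR-ing the opcode with 0x5 maps 0xe0 to condition
* code 5 (ZF clear) and 0xe1 to condition code 4 (ZF set), so test_cc()
* can evaluate the extra ZF condition for LOOPNE/LOOPE.
*/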
static int em_loop(struct x86_emulate_ctxt *ctxt)
{
int rc = X86EMUL_CONTINUE;
register_address_increment(ctxt, VCPU_REGS_RCX, -1);
if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
(ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
rc = jmp_rel(ctxt, ctxt->src.val);
return rc;
}
static int em_jcxz(struct x86_emulate_ctxt *ctxt)
{
int rc = X86EMUL_CONTINUE;
if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
rc = jmp_rel(ctxt, ctxt->src.val);
return rc;
}
static int em_in(struct x86_emulate_ctxt *ctxt)
{
if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
&ctxt->dst.val))
return X86EMUL_IO_NEEDED;
return X86EMUL_CONTINUE;
}
static int em_out(struct x86_emulate_ctxt *ctxt)
{
ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
&ctxt->src.val, 1);
/* Disable writeback. */
ctxt->dst.type = OP_NONE;
return X86EMUL_CONTINUE;
}
static int em_cli(struct x86_emulate_ctxt *ctxt)
{
if (emulator_bad_iopl(ctxt))
return emulate_gp(ctxt, 0);
ctxt->eflags &= ~X86_EFLAGS_IF;
return X86EMUL_CONTINUE;
}
static int em_sti(struct x86_emulate_ctxt *ctxt)
{
if (emulator_bad_iopl(ctxt))
return emulate_gp(ctxt, 0);
ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
ctxt->eflags |= X86_EFLAGS_IF;
return X86EMUL_CONTINUE;
}
static int em_cpuid(struct x86_emulate_ctxt *ctxt)
{
u32 eax, ebx, ecx, edx;
eax = reg_read(ctxt, VCPU_REGS_RAX);
ecx = reg_read(ctxt, VCPU_REGS_RCX);
ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
*reg_write(ctxt, VCPU_REGS_RAX) = eax;
*reg_write(ctxt, VCPU_REGS_RBX) = ebx;
*reg_write(ctxt, VCPU_REGS_RCX) = ecx;
*reg_write(ctxt, VCPU_REGS_RDX) = edx;
return X86EMUL_CONTINUE;
}
static int em_sahf(struct x86_emulate_ctxt *ctxt)
{
u32 flags;
flags = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
X86_EFLAGS_SF;
flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
ctxt->eflags &= ~0xffUL;
ctxt->eflags |= flags | X86_EFLAGS_FIXED;
return X86EMUL_CONTINUE;
}
static int em_lahf(struct x86_emulate_ctxt *ctxt)
{
*reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
*reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
return X86EMUL_CONTINUE;
}
static int em_bswap(struct x86_emulate_ctxt *ctxt)
{
switch (ctxt->op_bytes) {
#ifdef CONFIG_X86_64
case 8:
asm("bswap %0" : "+r"(ctxt->dst.val));
break;
#endif
default:
asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
break;
}
return X86EMUL_CONTINUE;
}
static int em_clflush(struct x86_emulate_ctxt *ctxt)
{
/* emulating clflush regardless of cpuid */
return X86EMUL_CONTINUE;
}
static int em_movsxd(struct x86_emulate_ctxt *ctxt)
{
ctxt->dst.val = (s32) ctxt->src.val;
return X86EMUL_CONTINUE;
}
static int check_fxsr(struct x86_emulate_ctxt *ctxt)
{
u32 eax = 1, ebx, ecx = 0, edx;
ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
if (!(edx & FFL(FXSR)))
return emulate_ud(ctxt);
if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
return emulate_nm(ctxt);
/*
* Rather than work around the lack of fxsave64/fxrstor64 on old
* compilers, don't emulate a case that should never be hit.
*/
if (ctxt->mode >= X86EMUL_MODE_PROT64)
return X86EMUL_UNHANDLEABLE;
return X86EMUL_CONTINUE;
}
/*
* FXSAVE and FXRSTOR have 4 different formats depending on execution mode,
* 1) 16 bit mode
* 2) 32 bit mode
* - like (1), but FIP and FDP are only 16 bit. At least Intel CPUs
* preserve whole 32 bit values, though, so (1) and (2) are the same wrt.
* save and restore
* 3) 64-bit mode with REX.W prefix
* - like (2), but XMM 8-15 are being saved and restored
* 4) 64-bit mode without REX.W prefix
* - like (3), but FIP and FDP are 64 bit
*
* Emulation uses (3) for (1) and (2) and preserves XMM 8-15 to reach the
* desired result. (4) is not emulated.
*
* Note: Guest and host CPUID.(EAX=07H,ECX=0H):EBX[bit 13] (deprecate FPU CS
* and FPU DS) should match.
*/
static int em_fxsave(struct x86_emulate_ctxt *ctxt)
{
struct fxregs_state fx_state;
size_t size;
int rc;
rc = check_fxsr(ctxt);
if (rc != X86EMUL_CONTINUE)
return rc;
ctxt->ops->get_fpu(ctxt);
rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));
ctxt->ops->put_fpu(ctxt);
if (rc != X86EMUL_CONTINUE)
return rc;
if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR)
size = offsetof(struct fxregs_state, xmm_space[8 * 16/4]);
else
size = offsetof(struct fxregs_state, xmm_space[0]);
return segmented_write(ctxt, ctxt->memop.addr.mem, &fx_state, size);
}
static int fxrstor_fixup(struct x86_emulate_ctxt *ctxt,
struct fxregs_state *new)
{
int rc = X86EMUL_CONTINUE;
struct fxregs_state old;
rc = asm_safe("fxsave %[fx]", , [fx] "+m"(old));
if (rc != X86EMUL_CONTINUE)
return rc;
/*
* A 64 bit host will restore XMM 8-15, which is not correct on non-64
* bit guests. Copy the current values into the new buffer so that
* XMM 8-15 are preserved across the fxrstor.
*/
#ifdef CONFIG_X86_64
/* XXX: accessing XMM 8-15 very awkwardly */
memcpy(&new->xmm_space[8 * 16/4], &old.xmm_space[8 * 16/4], 8 * 16);
#endif
/*
* Hardware doesn't save and restore XMM 0-7 without CR4.OSFXSR, but
* does save and restore MXCSR.
*/
if (!(ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))
memcpy(new->xmm_space, old.xmm_space, 8 * 16);
return rc;
}
static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
{
struct fxregs_state fx_state;
int rc;
rc = check_fxsr(ctxt);
if (rc != X86EMUL_CONTINUE)
return rc;
rc = segmented_read(ctxt, ctxt->memop.addr.mem, &fx_state, 512);
if (rc != X86EMUL_CONTINUE)
return rc;
if (fx_state.mxcsr >> 16)
return emulate_gp(ctxt, 0);
ctxt->ops->get_fpu(ctxt);
if (ctxt->mode < X86EMUL_MODE_PROT64)
rc = fxrstor_fixup(ctxt, &fx_state);
if (rc == X86EMUL_CONTINUE)
rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state));
ctxt->ops->put_fpu(ctxt);
return rc;
}
static bool valid_cr(int nr)
{
switch (nr) {
case 0:
case 2 ... 4:
case 8:
return true;
default:
return false;
}
}
static int check_cr_read(struct x86_emulate_ctxt *ctxt)
{
if (!valid_cr(ctxt->modrm_reg))
return emulate_ud(ctxt);
return X86EMUL_CONTINUE;
}
static int check_cr_write(struct x86_emulate_ctxt *ctxt)
{
u64 new_val = ctxt->src.val64;
int cr = ctxt->modrm_reg;
u64 efer = 0;
static u64 cr_reserved_bits[] = {
0xffffffff00000000ULL,
0, 0, 0, /* CR3 checked later */
CR4_RESERVED_BITS,
0, 0, 0,
CR8_RESERVED_BITS,
};
if (!valid_cr(cr))
return emulate_ud(ctxt);
if (new_val & cr_reserved_bits[cr])
return emulate_gp(ctxt, 0);
switch (cr) {
case 0: {
u64 cr4;
if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
return emulate_gp(ctxt, 0);
cr4 = ctxt->ops->get_cr(ctxt, 4);
ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
!(cr4 & X86_CR4_PAE))
return emulate_gp(ctxt, 0);
break;
}
case 3: {
u64 rsvd = 0;
ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
if (efer & EFER_LMA)
rsvd = CR3_L_MODE_RESERVED_BITS & ~CR3_PCID_INVD;
if (new_val & rsvd)
return emulate_gp(ctxt, 0);
break;
}
case 4: {
ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
return emulate_gp(ctxt, 0);
break;
}
}
return X86EMUL_CONTINUE;
}
static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
{
unsigned long dr7;
ctxt->ops->get_dr(ctxt, 7, &dr7);
/* Check if DR7.Global_Enable is set */
return dr7 & (1 << 13);
}
static int check_dr_read(struct x86_emulate_ctxt *ctxt)
{
int dr = ctxt->modrm_reg;
u64 cr4;
if (dr > 7)
return emulate_ud(ctxt);
cr4 = ctxt->ops->get_cr(ctxt, 4);
if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
return emulate_ud(ctxt);
if (check_dr7_gd(ctxt)) {
ulong dr6;
ctxt->ops->get_dr(ctxt, 6, &dr6);
dr6 &= ~15;
dr6 |= DR6_BD | DR6_RTM;
ctxt->ops->set_dr(ctxt, 6, dr6);
return emulate_db(ctxt);
}
return X86EMUL_CONTINUE;
}
static int check_dr_write(struct x86_emulate_ctxt *ctxt)
{
u64 new_val = ctxt->src.val64;
int dr = ctxt->modrm_reg;
if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
return emulate_gp(ctxt, 0);
return check_dr_read(ctxt);
}
static int check_svme(struct x86_emulate_ctxt *ctxt)
{
u64 efer;
ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
if (!(efer & EFER_SVME))
return emulate_ud(ctxt);
return X86EMUL_CONTINUE;
}
static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
{
u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
/* Valid physical address? */
if (rax & 0xffff000000000000ULL)
return emulate_gp(ctxt, 0);
return check_svme(ctxt);
}
static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
{
u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
return emulate_ud(ctxt);
return X86EMUL_CONTINUE;
}
static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
{
u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
ctxt->ops->check_pmc(ctxt, rcx))
return emulate_gp(ctxt, 0);
return X86EMUL_CONTINUE;
}
static int check_perm_in(struct x86_emulate_ctxt *ctxt)
{
ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
return emulate_gp(ctxt, 0);
return X86EMUL_CONTINUE;
}
static int check_perm_out(struct x86_emulate_ctxt *ctxt)
{
ctxt->src.bytes = min(ctxt->src.bytes, 4u);
if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
return emulate_gp(ctxt, 0);
return X86EMUL_CONTINUE;
}
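/*
* Shorthand for the decode tables below: D() declares flags only, I()
* attaches an ->execute callback, F() a fastop stub, N marks an opcode
* as not implemented, G()/GD() redirect through a ModRM-reg group (the
* "dual" form picks mod3 vs. mod012), E() through an x87 escape table,
* GP() through a mandatory-prefix table, and the *2bv/F6ALU variants
* expand into the usual byte/word and ALU encoding pairs.
*/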
#define D(_y) { .flags = (_y) }
#define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
#define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
.intercept = x86_intercept_##_i, .check_perm = (_p) }
#define N D(NotImpl)
#define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
#define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
#define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
#define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) }
#define MD(_f, _m) { .flags = ((_f) | ModeDual), .u.mdual = (_m) }
#define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
#define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
#define II(_f, _e, _i) \
{ .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
#define IIP(_f, _e, _i, _p) \
{ .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
.intercept = x86_intercept_##_i, .check_perm = (_p) }
#define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
#define D2bv(_f) D((_f) | ByteOp), D(_f)
#define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
#define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
#define F2bv(_f, _e) F((_f) | ByteOp, _e), F(_f, _e)
#define I2bvIP(_f, _e, _i, _p) \
IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
#define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \
F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
static const struct opcode group7_rm0[] = {
N,
I(SrcNone | Priv | EmulateOnUD, em_hypercall),
N, N, N, N, N, N,
};
static const struct opcode group7_rm1[] = {
DI(SrcNone | Priv, monitor),
DI(SrcNone | Priv, mwait),
N, N, N, N, N, N,
};
static const struct opcode group7_rm3[] = {
DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
II(SrcNone | Prot | EmulateOnUD, em_hypercall, vmmcall),
DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
DIP(SrcNone | Prot | Priv, stgi, check_svme),
DIP(SrcNone | Prot | Priv, clgi, check_svme),
DIP(SrcNone | Prot | Priv, skinit, check_svme),
DIP(SrcNone | Prot | Priv, invlpga, check_svme),
};
static const struct opcode group7_rm7[] = {
N,
DIP(SrcNone, rdtscp, check_rdtsc),
N, N, N, N, N, N,
};
static const struct opcode group1[] = {
F(Lock, em_add),
F(Lock | PageTable, em_or),
F(Lock, em_adc),
F(Lock, em_sbb),
F(Lock | PageTable, em_and),
F(Lock, em_sub),
F(Lock, em_xor),
F(NoWrite, em_cmp),
};
static const struct opcode group1A[] = {
I(DstMem | SrcNone | Mov | Stack | IncSP, em_pop), N, N, N, N, N, N, N,
};
static const struct opcode group2[] = {
F(DstMem | ModRM, em_rol),
F(DstMem | ModRM, em_ror),
F(DstMem | ModRM, em_rcl),
F(DstMem | ModRM, em_rcr),
F(DstMem | ModRM, em_shl),
F(DstMem | ModRM, em_shr),
F(DstMem | ModRM, em_shl),
F(DstMem | ModRM, em_sar),
};
static const struct opcode group3[] = {
F(DstMem | SrcImm | NoWrite, em_test),
F(DstMem | SrcImm | NoWrite, em_test),
F(DstMem | SrcNone | Lock, em_not),
F(DstMem | SrcNone | Lock, em_neg),
F(DstXacc | Src2Mem, em_mul_ex),
F(DstXacc | Src2Mem, em_imul_ex),
F(DstXacc | Src2Mem, em_div_ex),
F(DstXacc | Src2Mem, em_idiv_ex),
};
static const struct opcode group4[] = {
F(ByteOp | DstMem | SrcNone | Lock, em_inc),
F(ByteOp | DstMem | SrcNone | Lock, em_dec),
N, N, N, N, N, N,
};
static const struct opcode group5[] = {
F(DstMem | SrcNone | Lock, em_inc),
F(DstMem | SrcNone | Lock, em_dec),
I(SrcMem | NearBranch, em_call_near_abs),
I(SrcMemFAddr | ImplicitOps, em_call_far),
I(SrcMem | NearBranch, em_jmp_abs),
I(SrcMemFAddr | ImplicitOps, em_jmp_far),
I(SrcMem | Stack, em_push), D(Undefined),
};
static const struct opcode group6[] = {
DI(Prot | DstMem, sldt),
DI(Prot | DstMem, str),
II(Prot | Priv | SrcMem16, em_lldt, lldt),
II(Prot | Priv | SrcMem16, em_ltr, ltr),
N, N, N, N,
};
static const struct group_dual group7 = { {
II(Mov | DstMem, em_sgdt, sgdt),
II(Mov | DstMem, em_sidt, sidt),
II(SrcMem | Priv, em_lgdt, lgdt),
II(SrcMem | Priv, em_lidt, lidt),
II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
}, {
EXT(0, group7_rm0),
EXT(0, group7_rm1),
N, EXT(0, group7_rm3),
II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
EXT(0, group7_rm7),
} };
static const struct opcode group8[] = {
N, N, N, N,
F(DstMem | SrcImmByte | NoWrite, em_bt),
F(DstMem | SrcImmByte | Lock | PageTable, em_bts),
F(DstMem | SrcImmByte | Lock, em_btr),
F(DstMem | SrcImmByte | Lock | PageTable, em_btc),
};
static const struct group_dual group9 = { {
N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
}, {
N, N, N, N, N, N, N, N,
} };
static const struct opcode group11[] = {
I(DstMem | SrcImm | Mov | PageTable, em_mov),
X7(D(Undefined)),
};
static const struct gprefix pfx_0f_ae_7 = {
I(SrcMem | ByteOp, em_clflush), N, N, N,
};
static const struct group_dual group15 = { {
I(ModRM | Aligned16, em_fxsave),
I(ModRM | Aligned16, em_fxrstor),
N, N, N, N, N, GP(0, &pfx_0f_ae_7),
}, {
N, N, N, N, N, N, N, N,
} };
static const struct gprefix pfx_0f_6f_0f_7f = {
I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
};
static const struct instr_dual instr_dual_0f_2b = {
I(0, em_mov), N
};
static const struct gprefix pfx_0f_2b = {
ID(0, &instr_dual_0f_2b), ID(0, &instr_dual_0f_2b), N, N,
};
static const struct gprefix pfx_0f_28_0f_29 = {
I(Aligned, em_mov), I(Aligned, em_mov), N, N,
};
static const struct gprefix pfx_0f_e7 = {
N, I(Sse, em_mov), N, N,
};
static const struct escape escape_d9 = { {
N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstcw),
}, {
/* 0xC0 - 0xC7 */
N, N, N, N, N, N, N, N,
/* 0xC8 - 0xCF */
N, N, N, N, N, N, N, N,
/* 0xD0 - 0xD7 */
N, N, N, N, N, N, N, N,
/* 0xD8 - 0xDF */
N, N, N, N, N, N, N, N,
/* 0xE0 - 0xE7 */
N, N, N, N, N, N, N, N,
/* 0xE8 - 0xEF */
N, N, N, N, N, N, N, N,
/* 0xF0 - 0xF7 */
N, N, N, N, N, N, N, N,
/* 0xF8 - 0xFF */
N, N, N, N, N, N, N, N,
} };
static const struct escape escape_db = { {
N, N, N, N, N, N, N, N,
}, {
/* 0xC0 - 0xC7 */
N, N, N, N, N, N, N, N,
/* 0xC8 - 0xCF */
N, N, N, N, N, N, N, N,
/* 0xD0 - 0xD7 */
N, N, N, N, N, N, N, N,
/* 0xD8 - 0xDF */
N, N, N, N, N, N, N, N,
/* 0xE0 - 0xE7 */
N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
/* 0xE8 - 0xEF */
N, N, N, N, N, N, N, N,
/* 0xF0 - 0xF7 */
N, N, N, N, N, N, N, N,
/* 0xF8 - 0xFF */
N, N, N, N, N, N, N, N,
} };
static const struct escape escape_dd = { {
N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstsw),
}, {
/* 0xC0 - 0xC7 */
N, N, N, N, N, N, N, N,
/* 0xC8 - 0xCF */
N, N, N, N, N, N, N, N,
/* 0xD0 - 0xD7 */
N, N, N, N, N, N, N, N,
/* 0xD8 - 0xDF */
N, N, N, N, N, N, N, N,
/* 0xE0 - 0xE7 */
N, N, N, N, N, N, N, N,
/* 0xE8 - 0xEF */
N, N, N, N, N, N, N, N,
/* 0xF0 - 0xF7 */
N, N, N, N, N, N, N, N,
/* 0xF8 - 0xFF */
N, N, N, N, N, N, N, N,
} };
static const struct instr_dual instr_dual_0f_c3 = {
I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov), N
};
static const struct mode_dual mode_dual_63 = {
N, I(DstReg | SrcMem32 | ModRM | Mov, em_movsxd)
};
static const struct opcode opcode_table[256] = {
/* 0x00 - 0x07 */
F6ALU(Lock, em_add),
I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
/* 0x08 - 0x0F */
F6ALU(Lock | PageTable, em_or),
I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
N,
/* 0x10 - 0x17 */
F6ALU(Lock, em_adc),
I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
/* 0x18 - 0x1F */
F6ALU(Lock, em_sbb),
I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
/* 0x20 - 0x27 */
F6ALU(Lock | PageTable, em_and), N, N,
/* 0x28 - 0x2F */
F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
/* 0x30 - 0x37 */
F6ALU(Lock, em_xor), N, N,
/* 0x38 - 0x3F */
F6ALU(NoWrite, em_cmp), N, N,
/* 0x40 - 0x4F */
X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
/* 0x50 - 0x57 */
X8(I(SrcReg | Stack, em_push)),
/* 0x58 - 0x5F */
X8(I(DstReg | Stack, em_pop)),
/* 0x60 - 0x67 */
I(ImplicitOps | Stack | No64, em_pusha),
I(ImplicitOps | Stack | No64, em_popa),
N, MD(ModRM, &mode_dual_63),
N, N, N, N,
/* 0x68 - 0x6F */
I(SrcImm | Mov | Stack, em_push),
I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
I(SrcImmByte | Mov | Stack, em_push),
I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
/* 0x70 - 0x7F */
X16(D(SrcImmByte | NearBranch)),
/* 0x80 - 0x87 */
G(ByteOp | DstMem | SrcImm, group1),
G(DstMem | SrcImm, group1),
G(ByteOp | DstMem | SrcImm | No64, group1),
G(DstMem | SrcImmByte, group1),
F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
/* 0x88 - 0x8F */
I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
D(ModRM | SrcMem | NoAccess | DstReg),
I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
G(0, group1A),
/* 0x90 - 0x97 */
DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
/* 0x98 - 0x9F */
D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
I(SrcImmFAddr | No64, em_call_far), N,
II(ImplicitOps | Stack, em_pushf, pushf),
II(ImplicitOps | Stack, em_popf, popf),
I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
/* 0xA0 - 0xA7 */
I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
I2bv(SrcSI | DstDI | Mov | String, em_mov),
F2bv(SrcSI | DstDI | String | NoWrite, em_cmp_r),
/* 0xA8 - 0xAF */
F2bv(DstAcc | SrcImm | NoWrite, em_test),
I2bv(SrcAcc | DstDI | Mov | String, em_mov),
I2bv(SrcSI | DstAcc | Mov | String, em_mov),
F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
/* 0xB0 - 0xB7 */
X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
/* 0xB8 - 0xBF */
X8(I(DstReg | SrcImm64 | Mov, em_mov)),
/* 0xC0 - 0xC7 */
G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm),
I(ImplicitOps | NearBranch, em_ret),
I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
G(ByteOp, group11), G(0, group11),
/* 0xC8 - 0xCF */
I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
I(ImplicitOps | SrcImmU16, em_ret_far_imm),
I(ImplicitOps, em_ret_far),
D(ImplicitOps), DI(SrcImmByte, intn),
D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
/* 0xD0 - 0xD7 */
G(Src2One | ByteOp, group2), G(Src2One, group2),
G(Src2CL | ByteOp, group2), G(Src2CL, group2),
I(DstAcc | SrcImmUByte | No64, em_aam),
I(DstAcc | SrcImmUByte | No64, em_aad),
F(DstAcc | ByteOp | No64, em_salc),
I(DstAcc | SrcXLat | ByteOp, em_mov),
/* 0xD8 - 0xDF */
N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
/* 0xE0 - 0xE7 */
X3(I(SrcImmByte | NearBranch, em_loop)),
I(SrcImmByte | NearBranch, em_jcxz),
I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
/* 0xE8 - 0xEF */
I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps | NearBranch),
I(SrcImmFAddr | No64, em_jmp_far),
D(SrcImmByte | ImplicitOps | NearBranch),
I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
/* 0xF0 - 0xF7 */
N, DI(ImplicitOps, icebp), N, N,
DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
G(ByteOp, group3), G(0, group3),
/* 0xF8 - 0xFF */
D(ImplicitOps), D(ImplicitOps),
I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
};
static const struct opcode twobyte_table[256] = {
/* 0x00 - 0x0F */
G(0, group6), GD(0, &group7), N, N,
N, I(ImplicitOps | EmulateOnUD, em_syscall),
II(ImplicitOps | Priv, em_clts, clts), N,
DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
/* 0x10 - 0x1F */
N, N, N, N, N, N, N, N,
D(ImplicitOps | ModRM | SrcMem | NoAccess),
N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
/* 0x20 - 0x2F */
DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
check_cr_write),
IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
check_dr_write),
N, N, N, N,
GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
N, N, N, N,
/* 0x30 - 0x3F */
II(ImplicitOps | Priv, em_wrmsr, wrmsr),
IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
II(ImplicitOps | Priv, em_rdmsr, rdmsr),
IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
I(ImplicitOps | EmulateOnUD, em_sysenter),
I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
N, N,
N, N, N, N, N, N, N, N,
/* 0x40 - 0x4F */
X16(D(DstReg | SrcMem | ModRM)),
/* 0x50 - 0x5F */
N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
/* 0x60 - 0x6F */
N, N, N, N,
N, N, N, N,
N, N, N, N,
N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
/* 0x70 - 0x7F */
N, N, N, N,
N, N, N, N,
N, N, N, N,
N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
/* 0x80 - 0x8F */
X16(D(SrcImm | NearBranch)),
/* 0x90 - 0x9F */
X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)),
/* 0xA0 - 0xA7 */
I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
II(ImplicitOps, em_cpuid, cpuid),
F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
/* 0xA8 - 0xAF */
I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
II(EmulateOnUD | ImplicitOps, em_rsm, rsm),
F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
/* 0xB0 - 0xB7 */
I2bv(DstMem | SrcReg | ModRM | Lock | PageTable | SrcWrite, em_cmpxchg),
I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
/* 0xB8 - 0xBF */
N, N,
G(BitOp, group8),
F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
I(DstReg | SrcMem | ModRM, em_bsf_c),
I(DstReg | SrcMem | ModRM, em_bsr_c),
D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
/* 0xC0 - 0xC7 */
F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
N, ID(0, &instr_dual_0f_c3),
N, N, N, GD(0, &group9),
/* 0xC8 - 0xCF */
X8(I(DstReg, em_bswap)),
/* 0xD0 - 0xDF */
N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
/* 0xE0 - 0xEF */
N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
N, N, N, N, N, N, N, N,
/* 0xF0 - 0xFF */
N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
};
static const struct instr_dual instr_dual_0f_38_f0 = {
I(DstReg | SrcMem | Mov, em_movbe), N
};
static const struct instr_dual instr_dual_0f_38_f1 = {
I(DstMem | SrcReg | Mov, em_movbe), N
};
static const struct gprefix three_byte_0f_38_f0 = {
ID(0, &instr_dual_0f_38_f0), N, N, N
};
static const struct gprefix three_byte_0f_38_f1 = {
ID(0, &instr_dual_0f_38_f1), N, N, N
};
/*
* Insns below are selected by the prefix; the table itself is indexed
* by the third opcode byte.
*/
static const struct opcode opcode_map_0f_38[256] = {
/* 0x00 - 0x7f */
X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
/* 0x80 - 0xef */
X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
/* 0xf0 - 0xf1 */
GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0),
GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1),
/* 0xf2 - 0xff */
N, N, X4(N), X8(N)
};
#undef D
#undef N
#undef G
#undef GD
#undef I
#undef GP
#undef EXT
#undef MD
#undef ID
#undef D2bv
#undef D2bvIP
#undef I2bv
#undef I2bvIP
#undef I6ALU
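/*
* Size of an immediate operand: byte ops take 1 byte; otherwise the
* operand size, capped at 4 because immediates are sign-extended in
* 64-bit mode. The only full 8-byte immediate (MOV reg, imm64) is
* decoded via OpImm64 instead.
*/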
static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
{
unsigned size;
size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
if (size == 8)
size = 4;
return size;
}
static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
unsigned size, bool sign_extension)
{
int rc = X86EMUL_CONTINUE;
op->type = OP_IMM;
op->bytes = size;
op->addr.mem.ea = ctxt->_eip;
/* NB. Immediates are sign-extended as necessary. */
switch (op->bytes) {
case 1:
op->val = insn_fetch(s8, ctxt);
break;
case 2:
op->val = insn_fetch(s16, ctxt);
break;
case 4:
op->val = insn_fetch(s32, ctxt);
break;
case 8:
op->val = insn_fetch(s64, ctxt);
break;
}
if (!sign_extension) {
switch (op->bytes) {
case 1:
op->val &= 0xff;
break;
case 2:
op->val &= 0xffff;
break;
case 4:
op->val &= 0xffffffff;
break;
}
}
done:
return rc;
}
static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
unsigned d)
{
int rc = X86EMUL_CONTINUE;
switch (d) {
case OpReg:
decode_register_operand(ctxt, op);
break;
case OpImmUByte:
rc = decode_imm(ctxt, op, 1, false);
break;
case OpMem:
ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
mem_common:
*op = ctxt->memop;
ctxt->memopp = op;
if (ctxt->d & BitOp)
fetch_bit_operand(ctxt);
op->orig_val = op->val;
break;
case OpMem64:
ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
goto mem_common;
case OpAcc:
op->type = OP_REG;
op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
fetch_register_operand(op);
op->orig_val = op->val;
break;
case OpAccLo:
op->type = OP_REG;
op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
fetch_register_operand(op);
op->orig_val = op->val;
break;
case OpAccHi:
if (ctxt->d & ByteOp) {
op->type = OP_NONE;
break;
}
op->type = OP_REG;
op->bytes = ctxt->op_bytes;
op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
fetch_register_operand(op);
op->orig_val = op->val;
break;
case OpDI:
op->type = OP_MEM;
op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
op->addr.mem.ea =
register_address(ctxt, VCPU_REGS_RDI);
op->addr.mem.seg = VCPU_SREG_ES;
op->val = 0;
op->count = 1;
break;
case OpDX:
op->type = OP_REG;
op->bytes = 2;
op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
fetch_register_operand(op);
break;
case OpCL:
op->type = OP_IMM;
op->bytes = 1;
op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
break;
case OpImmByte:
rc = decode_imm(ctxt, op, 1, true);
break;
case OpOne:
op->type = OP_IMM;
op->bytes = 1;
op->val = 1;
break;
case OpImm:
rc = decode_imm(ctxt, op, imm_size(ctxt), true);
break;
case OpImm64:
rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
break;
case OpMem8:
ctxt->memop.bytes = 1;
if (ctxt->memop.type == OP_REG) {
ctxt->memop.addr.reg = decode_register(ctxt,
ctxt->modrm_rm, true);
fetch_register_operand(&ctxt->memop);
}
goto mem_common;
case OpMem16:
ctxt->memop.bytes = 2;
goto mem_common;
case OpMem32:
ctxt->memop.bytes = 4;
goto mem_common;
case OpImmU16:
rc = decode_imm(ctxt, op, 2, false);
break;
case OpImmU:
rc = decode_imm(ctxt, op, imm_size(ctxt), false);
break;
case OpSI:
op->type = OP_MEM;
op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
op->addr.mem.ea =
register_address(ctxt, VCPU_REGS_RSI);
op->addr.mem.seg = ctxt->seg_override;
op->val = 0;
op->count = 1;
break;
case OpXLat:
op->type = OP_MEM;
op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
op->addr.mem.ea =
address_mask(ctxt,
reg_read(ctxt, VCPU_REGS_RBX) +
(reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
op->addr.mem.seg = ctxt->seg_override;
op->val = 0;
break;
case OpImmFAddr:
op->type = OP_IMM;
op->addr.mem.ea = ctxt->_eip;
op->bytes = ctxt->op_bytes + 2;
insn_fetch_arr(op->valptr, op->bytes, ctxt);
break;
case OpMemFAddr:
ctxt->memop.bytes = ctxt->op_bytes + 2;
goto mem_common;
case OpES:
op->type = OP_IMM;
op->val = VCPU_SREG_ES;
break;
case OpCS:
op->type = OP_IMM;
op->val = VCPU_SREG_CS;
break;
case OpSS:
op->type = OP_IMM;
op->val = VCPU_SREG_SS;
break;
case OpDS:
op->type = OP_IMM;
op->val = VCPU_SREG_DS;
break;
case OpFS:
op->type = OP_IMM;
op->val = VCPU_SREG_FS;
break;
case OpGS:
op->type = OP_IMM;
op->val = VCPU_SREG_GS;
break;
case OpImplicit:
/* Special instructions do their own operand decoding. */
default:
op->type = OP_NONE; /* Disable writeback. */
break;
}
done:
return rc;
}
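/*
* Decode a single instruction: consume legacy and REX prefixes, fetch
* the opcode byte(s), look up the opcode tables (resolving group,
* dual, prefix and escape indirections), then decode the ModRM/SIB
* bytes and the source/destination operands.
*/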
int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
{
int rc = X86EMUL_CONTINUE;
int mode = ctxt->mode;
int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
bool op_prefix = false;
bool has_seg_override = false;
struct opcode opcode;
ctxt->memop.type = OP_NONE;
ctxt->memopp = NULL;
ctxt->_eip = ctxt->eip;
ctxt->fetch.ptr = ctxt->fetch.data;
ctxt->fetch.end = ctxt->fetch.data + insn_len;
ctxt->opcode_len = 1;
if (insn_len > 0)
memcpy(ctxt->fetch.data, insn, insn_len);
else {
rc = __do_insn_fetch_bytes(ctxt, 1);
if (rc != X86EMUL_CONTINUE)
return rc;
}
switch (mode) {
case X86EMUL_MODE_REAL:
case X86EMUL_MODE_VM86:
case X86EMUL_MODE_PROT16:
def_op_bytes = def_ad_bytes = 2;
break;
case X86EMUL_MODE_PROT32:
def_op_bytes = def_ad_bytes = 4;
break;
#ifdef CONFIG_X86_64
case X86EMUL_MODE_PROT64:
def_op_bytes = 4;
def_ad_bytes = 8;
break;
#endif
default:
return EMULATION_FAILED;
}
ctxt->op_bytes = def_op_bytes;
ctxt->ad_bytes = def_ad_bytes;
/* Legacy prefixes. */
for (;;) {
switch (ctxt->b = insn_fetch(u8, ctxt)) {
case 0x66: /* operand-size override */
op_prefix = true;
/* switch between 2/4 bytes */
ctxt->op_bytes = def_op_bytes ^ 6;
break;
case 0x67: /* address-size override */
if (mode == X86EMUL_MODE_PROT64)
/* switch between 4/8 bytes */
ctxt->ad_bytes = def_ad_bytes ^ 12;
else
/* switch between 2/4 bytes */
ctxt->ad_bytes = def_ad_bytes ^ 6;
break;
case 0x26: /* ES override */
case 0x2e: /* CS override */
case 0x36: /* SS override */
case 0x3e: /* DS override */
has_seg_override = true;
ctxt->seg_override = (ctxt->b >> 3) & 3;
break;
case 0x64: /* FS override */
case 0x65: /* GS override */
has_seg_override = true;
ctxt->seg_override = ctxt->b & 7;
break;
case 0x40 ... 0x4f: /* REX */
if (mode != X86EMUL_MODE_PROT64)
goto done_prefixes;
ctxt->rex_prefix = ctxt->b;
continue;
case 0xf0: /* LOCK */
ctxt->lock_prefix = 1;
break;
case 0xf2: /* REPNE/REPNZ */
case 0xf3: /* REP/REPE/REPZ */
ctxt->rep_prefix = ctxt->b;
break;
default:
goto done_prefixes;
}
/* Any legacy prefix after a REX prefix nullifies its effect. */
ctxt->rex_prefix = 0;
}
done_prefixes:
/* REX prefix. */
if (ctxt->rex_prefix & 8)
ctxt->op_bytes = 8; /* REX.W */
/* Opcode byte(s). */
opcode = opcode_table[ctxt->b];
/* Two-byte opcode? */
if (ctxt->b == 0x0f) {
ctxt->opcode_len = 2;
ctxt->b = insn_fetch(u8, ctxt);
opcode = twobyte_table[ctxt->b];
/* 0F_38 opcode map */
if (ctxt->b == 0x38) {
ctxt->opcode_len = 3;
ctxt->b = insn_fetch(u8, ctxt);
opcode = opcode_map_0f_38[ctxt->b];
}
}
ctxt->d = opcode.flags;
if (ctxt->d & ModRM)
ctxt->modrm = insn_fetch(u8, ctxt);
/* vex-prefix instructions are not implemented */
if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
(mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
ctxt->d = NotImpl;
}
while (ctxt->d & GroupMask) {
switch (ctxt->d & GroupMask) {
case Group:
goffset = (ctxt->modrm >> 3) & 7;
opcode = opcode.u.group[goffset];
break;
case GroupDual:
goffset = (ctxt->modrm >> 3) & 7;
if ((ctxt->modrm >> 6) == 3)
opcode = opcode.u.gdual->mod3[goffset];
else
opcode = opcode.u.gdual->mod012[goffset];
break;
case RMExt:
goffset = ctxt->modrm & 7;
opcode = opcode.u.group[goffset];
break;
case Prefix:
if (ctxt->rep_prefix && op_prefix)
return EMULATION_FAILED;
simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
switch (simd_prefix) {
case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
}
break;
case Escape:
if (ctxt->modrm > 0xbf)
opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
else
opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
break;
case InstrDual:
if ((ctxt->modrm >> 6) == 3)
opcode = opcode.u.idual->mod3;
else
opcode = opcode.u.idual->mod012;
break;
case ModeDual:
if (ctxt->mode == X86EMUL_MODE_PROT64)
opcode = opcode.u.mdual->mode64;
else
opcode = opcode.u.mdual->mode32;
break;
default:
return EMULATION_FAILED;
}
ctxt->d &= ~(u64)GroupMask;
ctxt->d |= opcode.flags;
}
/* Unrecognised? */
if (ctxt->d == 0)
return EMULATION_FAILED;
ctxt->execute = opcode.u.execute;
if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
return EMULATION_FAILED;
if (unlikely(ctxt->d &
(NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
No16))) {
/*
* These are copied unconditionally here, and checked unconditionally
* in x86_emulate_insn.
*/
ctxt->check_perm = opcode.check_perm;
ctxt->intercept = opcode.intercept;
if (ctxt->d & NotImpl)
return EMULATION_FAILED;
if (mode == X86EMUL_MODE_PROT64) {
if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
ctxt->op_bytes = 8;
else if (ctxt->d & NearBranch)
ctxt->op_bytes = 8;
}
if (ctxt->d & Op3264) {
if (mode == X86EMUL_MODE_PROT64)
ctxt->op_bytes = 8;
else
ctxt->op_bytes = 4;
}
if ((ctxt->d & No16) && ctxt->op_bytes == 2)
ctxt->op_bytes = 4;
if (ctxt->d & Sse)
ctxt->op_bytes = 16;
else if (ctxt->d & Mmx)
ctxt->op_bytes = 8;
}
/* ModRM and SIB bytes. */
if (ctxt->d & ModRM) {
rc = decode_modrm(ctxt, &ctxt->memop);
if (!has_seg_override) {
has_seg_override = true;
ctxt->seg_override = ctxt->modrm_seg;
}
} else if (ctxt->d & MemAbs)
rc = decode_abs(ctxt, &ctxt->memop);
if (rc != X86EMUL_CONTINUE)
goto done;
if (!has_seg_override)
ctxt->seg_override = VCPU_SREG_DS;
ctxt->memop.addr.mem.seg = ctxt->seg_override;
/*
* Decode and fetch the source operand: register, memory
* or immediate.
*/
rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
if (rc != X86EMUL_CONTINUE)
goto done;
/*
* Decode and fetch the second source operand: register, memory
* or immediate.
*/
rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
if (rc != X86EMUL_CONTINUE)
goto done;
/* Decode and fetch the destination operand: register or memory. */
rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
if (ctxt->rip_relative && likely(ctxt->memopp))
ctxt->memopp->addr.mem.ea = address_mask(ctxt,
ctxt->memopp->addr.mem.ea + ctxt->_eip);
done:
return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
}
bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
{
return ctxt->d & PageTable;
}
static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
{
/* The second termination condition only applies to REPE
* and REPNE. Test whether the repeat string operation prefix is
* REPE/REPZ or REPNE/REPNZ and, if so, check the corresponding
* termination condition:
* - if REPE/REPZ and ZF = 0 then done
* - if REPNE/REPNZ and ZF = 1 then done
*/
if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
(ctxt->b == 0xae) || (ctxt->b == 0xaf))
&& (((ctxt->rep_prefix == REPE_PREFIX) &&
((ctxt->eflags & X86_EFLAGS_ZF) == 0))
|| ((ctxt->rep_prefix == REPNE_PREFIX) &&
((ctxt->eflags & X86_EFLAGS_ZF) == X86_EFLAGS_ZF))))
return true;
return false;
}
static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
{
int rc;
ctxt->ops->get_fpu(ctxt);
rc = asm_safe("fwait");
ctxt->ops->put_fpu(ctxt);
if (unlikely(rc != X86EMUL_CONTINUE))
return emulate_exception(ctxt, MF_VECTOR, 0, false);
return X86EMUL_CONTINUE;
}
static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
struct operand *op)
{
if (op->type == OP_MM)
read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
}
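/*
* Dispatch to a fastop stub: the stubs are laid out as a table of
* FASTOP_SIZE-byte entries indexed by log2 of the operand size, the
* operands are passed in rAX/rDX/rCX, and EFLAGS is transferred in
* and out around the call via push/popf. A NULL fop on return signals
* a divide error.
*/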
static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
{
register void *__sp asm(_ASM_SP);
ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
if (!(ctxt->d & ByteOp))
fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n"
: "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
[fastop]"+S"(fop), "+r"(__sp)
: "c"(ctxt->src2.val));
ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
if (!fop) /* exception is returned in fop variable */
return emulate_de(ctxt);
return X86EMUL_CONTINUE;
}
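/*
* Reset the per-instruction decode state with a single memset; this
* relies on the fields from ->rip_relative up to (but not including)
* ->modrm being laid out contiguously in struct x86_emulate_ctxt.
*/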
void init_decode_cache(struct x86_emulate_ctxt *ctxt)
{
memset(&ctxt->rip_relative, 0,
(void *)&ctxt->modrm - (void *)&ctxt->rip_relative);
ctxt->io_read.pos = 0;
ctxt->io_read.end = 0;
ctxt->mem_read.end = 0;
}
int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
{
const struct x86_emulate_ops *ops = ctxt->ops;
int rc = X86EMUL_CONTINUE;
int saved_dst_type = ctxt->dst.type;
ctxt->mem_read.pos = 0;
/* LOCK prefix is allowed only with some instructions */
if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
rc = emulate_ud(ctxt);
goto done;
}
if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
rc = emulate_ud(ctxt);
goto done;
}
if (unlikely(ctxt->d &
(No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
(ctxt->d & Undefined)) {
rc = emulate_ud(ctxt);
goto done;
}
if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
|| ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
rc = emulate_ud(ctxt);
goto done;
}
if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
rc = emulate_nm(ctxt);
goto done;
}
if (ctxt->d & Mmx) {
rc = flush_pending_x87_faults(ctxt);
if (rc != X86EMUL_CONTINUE)
goto done;
/*
* Now that we know the fpu is exception safe, we can fetch
* operands from it.
*/
fetch_possible_mmx_operand(ctxt, &ctxt->src);
fetch_possible_mmx_operand(ctxt, &ctxt->src2);
if (!(ctxt->d & Mov))
fetch_possible_mmx_operand(ctxt, &ctxt->dst);
}
if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) {
rc = emulator_check_intercept(ctxt, ctxt->intercept,
X86_ICPT_PRE_EXCEPT);
if (rc != X86EMUL_CONTINUE)
goto done;
}
/* Instruction can only be executed in protected mode */
if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
rc = emulate_ud(ctxt);
goto done;
}
/* Privileged instruction can be executed only in CPL=0 */
if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
if (ctxt->d & PrivUD)
rc = emulate_ud(ctxt);
else
rc = emulate_gp(ctxt, 0);
goto done;
}
/* Do instruction specific permission checks */
if (ctxt->d & CheckPerm) {
rc = ctxt->check_perm(ctxt);
if (rc != X86EMUL_CONTINUE)
goto done;
}
if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
rc = emulator_check_intercept(ctxt, ctxt->intercept,
X86_ICPT_POST_EXCEPT);
if (rc != X86EMUL_CONTINUE)
goto done;
}
if (ctxt->rep_prefix && (ctxt->d & String)) {
/* All REP prefixes have the same first termination condition */
if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
string_registers_quirk(ctxt);
ctxt->eip = ctxt->_eip;
ctxt->eflags &= ~X86_EFLAGS_RF;
goto done;
}
}
}
if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
rc = segmented_read(ctxt, ctxt->src.addr.mem,
ctxt->src.valptr, ctxt->src.bytes);
if (rc != X86EMUL_CONTINUE)
goto done;
ctxt->src.orig_val64 = ctxt->src.val64;
}
if (ctxt->src2.type == OP_MEM) {
rc = segmented_read(ctxt, ctxt->src2.addr.mem,
&ctxt->src2.val, ctxt->src2.bytes);
if (rc != X86EMUL_CONTINUE)
goto done;
}
if ((ctxt->d & DstMask) == ImplicitOps)
goto special_insn;
if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
/* optimisation - avoid slow emulated read if Mov */
rc = segmented_read(ctxt, ctxt->dst.addr.mem,
&ctxt->dst.val, ctxt->dst.bytes);
if (rc != X86EMUL_CONTINUE) {
if (!(ctxt->d & NoWrite) &&
rc == X86EMUL_PROPAGATE_FAULT &&
ctxt->exception.vector == PF_VECTOR)
ctxt->exception.error_code |= PFERR_WRITE_MASK;
goto done;
}
}
/* Copy full 64-bit value for CMPXCHG8B. */
ctxt->dst.orig_val64 = ctxt->dst.val64;
special_insn:
if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
rc = emulator_check_intercept(ctxt, ctxt->intercept,
X86_ICPT_POST_MEMACCESS);
if (rc != X86EMUL_CONTINUE)
goto done;
}
if (ctxt->rep_prefix && (ctxt->d & String))
ctxt->eflags |= X86_EFLAGS_RF;
else
ctxt->eflags &= ~X86_EFLAGS_RF;
if (ctxt->execute) {
if (ctxt->d & Fastop) {
void (*fop)(struct fastop *) = (void *)ctxt->execute;
rc = fastop(ctxt, fop);
if (rc != X86EMUL_CONTINUE)
goto done;
goto writeback;
}
rc = ctxt->execute(ctxt);
if (rc != X86EMUL_CONTINUE)
goto done;
goto writeback;
}
if (ctxt->opcode_len == 2)
goto twobyte_insn;
else if (ctxt->opcode_len == 3)
goto threebyte_insn;
switch (ctxt->b) {
case 0x70 ... 0x7f: /* jcc (short) */
if (test_cc(ctxt->b, ctxt->eflags))
rc = jmp_rel(ctxt, ctxt->src.val);
break;
case 0x8d: /* lea r16/r32, m */
ctxt->dst.val = ctxt->src.addr.mem.ea;
break;
case 0x90 ... 0x97: /* nop / xchg reg, rax */
if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
ctxt->dst.type = OP_NONE;
else
rc = em_xchg(ctxt);
break;
case 0x98: /* cbw/cwde/cdqe */
switch (ctxt->op_bytes) {
case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
}
break;
case 0xcc: /* int3 */
rc = emulate_int(ctxt, 3);
break;
case 0xcd: /* int n */
rc = emulate_int(ctxt, ctxt->src.val);
break;
case 0xce: /* into */
if (ctxt->eflags & X86_EFLAGS_OF)
rc = emulate_int(ctxt, 4);
break;
case 0xe9: /* jmp rel */
case 0xeb: /* jmp rel short */
rc = jmp_rel(ctxt, ctxt->src.val);
ctxt->dst.type = OP_NONE; /* Disable writeback. */
break;
case 0xf4: /* hlt */
ctxt->ops->halt(ctxt);
break;
case 0xf5: /* cmc */
/* complement carry flag from eflags reg */
ctxt->eflags ^= X86_EFLAGS_CF;
break;
case 0xf8: /* clc */
ctxt->eflags &= ~X86_EFLAGS_CF;
break;
case 0xf9: /* stc */
ctxt->eflags |= X86_EFLAGS_CF;
break;
case 0xfc: /* cld */
ctxt->eflags &= ~X86_EFLAGS_DF;
break;
case 0xfd: /* std */
ctxt->eflags |= X86_EFLAGS_DF;
break;
default:
goto cannot_emulate;
}
if (rc != X86EMUL_CONTINUE)
goto done;
writeback:
if (ctxt->d & SrcWrite) {
BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
rc = writeback(ctxt, &ctxt->src);
if (rc != X86EMUL_CONTINUE)
goto done;
}
if (!(ctxt->d & NoWrite)) {
rc = writeback(ctxt, &ctxt->dst);
if (rc != X86EMUL_CONTINUE)
goto done;
}
/*
* restore dst type in case the decoding will be reused
* (happens for string instructions)
*/
ctxt->dst.type = saved_dst_type;
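/* advance RSI/RDI for string operands; the helper honors EFLAGS.DF */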
if ((ctxt->d & SrcMask) == SrcSI)
string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);
if ((ctxt->d & DstMask) == DstDI)
string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);
if (ctxt->rep_prefix && (ctxt->d & String)) {
unsigned int count;
struct read_cache *r = &ctxt->io_read;
if ((ctxt->d & SrcMask) == SrcSI)
count = ctxt->src.count;
else
count = ctxt->dst.count;
register_address_increment(ctxt, VCPU_REGS_RCX, -count);
if (!string_insn_completed(ctxt)) {
/*
* Re-enter guest when pio read ahead buffer is empty
* or, if it is not used, after every 1024 iterations.
*/
if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
(r->end == 0 || r->end != r->pos)) {
/*
* Reset read cache. Usually happens before
* decode, but since the instruction is restarted
* we have to do it here.
*/
ctxt->mem_read.end = 0;
writeback_registers(ctxt);
return EMULATION_RESTART;
}
goto done; /* skip rip writeback */
}
ctxt->eflags &= ~X86_EFLAGS_RF;
}
ctxt->eip = ctxt->_eip;
done:
if (rc == X86EMUL_PROPAGATE_FAULT) {
WARN_ON(ctxt->exception.vector > 0x1f);
ctxt->have_exception = true;
}
if (rc == X86EMUL_INTERCEPTED)
return EMULATION_INTERCEPTED;
if (rc == X86EMUL_CONTINUE)
writeback_registers(ctxt);
return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
twobyte_insn:
switch (ctxt->b) {
case 0x09: /* wbinvd */
(ctxt->ops->wbinvd)(ctxt);
break;
case 0x08: /* invd */
case 0x0d: /* GrpP (prefetch) */
case 0x18: /* Grp16 (prefetch/nop) */
case 0x1f: /* nop */
break;
case 0x20: /* mov cr, reg */
ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
break;
case 0x21: /* mov from dr to reg */
ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
break;
case 0x40 ... 0x4f: /* cmov */
if (test_cc(ctxt->b, ctxt->eflags))
ctxt->dst.val = ctxt->src.val;
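/*
* In long mode a 32-bit cmov zero-extends the destination even when
* the condition is false, so keep the writeback for 4-byte operands.
*/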
else if (ctxt->op_bytes != 4)
ctxt->dst.type = OP_NONE; /* no writeback */
break;
case 0x80 ... 0x8f: /* jnz rel, etc*/
if (test_cc(ctxt->b, ctxt->eflags))
rc = jmp_rel(ctxt, ctxt->src.val);
break;
case 0x90 ... 0x9f: /* setcc r/m8 */
ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
break;
case 0xb6 ... 0xb7: /* movzx */
ctxt->dst.bytes = ctxt->op_bytes;
ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
: (u16) ctxt->src.val;
break;
case 0xbe ... 0xbf: /* movsx */
ctxt->dst.bytes = ctxt->op_bytes;
ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
(s16) ctxt->src.val;
break;
default:
goto cannot_emulate;
}
threebyte_insn:
if (rc != X86EMUL_CONTINUE)
goto done;
goto writeback;
cannot_emulate:
return EMULATION_FAILED;
}
void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
{
invalidate_registers(ctxt);
}
void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
{
writeback_registers(ctxt);
}
| ./CrossVul/dataset_final_sorted/CWE-416/c/bad_3045_0 |
crossvul-cpp_data_good_2908_0 | /*
* (Tentative) USB Audio Driver for ALSA
*
* Mixer control part
*
* Copyright (c) 2002 by Takashi Iwai <tiwai@suse.de>
*
* Many codes borrowed from audio.c by
* Alan Cox (alan@lxorguk.ukuu.org.uk)
* Thomas Sailer (sailer@ife.ee.ethz.ch)
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
/*
* TODOs, for both the mixer and the streaming interfaces:
*
* - support for UAC2 effect units
* - support for graphical equalizers
* - RANGE and MEM set commands (UAC2)
* - RANGE and MEM interrupt dispatchers (UAC2)
* - audio channel clustering (UAC2)
* - audio sample rate converter units (UAC2)
* - proper handling of clock multipliers (UAC2)
* - dispatch clock change notifications (UAC2)
* - stop PCM streams which use a clock that became invalid
* - stop PCM streams which use a clock selector that has changed
* - parse available sample rates again when clock sources changed
*/
#include <linux/bitops.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/usb.h>
#include <linux/usb/audio.h>
#include <linux/usb/audio-v2.h>
#include <sound/core.h>
#include <sound/control.h>
#include <sound/hwdep.h>
#include <sound/info.h>
#include <sound/tlv.h>
#include "usbaudio.h"
#include "mixer.h"
#include "helper.h"
#include "mixer_quirks.h"
#include "power.h"
#define MAX_ID_ELEMS 256
struct usb_audio_term {
int id;
int type;
int channels;
unsigned int chconfig;
int name;
};
struct usbmix_name_map;
struct mixer_build {
struct snd_usb_audio *chip;
struct usb_mixer_interface *mixer;
unsigned char *buffer;
unsigned int buflen;
DECLARE_BITMAP(unitbitmap, MAX_ID_ELEMS);
struct usb_audio_term oterm;
const struct usbmix_name_map *map;
const struct usbmix_selector_map *selector_map;
};
/*E-mu 0202/0404/0204 eXtension Unit(XU) control*/
enum {
USB_XU_CLOCK_RATE = 0xe301,
USB_XU_CLOCK_SOURCE = 0xe302,
USB_XU_DIGITAL_IO_STATUS = 0xe303,
USB_XU_DEVICE_OPTIONS = 0xe304,
USB_XU_DIRECT_MONITORING = 0xe305,
USB_XU_METERING = 0xe306
};
enum {
USB_XU_CLOCK_SOURCE_SELECTOR = 0x02, /* clock source*/
USB_XU_CLOCK_RATE_SELECTOR = 0x03, /* clock rate */
USB_XU_DIGITAL_FORMAT_SELECTOR = 0x01, /* the spdif format */
USB_XU_SOFT_LIMIT_SELECTOR = 0x03 /* soft limiter */
};
/*
* manual mapping of mixer names
* if the mixer topology is too complicated and the parsed names are
* ambiguous, add the entries in mixer_maps.c.
*/
#include "mixer_maps.c"
static const struct usbmix_name_map *
find_map(struct mixer_build *state, int unitid, int control)
{
const struct usbmix_name_map *p = state->map;
if (!p)
return NULL;
for (p = state->map; p->id; p++) {
if (p->id == unitid &&
(!control || !p->control || control == p->control))
return p;
}
return NULL;
}
/* get the mapped name if the unit matches */
static int
check_mapped_name(const struct usbmix_name_map *p, char *buf, int buflen)
{
if (!p || !p->name)
return 0;
buflen--;
return strlcpy(buf, p->name, buflen);
}
/* ignore the error value if ignore_ctl_error flag is set */
#define filter_error(cval, err) \
((cval)->head.mixer->ignore_ctl_error ? 0 : (err))
/* check whether the control should be ignored */
static inline int
check_ignored_ctl(const struct usbmix_name_map *p)
{
if (!p || p->name || p->dB)
return 0;
return 1;
}
/* dB mapping */
static inline void check_mapped_dB(const struct usbmix_name_map *p,
struct usb_mixer_elem_info *cval)
{
if (p && p->dB) {
cval->dBmin = p->dB->min;
cval->dBmax = p->dB->max;
cval->initialized = 1;
}
}
/* get the mapped selector source name */
static int check_mapped_selector_name(struct mixer_build *state, int unitid,
int index, char *buf, int buflen)
{
const struct usbmix_selector_map *p;
if (!state->selector_map)
return 0;
for (p = state->selector_map; p->id; p++) {
if (p->id == unitid && index < p->count)
return strlcpy(buf, p->names[index], buflen);
}
return 0;
}
/*
* find an audio control unit with the given unit id
*/
static void *find_audio_control_unit(struct mixer_build *state,
unsigned char unit)
{
/* we just parse the header */
struct uac_feature_unit_descriptor *hdr = NULL;
while ((hdr = snd_usb_find_desc(state->buffer, state->buflen, hdr,
USB_DT_CS_INTERFACE)) != NULL) {
if (hdr->bLength >= 4 &&
hdr->bDescriptorSubtype >= UAC_INPUT_TERMINAL &&
hdr->bDescriptorSubtype <= UAC2_SAMPLE_RATE_CONVERTER &&
hdr->bUnitID == unit)
return hdr;
}
return NULL;
}
/*
* copy a string with the given id
*/
static int snd_usb_copy_string_desc(struct mixer_build *state,
int index, char *buf, int maxlen)
{
int len = usb_string(state->chip->dev, index, buf, maxlen - 1);
if (len < 0) /* usb_string() failed; don't index with a negative length */
return 0;
buf[len] = 0;
return len;
}
/*
* convert a byte/word value from the USB wire format to a signed integer
*/
static int convert_signed_value(struct usb_mixer_elem_info *cval, int val)
{
switch (cval->val_type) {
case USB_MIXER_BOOLEAN:
return !!val;
case USB_MIXER_INV_BOOLEAN:
return !val;
case USB_MIXER_U8:
val &= 0xff;
break;
case USB_MIXER_S8:
val &= 0xff;
if (val >= 0x80)
val -= 0x100;
break;
case USB_MIXER_U16:
val &= 0xffff;
break;
case USB_MIXER_S16:
val &= 0xffff;
if (val >= 0x8000)
val -= 0x10000;
break;
}
return val;
}
/*
* convert a signed integer back to the byte/word USB wire format
*/
static int convert_bytes_value(struct usb_mixer_elem_info *cval, int val)
{
switch (cval->val_type) {
case USB_MIXER_BOOLEAN:
return !!val;
case USB_MIXER_INV_BOOLEAN:
return !val;
case USB_MIXER_S8:
case USB_MIXER_U8:
return val & 0xff;
case USB_MIXER_S16:
case USB_MIXER_U16:
return val & 0xffff;
}
return 0; /* not reached */
}
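/*
* get_relative_value()/get_abs_value() translate between the raw device
* value and the zero-based control value exposed to ALSA, in steps of
* cval->res.
*/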
static int get_relative_value(struct usb_mixer_elem_info *cval, int val)
{
if (!cval->res)
cval->res = 1;
if (val < cval->min)
return 0;
else if (val >= cval->max)
return (cval->max - cval->min + cval->res - 1) / cval->res;
else
return (val - cval->min) / cval->res;
}
static int get_abs_value(struct usb_mixer_elem_info *cval, int val)
{
if (val < 0)
return cval->min;
if (!cval->res)
cval->res = 1;
val *= cval->res;
val += cval->min;
if (val > cval->max)
return cval->max;
return val;
}
static int uac2_ctl_value_size(int val_type)
{
switch (val_type) {
case USB_MIXER_S32:
case USB_MIXER_U32:
return 4;
case USB_MIXER_S16:
case USB_MIXER_U16:
return 2;
default:
return 1;
}
return 0; /* unreachable */
}
/*
* retrieve a mixer value
*/
static int get_ctl_value_v1(struct usb_mixer_elem_info *cval, int request,
int validx, int *value_ret)
{
struct snd_usb_audio *chip = cval->head.mixer->chip;
unsigned char buf[2];
int val_len = cval->val_type >= USB_MIXER_S16 ? 2 : 1;
int timeout = 10;
int idx = 0, err;
err = snd_usb_lock_shutdown(chip);
if (err < 0)
return -EIO;
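/* retry the class request up to 10 times, bailing out early on a timeout */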
while (timeout-- > 0) {
idx = snd_usb_ctrl_intf(chip) | (cval->head.id << 8);
err = snd_usb_ctl_msg(chip->dev, usb_rcvctrlpipe(chip->dev, 0), request,
USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_IN,
validx, idx, buf, val_len);
if (err >= val_len) {
*value_ret = convert_signed_value(cval, snd_usb_combine_bytes(buf, val_len));
err = 0;
goto out;
} else if (err == -ETIMEDOUT) {
goto out;
}
}
usb_audio_dbg(chip,
"cannot get ctl value: req = %#x, wValue = %#x, wIndex = %#x, type = %d\n",
request, validx, idx, cval->val_type);
err = -EINVAL;
out:
snd_usb_unlock_shutdown(chip);
return err;
}
static int get_ctl_value_v2(struct usb_mixer_elem_info *cval, int request,
int validx, int *value_ret)
{
struct snd_usb_audio *chip = cval->head.mixer->chip;
unsigned char buf[4 + 3 * sizeof(__u32)]; /* enough space for one range */
unsigned char *val;
int idx = 0, ret, size;
__u8 bRequest;
if (request == UAC_GET_CUR) {
bRequest = UAC2_CS_CUR;
size = uac2_ctl_value_size(cval->val_type);
} else {
bRequest = UAC2_CS_RANGE;
size = sizeof(buf);
}
memset(buf, 0, sizeof(buf));
ret = snd_usb_lock_shutdown(chip) ? -EIO : 0;
if (ret)
goto error;
idx = snd_usb_ctrl_intf(chip) | (cval->head.id << 8);
ret = snd_usb_ctl_msg(chip->dev, usb_rcvctrlpipe(chip->dev, 0), bRequest,
USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_IN,
validx, idx, buf, size);
snd_usb_unlock_shutdown(chip);
if (ret < 0) {
error:
usb_audio_err(chip,
"cannot get ctl value: req = %#x, wValue = %#x, wIndex = %#x, type = %d\n",
request, validx, idx, cval->val_type);
return ret;
}
/* FIXME: how should we handle multiple triplets here? */
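/* a RANGE reply is wNumSubRanges followed by (MIN, MAX, RES) triplets;
* the offsets below assume 16-bit fields and use only the first triplet */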
switch (request) {
case UAC_GET_CUR:
val = buf;
break;
case UAC_GET_MIN:
val = buf + sizeof(__u16);
break;
case UAC_GET_MAX:
val = buf + sizeof(__u16) * 2;
break;
case UAC_GET_RES:
val = buf + sizeof(__u16) * 3;
break;
default:
return -EINVAL;
}
*value_ret = convert_signed_value(cval, snd_usb_combine_bytes(val, sizeof(__u16)));
return 0;
}
static int get_ctl_value(struct usb_mixer_elem_info *cval, int request,
int validx, int *value_ret)
{
validx += cval->idx_off;
return (cval->head.mixer->protocol == UAC_VERSION_1) ?
get_ctl_value_v1(cval, request, validx, value_ret) :
get_ctl_value_v2(cval, request, validx, value_ret);
}
static int get_cur_ctl_value(struct usb_mixer_elem_info *cval,
int validx, int *value)
{
return get_ctl_value(cval, UAC_GET_CUR, validx, value);
}
/* channel = 0: master, 1 = first channel */
static inline int get_cur_mix_raw(struct usb_mixer_elem_info *cval,
int channel, int *value)
{
return get_ctl_value(cval, UAC_GET_CUR,
(cval->control << 8) | channel,
value);
}
int snd_usb_get_cur_mix_value(struct usb_mixer_elem_info *cval,
int channel, int index, int *value)
{
int err;
if (cval->cached & (1 << channel)) {
*value = cval->cache_val[index];
return 0;
}
err = get_cur_mix_raw(cval, channel, value);
if (err < 0) {
if (!cval->head.mixer->ignore_ctl_error)
usb_audio_dbg(cval->head.mixer->chip,
"cannot get current value for control %d ch %d: err = %d\n",
cval->control, channel, err);
return err;
}
cval->cached |= 1 << channel;
cval->cache_val[index] = *value;
return 0;
}
/*
* set a mixer value
*/
int snd_usb_mixer_set_ctl_value(struct usb_mixer_elem_info *cval,
int request, int validx, int value_set)
{
struct snd_usb_audio *chip = cval->head.mixer->chip;
unsigned char buf[4];
int idx = 0, val_len, err, timeout = 10;
validx += cval->idx_off;
if (cval->head.mixer->protocol == UAC_VERSION_1) {
val_len = cval->val_type >= USB_MIXER_S16 ? 2 : 1;
} else { /* UAC_VERSION_2 */
val_len = uac2_ctl_value_size(cval->val_type);
/* FIXME */
if (request != UAC_SET_CUR) {
usb_audio_dbg(chip, "RANGE setting not yet supported\n");
return -EINVAL;
}
request = UAC2_CS_CUR;
}
value_set = convert_bytes_value(cval, value_set);
buf[0] = value_set & 0xff;
buf[1] = (value_set >> 8) & 0xff;
buf[2] = (value_set >> 16) & 0xff;
buf[3] = (value_set >> 24) & 0xff;
err = snd_usb_lock_shutdown(chip);
if (err < 0)
return -EIO;
while (timeout-- > 0) {
idx = snd_usb_ctrl_intf(chip) | (cval->head.id << 8);
err = snd_usb_ctl_msg(chip->dev,
usb_sndctrlpipe(chip->dev, 0), request,
USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_OUT,
validx, idx, buf, val_len);
if (err >= 0) {
err = 0;
goto out;
} else if (err == -ETIMEDOUT) {
goto out;
}
}
usb_audio_dbg(chip, "cannot set ctl value: req = %#x, wValue = %#x, wIndex = %#x, type = %d, data = %#x/%#x\n",
request, validx, idx, cval->val_type, buf[0], buf[1]);
err = -EINVAL;
out:
snd_usb_unlock_shutdown(chip);
return err;
}
static int set_cur_ctl_value(struct usb_mixer_elem_info *cval,
int validx, int value)
{
return snd_usb_mixer_set_ctl_value(cval, UAC_SET_CUR, validx, value);
}
int snd_usb_set_cur_mix_value(struct usb_mixer_elem_info *cval, int channel,
int index, int value)
{
int err;
unsigned int read_only = (channel == 0) ?
cval->master_readonly :
cval->ch_readonly & (1 << (channel - 1));
if (read_only) {
usb_audio_dbg(cval->head.mixer->chip,
"%s(): channel %d of control %d is read_only\n",
__func__, channel, cval->control);
return 0;
}
err = snd_usb_mixer_set_ctl_value(cval,
UAC_SET_CUR, (cval->control << 8) | channel,
value);
if (err < 0)
return err;
cval->cached |= 1 << channel;
cval->cache_val[index] = value;
return 0;
}
/*
* TLV callback for mixer volume controls
*/
int snd_usb_mixer_vol_tlv(struct snd_kcontrol *kcontrol, int op_flag,
unsigned int size, unsigned int __user *_tlv)
{
struct usb_mixer_elem_info *cval = kcontrol->private_data;
DECLARE_TLV_DB_MINMAX(scale, 0, 0);
if (size < sizeof(scale))
return -ENOMEM;
if (cval->min_mute)
scale[0] = SNDRV_CTL_TLVT_DB_MINMAX_MUTE;
scale[2] = cval->dBmin;
scale[3] = cval->dBmax;
if (copy_to_user(_tlv, scale, sizeof(scale)))
return -EFAULT;
return 0;
}
/*
* parser routines begin here...
*/
static int parse_audio_unit(struct mixer_build *state, int unitid);
/*
* check if the input/output channel routing is enabled on the given bitmap.
* used for mixer unit parser
*/
static int check_matrix_bitmap(unsigned char *bmap,
int ich, int och, int num_outs)
{
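/* one bit per (input, output) channel pair, packed MSB first */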
int idx = ich * num_outs + och;
return bmap[idx >> 3] & (0x80 >> (idx & 7));
}
/*
* add an alsa control element
* search and increment the index until an empty slot is found.
*
* if that fails, give up and free the control instance.
*/
int snd_usb_mixer_add_control(struct usb_mixer_elem_list *list,
struct snd_kcontrol *kctl)
{
struct usb_mixer_interface *mixer = list->mixer;
int err;
while (snd_ctl_find_id(mixer->chip->card, &kctl->id))
kctl->id.index++;
if ((err = snd_ctl_add(mixer->chip->card, kctl)) < 0) {
usb_audio_dbg(mixer->chip, "cannot add control (err = %d)\n",
err);
return err;
}
list->kctl = kctl;
list->next_id_elem = mixer->id_elems[list->id];
mixer->id_elems[list->id] = list;
return 0;
}
/*
* get a terminal name string
*/
static struct iterm_name_combo {
int type;
char *name;
} iterm_names[] = {
{ 0x0300, "Output" },
{ 0x0301, "Speaker" },
{ 0x0302, "Headphone" },
{ 0x0303, "HMD Audio" },
{ 0x0304, "Desktop Speaker" },
{ 0x0305, "Room Speaker" },
{ 0x0306, "Com Speaker" },
{ 0x0307, "LFE" },
{ 0x0600, "External In" },
{ 0x0601, "Analog In" },
{ 0x0602, "Digital In" },
{ 0x0603, "Line" },
{ 0x0604, "Legacy In" },
{ 0x0605, "IEC958 In" },
{ 0x0606, "1394 DA Stream" },
{ 0x0607, "1394 DV Stream" },
{ 0x0700, "Embedded" },
{ 0x0701, "Noise Source" },
{ 0x0702, "Equalization Noise" },
{ 0x0703, "CD" },
{ 0x0704, "DAT" },
{ 0x0705, "DCC" },
{ 0x0706, "MiniDisk" },
{ 0x0707, "Analog Tape" },
{ 0x0708, "Phonograph" },
{ 0x0709, "VCR Audio" },
{ 0x070a, "Video Disk Audio" },
{ 0x070b, "DVD Audio" },
{ 0x070c, "TV Tuner Audio" },
{ 0x070d, "Satellite Rec Audio" },
{ 0x070e, "Cable Tuner Audio" },
{ 0x070f, "DSS Audio" },
{ 0x0710, "Radio Receiver" },
{ 0x0711, "Radio Transmitter" },
{ 0x0712, "Multi-Track Recorder" },
{ 0x0713, "Synthesizer" },
{ 0 },
};
static int get_term_name(struct mixer_build *state, struct usb_audio_term *iterm,
unsigned char *name, int maxlen, int term_only)
{
struct iterm_name_combo *names;
if (iterm->name)
return snd_usb_copy_string_desc(state, iterm->name,
name, maxlen);
/* virtual type - not a real terminal */
if (iterm->type >> 16) {
if (term_only)
return 0;
switch (iterm->type >> 16) {
case UAC_SELECTOR_UNIT:
strcpy(name, "Selector");
return 8;
case UAC1_PROCESSING_UNIT:
strcpy(name, "Process Unit");
return 12;
case UAC1_EXTENSION_UNIT:
strcpy(name, "Ext Unit");
return 8;
case UAC_MIXER_UNIT:
strcpy(name, "Mixer");
return 5;
default:
return sprintf(name, "Unit %d", iterm->id);
}
}
switch (iterm->type & 0xff00) {
case 0x0100:
strcpy(name, "PCM");
return 3;
case 0x0200:
strcpy(name, "Mic");
return 3;
case 0x0400:
strcpy(name, "Headset");
return 7;
case 0x0500:
strcpy(name, "Phone");
return 5;
}
for (names = iterm_names; names->type; names++) {
if (names->type == iterm->type) {
strcpy(name, names->name);
return strlen(names->name);
}
}
return 0;
}
/*
* parse the source unit recursively until it reaches a terminal
* or a branched unit.
*/
static int check_input_term(struct mixer_build *state, int id,
struct usb_audio_term *term)
{
int err;
void *p1;
memset(term, 0, sizeof(*term));
while ((p1 = find_audio_control_unit(state, id)) != NULL) {
unsigned char *hdr = p1;
term->id = id;
switch (hdr[2]) {
case UAC_INPUT_TERMINAL:
if (state->mixer->protocol == UAC_VERSION_1) {
struct uac_input_terminal_descriptor *d = p1;
term->type = le16_to_cpu(d->wTerminalType);
term->channels = d->bNrChannels;
term->chconfig = le16_to_cpu(d->wChannelConfig);
term->name = d->iTerminal;
} else { /* UAC_VERSION_2 */
struct uac2_input_terminal_descriptor *d = p1;
/* call recursively to verify that the
* referenced clock entity is valid */
err = check_input_term(state, d->bCSourceID, term);
if (err < 0)
return err;
/* save input term properties after recursion,
* to ensure they are not overridden by the
* recursive calls */
term->id = id;
term->type = le16_to_cpu(d->wTerminalType);
term->channels = d->bNrChannels;
term->chconfig = le32_to_cpu(d->bmChannelConfig);
term->name = d->iTerminal;
}
return 0;
case UAC_FEATURE_UNIT: {
/* the header is the same for v1 and v2 */
struct uac_feature_unit_descriptor *d = p1;
id = d->bSourceID;
break; /* continue to parse */
}
case UAC_MIXER_UNIT: {
struct uac_mixer_unit_descriptor *d = p1;
term->type = d->bDescriptorSubtype << 16; /* virtual type */
term->channels = uac_mixer_unit_bNrChannels(d);
term->chconfig = uac_mixer_unit_wChannelConfig(d, state->mixer->protocol);
term->name = uac_mixer_unit_iMixer(d);
return 0;
}
case UAC_SELECTOR_UNIT:
case UAC2_CLOCK_SELECTOR: {
struct uac_selector_unit_descriptor *d = p1;
/* call recursively to retrieve the channel info */
err = check_input_term(state, d->baSourceID[0], term);
if (err < 0)
return err;
term->type = d->bDescriptorSubtype << 16; /* virtual type */
term->id = id;
term->name = uac_selector_unit_iSelector(d);
return 0;
}
case UAC1_PROCESSING_UNIT:
case UAC1_EXTENSION_UNIT:
/* UAC2_PROCESSING_UNIT_V2 */
/* UAC2_EFFECT_UNIT */
case UAC2_EXTENSION_UNIT_V2: {
struct uac_processing_unit_descriptor *d = p1;
if (state->mixer->protocol == UAC_VERSION_2 &&
hdr[2] == UAC2_EFFECT_UNIT) {
/* UAC2/UAC1 unit IDs overlap here in an
* incompatible way. Ignore this unit for now.
*/
return 0;
}
if (d->bNrInPins) {
id = d->baSourceID[0];
break; /* continue to parse */
}
term->type = d->bDescriptorSubtype << 16; /* virtual type */
term->channels = uac_processing_unit_bNrChannels(d);
term->chconfig = uac_processing_unit_wChannelConfig(d, state->mixer->protocol);
term->name = uac_processing_unit_iProcessing(d, state->mixer->protocol);
return 0;
}
case UAC2_CLOCK_SOURCE: {
struct uac_clock_source_descriptor *d = p1;
term->type = d->bDescriptorSubtype << 16; /* virtual type */
term->id = id;
term->name = d->iClockSource;
return 0;
}
default:
return -ENODEV;
}
}
return -ENODEV;
}
/*
* Feature Unit
*/
/* feature unit control information */
struct usb_feature_control_info {
const char *name;
int type; /* data type for uac1 */
int type_uac2; /* data type for uac2 if different from uac1, else -1 */
};
static struct usb_feature_control_info audio_feature_info[] = {
{ "Mute", USB_MIXER_INV_BOOLEAN, -1 },
{ "Volume", USB_MIXER_S16, -1 },
{ "Tone Control - Bass", USB_MIXER_S8, -1 },
{ "Tone Control - Mid", USB_MIXER_S8, -1 },
{ "Tone Control - Treble", USB_MIXER_S8, -1 },
{ "Graphic Equalizer", USB_MIXER_S8, -1 }, /* FIXME: not implemeted yet */
{ "Auto Gain Control", USB_MIXER_BOOLEAN, -1 },
{ "Delay Control", USB_MIXER_U16, USB_MIXER_U32 },
{ "Bass Boost", USB_MIXER_BOOLEAN, -1 },
{ "Loudness", USB_MIXER_BOOLEAN, -1 },
/* UAC2 specific */
{ "Input Gain Control", USB_MIXER_S16, -1 },
{ "Input Gain Pad Control", USB_MIXER_S16, -1 },
{ "Phase Inverter Control", USB_MIXER_BOOLEAN, -1 },
};
/* private_free callback */
void snd_usb_mixer_elem_free(struct snd_kcontrol *kctl)
{
kfree(kctl->private_data);
kctl->private_data = NULL;
}
/*
* interface to ALSA control for feature/mixer units
*/
/* volume control quirks */
static void volume_control_quirks(struct usb_mixer_elem_info *cval,
struct snd_kcontrol *kctl)
{
struct snd_usb_audio *chip = cval->head.mixer->chip;
switch (chip->usb_id) {
case USB_ID(0x0763, 0x2030): /* M-Audio Fast Track C400 */
case USB_ID(0x0763, 0x2031): /* M-Audio Fast Track C600 */
if (strcmp(kctl->id.name, "Effect Duration") == 0) {
cval->min = 0x0000;
cval->max = 0xffff;
cval->res = 0x00e6;
break;
}
if (strcmp(kctl->id.name, "Effect Volume") == 0 ||
strcmp(kctl->id.name, "Effect Feedback Volume") == 0) {
cval->min = 0x00;
cval->max = 0xff;
break;
}
if (strstr(kctl->id.name, "Effect Return") != NULL) {
cval->min = 0xb706;
cval->max = 0xff7b;
cval->res = 0x0073;
break;
}
if ((strstr(kctl->id.name, "Playback Volume") != NULL) ||
(strstr(kctl->id.name, "Effect Send") != NULL)) {
cval->min = 0xb5fb; /* -73 dB = 0xb6ff */
cval->max = 0xfcfe;
cval->res = 0x0073;
}
break;
case USB_ID(0x0763, 0x2081): /* M-Audio Fast Track Ultra 8R */
case USB_ID(0x0763, 0x2080): /* M-Audio Fast Track Ultra */
if (strcmp(kctl->id.name, "Effect Duration") == 0) {
usb_audio_info(chip,
"set quirk for FTU Effect Duration\n");
cval->min = 0x0000;
cval->max = 0x7f00;
cval->res = 0x0100;
break;
}
if (strcmp(kctl->id.name, "Effect Volume") == 0 ||
strcmp(kctl->id.name, "Effect Feedback Volume") == 0) {
usb_audio_info(chip,
"set quirks for FTU Effect Feedback/Volume\n");
cval->min = 0x00;
cval->max = 0x7f;
break;
}
break;
case USB_ID(0x0471, 0x0101):
case USB_ID(0x0471, 0x0104):
case USB_ID(0x0471, 0x0105):
case USB_ID(0x0672, 0x1041):
/* quirk for UDA1321/N101.
* note that detection between firmware 2.1.1.7 (N101)
* and later 2.1.1.21 is not very clear from datasheets.
* I hope that the min value is -15360 for newer firmware --jk
*/
if (!strcmp(kctl->id.name, "PCM Playback Volume") &&
cval->min == -15616) {
usb_audio_info(chip,
"set volume quirk for UDA1321/N101 chip\n");
cval->max = -256;
}
break;
case USB_ID(0x046d, 0x09a4):
if (!strcmp(kctl->id.name, "Mic Capture Volume")) {
usb_audio_info(chip,
"set volume quirk for QuickCam E3500\n");
cval->min = 6080;
cval->max = 8768;
cval->res = 192;
}
break;
case USB_ID(0x046d, 0x0807): /* Logitech Webcam C500 */
case USB_ID(0x046d, 0x0808):
case USB_ID(0x046d, 0x0809):
case USB_ID(0x046d, 0x0819): /* Logitech Webcam C210 */
case USB_ID(0x046d, 0x081b): /* HD Webcam c310 */
case USB_ID(0x046d, 0x081d): /* HD Webcam c510 */
case USB_ID(0x046d, 0x0825): /* HD Webcam c270 */
case USB_ID(0x046d, 0x0826): /* HD Webcam c525 */
case USB_ID(0x046d, 0x08ca): /* Logitech Quickcam Fusion */
case USB_ID(0x046d, 0x0991):
case USB_ID(0x046d, 0x09a2): /* QuickCam Communicate Deluxe/S7500 */
/* Most audio usb devices lie about volume resolution.
* Most Logitech webcams have res = 384.
* Probably there is some logitech magic behind this number --fishor
*/
if (!strcmp(kctl->id.name, "Mic Capture Volume")) {
usb_audio_info(chip,
"set resolution quirk: cval->res = 384\n");
cval->res = 384;
}
break;
}
}
/*
* retrieve the minimum and maximum values for the specified control
*/
static int get_min_max_with_quirks(struct usb_mixer_elem_info *cval,
int default_min, struct snd_kcontrol *kctl)
{
/* for failsafe */
cval->min = default_min;
cval->max = cval->min + 1;
cval->res = 1;
cval->dBmin = cval->dBmax = 0;
if (cval->val_type == USB_MIXER_BOOLEAN ||
cval->val_type == USB_MIXER_INV_BOOLEAN) {
cval->initialized = 1;
} else {
int minchn = 0;
if (cval->cmask) {
int i;
for (i = 0; i < MAX_CHANNELS; i++)
if (cval->cmask & (1 << i)) {
minchn = i + 1;
break;
}
}
if (get_ctl_value(cval, UAC_GET_MAX, (cval->control << 8) | minchn, &cval->max) < 0 ||
get_ctl_value(cval, UAC_GET_MIN, (cval->control << 8) | minchn, &cval->min) < 0) {
usb_audio_err(cval->head.mixer->chip,
"%d:%d: cannot get min/max values for control %d (id %d)\n",
cval->head.id, snd_usb_ctrl_intf(cval->head.mixer->chip),
cval->control, cval->head.id);
return -EINVAL;
}
if (get_ctl_value(cval, UAC_GET_RES,
(cval->control << 8) | minchn,
&cval->res) < 0) {
cval->res = 1;
} else {
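/* probe for a finer resolution: halve res while the device still
* accepts SET_RES, then read back the resolution it settled on */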
int last_valid_res = cval->res;
while (cval->res > 1) {
if (snd_usb_mixer_set_ctl_value(cval, UAC_SET_RES,
(cval->control << 8) | minchn,
cval->res / 2) < 0)
break;
cval->res /= 2;
}
if (get_ctl_value(cval, UAC_GET_RES,
(cval->control << 8) | minchn, &cval->res) < 0)
cval->res = last_valid_res;
}
if (cval->res == 0)
cval->res = 1;
/* Additional checks for the proper resolution
*
* Some devices report a smaller resolution than they actually
* react to. They don't return errors but simply clip the value
* to the lower aligned step.
*/
if (cval->min + cval->res < cval->max) {
int last_valid_res = cval->res;
int saved, test, check;
get_cur_mix_raw(cval, minchn, &saved);
for (;;) {
test = saved;
if (test < cval->max)
test += cval->res;
else
test -= cval->res;
if (test < cval->min || test > cval->max ||
snd_usb_set_cur_mix_value(cval, minchn, 0, test) ||
get_cur_mix_raw(cval, minchn, &check)) {
cval->res = last_valid_res;
break;
}
if (test == check)
break;
cval->res *= 2;
}
snd_usb_set_cur_mix_value(cval, minchn, 0, saved);
}
cval->initialized = 1;
}
if (kctl)
volume_control_quirks(cval, kctl);
/* USB descriptions contain the dB scale in 1/256 dB unit
* while ALSA TLV contains in 1/100 dB unit
*/
cval->dBmin = (convert_signed_value(cval, cval->min) * 100) / 256;
cval->dBmax = (convert_signed_value(cval, cval->max) * 100) / 256;
if (cval->dBmin > cval->dBmax) {
/* something is wrong; assume it's either from/to 0dB */
if (cval->dBmin < 0)
cval->dBmax = 0;
else if (cval->dBmin > 0)
cval->dBmin = 0;
if (cval->dBmin > cval->dBmax) {
/* totally crap, return an error */
return -EINVAL;
}
}
return 0;
}
#define get_min_max(cval, def) get_min_max_with_quirks(cval, def, NULL)
/* get a feature/mixer unit info */
static int mixer_ctl_feature_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
struct usb_mixer_elem_info *cval = kcontrol->private_data;
if (cval->val_type == USB_MIXER_BOOLEAN ||
cval->val_type == USB_MIXER_INV_BOOLEAN)
uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
else
uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
uinfo->count = cval->channels;
if (cval->val_type == USB_MIXER_BOOLEAN ||
cval->val_type == USB_MIXER_INV_BOOLEAN) {
uinfo->value.integer.min = 0;
uinfo->value.integer.max = 1;
} else {
if (!cval->initialized) {
get_min_max_with_quirks(cval, 0, kcontrol);
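/* if the dB range turned out to be invalid, revoke the TLV access
* bits and notify userspace that the control info has changed */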
if (cval->initialized && cval->dBmin >= cval->dBmax) {
kcontrol->vd[0].access &=
~(SNDRV_CTL_ELEM_ACCESS_TLV_READ |
SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK);
snd_ctl_notify(cval->head.mixer->chip->card,
SNDRV_CTL_EVENT_MASK_INFO,
&kcontrol->id);
}
}
uinfo->value.integer.min = 0;
uinfo->value.integer.max =
(cval->max - cval->min + cval->res - 1) / cval->res;
}
return 0;
}
/* get the current value from feature/mixer unit */
static int mixer_ctl_feature_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct usb_mixer_elem_info *cval = kcontrol->private_data;
int c, cnt, val, err;
ucontrol->value.integer.value[0] = cval->min;
if (cval->cmask) {
cnt = 0;
for (c = 0; c < MAX_CHANNELS; c++) {
if (!(cval->cmask & (1 << c)))
continue;
err = snd_usb_get_cur_mix_value(cval, c + 1, cnt, &val);
if (err < 0)
return filter_error(cval, err);
val = get_relative_value(cval, val);
ucontrol->value.integer.value[cnt] = val;
cnt++;
}
return 0;
} else {
/* master channel */
err = snd_usb_get_cur_mix_value(cval, 0, 0, &val);
if (err < 0)
return filter_error(cval, err);
val = get_relative_value(cval, val);
ucontrol->value.integer.value[0] = val;
}
return 0;
}
/* put the current value to feature/mixer unit */
static int mixer_ctl_feature_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct usb_mixer_elem_info *cval = kcontrol->private_data;
int c, cnt, val, oval, err;
int changed = 0;
if (cval->cmask) {
cnt = 0;
for (c = 0; c < MAX_CHANNELS; c++) {
if (!(cval->cmask & (1 << c)))
continue;
err = snd_usb_get_cur_mix_value(cval, c + 1, cnt, &oval);
if (err < 0)
return filter_error(cval, err);
val = ucontrol->value.integer.value[cnt];
val = get_abs_value(cval, val);
if (oval != val) {
snd_usb_set_cur_mix_value(cval, c + 1, cnt, val);
changed = 1;
}
cnt++;
}
} else {
/* master channel */
err = snd_usb_get_cur_mix_value(cval, 0, 0, &oval);
if (err < 0)
return filter_error(cval, err);
val = ucontrol->value.integer.value[0];
val = get_abs_value(cval, val);
if (val != oval) {
snd_usb_set_cur_mix_value(cval, 0, 0, val);
changed = 1;
}
}
return changed;
}
static struct snd_kcontrol_new usb_feature_unit_ctl = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "", /* will be filled later manually */
.info = mixer_ctl_feature_info,
.get = mixer_ctl_feature_get,
.put = mixer_ctl_feature_put,
};
/* the read-only variant */
static const struct snd_kcontrol_new usb_feature_unit_ctl_ro = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "", /* will be filled later manually */
.info = mixer_ctl_feature_info,
.get = mixer_ctl_feature_get,
.put = NULL,
};
/*
* This symbol is exported in order to allow the mixer quirks to
* hook up to the standard feature unit control mechanism
*/
struct snd_kcontrol_new *snd_usb_feature_unit_ctl = &usb_feature_unit_ctl;
/*
* build a feature control
*/
static size_t append_ctl_name(struct snd_kcontrol *kctl, const char *str)
{
return strlcat(kctl->id.name, str, sizeof(kctl->id.name));
}
/*
* A lot of headsets/headphones have a "Speaker" mixer. Make sure we
* rename it to "Headphone". We determine whether something is a
* headphone similarly to how udev determines form factor.
*/
static void check_no_speaker_on_headset(struct snd_kcontrol *kctl,
struct snd_card *card)
{
const char *names_to_check[] = {
"Headset", "headset", "Headphone", "headphone", NULL};
const char **s;
bool found = false;
if (strcmp("Speaker", kctl->id.name))
return;
for (s = names_to_check; *s; s++)
if (strstr(card->shortname, *s)) {
found = true;
break;
}
if (!found)
return;
strlcpy(kctl->id.name, "Headphone", sizeof(kctl->id.name));
}
static void build_feature_ctl(struct mixer_build *state, void *raw_desc,
unsigned int ctl_mask, int control,
struct usb_audio_term *iterm, int unitid,
int readonly_mask)
{
struct uac_feature_unit_descriptor *desc = raw_desc;
struct usb_feature_control_info *ctl_info;
unsigned int len = 0;
int mapped_name = 0;
int nameid = uac_feature_unit_iFeature(desc);
struct snd_kcontrol *kctl;
struct usb_mixer_elem_info *cval;
const struct usbmix_name_map *map;
unsigned int range;
control++; /* change from zero-based to 1-based value */
if (control == UAC_FU_GRAPHIC_EQUALIZER) {
/* FIXME: not supported yet */
return;
}
map = find_map(state, unitid, control);
if (check_ignored_ctl(map))
return;
cval = kzalloc(sizeof(*cval), GFP_KERNEL);
if (!cval)
return;
snd_usb_mixer_elem_init_std(&cval->head, state->mixer, unitid);
cval->control = control;
cval->cmask = ctl_mask;
ctl_info = &audio_feature_info[control-1];
if (state->mixer->protocol == UAC_VERSION_1)
cval->val_type = ctl_info->type;
else /* UAC_VERSION_2 */
cval->val_type = ctl_info->type_uac2 >= 0 ?
ctl_info->type_uac2 : ctl_info->type;
if (ctl_mask == 0) {
cval->channels = 1; /* master channel */
cval->master_readonly = readonly_mask;
} else {
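/* count the channels covered by the control mask */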
int i, c = 0;
for (i = 0; i < 16; i++)
if (ctl_mask & (1 << i))
c++;
cval->channels = c;
cval->ch_readonly = readonly_mask;
}
/*
* If all channels in the mask are marked read-only, make the control
* read-only. snd_usb_set_cur_mix_value() will check the mask again and won't
* issue write commands to read-only channels.
*/
if (cval->channels == readonly_mask)
kctl = snd_ctl_new1(&usb_feature_unit_ctl_ro, cval);
else
kctl = snd_ctl_new1(&usb_feature_unit_ctl, cval);
if (!kctl) {
usb_audio_err(state->chip, "cannot malloc kcontrol\n");
kfree(cval);
return;
}
kctl->private_free = snd_usb_mixer_elem_free;
len = check_mapped_name(map, kctl->id.name, sizeof(kctl->id.name));
mapped_name = len != 0;
if (!len && nameid)
len = snd_usb_copy_string_desc(state, nameid,
kctl->id.name, sizeof(kctl->id.name));
switch (control) {
case UAC_FU_MUTE:
case UAC_FU_VOLUME:
/*
* determine the control name. the rule is:
* - if a name id is given in the descriptor, use it.
* - if the connected input can be determined, use the name of
* the terminal type.
* - if the connected output can be determined, use it.
* - otherwise, use an anonymous name.
*/
if (!len) {
len = get_term_name(state, iterm, kctl->id.name,
sizeof(kctl->id.name), 1);
if (!len)
len = get_term_name(state, &state->oterm,
kctl->id.name,
sizeof(kctl->id.name), 1);
if (!len)
snprintf(kctl->id.name, sizeof(kctl->id.name),
"Feature %d", unitid);
}
if (!mapped_name)
check_no_speaker_on_headset(kctl, state->mixer->chip->card);
/*
* determine the stream direction:
* if the connected output is a USB stream, it's likely a capture
* stream. otherwise it should be playback (hopefully :)
*/
if (!mapped_name && !(state->oterm.type >> 16)) {
if ((state->oterm.type & 0xff00) == 0x0100)
append_ctl_name(kctl, " Capture");
else
append_ctl_name(kctl, " Playback");
}
append_ctl_name(kctl, control == UAC_FU_MUTE ?
" Switch" : " Volume");
break;
default:
if (!len)
strlcpy(kctl->id.name, audio_feature_info[control-1].name,
sizeof(kctl->id.name));
break;
}
/* get min/max values */
get_min_max_with_quirks(cval, 0, kctl);
if (control == UAC_FU_VOLUME) {
check_mapped_dB(map, cval);
if (cval->dBmin < cval->dBmax || !cval->initialized) {
kctl->tlv.c = snd_usb_mixer_vol_tlv;
kctl->vd[0].access |=
SNDRV_CTL_ELEM_ACCESS_TLV_READ |
SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK;
}
}
snd_usb_mixer_fu_apply_quirk(state->mixer, cval, unitid, kctl);
range = (cval->max - cval->min) / cval->res;
/*
* Are there devices with a volume range of more than 255? I use a bit
* more to be sure. 384 is a resolution magic number found on Logitech
* devices. It will definitely catch all buggy Logitech devices.
*/
if (range > 384) {
usb_audio_warn(state->chip,
"Warning! Unlikely big volume range (=%u), cval->res is probably wrong.",
range);
usb_audio_warn(state->chip,
"[%d] FU [%s] ch = %d, val = %d/%d/%d",
cval->head.id, kctl->id.name, cval->channels,
cval->min, cval->max, cval->res);
}
usb_audio_dbg(state->chip, "[%d] FU [%s] ch = %d, val = %d/%d/%d\n",
cval->head.id, kctl->id.name, cval->channels,
cval->min, cval->max, cval->res);
snd_usb_mixer_add_control(&cval->head, kctl);
}
static int parse_clock_source_unit(struct mixer_build *state, int unitid,
void *_ftr)
{
struct uac_clock_source_descriptor *hdr = _ftr;
struct usb_mixer_elem_info *cval;
struct snd_kcontrol *kctl;
char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
int ret;
if (state->mixer->protocol != UAC_VERSION_2)
return -EINVAL;
if (hdr->bLength != sizeof(*hdr)) {
usb_audio_dbg(state->chip,
"Bogus clock source descriptor length of %d, ignoring.\n",
hdr->bLength);
return 0;
}
/*
* The only property of this unit we are interested in is the
* clock source validity. If that isn't readable, just bail out.
*/
if (!uac2_control_is_readable(hdr->bmControls,
ilog2(UAC2_CS_CONTROL_CLOCK_VALID)))
return 0;
cval = kzalloc(sizeof(*cval), GFP_KERNEL);
if (!cval)
return -ENOMEM;
snd_usb_mixer_elem_init_std(&cval->head, state->mixer, hdr->bClockID);
cval->min = 0;
cval->max = 1;
cval->channels = 1;
cval->val_type = USB_MIXER_BOOLEAN;
cval->control = UAC2_CS_CONTROL_CLOCK_VALID;
if (uac2_control_is_writeable(hdr->bmControls,
ilog2(UAC2_CS_CONTROL_CLOCK_VALID)))
kctl = snd_ctl_new1(&usb_feature_unit_ctl, cval);
else {
cval->master_readonly = 1;
kctl = snd_ctl_new1(&usb_feature_unit_ctl_ro, cval);
}
if (!kctl) {
kfree(cval);
return -ENOMEM;
}
kctl->private_free = snd_usb_mixer_elem_free;
ret = snd_usb_copy_string_desc(state, hdr->iClockSource,
name, sizeof(name));
if (ret > 0)
snprintf(kctl->id.name, sizeof(kctl->id.name),
"%s Validity", name);
else
snprintf(kctl->id.name, sizeof(kctl->id.name),
"Clock Source %d Validity", hdr->bClockID);
return snd_usb_mixer_add_control(&cval->head, kctl);
}
/*
* parse a feature unit
*
* most of the controls are defined here.
*/
static int parse_audio_feature_unit(struct mixer_build *state, int unitid,
void *_ftr)
{
int channels, i, j;
struct usb_audio_term iterm;
unsigned int master_bits, first_ch_bits;
int err, csize;
struct uac_feature_unit_descriptor *hdr = _ftr;
__u8 *bmaControls;
if (state->mixer->protocol == UAC_VERSION_1) {
csize = hdr->bControlSize;
if (!csize) {
usb_audio_dbg(state->chip,
"unit %u: invalid bControlSize == 0\n",
unitid);
return -EINVAL;
}
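/* 6 bytes of fixed header plus iFeature, then one bmaControls entry
* for the master channel and for each logical channel */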
channels = (hdr->bLength - 7) / csize - 1;
bmaControls = hdr->bmaControls;
if (hdr->bLength < 7 + csize) {
usb_audio_err(state->chip,
"unit %u: invalid UAC_FEATURE_UNIT descriptor\n",
unitid);
return -EINVAL;
}
} else {
struct uac2_feature_unit_descriptor *ftr = _ftr;
csize = 4;
channels = (hdr->bLength - 6) / 4 - 1;
bmaControls = ftr->bmaControls;
if (hdr->bLength < 6 + csize) {
usb_audio_err(state->chip,
"unit %u: invalid UAC_FEATURE_UNIT descriptor\n",
unitid);
return -EINVAL;
}
}
/* parse the source unit */
if ((err = parse_audio_unit(state, hdr->bSourceID)) < 0)
return err;
/* determine the input source type and name */
err = check_input_term(state, hdr->bSourceID, &iterm);
if (err < 0)
return err;
master_bits = snd_usb_combine_bytes(bmaControls, csize);
/* master configuration quirks */
switch (state->chip->usb_id) {
case USB_ID(0x08bb, 0x2702):
usb_audio_info(state->chip,
"usbmixer: master volume quirk for PCM2702 chip\n");
/* disable non-functional volume control */
master_bits &= ~UAC_CONTROL_BIT(UAC_FU_VOLUME);
break;
case USB_ID(0x1130, 0xf211):
usb_audio_info(state->chip,
"usbmixer: volume control quirk for Tenx TP6911 Audio Headset\n");
/* disable non-functional volume control */
channels = 0;
break;
}
if (channels > 0)
first_ch_bits = snd_usb_combine_bytes(bmaControls + csize, csize);
else
first_ch_bits = 0;
if (state->mixer->protocol == UAC_VERSION_1) {
/* check all control types */
for (i = 0; i < 10; i++) {
unsigned int ch_bits = 0;
for (j = 0; j < channels; j++) {
unsigned int mask;
mask = snd_usb_combine_bytes(bmaControls +
csize * (j+1), csize);
if (mask & (1 << i))
ch_bits |= (1 << j);
}
/* audio class v1 controls are never read-only */
/*
* The first channel must be set
* (for ease of programming).
*/
if (ch_bits & 1)
build_feature_ctl(state, _ftr, ch_bits, i,
&iterm, unitid, 0);
if (master_bits & (1 << i))
build_feature_ctl(state, _ftr, 0, i, &iterm,
unitid, 0);
}
} else { /* UAC_VERSION_2 */
for (i = 0; i < ARRAY_SIZE(audio_feature_info); i++) {
unsigned int ch_bits = 0;
unsigned int ch_read_only = 0;
for (j = 0; j < channels; j++) {
unsigned int mask;
mask = snd_usb_combine_bytes(bmaControls +
csize * (j+1), csize);
if (uac2_control_is_readable(mask, i)) {
ch_bits |= (1 << j);
if (!uac2_control_is_writeable(mask, i))
ch_read_only |= (1 << j);
}
}
/*
* NOTE: build_feature_ctl() will mark the control
* read-only if all channels are marked read-only in
* the descriptors. Otherwise, the control will be
* reported as writeable, but the driver will not
* actually issue a write command for read-only
* channels.
*/
/*
* The first channel must be set
* (for ease of programming).
*/
if (ch_bits & 1)
build_feature_ctl(state, _ftr, ch_bits, i,
&iterm, unitid, ch_read_only);
if (uac2_control_is_readable(master_bits, i))
build_feature_ctl(state, _ftr, 0, i, &iterm, unitid,
!uac2_control_is_writeable(master_bits, i));
}
}
return 0;
}
/*
* Mixer Unit
*/
/*
* build a mixer unit control
*
* the callbacks are identical to those of the feature unit.
* the input channel number (zero based) is given in the control field
* instead.
*/
static void build_mixer_unit_ctl(struct mixer_build *state,
struct uac_mixer_unit_descriptor *desc,
int in_pin, int in_ch, int unitid,
struct usb_audio_term *iterm)
{
struct usb_mixer_elem_info *cval;
unsigned int num_outs = uac_mixer_unit_bNrChannels(desc);
unsigned int i, len;
struct snd_kcontrol *kctl;
const struct usbmix_name_map *map;
map = find_map(state, unitid, 0);
if (check_ignored_ctl(map))
return;
cval = kzalloc(sizeof(*cval), GFP_KERNEL);
if (!cval)
return;
snd_usb_mixer_elem_init_std(&cval->head, state->mixer, unitid);
cval->control = in_ch + 1; /* based on 1 */
cval->val_type = USB_MIXER_S16;
for (i = 0; i < num_outs; i++) {
__u8 *c = uac_mixer_unit_bmControls(desc, state->mixer->protocol);
if (check_matrix_bitmap(c, in_ch, i, num_outs)) {
cval->cmask |= (1 << i);
cval->channels++;
}
}
/* get min/max values */
get_min_max(cval, 0);
kctl = snd_ctl_new1(&usb_feature_unit_ctl, cval);
if (!kctl) {
usb_audio_err(state->chip, "cannot malloc kcontrol\n");
kfree(cval);
return;
}
kctl->private_free = snd_usb_mixer_elem_free;
len = check_mapped_name(map, kctl->id.name, sizeof(kctl->id.name));
if (!len)
len = get_term_name(state, iterm, kctl->id.name,
sizeof(kctl->id.name), 0);
if (!len)
len = sprintf(kctl->id.name, "Mixer Source %d", in_ch + 1);
append_ctl_name(kctl, " Volume");
usb_audio_dbg(state->chip, "[%d] MU [%s] ch = %d, val = %d/%d\n",
cval->head.id, kctl->id.name, cval->channels, cval->min, cval->max);
snd_usb_mixer_add_control(&cval->head, kctl);
}
/*
* parse a mixer unit
*/
static int parse_audio_mixer_unit(struct mixer_build *state, int unitid,
void *raw_desc)
{
struct uac_mixer_unit_descriptor *desc = raw_desc;
struct usb_audio_term iterm;
int input_pins, num_ins, num_outs;
int pin, ich, err;
if (desc->bLength < 11 || !(input_pins = desc->bNrInPins) ||
!(num_outs = uac_mixer_unit_bNrChannels(desc))) {
usb_audio_err(state->chip,
"invalid MIXER UNIT descriptor %d\n",
unitid);
return -EINVAL;
}
num_ins = 0;
ich = 0;
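/* input channels are numbered consecutively across all input pins */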
for (pin = 0; pin < input_pins; pin++) {
err = parse_audio_unit(state, desc->baSourceID[pin]);
if (err < 0)
continue;
/* no bmControls field (e.g. Maya44) -> ignore */
if (desc->bLength <= 10 + input_pins)
continue;
err = check_input_term(state, desc->baSourceID[pin], &iterm);
if (err < 0)
return err;
num_ins += iterm.channels;
for (; ich < num_ins; ich++) {
int och, ich_has_controls = 0;
for (och = 0; och < num_outs; och++) {
__u8 *c = uac_mixer_unit_bmControls(desc,
state->mixer->protocol);
if (check_matrix_bitmap(c, ich, och, num_outs)) {
ich_has_controls = 1;
break;
}
}
if (ich_has_controls)
build_mixer_unit_ctl(state, desc, pin, ich,
unitid, &iterm);
}
}
return 0;
}
/*
* Processing Unit / Extension Unit
*/
/* get callback for processing/extension unit */
static int mixer_ctl_procunit_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct usb_mixer_elem_info *cval = kcontrol->private_data;
int err, val;
err = get_cur_ctl_value(cval, cval->control << 8, &val);
if (err < 0) {
ucontrol->value.integer.value[0] = cval->min;
return filter_error(cval, err);
}
val = get_relative_value(cval, val);
ucontrol->value.integer.value[0] = val;
return 0;
}
/* put callback for processing/extension unit */
static int mixer_ctl_procunit_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct usb_mixer_elem_info *cval = kcontrol->private_data;
int val, oval, err;
err = get_cur_ctl_value(cval, cval->control << 8, &oval);
if (err < 0)
return filter_error(cval, err);
val = ucontrol->value.integer.value[0];
val = get_abs_value(cval, val);
if (val != oval) {
set_cur_ctl_value(cval, cval->control << 8, val);
return 1;
}
return 0;
}
/* alsa control interface for processing/extension unit */
static const struct snd_kcontrol_new mixer_procunit_ctl = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "", /* will be filled later */
.info = mixer_ctl_feature_info,
.get = mixer_ctl_procunit_get,
.put = mixer_ctl_procunit_put,
};
/*
* predefined data for processing units
*/
struct procunit_value_info {
int control;
char *suffix;
int val_type;
int min_value;
};
struct procunit_info {
int type;
char *name;
struct procunit_value_info *values;
};
static struct procunit_value_info updown_proc_info[] = {
{ UAC_UD_ENABLE, "Switch", USB_MIXER_BOOLEAN },
{ UAC_UD_MODE_SELECT, "Mode Select", USB_MIXER_U8, 1 },
{ 0 }
};
static struct procunit_value_info prologic_proc_info[] = {
{ UAC_DP_ENABLE, "Switch", USB_MIXER_BOOLEAN },
{ UAC_DP_MODE_SELECT, "Mode Select", USB_MIXER_U8, 1 },
{ 0 }
};
static struct procunit_value_info threed_enh_proc_info[] = {
{ UAC_3D_ENABLE, "Switch", USB_MIXER_BOOLEAN },
{ UAC_3D_SPACE, "Spaciousness", USB_MIXER_U8 },
{ 0 }
};
static struct procunit_value_info reverb_proc_info[] = {
{ UAC_REVERB_ENABLE, "Switch", USB_MIXER_BOOLEAN },
{ UAC_REVERB_LEVEL, "Level", USB_MIXER_U8 },
{ UAC_REVERB_TIME, "Time", USB_MIXER_U16 },
{ UAC_REVERB_FEEDBACK, "Feedback", USB_MIXER_U8 },
{ 0 }
};
static struct procunit_value_info chorus_proc_info[] = {
{ UAC_CHORUS_ENABLE, "Switch", USB_MIXER_BOOLEAN },
{ UAC_CHORUS_LEVEL, "Level", USB_MIXER_U8 },
{ UAC_CHORUS_RATE, "Rate", USB_MIXER_U16 },
{ UAC_CHORUS_DEPTH, "Depth", USB_MIXER_U16 },
{ 0 }
};
static struct procunit_value_info dcr_proc_info[] = {
{ UAC_DCR_ENABLE, "Switch", USB_MIXER_BOOLEAN },
{ UAC_DCR_RATE, "Ratio", USB_MIXER_U16 },
{ UAC_DCR_MAXAMPL, "Max Amp", USB_MIXER_S16 },
{ UAC_DCR_THRESHOLD, "Threshold", USB_MIXER_S16 },
{ UAC_DCR_ATTACK_TIME, "Attack Time", USB_MIXER_U16 },
{ UAC_DCR_RELEASE_TIME, "Release Time", USB_MIXER_U16 },
{ 0 }
};
static struct procunit_info procunits[] = {
{ UAC_PROCESS_UP_DOWNMIX, "Up Down", updown_proc_info },
{ UAC_PROCESS_DOLBY_PROLOGIC, "Dolby Prologic", prologic_proc_info },
{ UAC_PROCESS_STEREO_EXTENDER, "3D Stereo Extender", threed_enh_proc_info },
{ UAC_PROCESS_REVERB, "Reverb", reverb_proc_info },
{ UAC_PROCESS_CHORUS, "Chorus", chorus_proc_info },
{ UAC_PROCESS_DYN_RANGE_COMP, "DCR", dcr_proc_info },
{ 0 },
};
/*
* predefined data for extension units
*/
static struct procunit_value_info clock_rate_xu_info[] = {
{ USB_XU_CLOCK_RATE_SELECTOR, "Selector", USB_MIXER_U8, 0 },
{ 0 }
};
static struct procunit_value_info clock_source_xu_info[] = {
{ USB_XU_CLOCK_SOURCE_SELECTOR, "External", USB_MIXER_BOOLEAN },
{ 0 }
};
static struct procunit_value_info spdif_format_xu_info[] = {
{ USB_XU_DIGITAL_FORMAT_SELECTOR, "SPDIF/AC3", USB_MIXER_BOOLEAN },
{ 0 }
};
static struct procunit_value_info soft_limit_xu_info[] = {
{ USB_XU_SOFT_LIMIT_SELECTOR, " ", USB_MIXER_BOOLEAN },
{ 0 }
};
static struct procunit_info extunits[] = {
{ USB_XU_CLOCK_RATE, "Clock rate", clock_rate_xu_info },
{ USB_XU_CLOCK_SOURCE, "DigitalIn CLK source", clock_source_xu_info },
{ USB_XU_DIGITAL_IO_STATUS, "DigitalOut format:", spdif_format_xu_info },
{ USB_XU_DEVICE_OPTIONS, "AnalogueIn Soft Limit", soft_limit_xu_info },
{ 0 }
};
/*
* build a processing/extension unit
*/
static int build_audio_procunit(struct mixer_build *state, int unitid,
void *raw_desc, struct procunit_info *list,
char *name)
{
struct uac_processing_unit_descriptor *desc = raw_desc;
int num_ins = desc->bNrInPins;
struct usb_mixer_elem_info *cval;
struct snd_kcontrol *kctl;
int i, err, nameid, type, len;
struct procunit_info *info;
struct procunit_value_info *valinfo;
const struct usbmix_name_map *map;
static struct procunit_value_info default_value_info[] = {
{ 0x01, "Switch", USB_MIXER_BOOLEAN },
{ 0 }
};
static struct procunit_info default_info = {
0, NULL, default_value_info
};
if (desc->bLength < 13 || desc->bLength < 13 + num_ins ||
desc->bLength < num_ins + uac_processing_unit_bControlSize(desc, state->mixer->protocol)) {
usb_audio_err(state->chip, "invalid %s descriptor (id %d)\n", name, unitid);
return -EINVAL;
}
for (i = 0; i < num_ins; i++) {
if ((err = parse_audio_unit(state, desc->baSourceID[i])) < 0)
return err;
}
type = le16_to_cpu(desc->wProcessType);
for (info = list; info && info->type; info++)
if (info->type == type)
break;
if (!info || !info->type)
info = &default_info;
for (valinfo = info->values; valinfo->control; valinfo++) {
__u8 *controls = uac_processing_unit_bmControls(desc, state->mixer->protocol);
if (!(controls[valinfo->control / 8] & (1 << ((valinfo->control % 8) - 1))))
continue;
map = find_map(state, unitid, valinfo->control);
if (check_ignored_ctl(map))
continue;
cval = kzalloc(sizeof(*cval), GFP_KERNEL);
if (!cval)
return -ENOMEM;
snd_usb_mixer_elem_init_std(&cval->head, state->mixer, unitid);
cval->control = valinfo->control;
cval->val_type = valinfo->val_type;
cval->channels = 1;
/* get min/max values */
if (type == UAC_PROCESS_UP_DOWNMIX && cval->control == UAC_UD_MODE_SELECT) {
__u8 *control_spec = uac_processing_unit_specific(desc, state->mixer->protocol);
/* FIXME: hard-coded */
cval->min = 1;
cval->max = control_spec[0];
cval->res = 1;
cval->initialized = 1;
} else {
if (type == USB_XU_CLOCK_RATE) {
/*
* E-Mu USB 0404/0202/TrackerPre/0204
* samplerate control quirk
*/
cval->min = 0;
cval->max = 5;
cval->res = 1;
cval->initialized = 1;
} else
get_min_max(cval, valinfo->min_value);
}
kctl = snd_ctl_new1(&mixer_procunit_ctl, cval);
if (!kctl) {
kfree(cval);
return -ENOMEM;
}
kctl->private_free = snd_usb_mixer_elem_free;
if (check_mapped_name(map, kctl->id.name, sizeof(kctl->id.name))) {
/* nothing */ ;
} else if (info->name) {
strlcpy(kctl->id.name, info->name, sizeof(kctl->id.name));
} else {
nameid = uac_processing_unit_iProcessing(desc, state->mixer->protocol);
len = 0;
if (nameid)
len = snd_usb_copy_string_desc(state, nameid,
kctl->id.name,
sizeof(kctl->id.name));
if (!len)
strlcpy(kctl->id.name, name, sizeof(kctl->id.name));
}
append_ctl_name(kctl, " ");
append_ctl_name(kctl, valinfo->suffix);
usb_audio_dbg(state->chip,
"[%d] PU [%s] ch = %d, val = %d/%d\n",
cval->head.id, kctl->id.name, cval->channels,
cval->min, cval->max);
err = snd_usb_mixer_add_control(&cval->head, kctl);
if (err < 0)
return err;
}
return 0;
}
static int parse_audio_processing_unit(struct mixer_build *state, int unitid,
void *raw_desc)
{
return build_audio_procunit(state, unitid, raw_desc,
procunits, "Processing Unit");
}
static int parse_audio_extension_unit(struct mixer_build *state, int unitid,
void *raw_desc)
{
/*
* Note that we parse extension units with processing unit descriptors.
* That's ok as the layout is the same.
*/
return build_audio_procunit(state, unitid, raw_desc,
extunits, "Extension Unit");
}
/*
* Selector Unit
*/
/*
* info callback for selector unit
* use an enumerator type for routing
*/
static int mixer_ctl_selector_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
struct usb_mixer_elem_info *cval = kcontrol->private_data;
const char **itemlist = (const char **)kcontrol->private_value;
if (snd_BUG_ON(!itemlist))
return -EINVAL;
return snd_ctl_enum_info(uinfo, 1, cval->max, itemlist);
}
/* get callback for selector unit */
static int mixer_ctl_selector_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct usb_mixer_elem_info *cval = kcontrol->private_data;
int val, err;
err = get_cur_ctl_value(cval, cval->control << 8, &val);
if (err < 0) {
ucontrol->value.enumerated.item[0] = 0;
return filter_error(cval, err);
}
val = get_relative_value(cval, val);
ucontrol->value.enumerated.item[0] = val;
return 0;
}
/* put callback for selector unit */
static int mixer_ctl_selector_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct usb_mixer_elem_info *cval = kcontrol->private_data;
int val, oval, err;
err = get_cur_ctl_value(cval, cval->control << 8, &oval);
if (err < 0)
return filter_error(cval, err);
val = ucontrol->value.enumerated.item[0];
val = get_abs_value(cval, val);
if (val != oval) {
set_cur_ctl_value(cval, cval->control << 8, val);
return 1;
}
return 0;
}
/* alsa control interface for selector unit */
static const struct snd_kcontrol_new mixer_selectunit_ctl = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "", /* will be filled later */
.info = mixer_ctl_selector_info,
.get = mixer_ctl_selector_get,
.put = mixer_ctl_selector_put,
};
/*
* private free callback.
* free both private_data and private_value
*/
static void usb_mixer_selector_elem_free(struct snd_kcontrol *kctl)
{
int i, num_ins = 0;
if (kctl->private_data) {
struct usb_mixer_elem_info *cval = kctl->private_data;
num_ins = cval->max;
kfree(cval);
kctl->private_data = NULL;
}
if (kctl->private_value) {
char **itemlist = (char **)kctl->private_value;
for (i = 0; i < num_ins; i++)
kfree(itemlist[i]);
kfree(itemlist);
kctl->private_value = 0;
}
}
/*
* parse a selector unit
*/
static int parse_audio_selector_unit(struct mixer_build *state, int unitid,
void *raw_desc)
{
struct uac_selector_unit_descriptor *desc = raw_desc;
unsigned int i, nameid, len;
int err;
struct usb_mixer_elem_info *cval;
struct snd_kcontrol *kctl;
const struct usbmix_name_map *map;
char **namelist;
if (!desc->bNrInPins || desc->bLength < 5 + desc->bNrInPins) {
usb_audio_err(state->chip,
"invalid SELECTOR UNIT descriptor %d\n", unitid);
return -EINVAL;
}
for (i = 0; i < desc->bNrInPins; i++) {
if ((err = parse_audio_unit(state, desc->baSourceID[i])) < 0)
return err;
}
if (desc->bNrInPins == 1) /* only one? nonsense! */
return 0;
map = find_map(state, unitid, 0);
if (check_ignored_ctl(map))
return 0;
cval = kzalloc(sizeof(*cval), GFP_KERNEL);
if (!cval)
return -ENOMEM;
snd_usb_mixer_elem_init_std(&cval->head, state->mixer, unitid);
cval->val_type = USB_MIXER_U8;
cval->channels = 1;
cval->min = 1;
cval->max = desc->bNrInPins;
cval->res = 1;
cval->initialized = 1;
if (state->mixer->protocol == UAC_VERSION_1)
cval->control = 0;
else /* UAC_VERSION_2 */
cval->control = (desc->bDescriptorSubtype == UAC2_CLOCK_SELECTOR) ?
UAC2_CX_CLOCK_SELECTOR : UAC2_SU_SELECTOR;
namelist = kmalloc(sizeof(char *) * desc->bNrInPins, GFP_KERNEL);
if (!namelist) {
kfree(cval);
return -ENOMEM;
}
#define MAX_ITEM_NAME_LEN 64
for (i = 0; i < desc->bNrInPins; i++) {
struct usb_audio_term iterm;
len = 0;
namelist[i] = kmalloc(MAX_ITEM_NAME_LEN, GFP_KERNEL);
if (!namelist[i]) {
while (i--)
kfree(namelist[i]);
kfree(namelist);
kfree(cval);
return -ENOMEM;
}
len = check_mapped_selector_name(state, unitid, i, namelist[i],
MAX_ITEM_NAME_LEN);
if (!len && check_input_term(state, desc->baSourceID[i], &iterm) >= 0)
len = get_term_name(state, &iterm, namelist[i], MAX_ITEM_NAME_LEN, 0);
if (!len)
sprintf(namelist[i], "Input %u", i);
}
kctl = snd_ctl_new1(&mixer_selectunit_ctl, cval);
if (!kctl) {
usb_audio_err(state->chip, "cannot malloc kcontrol\n");
kfree(namelist);
kfree(cval);
return -ENOMEM;
}
kctl->private_value = (unsigned long)namelist;
kctl->private_free = usb_mixer_selector_elem_free;
nameid = uac_selector_unit_iSelector(desc);
len = check_mapped_name(map, kctl->id.name, sizeof(kctl->id.name));
if (!len) {
if (nameid) {
snd_usb_copy_string_desc(state, nameid, kctl->id.name,
sizeof(kctl->id.name));
} else {
len = get_term_name(state, &state->oterm,
kctl->id.name, sizeof(kctl->id.name), 0);
if (!len)
strlcpy(kctl->id.name, "USB", sizeof(kctl->id.name));
if (desc->bDescriptorSubtype == UAC2_CLOCK_SELECTOR)
append_ctl_name(kctl, " Clock Source");
else if ((state->oterm.type & 0xff00) == 0x0100)
append_ctl_name(kctl, " Capture Source");
else
append_ctl_name(kctl, " Playback Source");
}
}
usb_audio_dbg(state->chip, "[%d] SU [%s] items = %d\n",
cval->head.id, kctl->id.name, desc->bNrInPins);
return snd_usb_mixer_add_control(&cval->head, kctl);
}
/*
* parse an audio unit recursively
*/
static int parse_audio_unit(struct mixer_build *state, int unitid)
{
unsigned char *p1;
if (test_and_set_bit(unitid, state->unitbitmap))
return 0; /* this unit has already been visited */
p1 = find_audio_control_unit(state, unitid);
if (!p1) {
usb_audio_err(state->chip, "unit %d not found!\n", unitid);
return -EINVAL;
}
switch (p1[2]) {
case UAC_INPUT_TERMINAL:
return 0; /* NOP */
case UAC_MIXER_UNIT:
return parse_audio_mixer_unit(state, unitid, p1);
case UAC2_CLOCK_SOURCE:
return parse_clock_source_unit(state, unitid, p1);
case UAC_SELECTOR_UNIT:
case UAC2_CLOCK_SELECTOR:
return parse_audio_selector_unit(state, unitid, p1);
case UAC_FEATURE_UNIT:
return parse_audio_feature_unit(state, unitid, p1);
case UAC1_PROCESSING_UNIT:
/* UAC2_EFFECT_UNIT has the same value */
if (state->mixer->protocol == UAC_VERSION_1)
return parse_audio_processing_unit(state, unitid, p1);
else
return 0; /* FIXME - effect units not implemented yet */
case UAC1_EXTENSION_UNIT:
/* UAC2_PROCESSING_UNIT_V2 has the same value */
if (state->mixer->protocol == UAC_VERSION_1)
return parse_audio_extension_unit(state, unitid, p1);
else /* UAC_VERSION_2 */
return parse_audio_processing_unit(state, unitid, p1);
case UAC2_EXTENSION_UNIT_V2:
return parse_audio_extension_unit(state, unitid, p1);
default:
usb_audio_err(state->chip,
"unit %u: unexpected type 0x%02x\n", unitid, p1[2]);
return -EINVAL;
}
}
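/*
 * A minimal sketch of the cycle guard used above: units reference each
 * other by id, so a bitmap of visited ids bounds the recursion even if
 * a malformed device describes a loop. walk_unit() is a hypothetical
 * stand-in for parse_audio_unit().
 */
#if 0
static int walk_unit(struct mixer_build *state, int unitid)
{
if (test_and_set_bit(unitid, state->unitbitmap))
return 0; /* already seen: break the cycle */
/* ... recurse into the unit's source ids ... */
return 0;
}
#endif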
static void snd_usb_mixer_free(struct usb_mixer_interface *mixer)
{
/* kill pending URBs */
snd_usb_mixer_disconnect(mixer);
kfree(mixer->id_elems);
if (mixer->urb) {
kfree(mixer->urb->transfer_buffer);
usb_free_urb(mixer->urb);
}
usb_free_urb(mixer->rc_urb);
kfree(mixer->rc_setup_packet);
kfree(mixer);
}
static int snd_usb_mixer_dev_free(struct snd_device *device)
{
struct usb_mixer_interface *mixer = device->device_data;
snd_usb_mixer_free(mixer);
return 0;
}
/*
* create mixer controls
*
* walk through all UAC_OUTPUT_TERMINAL descriptors to search for mixers
*/
static int snd_usb_mixer_controls(struct usb_mixer_interface *mixer)
{
struct mixer_build state;
int err;
const struct usbmix_ctl_map *map;
void *p;
memset(&state, 0, sizeof(state));
state.chip = mixer->chip;
state.mixer = mixer;
state.buffer = mixer->hostif->extra;
state.buflen = mixer->hostif->extralen;
/* check the mapping table */
for (map = usbmix_ctl_maps; map->id; map++) {
if (map->id == state.chip->usb_id) {
state.map = map->map;
state.selector_map = map->selector_map;
mixer->ignore_ctl_error = map->ignore_ctl_error;
break;
}
}
p = NULL;
while ((p = snd_usb_find_csint_desc(mixer->hostif->extra,
mixer->hostif->extralen,
p, UAC_OUTPUT_TERMINAL)) != NULL) {
if (mixer->protocol == UAC_VERSION_1) {
struct uac1_output_terminal_descriptor *desc = p;
if (desc->bLength < sizeof(*desc))
continue; /* invalid descriptor? */
/* mark terminal ID as visited */
set_bit(desc->bTerminalID, state.unitbitmap);
state.oterm.id = desc->bTerminalID;
state.oterm.type = le16_to_cpu(desc->wTerminalType);
state.oterm.name = desc->iTerminal;
err = parse_audio_unit(&state, desc->bSourceID);
if (err < 0 && err != -EINVAL)
return err;
} else { /* UAC_VERSION_2 */
struct uac2_output_terminal_descriptor *desc = p;
if (desc->bLength < sizeof(*desc))
continue; /* invalid descriptor? */
/* mark terminal ID as visited */
set_bit(desc->bTerminalID, state.unitbitmap);
state.oterm.id = desc->bTerminalID;
state.oterm.type = le16_to_cpu(desc->wTerminalType);
state.oterm.name = desc->iTerminal;
err = parse_audio_unit(&state, desc->bSourceID);
if (err < 0 && err != -EINVAL)
return err;
/*
* For UAC2, use the same approach to also add the
* clock selectors
*/
err = parse_audio_unit(&state, desc->bCSourceID);
if (err < 0 && err != -EINVAL)
return err;
}
}
return 0;
}
void snd_usb_mixer_notify_id(struct usb_mixer_interface *mixer, int unitid)
{
struct usb_mixer_elem_list *list;
for (list = mixer->id_elems[unitid]; list; list = list->next_id_elem)
snd_ctl_notify(mixer->chip->card, SNDRV_CTL_EVENT_MASK_VALUE,
&list->kctl->id);
}
static void snd_usb_mixer_dump_cval(struct snd_info_buffer *buffer,
struct usb_mixer_elem_list *list)
{
struct usb_mixer_elem_info *cval = (struct usb_mixer_elem_info *)list;
static char *val_types[] = {"BOOLEAN", "INV_BOOLEAN",
"S8", "U8", "S16", "U16"};
snd_iprintf(buffer, " Info: id=%i, control=%i, cmask=0x%x, "
"channels=%i, type=\"%s\"\n", cval->head.id,
cval->control, cval->cmask, cval->channels,
val_types[cval->val_type]);
snd_iprintf(buffer, " Volume: min=%i, max=%i, dBmin=%i, dBmax=%i\n",
cval->min, cval->max, cval->dBmin, cval->dBmax);
}
static void snd_usb_mixer_proc_read(struct snd_info_entry *entry,
struct snd_info_buffer *buffer)
{
struct snd_usb_audio *chip = entry->private_data;
struct usb_mixer_interface *mixer;
struct usb_mixer_elem_list *list;
int unitid;
list_for_each_entry(mixer, &chip->mixer_list, list) {
snd_iprintf(buffer,
"USB Mixer: usb_id=0x%08x, ctrlif=%i, ctlerr=%i\n",
chip->usb_id, snd_usb_ctrl_intf(chip),
mixer->ignore_ctl_error);
snd_iprintf(buffer, "Card: %s\n", chip->card->longname);
for (unitid = 0; unitid < MAX_ID_ELEMS; unitid++) {
for (list = mixer->id_elems[unitid]; list;
list = list->next_id_elem) {
snd_iprintf(buffer, " Unit: %i\n", list->id);
if (list->kctl)
snd_iprintf(buffer,
" Control: name=\"%s\", index=%i\n",
list->kctl->id.name,
list->kctl->id.index);
if (list->dump)
list->dump(buffer, list);
}
}
}
}
static void snd_usb_mixer_interrupt_v2(struct usb_mixer_interface *mixer,
int attribute, int value, int index)
{
struct usb_mixer_elem_list *list;
__u8 unitid = (index >> 8) & 0xff;
__u8 control = (value >> 8) & 0xff;
__u8 channel = value & 0xff;
unsigned int count = 0;
if (channel >= MAX_CHANNELS) {
usb_audio_dbg(mixer->chip,
"%s(): bogus channel number %d\n",
__func__, channel);
return;
}
for (list = mixer->id_elems[unitid]; list; list = list->next_id_elem)
count++;
if (count == 0)
return;
for (list = mixer->id_elems[unitid]; list; list = list->next_id_elem) {
struct usb_mixer_elem_info *info;
if (!list->kctl)
continue;
info = (struct usb_mixer_elem_info *)list;
if (count > 1 && info->control != control)
continue;
switch (attribute) {
case UAC2_CS_CUR:
/* invalidate cache, so the value is read from the device */
if (channel)
info->cached &= ~(1 << channel);
else /* master channel */
info->cached = 0;
snd_ctl_notify(mixer->chip->card, SNDRV_CTL_EVENT_MASK_VALUE,
&info->head.kctl->id);
break;
case UAC2_CS_RANGE:
/* TODO */
break;
case UAC2_CS_MEM:
/* TODO */
break;
default:
usb_audio_dbg(mixer->chip,
"unknown attribute %d in interrupt\n",
attribute);
break;
} /* switch */
}
}
static void snd_usb_mixer_interrupt(struct urb *urb)
{
struct usb_mixer_interface *mixer = urb->context;
int len = urb->actual_length;
int ustatus = urb->status;
if (ustatus != 0)
goto requeue;
if (mixer->protocol == UAC_VERSION_1) {
struct uac1_status_word *status;
for (status = urb->transfer_buffer;
len >= sizeof(*status);
len -= sizeof(*status), status++) {
dev_dbg(&urb->dev->dev, "status interrupt: %02x %02x\n",
status->bStatusType,
status->bOriginator);
/* ignore any notifications not from the control interface */
if ((status->bStatusType & UAC1_STATUS_TYPE_ORIG_MASK) !=
UAC1_STATUS_TYPE_ORIG_AUDIO_CONTROL_IF)
continue;
if (status->bStatusType & UAC1_STATUS_TYPE_MEM_CHANGED)
snd_usb_mixer_rc_memory_change(mixer, status->bOriginator);
else
snd_usb_mixer_notify_id(mixer, status->bOriginator);
}
} else { /* UAC_VERSION_2 */
struct uac2_interrupt_data_msg *msg;
for (msg = urb->transfer_buffer;
len >= sizeof(*msg);
len -= sizeof(*msg), msg++) {
/* drop vendor specific and endpoint requests */
if ((msg->bInfo & UAC2_INTERRUPT_DATA_MSG_VENDOR) ||
(msg->bInfo & UAC2_INTERRUPT_DATA_MSG_EP))
continue;
snd_usb_mixer_interrupt_v2(mixer, msg->bAttribute,
le16_to_cpu(msg->wValue),
le16_to_cpu(msg->wIndex));
}
}
requeue:
if (ustatus != -ENOENT &&
ustatus != -ECONNRESET &&
ustatus != -ESHUTDOWN) {
urb->dev = mixer->chip->dev;
usb_submit_urb(urb, GFP_ATOMIC);
}
}
/* create the handler for the optional status interrupt endpoint */
static int snd_usb_mixer_status_create(struct usb_mixer_interface *mixer)
{
struct usb_endpoint_descriptor *ep;
void *transfer_buffer;
int buffer_length;
unsigned int epnum;
/* we need one interrupt input endpoint */
if (get_iface_desc(mixer->hostif)->bNumEndpoints < 1)
return 0;
ep = get_endpoint(mixer->hostif, 0);
if (!usb_endpoint_dir_in(ep) || !usb_endpoint_xfer_int(ep))
return 0;
epnum = usb_endpoint_num(ep);
buffer_length = le16_to_cpu(ep->wMaxPacketSize);
transfer_buffer = kmalloc(buffer_length, GFP_KERNEL);
if (!transfer_buffer)
return -ENOMEM;
mixer->urb = usb_alloc_urb(0, GFP_KERNEL);
if (!mixer->urb) {
kfree(transfer_buffer);
return -ENOMEM;
}
usb_fill_int_urb(mixer->urb, mixer->chip->dev,
usb_rcvintpipe(mixer->chip->dev, epnum),
transfer_buffer, buffer_length,
snd_usb_mixer_interrupt, mixer, ep->bInterval);
usb_submit_urb(mixer->urb, GFP_KERNEL);
return 0;
}
int snd_usb_create_mixer(struct snd_usb_audio *chip, int ctrlif,
int ignore_error)
{
static struct snd_device_ops dev_ops = {
.dev_free = snd_usb_mixer_dev_free
};
struct usb_mixer_interface *mixer;
struct snd_info_entry *entry;
int err;
strcpy(chip->card->mixername, "USB Mixer");
mixer = kzalloc(sizeof(*mixer), GFP_KERNEL);
if (!mixer)
return -ENOMEM;
mixer->chip = chip;
mixer->ignore_ctl_error = ignore_error;
mixer->id_elems = kcalloc(MAX_ID_ELEMS, sizeof(*mixer->id_elems),
GFP_KERNEL);
if (!mixer->id_elems) {
kfree(mixer);
return -ENOMEM;
}
mixer->hostif = &usb_ifnum_to_if(chip->dev, ctrlif)->altsetting[0];
switch (get_iface_desc(mixer->hostif)->bInterfaceProtocol) {
case UAC_VERSION_1:
default:
mixer->protocol = UAC_VERSION_1;
break;
case UAC_VERSION_2:
mixer->protocol = UAC_VERSION_2;
break;
}
if ((err = snd_usb_mixer_controls(mixer)) < 0 ||
(err = snd_usb_mixer_status_create(mixer)) < 0)
goto _error;
snd_usb_mixer_apply_create_quirk(mixer);
err = snd_device_new(chip->card, SNDRV_DEV_CODEC, mixer, &dev_ops);
if (err < 0)
goto _error;
if (list_empty(&chip->mixer_list) &&
!snd_card_proc_new(chip->card, "usbmixer", &entry))
snd_info_set_text_ops(entry, chip, snd_usb_mixer_proc_read);
list_add(&mixer->list, &chip->mixer_list);
return 0;
_error:
snd_usb_mixer_free(mixer);
return err;
}
void snd_usb_mixer_disconnect(struct usb_mixer_interface *mixer)
{
if (mixer->disconnected)
return;
if (mixer->urb)
usb_kill_urb(mixer->urb);
if (mixer->rc_urb)
usb_kill_urb(mixer->rc_urb);
mixer->disconnected = true;
}
#ifdef CONFIG_PM
/* stop any bus activity of a mixer */
static void snd_usb_mixer_inactivate(struct usb_mixer_interface *mixer)
{
usb_kill_urb(mixer->urb);
usb_kill_urb(mixer->rc_urb);
}
static int snd_usb_mixer_activate(struct usb_mixer_interface *mixer)
{
int err;
if (mixer->urb) {
err = usb_submit_urb(mixer->urb, GFP_NOIO);
if (err < 0)
return err;
}
return 0;
}
int snd_usb_mixer_suspend(struct usb_mixer_interface *mixer)
{
snd_usb_mixer_inactivate(mixer);
return 0;
}
static int restore_mixer_value(struct usb_mixer_elem_list *list)
{
struct usb_mixer_elem_info *cval = (struct usb_mixer_elem_info *)list;
int c, err, idx;
if (cval->cmask) {
idx = 0;
for (c = 0; c < MAX_CHANNELS; c++) {
if (!(cval->cmask & (1 << c)))
continue;
if (cval->cached & (1 << (c + 1))) {
err = snd_usb_set_cur_mix_value(cval, c + 1, idx,
cval->cache_val[idx]);
if (err < 0)
return err;
}
idx++;
}
} else {
/* master */
if (cval->cached) {
err = snd_usb_set_cur_mix_value(cval, 0, 0, *cval->cache_val);
if (err < 0)
return err;
}
}
return 0;
}
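/*
 * A minimal sketch of the channel-mask walk above: cmask carries one
 * bit per active channel, while the cache array is packed, so a second
 * index counts only the set bits. example_walk() is illustrative only.
 */
#if 0
static void example_walk(unsigned int cmask)
{
int c, idx = 0;
for (c = 0; c < MAX_CHANNELS; c++) {
if (!(cmask & (1 << c)))
continue;
/* wire channel number is c + 1, packed cache slot is idx */
idx++;
}
}
#endif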
int snd_usb_mixer_resume(struct usb_mixer_interface *mixer, bool reset_resume)
{
struct usb_mixer_elem_list *list;
int id, err;
if (reset_resume) {
/* restore cached mixer values */
for (id = 0; id < MAX_ID_ELEMS; id++) {
for (list = mixer->id_elems[id]; list;
list = list->next_id_elem) {
if (list->resume) {
err = list->resume(list);
if (err < 0)
return err;
}
}
}
}
return snd_usb_mixer_activate(mixer);
}
#endif
void snd_usb_mixer_elem_init_std(struct usb_mixer_elem_list *list,
struct usb_mixer_interface *mixer,
int unitid)
{
list->mixer = mixer;
list->id = unitid;
list->dump = snd_usb_mixer_dump_cval;
#ifdef CONFIG_PM
list->resume = restore_mixer_value;
#endif
}
| ./CrossVul/dataset_final_sorted/CWE-416/c/good_2908_0 |
crossvul-cpp_data_good_5376_0 | /*
*
* drivers/staging/android/ion/ion.c
*
* Copyright (C) 2011 Google, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/device.h>
#include <linux/err.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/idr.h>
#include "ion.h"
#include "ion_priv.h"
#include "compat_ion.h"
/**
* struct ion_device - the metadata of the ion device node
* @dev: the actual misc device
* @buffers: an rb tree of all the existing buffers
* @buffer_lock: lock protecting the tree of buffers
* @lock: rwsem protecting the tree of heaps and clients
* @heaps: list of all the heaps in the system
* @user_clients: list of all the clients created from userspace
*/
struct ion_device {
struct miscdevice dev;
struct rb_root buffers;
struct mutex buffer_lock;
struct rw_semaphore lock;
struct plist_head heaps;
long (*custom_ioctl)(struct ion_client *client, unsigned int cmd,
unsigned long arg);
struct rb_root clients;
struct dentry *debug_root;
struct dentry *heaps_debug_root;
struct dentry *clients_debug_root;
};
/**
* struct ion_client - a process/hw block local address space
* @node: node in the tree of all clients
* @dev: backpointer to ion device
* @handles: an rb tree of all the handles in this client
* @idr: an idr space for allocating handle ids
* @lock: lock protecting the tree of handles
* @name: used for debugging
* @display_name: used for debugging (unique version of @name)
* @display_serial: used for debugging (to make display_name unique)
* @task: used for debugging
*
* A client represents a list of buffers this client may access.
* The mutex stored here is used to protect both the tree of handles
* and the handles themselves; hold it while modifying either.
*/
struct ion_client {
struct rb_node node;
struct ion_device *dev;
struct rb_root handles;
struct idr idr;
struct mutex lock;
const char *name;
char *display_name;
int display_serial;
struct task_struct *task;
pid_t pid;
struct dentry *debug_root;
};
/**
* ion_handle - a client local reference to a buffer
* @ref: reference count
* @client: back pointer to the client the buffer resides in
* @buffer: pointer to the buffer
* @node: node in the client's handle rbtree
* @kmap_cnt: count of times this client has mapped to kernel
* @id: client-unique id allocated by client->idr
*
* Modifications to node and kmap_cnt should be protected by the
* lock in the client. Other fields are never changed after initialization.
*/
struct ion_handle {
struct kref ref;
struct ion_client *client;
struct ion_buffer *buffer;
struct rb_node node;
unsigned int kmap_cnt;
int id;
};
bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
{
return (buffer->flags & ION_FLAG_CACHED) &&
!(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
}
bool ion_buffer_cached(struct ion_buffer *buffer)
{
return !!(buffer->flags & ION_FLAG_CACHED);
}
static inline struct page *ion_buffer_page(struct page *page)
{
return (struct page *)((unsigned long)page & ~(1UL));
}
static inline bool ion_buffer_page_is_dirty(struct page *page)
{
return !!((unsigned long)page & 1UL);
}
static inline void ion_buffer_page_dirty(struct page **page)
{
*page = (struct page *)((unsigned long)(*page) | 1UL);
}
static inline void ion_buffer_page_clean(struct page **page)
{
*page = (struct page *)((unsigned long)(*page) & ~(1UL));
}
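/*
 * A minimal sketch of the pointer tagging above: struct page pointers
 * are at least word aligned, so bit 0 is free to carry a per-page dirty
 * flag without a separate bitmap. example_tag_roundtrip() is
 * illustrative only.
 */
#if 0
static bool example_tag_roundtrip(struct page *page)
{
struct page *tagged = page;
ion_buffer_page_dirty(&tagged);
return ion_buffer_page_is_dirty(tagged) &&
ion_buffer_page(tagged) == page;
}
#endif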
/* this function should only be called while dev->buffer_lock is held */
static void ion_buffer_add(struct ion_device *dev,
struct ion_buffer *buffer)
{
struct rb_node **p = &dev->buffers.rb_node;
struct rb_node *parent = NULL;
struct ion_buffer *entry;
while (*p) {
parent = *p;
entry = rb_entry(parent, struct ion_buffer, node);
if (buffer < entry) {
p = &(*p)->rb_left;
} else if (buffer > entry) {
p = &(*p)->rb_right;
} else {
pr_err("%s: buffer already found.\n", __func__);
BUG();
}
}
rb_link_node(&buffer->node, parent, p);
rb_insert_color(&buffer->node, &dev->buffers);
}
/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
struct ion_device *dev,
unsigned long len,
unsigned long align,
unsigned long flags)
{
struct ion_buffer *buffer;
struct sg_table *table;
struct scatterlist *sg;
int i, ret;
buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
if (!buffer)
return ERR_PTR(-ENOMEM);
buffer->heap = heap;
buffer->flags = flags;
kref_init(&buffer->ref);
ret = heap->ops->allocate(heap, buffer, len, align, flags);
if (ret) {
if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
goto err2;
ion_heap_freelist_drain(heap, 0);
ret = heap->ops->allocate(heap, buffer, len, align,
flags);
if (ret)
goto err2;
}
buffer->dev = dev;
buffer->size = len;
table = heap->ops->map_dma(heap, buffer);
if (WARN_ONCE(table == NULL,
"heap->ops->map_dma should return ERR_PTR on error"))
table = ERR_PTR(-EINVAL);
if (IS_ERR(table)) {
ret = -EINVAL;
goto err1;
}
buffer->sg_table = table;
if (ion_buffer_fault_user_mappings(buffer)) {
int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
struct scatterlist *sg;
int i, j, k = 0;
buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
if (!buffer->pages) {
ret = -ENOMEM;
goto err;
}
for_each_sg(table->sgl, sg, table->nents, i) {
struct page *page = sg_page(sg);
for (j = 0; j < sg->length / PAGE_SIZE; j++)
buffer->pages[k++] = page++;
}
}
INIT_LIST_HEAD(&buffer->vmas);
mutex_init(&buffer->lock);
/*
* this will set up dma addresses for the sglist -- it is not
* technically correct as per the dma api -- a specific
* device isn't really taking ownership here. However, in practice on
* our systems the only dma_address space is physical addresses.
* Additionally, we can't afford the overhead of invalidating every
* allocation via dma_map_sg. The implicit contract here is that
* memory coming from the heaps is ready for dma, ie if it has a
* cached mapping that mapping has been invalidated
*/
for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
sg_dma_address(sg) = sg_phys(sg);
sg_dma_len(sg) = sg->length;
}
mutex_lock(&dev->buffer_lock);
ion_buffer_add(dev, buffer);
mutex_unlock(&dev->buffer_lock);
return buffer;
err:
heap->ops->unmap_dma(heap, buffer);
err1:
heap->ops->free(buffer);
err2:
kfree(buffer);
return ERR_PTR(ret);
}
void ion_buffer_destroy(struct ion_buffer *buffer)
{
if (WARN_ON(buffer->kmap_cnt > 0))
buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
buffer->heap->ops->unmap_dma(buffer->heap, buffer);
buffer->heap->ops->free(buffer);
vfree(buffer->pages);
kfree(buffer);
}
static void _ion_buffer_destroy(struct kref *kref)
{
struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
struct ion_heap *heap = buffer->heap;
struct ion_device *dev = buffer->dev;
mutex_lock(&dev->buffer_lock);
rb_erase(&buffer->node, &dev->buffers);
mutex_unlock(&dev->buffer_lock);
if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
ion_heap_freelist_add(heap, buffer);
else
ion_buffer_destroy(buffer);
}
static void ion_buffer_get(struct ion_buffer *buffer)
{
kref_get(&buffer->ref);
}
static int ion_buffer_put(struct ion_buffer *buffer)
{
return kref_put(&buffer->ref, _ion_buffer_destroy);
}
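/*
 * The refcounting pattern above, reduced to its core (the names here
 * are illustrative, not driver code): the final kref_put() invokes the
 * release callback, which recovers the containing object and frees it.
 */
#if 0
struct obj {
struct kref ref;
};

static void obj_release(struct kref *kref)
{
struct obj *o = container_of(kref, struct obj, ref);
kfree(o);
}

static void obj_put(struct obj *o)
{
kref_put(&o->ref, obj_release); /* frees o when the count hits zero */
}
#endif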
static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
{
mutex_lock(&buffer->lock);
buffer->handle_count++;
mutex_unlock(&buffer->lock);
}
static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
{
/*
* when a buffer is removed from a handle and is not referenced by
* any other handle, copy the taskcomm and the pid of the process
* it is being removed from into the buffer. At this point there is
* no way to track which processes are using this buffer; it only
* exists as a dma_buf file descriptor. The taskcomm and pid give a
* debug hint as to where this fd is in the system
*/
mutex_lock(&buffer->lock);
buffer->handle_count--;
BUG_ON(buffer->handle_count < 0);
if (!buffer->handle_count) {
struct task_struct *task;
task = current->group_leader;
get_task_comm(buffer->task_comm, task);
buffer->pid = task_pid_nr(task);
}
mutex_unlock(&buffer->lock);
}
static struct ion_handle *ion_handle_create(struct ion_client *client,
struct ion_buffer *buffer)
{
struct ion_handle *handle;
handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
if (!handle)
return ERR_PTR(-ENOMEM);
kref_init(&handle->ref);
RB_CLEAR_NODE(&handle->node);
handle->client = client;
ion_buffer_get(buffer);
ion_buffer_add_to_handle(buffer);
handle->buffer = buffer;
return handle;
}
static void ion_handle_kmap_put(struct ion_handle *);
static void ion_handle_destroy(struct kref *kref)
{
struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
struct ion_client *client = handle->client;
struct ion_buffer *buffer = handle->buffer;
mutex_lock(&buffer->lock);
while (handle->kmap_cnt)
ion_handle_kmap_put(handle);
mutex_unlock(&buffer->lock);
idr_remove(&client->idr, handle->id);
if (!RB_EMPTY_NODE(&handle->node))
rb_erase(&handle->node, &client->handles);
ion_buffer_remove_from_handle(buffer);
ion_buffer_put(buffer);
kfree(handle);
}
struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
return handle->buffer;
}
static void ion_handle_get(struct ion_handle *handle)
{
kref_get(&handle->ref);
}
static int ion_handle_put_nolock(struct ion_handle *handle)
{
int ret;
ret = kref_put(&handle->ref, ion_handle_destroy);
return ret;
}
int ion_handle_put(struct ion_handle *handle)
{
struct ion_client *client = handle->client;
int ret;
mutex_lock(&client->lock);
ret = ion_handle_put_nolock(handle);
mutex_unlock(&client->lock);
return ret;
}
static struct ion_handle *ion_handle_lookup(struct ion_client *client,
struct ion_buffer *buffer)
{
struct rb_node *n = client->handles.rb_node;
while (n) {
struct ion_handle *entry = rb_entry(n, struct ion_handle, node);
if (buffer < entry->buffer)
n = n->rb_left;
else if (buffer > entry->buffer)
n = n->rb_right;
else
return entry;
}
return ERR_PTR(-EINVAL);
}
static struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
int id)
{
struct ion_handle *handle;
handle = idr_find(&client->idr, id);
if (handle)
ion_handle_get(handle);
return handle ? handle : ERR_PTR(-EINVAL);
}
struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
int id)
{
struct ion_handle *handle;
mutex_lock(&client->lock);
handle = ion_handle_get_by_id_nolock(client, id);
mutex_unlock(&client->lock);
return handle;
}
static bool ion_handle_validate(struct ion_client *client,
struct ion_handle *handle)
{
WARN_ON(!mutex_is_locked(&client->lock));
return idr_find(&client->idr, handle->id) == handle;
}
static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
int id;
struct rb_node **p = &client->handles.rb_node;
struct rb_node *parent = NULL;
struct ion_handle *entry;
id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
if (id < 0)
return id;
handle->id = id;
while (*p) {
parent = *p;
entry = rb_entry(parent, struct ion_handle, node);
if (handle->buffer < entry->buffer)
p = &(*p)->rb_left;
else if (handle->buffer > entry->buffer)
p = &(*p)->rb_right;
else
WARN(1, "%s: buffer already found.\n", __func__);
}
rb_link_node(&handle->node, parent, p);
rb_insert_color(&handle->node, &client->handles);
return 0;
}
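/*
 * A minimal sketch of the id allocation above: idr_alloc() hands out
 * the lowest free id >= 1 (an upper bound of 0 means "no limit") and
 * maps it back to the handle pointer, which is what
 * ion_handle_get_by_id_nolock() later resolves with idr_find().
 * example_alloc_id() is a hypothetical helper.
 */
#if 0
static int example_alloc_id(struct idr *ids, void *ptr)
{
return idr_alloc(ids, ptr, 1, 0, GFP_KERNEL);
}
#endif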
struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
size_t align, unsigned int heap_id_mask,
unsigned int flags)
{
struct ion_handle *handle;
struct ion_device *dev = client->dev;
struct ion_buffer *buffer = NULL;
struct ion_heap *heap;
int ret;
pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
len, align, heap_id_mask, flags);
/*
* traverse the list of heaps available in this system in priority
* order. If the heap type is supported by the client and matches
* the caller's request, allocate from it. Repeat until an
* allocation succeeds or all heaps have been tried
*/
len = PAGE_ALIGN(len);
if (!len)
return ERR_PTR(-EINVAL);
down_read(&dev->lock);
plist_for_each_entry(heap, &dev->heaps, node) {
/* if the caller didn't specify this heap id */
if (!((1 << heap->id) & heap_id_mask))
continue;
buffer = ion_buffer_create(heap, dev, len, align, flags);
if (!IS_ERR(buffer))
break;
}
up_read(&dev->lock);
if (buffer == NULL)
return ERR_PTR(-ENODEV);
if (IS_ERR(buffer))
return ERR_CAST(buffer);
handle = ion_handle_create(client, buffer);
/*
* ion_buffer_create will create a buffer with a ref_cnt of 1,
* and ion_handle_create will take a second reference, drop one here
*/
ion_buffer_put(buffer);
if (IS_ERR(handle))
return handle;
mutex_lock(&client->lock);
ret = ion_handle_add(client, handle);
mutex_unlock(&client->lock);
if (ret) {
ion_handle_put(handle);
handle = ERR_PTR(ret);
}
return handle;
}
EXPORT_SYMBOL(ion_alloc);
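/*
 * A minimal sketch of the reference handoff inside ion_alloc() above
 * (example_alloc_flow() is hypothetical, not additional driver code):
 * the buffer arrives with a refcount of 1 from ion_buffer_create(),
 * the handle takes a second reference, and the creator drops its own,
 * leaving the handle as the sole owner.
 */
#if 0
static struct ion_handle *example_alloc_flow(struct ion_client *client,
struct ion_buffer *buffer)
{
struct ion_handle *handle;
handle = ion_handle_create(client, buffer); /* refcount becomes 2 */
ion_buffer_put(buffer); /* back to 1, now owned by the handle */
return handle;
}
#endif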
static void ion_free_nolock(struct ion_client *client, struct ion_handle *handle)
{
bool valid_handle;
BUG_ON(client != handle->client);
valid_handle = ion_handle_validate(client, handle);
if (!valid_handle) {
WARN(1, "%s: invalid handle passed to free.\n", __func__);
return;
}
ion_handle_put_nolock(handle);
}
void ion_free(struct ion_client *client, struct ion_handle *handle)
{
BUG_ON(client != handle->client);
mutex_lock(&client->lock);
ion_free_nolock(client, handle);
mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_free);
int ion_phys(struct ion_client *client, struct ion_handle *handle,
ion_phys_addr_t *addr, size_t *len)
{
struct ion_buffer *buffer;
int ret;
mutex_lock(&client->lock);
if (!ion_handle_validate(client, handle)) {
mutex_unlock(&client->lock);
return -EINVAL;
}
buffer = handle->buffer;
if (!buffer->heap->ops->phys) {
pr_err("%s: ion_phys is not implemented by this heap (name=%s, type=%d).\n",
__func__, buffer->heap->name, buffer->heap->type);
mutex_unlock(&client->lock);
return -ENODEV;
}
mutex_unlock(&client->lock);
ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
return ret;
}
EXPORT_SYMBOL(ion_phys);
static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
void *vaddr;
if (buffer->kmap_cnt) {
buffer->kmap_cnt++;
return buffer->vaddr;
}
vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
if (WARN_ONCE(vaddr == NULL,
"heap->ops->map_kernel should return ERR_PTR on error"))
return ERR_PTR(-EINVAL);
if (IS_ERR(vaddr))
return vaddr;
buffer->vaddr = vaddr;
buffer->kmap_cnt++;
return vaddr;
}
static void *ion_handle_kmap_get(struct ion_handle *handle)
{
struct ion_buffer *buffer = handle->buffer;
void *vaddr;
if (handle->kmap_cnt) {
handle->kmap_cnt++;
return buffer->vaddr;
}
vaddr = ion_buffer_kmap_get(buffer);
if (IS_ERR(vaddr))
return vaddr;
handle->kmap_cnt++;
return vaddr;
}
static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
buffer->kmap_cnt--;
if (!buffer->kmap_cnt) {
buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
buffer->vaddr = NULL;
}
}
static void ion_handle_kmap_put(struct ion_handle *handle)
{
struct ion_buffer *buffer = handle->buffer;
if (!handle->kmap_cnt) {
WARN(1, "%s: Double unmap detected! bailing...\n", __func__);
return;
}
handle->kmap_cnt--;
if (!handle->kmap_cnt)
ion_buffer_kmap_put(buffer);
}
void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
struct ion_buffer *buffer;
void *vaddr;
mutex_lock(&client->lock);
if (!ion_handle_validate(client, handle)) {
pr_err("%s: invalid handle passed to map_kernel.\n",
__func__);
mutex_unlock(&client->lock);
return ERR_PTR(-EINVAL);
}
buffer = handle->buffer;
if (!handle->buffer->heap->ops->map_kernel) {
pr_err("%s: map_kernel is not implemented by this heap.\n",
__func__);
mutex_unlock(&client->lock);
return ERR_PTR(-ENODEV);
}
mutex_lock(&buffer->lock);
vaddr = ion_handle_kmap_get(handle);
mutex_unlock(&buffer->lock);
mutex_unlock(&client->lock);
return vaddr;
}
EXPORT_SYMBOL(ion_map_kernel);
void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
struct ion_buffer *buffer;
mutex_lock(&client->lock);
buffer = handle->buffer;
mutex_lock(&buffer->lock);
ion_handle_kmap_put(handle);
mutex_unlock(&buffer->lock);
mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_kernel);
static struct mutex debugfs_mutex;
static struct rb_root *ion_root_client;
static int is_client_alive(struct ion_client *client)
{
struct rb_node *node;
struct ion_client *tmp;
struct ion_device *dev;
node = ion_root_client->rb_node;
dev = container_of(ion_root_client, struct ion_device, clients);
down_read(&dev->lock);
while (node) {
tmp = rb_entry(node, struct ion_client, node);
if (client < tmp) {
node = node->rb_left;
} else if (client > tmp) {
node = node->rb_right;
} else {
up_read(&dev->lock);
return 1;
}
}
up_read(&dev->lock);
return 0;
}
static int ion_debug_client_show(struct seq_file *s, void *unused)
{
struct ion_client *client = s->private;
struct rb_node *n;
size_t sizes[ION_NUM_HEAP_IDS] = {0};
const char *names[ION_NUM_HEAP_IDS] = {NULL};
int i;
mutex_lock(&debugfs_mutex);
if (!is_client_alive(client)) {
seq_printf(s, "ion_client 0x%p dead, can't dump its buffers\n",
client);
mutex_unlock(&debugfs_mutex);
return 0;
}
mutex_lock(&client->lock);
for (n = rb_first(&client->handles); n; n = rb_next(n)) {
struct ion_handle *handle = rb_entry(n, struct ion_handle,
node);
unsigned int id = handle->buffer->heap->id;
if (!names[id])
names[id] = handle->buffer->heap->name;
sizes[id] += handle->buffer->size;
}
mutex_unlock(&client->lock);
mutex_unlock(&debugfs_mutex);
seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
if (!names[i])
continue;
seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
}
return 0;
}
static int ion_debug_client_open(struct inode *inode, struct file *file)
{
return single_open(file, ion_debug_client_show, inode->i_private);
}
static const struct file_operations debug_client_fops = {
.open = ion_debug_client_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static int ion_get_client_serial(const struct rb_root *root,
const unsigned char *name)
{
int serial = -1;
struct rb_node *node;
for (node = rb_first(root); node; node = rb_next(node)) {
struct ion_client *client = rb_entry(node, struct ion_client,
node);
if (strcmp(client->name, name))
continue;
serial = max(serial, client->display_serial);
}
return serial + 1;
}
struct ion_client *ion_client_create(struct ion_device *dev,
const char *name)
{
struct ion_client *client;
struct task_struct *task;
struct rb_node **p;
struct rb_node *parent = NULL;
struct ion_client *entry;
pid_t pid;
if (!name) {
pr_err("%s: Name cannot be null\n", __func__);
return ERR_PTR(-EINVAL);
}
get_task_struct(current->group_leader);
task_lock(current->group_leader);
pid = task_pid_nr(current->group_leader);
/*
* don't bother to store task struct for kernel threads,
* they can't be killed anyway
*/
if (current->group_leader->flags & PF_KTHREAD) {
put_task_struct(current->group_leader);
task = NULL;
} else {
task = current->group_leader;
}
task_unlock(current->group_leader);
client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
if (!client)
goto err_put_task_struct;
client->dev = dev;
client->handles = RB_ROOT;
idr_init(&client->idr);
mutex_init(&client->lock);
client->task = task;
client->pid = pid;
client->name = kstrdup(name, GFP_KERNEL);
if (!client->name)
goto err_free_client;
down_write(&dev->lock);
client->display_serial = ion_get_client_serial(&dev->clients, name);
client->display_name = kasprintf(
GFP_KERNEL, "%s-%d", name, client->display_serial);
if (!client->display_name) {
up_write(&dev->lock);
goto err_free_client_name;
}
p = &dev->clients.rb_node;
while (*p) {
parent = *p;
entry = rb_entry(parent, struct ion_client, node);
if (client < entry)
p = &(*p)->rb_left;
else if (client > entry)
p = &(*p)->rb_right;
}
rb_link_node(&client->node, parent, p);
rb_insert_color(&client->node, &dev->clients);
client->debug_root = debugfs_create_file(client->display_name, 0664,
dev->clients_debug_root,
client, &debug_client_fops);
if (!client->debug_root) {
char buf[256], *path;
path = dentry_path(dev->clients_debug_root, buf, 256);
pr_err("Failed to create client debugfs at %s/%s\n",
path, client->display_name);
}
up_write(&dev->lock);
return client;
err_free_client_name:
kfree(client->name);
err_free_client:
kfree(client);
err_put_task_struct:
if (task)
put_task_struct(current->group_leader);
return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ion_client_create);
void ion_client_destroy(struct ion_client *client)
{
struct ion_device *dev = client->dev;
struct rb_node *n;
pr_debug("%s: %d\n", __func__, __LINE__);
mutex_lock(&debugfs_mutex);
while ((n = rb_first(&client->handles))) {
struct ion_handle *handle = rb_entry(n, struct ion_handle,
node);
ion_handle_destroy(&handle->ref);
}
idr_destroy(&client->idr);
down_write(&dev->lock);
if (client->task)
put_task_struct(client->task);
rb_erase(&client->node, &dev->clients);
debugfs_remove_recursive(client->debug_root);
up_write(&dev->lock);
kfree(client->display_name);
kfree(client->name);
kfree(client);
mutex_unlock(&debugfs_mutex);
}
EXPORT_SYMBOL(ion_client_destroy);
struct sg_table *ion_sg_table(struct ion_client *client,
struct ion_handle *handle)
{
struct ion_buffer *buffer;
struct sg_table *table;
mutex_lock(&client->lock);
if (!ion_handle_validate(client, handle)) {
pr_err("%s: invalid handle passed to map_dma.\n",
__func__);
mutex_unlock(&client->lock);
return ERR_PTR(-EINVAL);
}
buffer = handle->buffer;
table = buffer->sg_table;
mutex_unlock(&client->lock);
return table;
}
EXPORT_SYMBOL(ion_sg_table);
static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
struct device *dev,
enum dma_data_direction direction);
static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
enum dma_data_direction direction)
{
struct dma_buf *dmabuf = attachment->dmabuf;
struct ion_buffer *buffer = dmabuf->priv;
ion_buffer_sync_for_device(buffer, attachment->dev, direction);
return buffer->sg_table;
}
static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
struct sg_table *table,
enum dma_data_direction direction)
{
}
void ion_pages_sync_for_device(struct device *dev, struct page *page,
size_t size, enum dma_data_direction dir)
{
struct scatterlist sg;
sg_init_table(&sg, 1);
sg_set_page(&sg, page, size, 0);
/*
* This is not correct - sg_dma_address needs a dma_addr_t that is valid
* for the targeted device, but this works on the currently targeted
* hardware.
*/
sg_dma_address(&sg) = page_to_phys(page);
dma_sync_sg_for_device(dev, &sg, 1, dir);
}
struct ion_vma_list {
struct list_head list;
struct vm_area_struct *vma;
};
static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
struct device *dev,
enum dma_data_direction dir)
{
struct ion_vma_list *vma_list;
int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
int i;
pr_debug("%s: syncing for device %s\n", __func__,
dev ? dev_name(dev) : "null");
if (!ion_buffer_fault_user_mappings(buffer))
return;
mutex_lock(&buffer->lock);
for (i = 0; i < pages; i++) {
struct page *page = buffer->pages[i];
if (ion_buffer_page_is_dirty(page))
ion_pages_sync_for_device(dev, ion_buffer_page(page),
PAGE_SIZE, dir);
ion_buffer_page_clean(buffer->pages + i);
}
list_for_each_entry(vma_list, &buffer->vmas, list) {
struct vm_area_struct *vma = vma_list->vma;
zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
NULL);
}
mutex_unlock(&buffer->lock);
}
static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct ion_buffer *buffer = vma->vm_private_data;
unsigned long pfn;
int ret;
mutex_lock(&buffer->lock);
/* validate the page pointer before tagging it dirty */
BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);
ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
mutex_unlock(&buffer->lock);
if (ret)
return VM_FAULT_ERROR;
return VM_FAULT_NOPAGE;
}
static void ion_vm_open(struct vm_area_struct *vma)
{
struct ion_buffer *buffer = vma->vm_private_data;
struct ion_vma_list *vma_list;
vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
if (!vma_list)
return;
vma_list->vma = vma;
mutex_lock(&buffer->lock);
list_add(&vma_list->list, &buffer->vmas);
mutex_unlock(&buffer->lock);
pr_debug("%s: adding %p\n", __func__, vma);
}
static void ion_vm_close(struct vm_area_struct *vma)
{
struct ion_buffer *buffer = vma->vm_private_data;
struct ion_vma_list *vma_list, *tmp;
pr_debug("%s\n", __func__);
mutex_lock(&buffer->lock);
list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
if (vma_list->vma != vma)
continue;
list_del(&vma_list->list);
kfree(vma_list);
pr_debug("%s: deleting %p\n", __func__, vma);
break;
}
mutex_unlock(&buffer->lock);
}
static const struct vm_operations_struct ion_vma_ops = {
.open = ion_vm_open,
.close = ion_vm_close,
.fault = ion_vm_fault,
};
static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
struct ion_buffer *buffer = dmabuf->priv;
int ret = 0;
if (!buffer->heap->ops->map_user) {
pr_err("%s: this heap does not define a method for mapping to userspace\n",
__func__);
return -EINVAL;
}
if (ion_buffer_fault_user_mappings(buffer)) {
vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
VM_DONTDUMP;
vma->vm_private_data = buffer;
vma->vm_ops = &ion_vma_ops;
ion_vm_open(vma);
return 0;
}
if (!(buffer->flags & ION_FLAG_CACHED))
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
mutex_lock(&buffer->lock);
/* now map it to userspace */
ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
mutex_unlock(&buffer->lock);
if (ret)
pr_err("%s: failure mapping buffer to userspace\n",
__func__);
return ret;
}
static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
struct ion_buffer *buffer = dmabuf->priv;
ion_buffer_put(buffer);
}
static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
struct ion_buffer *buffer = dmabuf->priv;
return buffer->vaddr + offset * PAGE_SIZE;
}
static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
void *ptr)
{
}
static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
size_t len,
enum dma_data_direction direction)
{
struct ion_buffer *buffer = dmabuf->priv;
void *vaddr;
if (!buffer->heap->ops->map_kernel) {
pr_err("%s: map kernel is not implemented by this heap.\n",
__func__);
return -ENODEV;
}
mutex_lock(&buffer->lock);
vaddr = ion_buffer_kmap_get(buffer);
mutex_unlock(&buffer->lock);
return PTR_ERR_OR_ZERO(vaddr);
}
static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
size_t len,
enum dma_data_direction direction)
{
struct ion_buffer *buffer = dmabuf->priv;
mutex_lock(&buffer->lock);
ion_buffer_kmap_put(buffer);
mutex_unlock(&buffer->lock);
}
static struct dma_buf_ops dma_buf_ops = {
.map_dma_buf = ion_map_dma_buf,
.unmap_dma_buf = ion_unmap_dma_buf,
.mmap = ion_mmap,
.release = ion_dma_buf_release,
.begin_cpu_access = ion_dma_buf_begin_cpu_access,
.end_cpu_access = ion_dma_buf_end_cpu_access,
.kmap_atomic = ion_dma_buf_kmap,
.kunmap_atomic = ion_dma_buf_kunmap,
.kmap = ion_dma_buf_kmap,
.kunmap = ion_dma_buf_kunmap,
};
struct dma_buf *ion_share_dma_buf(struct ion_client *client,
struct ion_handle *handle)
{
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
struct ion_buffer *buffer;
struct dma_buf *dmabuf;
bool valid_handle;
mutex_lock(&client->lock);
valid_handle = ion_handle_validate(client, handle);
if (!valid_handle) {
WARN(1, "%s: invalid handle passed to share.\n", __func__);
mutex_unlock(&client->lock);
return ERR_PTR(-EINVAL);
}
buffer = handle->buffer;
ion_buffer_get(buffer);
mutex_unlock(&client->lock);
exp_info.ops = &dma_buf_ops;
exp_info.size = buffer->size;
exp_info.flags = O_RDWR;
exp_info.priv = buffer;
dmabuf = dma_buf_export(&exp_info);
if (IS_ERR(dmabuf)) {
ion_buffer_put(buffer);
return dmabuf;
}
return dmabuf;
}
EXPORT_SYMBOL(ion_share_dma_buf);
int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
{
struct dma_buf *dmabuf;
int fd;
dmabuf = ion_share_dma_buf(client, handle);
if (IS_ERR(dmabuf))
return PTR_ERR(dmabuf);
fd = dma_buf_fd(dmabuf, O_CLOEXEC);
if (fd < 0)
dma_buf_put(dmabuf);
return fd;
}
EXPORT_SYMBOL(ion_share_dma_buf_fd);
struct ion_handle *ion_import_dma_buf(struct ion_client *client,
struct dma_buf *dmabuf)
{
struct ion_buffer *buffer;
struct ion_handle *handle;
int ret;
/* reject buffers that were not exported by ion */
if (dmabuf->ops != &dma_buf_ops) {
pr_err("%s: can not import dmabuf from another exporter\n",
__func__);
return ERR_PTR(-EINVAL);
}
buffer = dmabuf->priv;
mutex_lock(&client->lock);
/* if a handle exists for this buffer just take a reference to it */
handle = ion_handle_lookup(client, buffer);
if (!IS_ERR(handle)) {
ion_handle_get(handle);
mutex_unlock(&client->lock);
goto end;
}
handle = ion_handle_create(client, buffer);
if (IS_ERR(handle)) {
mutex_unlock(&client->lock);
goto end;
}
ret = ion_handle_add(client, handle);
mutex_unlock(&client->lock);
if (ret) {
ion_handle_put(handle);
handle = ERR_PTR(ret);
}
end:
return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf);
struct ion_handle *ion_import_dma_buf_fd(struct ion_client *client, int fd)
{
struct dma_buf *dmabuf;
struct ion_handle *handle;
dmabuf = dma_buf_get(fd);
if (IS_ERR(dmabuf))
return ERR_CAST(dmabuf);
handle = ion_import_dma_buf(client, dmabuf);
dma_buf_put(dmabuf);
return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf_fd);
static int ion_sync_for_device(struct ion_client *client, int fd)
{
struct dma_buf *dmabuf;
struct ion_buffer *buffer;
dmabuf = dma_buf_get(fd);
if (IS_ERR(dmabuf))
return PTR_ERR(dmabuf);
/* reject buffers that were not exported by ion */
if (dmabuf->ops != &dma_buf_ops) {
pr_err("%s: can not sync dmabuf from another exporter\n",
__func__);
dma_buf_put(dmabuf);
return -EINVAL;
}
buffer = dmabuf->priv;
dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
buffer->sg_table->nents, DMA_BIDIRECTIONAL);
dma_buf_put(dmabuf);
return 0;
}
/* fix up the cases where the ioctl direction bits are incorrect */
static unsigned int ion_ioctl_dir(unsigned int cmd)
{
switch (cmd) {
case ION_IOC_SYNC:
case ION_IOC_FREE:
case ION_IOC_CUSTOM:
return _IOC_WRITE;
default:
return _IOC_DIR(cmd);
}
}
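/*
 * A minimal sketch of the direction bits being overridden above:
 * _IOC_WRITE means userspace passes data in (copy_from_user) and
 * _IOC_READ means the kernel copies a result back (copy_to_user).
 * ION_IOC_SYNC, ION_IOC_FREE and ION_IOC_CUSTOM were encoded with a
 * direction that does not match their actual data flow, hence the
 * fixup. example_needs_copy_in() is a hypothetical helper.
 */
#if 0
static bool example_needs_copy_in(unsigned int cmd)
{
return (ion_ioctl_dir(cmd) & _IOC_WRITE) != 0;
}
#endif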
static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct ion_client *client = filp->private_data;
struct ion_device *dev = client->dev;
struct ion_handle *cleanup_handle = NULL;
int ret = 0;
unsigned int dir;
union {
struct ion_fd_data fd;
struct ion_allocation_data allocation;
struct ion_handle_data handle;
struct ion_custom_data custom;
} data;
dir = ion_ioctl_dir(cmd);
if (_IOC_SIZE(cmd) > sizeof(data))
return -EINVAL;
if (dir & _IOC_WRITE)
if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
return -EFAULT;
switch (cmd) {
case ION_IOC_ALLOC:
{
struct ion_handle *handle;
handle = ion_alloc(client, data.allocation.len,
data.allocation.align,
data.allocation.heap_id_mask,
data.allocation.flags);
if (IS_ERR(handle))
return PTR_ERR(handle);
data.allocation.handle = handle->id;
cleanup_handle = handle;
break;
}
case ION_IOC_FREE:
{
struct ion_handle *handle;
mutex_lock(&client->lock);
handle = ion_handle_get_by_id_nolock(client, data.handle.handle);
if (IS_ERR(handle)) {
mutex_unlock(&client->lock);
return PTR_ERR(handle);
}
ion_free_nolock(client, handle);
ion_handle_put_nolock(handle);
mutex_unlock(&client->lock);
break;
}
case ION_IOC_SHARE:
case ION_IOC_MAP:
{
struct ion_handle *handle;
handle = ion_handle_get_by_id(client, data.handle.handle);
if (IS_ERR(handle))
return PTR_ERR(handle);
data.fd.fd = ion_share_dma_buf_fd(client, handle);
ion_handle_put(handle);
if (data.fd.fd < 0)
ret = data.fd.fd;
break;
}
case ION_IOC_IMPORT:
{
struct ion_handle *handle;
handle = ion_import_dma_buf_fd(client, data.fd.fd);
if (IS_ERR(handle))
ret = PTR_ERR(handle);
else
data.handle.handle = handle->id;
break;
}
case ION_IOC_SYNC:
{
ret = ion_sync_for_device(client, data.fd.fd);
break;
}
case ION_IOC_CUSTOM:
{
if (!dev->custom_ioctl)
return -ENOTTY;
ret = dev->custom_ioctl(client, data.custom.cmd,
data.custom.arg);
break;
}
default:
return -ENOTTY;
}
if (dir & _IOC_READ) {
if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
if (cleanup_handle)
ion_free(client, cleanup_handle);
return -EFAULT;
}
}
return ret;
}
static int ion_release(struct inode *inode, struct file *file)
{
struct ion_client *client = file->private_data;
pr_debug("%s: %d\n", __func__, __LINE__);
ion_client_destroy(client);
return 0;
}
static int ion_open(struct inode *inode, struct file *file)
{
struct miscdevice *miscdev = file->private_data;
struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
struct ion_client *client;
char debug_name[64];
pr_debug("%s: %d\n", __func__, __LINE__);
snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
client = ion_client_create(dev, debug_name);
if (IS_ERR(client))
return PTR_ERR(client);
file->private_data = client;
return 0;
}
static const struct file_operations ion_fops = {
.owner = THIS_MODULE,
.open = ion_open,
.release = ion_release,
.unlocked_ioctl = ion_ioctl,
.compat_ioctl = compat_ion_ioctl,
};
static size_t ion_debug_heap_total(struct ion_client *client,
unsigned int id)
{
size_t size = 0;
struct rb_node *n;
mutex_lock(&client->lock);
for (n = rb_first(&client->handles); n; n = rb_next(n)) {
struct ion_handle *handle = rb_entry(n,
struct ion_handle,
node);
if (handle->buffer->heap->id == id)
size += handle->buffer->size;
}
mutex_unlock(&client->lock);
return size;
}
static int ion_debug_heap_show(struct seq_file *s, void *unused)
{
struct ion_heap *heap = s->private;
struct ion_device *dev = heap->dev;
struct rb_node *n;
size_t total_size = 0;
size_t total_orphaned_size = 0;
seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
seq_puts(s, "----------------------------------------------------\n");
mutex_lock(&debugfs_mutex);
for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
struct ion_client *client = rb_entry(n, struct ion_client,
node);
size_t size = ion_debug_heap_total(client, heap->id);
if (!size)
continue;
if (client->task) {
char task_comm[TASK_COMM_LEN];
get_task_comm(task_comm, client->task);
seq_printf(s, "%16s %16u %16zu\n", task_comm,
client->pid, size);
} else {
seq_printf(s, "%16s %16u %16zu\n", client->name,
client->pid, size);
}
}
mutex_unlock(&debugfs_mutex);
seq_puts(s, "----------------------------------------------------\n");
seq_puts(s, "orphaned allocations (info is from last known client):\n");
mutex_lock(&dev->buffer_lock);
for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
node);
if (buffer->heap->id != heap->id)
continue;
total_size += buffer->size;
if (!buffer->handle_count) {
seq_printf(s, "%16s %16u %16zu %d %d\n",
buffer->task_comm, buffer->pid,
buffer->size, buffer->kmap_cnt,
atomic_read(&buffer->ref.refcount));
total_orphaned_size += buffer->size;
}
}
mutex_unlock(&dev->buffer_lock);
seq_puts(s, "----------------------------------------------------\n");
seq_printf(s, "%16s %16zu\n", "total orphaned",
total_orphaned_size);
seq_printf(s, "%16s %16zu\n", "total ", total_size);
if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
seq_printf(s, "%16s %16zu\n", "deferred free",
heap->free_list_size);
seq_puts(s, "----------------------------------------------------\n");
if (heap->debug_show)
heap->debug_show(heap, s, unused);
return 0;
}
static int ion_debug_heap_open(struct inode *inode, struct file *file)
{
return single_open(file, ion_debug_heap_show, inode->i_private);
}
static const struct file_operations debug_heap_fops = {
.open = ion_debug_heap_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static int debug_shrink_set(void *data, u64 val)
{
struct ion_heap *heap = data;
struct shrink_control sc;
int objs;
sc.gfp_mask = -1;
sc.nr_to_scan = val;
if (!val) {
objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
sc.nr_to_scan = objs;
}
heap->shrinker.scan_objects(&heap->shrinker, &sc);
return 0;
}
static int debug_shrink_get(void *data, u64 *val)
{
struct ion_heap *heap = data;
struct shrink_control sc;
int objs;
sc.gfp_mask = -1;
sc.nr_to_scan = 0;
objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
*val = objs;
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
debug_shrink_set, "%llu\n");
void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
struct dentry *debug_file;
if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
!heap->ops->unmap_dma)
pr_err("%s: can not add heap with invalid ops struct.\n",
__func__);
spin_lock_init(&heap->free_lock);
heap->free_list_size = 0;
if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
ion_heap_init_deferred_free(heap);
if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
ion_heap_init_shrinker(heap);
heap->dev = dev;
down_write(&dev->lock);
/*
* use negative heap->id to reverse the priority -- when traversing
* the list later attempt higher id numbers first
*/
plist_node_init(&heap->node, -heap->id);
plist_add(&heap->node, &dev->heaps);
debug_file = debugfs_create_file(heap->name, 0664,
dev->heaps_debug_root, heap,
&debug_heap_fops);
if (!debug_file) {
char buf[256], *path;
path = dentry_path(dev->heaps_debug_root, buf, 256);
pr_err("Failed to create heap debugfs at %s/%s\n",
path, heap->name);
}
if (heap->shrinker.count_objects && heap->shrinker.scan_objects) {
char debug_name[64];
snprintf(debug_name, 64, "%s_shrink", heap->name);
debug_file = debugfs_create_file(
debug_name, 0644, dev->heaps_debug_root, heap,
&debug_shrink_fops);
if (!debug_file) {
char buf[256], *path;
path = dentry_path(dev->heaps_debug_root, buf, 256);
pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
path, debug_name);
}
}
up_write(&dev->lock);
}
EXPORT_SYMBOL(ion_device_add_heap);
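/*
 * A minimal sketch of the priority trick above: plist keeps nodes
 * sorted by ascending prio, so initializing the node with -heap->id
 * makes plist_for_each_entry() visit higher heap ids first.
 * example_add_heap() is illustrative only.
 */
#if 0
static void example_add_heap(struct plist_head *heaps,
struct plist_node *node, int id)
{
plist_node_init(node, -id); /* higher id, smaller prio, visited earlier */
plist_add(node, heaps);
}
#endif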
struct ion_device *ion_device_create(long (*custom_ioctl)
(struct ion_client *client,
unsigned int cmd,
unsigned long arg))
{
struct ion_device *idev;
int ret;
idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
if (!idev)
return ERR_PTR(-ENOMEM);
idev->dev.minor = MISC_DYNAMIC_MINOR;
idev->dev.name = "ion";
idev->dev.fops = &ion_fops;
idev->dev.parent = NULL;
ret = misc_register(&idev->dev);
if (ret) {
pr_err("ion: failed to register misc device.\n");
kfree(idev);
return ERR_PTR(ret);
}
idev->debug_root = debugfs_create_dir("ion", NULL);
if (!idev->debug_root) {
pr_err("ion: failed to create debugfs root directory.\n");
goto debugfs_done;
}
idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
if (!idev->heaps_debug_root) {
pr_err("ion: failed to create debugfs heaps directory.\n");
goto debugfs_done;
}
idev->clients_debug_root = debugfs_create_dir("clients",
idev->debug_root);
if (!idev->clients_debug_root)
pr_err("ion: failed to create debugfs clients directory.\n");
debugfs_done:
idev->custom_ioctl = custom_ioctl;
idev->buffers = RB_ROOT;
mutex_init(&idev->buffer_lock);
init_rwsem(&idev->lock);
plist_head_init(&idev->heaps);
idev->clients = RB_ROOT;
ion_root_client = &idev->clients;
mutex_init(&debugfs_mutex);
return idev;
}
EXPORT_SYMBOL(ion_device_create);
void ion_device_destroy(struct ion_device *dev)
{
misc_deregister(&dev->dev);
debugfs_remove_recursive(dev->debug_root);
/* XXX need to free the heaps and clients ? */
kfree(dev);
}
EXPORT_SYMBOL(ion_device_destroy);
void __init ion_reserve(struct ion_platform_data *data)
{
int i;
for (i = 0; i < data->nr; i++) {
if (data->heaps[i].size == 0)
continue;
if (data->heaps[i].base == 0) {
phys_addr_t paddr;
paddr = memblock_alloc_base(data->heaps[i].size,
data->heaps[i].align,
MEMBLOCK_ALLOC_ANYWHERE);
if (!paddr) {
pr_err("%s: error allocating memblock for heap %d\n",
__func__, i);
continue;
}
data->heaps[i].base = paddr;
} else {
int ret = memblock_reserve(data->heaps[i].base,
data->heaps[i].size);
if (ret)
pr_err("memblock reserve of %zx@%lx failed\n",
data->heaps[i].size,
data->heaps[i].base);
}
pr_info("%s: %s reserved base %lx size %zu\n", __func__,
data->heaps[i].name,
data->heaps[i].base,
data->heaps[i].size);
}
}
| ./CrossVul/dataset_final_sorted/CWE-416/c/good_5376_0 |
crossvul-cpp_data_bad_859_0 | /* $Id: minissdpd.c,v 1.53 2016/03/01 18:06:46 nanard Exp $ */
/* vim: tabstop=4 shiftwidth=4 noexpandtab
* MiniUPnP project
* (c) 2007-2018 Thomas Bernard
* website : http://miniupnp.free.fr/ or https://miniupnp.tuxfamily.org/
* This software is subject to the conditions detailed
* in the LICENCE file provided within the distribution */
#include "config.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <signal.h>
#include <errno.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <unistd.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <syslog.h>
#include <ctype.h>
#include <time.h>
#include <sys/queue.h>
/* for chmod : */
#include <sys/stat.h>
/* unix sockets */
#include <sys/un.h>
/* for getpwnam() and getgrnam() */
#if 0
#include <pwd.h>
#include <grp.h>
#endif
/* LOG_PERROR does not exist on Solaris */
#ifndef LOG_PERROR
#define LOG_PERROR 0
#endif /* LOG_PERROR */
#include "getifaddr.h"
#include "upnputils.h"
#include "openssdpsocket.h"
#include "daemonize.h"
#include "codelength.h"
#include "ifacewatch.h"
#include "minissdpdtypes.h"
#include "asyncsendto.h"
#define SET_MAX(max, x) if((x) > (max)) (max) = (x)
#ifndef MIN
#define MIN(x,y) (((x)<(y))?(x):(y))
#endif
/* current request management structure */
struct reqelem {
int socket;
int is_notify; /* has subscribed to notifications */
LIST_ENTRY(reqelem) entries;
unsigned char * output_buffer;
int output_buffer_offset;
int output_buffer_len;
};
/* device data structures */
struct header {
const char * p; /* string pointer */
int l; /* string length */
};
#define HEADER_NT 0
#define HEADER_USN 1
#define HEADER_LOCATION 2
struct device {
struct device * next;
time_t t; /* validity time */
struct header headers[3]; /* NT, USN and LOCATION headers */
char data[];
};
/* Services stored for answering to M-SEARCH */
struct service {
char * st; /* Service type */
char * usn; /* Unique identifier */
char * server; /* Server string */
char * location; /* URL */
LIST_ENTRY(service) entries;
};
LIST_HEAD(servicehead, service) servicelisthead;
#define NTS_SSDP_ALIVE 1
#define NTS_SSDP_BYEBYE 2
#define NTS_SSDP_UPDATE 3
/* request types */
enum request_type {
MINISSDPD_GET_VERSION = 0,
MINISSDPD_SEARCH_TYPE = 1,
MINISSDPD_SEARCH_USN = 2,
MINISSDPD_SEARCH_ALL = 3,
MINISSDPD_SUBMIT = 4,
MINISSDPD_NOTIF = 5
};
/* discovered device list kept in memory */
struct device * devlist = 0;
/* bootid and configid */
unsigned int upnp_bootid = 1;
unsigned int upnp_configid = 1337;
/* LAN interfaces/addresses */
struct lan_addr_list lan_addrs;
/* connected clients */
LIST_HEAD(reqstructhead, reqelem) reqlisthead;
/* functions prototypes */
#define NOTIF_NEW 1
#define NOTIF_UPDATE 2
#define NOTIF_REMOVE 3
static void
sendNotifications(int notif_type, const struct device * dev, const struct service * serv);
/* functions */
/* parselanaddr()
* parse address with mask
* ex: 192.168.1.1/24 or 192.168.1.1/255.255.255.0
*
* Can also use the interface name (i.e. eth0)
*
* return value :
* 0 : ok
* -1 : error */
static int
parselanaddr(struct lan_addr_s * lan_addr, const char * str)
{
const char * p;
int n;
char tmp[16];
memset(lan_addr, 0, sizeof(struct lan_addr_s));
p = str;
while(*p && *p != '/' && !isspace(*p))
p++;
n = p - str;
if(!isdigit(str[0]) && n < (int)sizeof(lan_addr->ifname)) {
/* not starting with a digit : suppose it is an interface name */
memcpy(lan_addr->ifname, str, n);
lan_addr->ifname[n] = '\0';
if(getifaddr(lan_addr->ifname, lan_addr->str, sizeof(lan_addr->str),
&lan_addr->addr, &lan_addr->mask) < 0)
goto parselan_error;
/*printf("%s => %s\n", lan_addr->ifname, lan_addr->str);*/
} else {
if(n>15)
goto parselan_error;
memcpy(lan_addr->str, str, n);
lan_addr->str[n] = '\0';
if(!inet_aton(lan_addr->str, &lan_addr->addr))
goto parselan_error;
}
if(*p == '/') {
const char * q = ++p;
while(*p && isdigit(*p))
p++;
if(*p=='.') {
/* parse mask in /255.255.255.0 format */
while(*p && (*p=='.' || isdigit(*p)))
p++;
n = p - q;
if(n>15)
goto parselan_error;
memcpy(tmp, q, n);
tmp[n] = '\0';
if(!inet_aton(tmp, &lan_addr->mask))
goto parselan_error;
} else {
/* it is a /24 format */
int nbits = atoi(q);
if(nbits > 32 || nbits < 0)
goto parselan_error;
lan_addr->mask.s_addr = htonl(nbits ? (0xffffffffu << (32 - nbits)) : 0);
}
} else if(lan_addr->mask.s_addr == 0) {
/* by default, networks are /24 */
lan_addr->mask.s_addr = htonl(0xffffff00u);
}
#ifdef ENABLE_IPV6
if(lan_addr->ifname[0] != '\0') {
lan_addr->index = if_nametoindex(lan_addr->ifname);
if(lan_addr->index == 0)
fprintf(stderr, "Cannot get index for network interface %s",
lan_addr->ifname);
} else {
fprintf(stderr,
"Error: please specify LAN network interface by name instead of IPv4 address : %s\n",
str);
return -1;
}
#endif /* ENABLE_IPV6 */
return 0;
parselan_error:
fprintf(stderr, "Error parsing address/mask (or interface name) : %s\n",
str);
return -1;
}
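/* A minimal sketch (added for illustration, not part of the original
 * source) of the "/24" prefix arithmetic used in parselanaddr() above.
 * For nbits = 24, 0xffffffffu << (32 - 24) == 0xffffff00, i.e.
 * 255.255.255.0 in host order, which htonl() converts for storage. */
#if 0
#include <assert.h>
#include <stdint.h>
static void example_prefix_to_mask(void)
{
uint32_t nbits = 24; /* as parsed from "192.168.1.1/24" */
uint32_t mask = nbits ? (0xffffffffu << (32 - nbits)) : 0;
assert(mask == 0xffffff00u); /* 255.255.255.0 */
}
#endif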
static int
write_buffer(struct reqelem * req)
{
if(req->output_buffer && req->output_buffer_len > 0) {
int n = write(req->socket,
req->output_buffer + req->output_buffer_offset,
req->output_buffer_len);
if(n >= 0) {
req->output_buffer_offset += n;
req->output_buffer_len -= n;
} else if(errno == EINTR || errno == EWOULDBLOCK || errno == EAGAIN) {
return 0;
}
return n;
} else {
return 0;
}
}
static int
add_to_buffer(struct reqelem * req, const unsigned char * data, int len)
{
unsigned char * tmp;
if(req->output_buffer_offset > 0) {
memmove(req->output_buffer, req->output_buffer + req->output_buffer_offset, req->output_buffer_len);
req->output_buffer_offset = 0;
}
tmp = realloc(req->output_buffer, req->output_buffer_len + len);
if(tmp == NULL) {
syslog(LOG_ERR, "%s: failed to allocate %d bytes",
__func__, req->output_buffer_len + len);
return -1;
}
req->output_buffer = tmp;
memcpy(req->output_buffer + req->output_buffer_len, data, len);
req->output_buffer_len += len;
return len;
}
static int
write_or_buffer(struct reqelem * req, const unsigned char * data, int len)
{
if(write_buffer(req) < 0)
return -1;
if(req->output_buffer && req->output_buffer_len > 0) {
return add_to_buffer(req, data, len);
} else {
int n = write(req->socket, data, len);
if(n == len)
return len;
if(n < 0) {
if(errno == EINTR || errno == EWOULDBLOCK || errno == EAGAIN) {
n = add_to_buffer(req, data, len);
if(n < 0) return n;
} else {
return n;
}
} else {
n = add_to_buffer(req, data + n, len - n);
if(n < 0) return n;
}
}
return len;
}
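/* Added commentary: the three helpers above implement best-effort
 * non-blocking writes. write_buffer() flushes any pending bytes,
 * add_to_buffer() compacts the queue and appends what could not be sent,
 * and write_or_buffer() first flushes, then tries a direct write() and
 * falls back to queueing on a short write or EINTR/EAGAIN/EWOULDBLOCK. */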
static const char *
nts_to_str(int nts)
{
switch(nts)
{
case NTS_SSDP_ALIVE:
return "ssdp:alive";
case NTS_SSDP_BYEBYE:
return "ssdp:byebye";
case NTS_SSDP_UPDATE:
return "ssdp:update";
}
return "unknown";
}
/* updateDevice() :
* adds or updates the device to the list.
* return value :
* 0 : the device was updated (or nothing done)
* 1 : the device was new */
static int
updateDevice(const struct header * headers, time_t t)
{
struct device ** pp = &devlist;
struct device * p = *pp; /* = devlist; */
while(p)
{
if( p->headers[HEADER_NT].l == headers[HEADER_NT].l
&& (0==memcmp(p->headers[HEADER_NT].p, headers[HEADER_NT].p, headers[HEADER_NT].l))
&& p->headers[HEADER_USN].l == headers[HEADER_USN].l
&& (0==memcmp(p->headers[HEADER_USN].p, headers[HEADER_USN].p, headers[HEADER_USN].l)) )
{
/*printf("found! %d\n", (int)(t - p->t));*/
syslog(LOG_DEBUG, "device updated : %.*s", headers[HEADER_USN].l, headers[HEADER_USN].p);
p->t = t;
/* update Location ! */
if(headers[HEADER_LOCATION].l > p->headers[HEADER_LOCATION].l)
{
struct device * tmp;
tmp = realloc(p, sizeof(struct device)
+ headers[0].l+headers[1].l+headers[2].l);
if(!tmp) /* allocation error */
{
syslog(LOG_ERR, "updateDevice() : memory allocation error");
free(p);
return 0;
}
p = tmp;
*pp = p;
}
memcpy(p->data + p->headers[0].l + p->headers[1].l,
headers[2].p, headers[2].l);
/* TODO : check p->headers[HEADER_LOCATION].l */
return 0;
}
pp = &p->next;
p = *pp; /* p = p->next; */
}
syslog(LOG_INFO, "new device discovered : %.*s",
headers[HEADER_USN].l, headers[HEADER_USN].p);
/* add */
{
char * pc;
int i;
p = malloc( sizeof(struct device)
+ headers[0].l+headers[1].l+headers[2].l );
if(!p) {
syslog(LOG_ERR, "updateDevice(): cannot allocate memory");
return -1;
}
p->next = devlist;
p->t = t;
pc = p->data;
for(i = 0; i < 3; i++)
{
p->headers[i].p = pc;
p->headers[i].l = headers[i].l;
memcpy(pc, headers[i].p, headers[i].l);
pc += headers[i].l;
}
devlist = p;
sendNotifications(NOTIF_NEW, p, NULL);
}
return 1;
}
/* removeDevice() :
* remove a device from the list
* return value :
* 0 : no device removed
* -1 : device removed */
static int
removeDevice(const struct header * headers)
{
struct device ** pp = &devlist;
struct device * p = *pp; /* = devlist */
while(p)
{
if( p->headers[HEADER_NT].l == headers[HEADER_NT].l
&& (0==memcmp(p->headers[HEADER_NT].p, headers[HEADER_NT].p, headers[HEADER_NT].l))
&& p->headers[HEADER_USN].l == headers[HEADER_USN].l
&& (0==memcmp(p->headers[HEADER_USN].p, headers[HEADER_USN].p, headers[HEADER_USN].l)) )
{
syslog(LOG_INFO, "remove device : %.*s", headers[HEADER_USN].l, headers[HEADER_USN].p);
sendNotifications(NOTIF_REMOVE, p, NULL);
*pp = p->next;
free(p);
return -1;
}
pp = &p->next;
p = *pp; /* p = p->next; */
}
syslog(LOG_WARNING, "device not found for removing : %.*s", headers[HEADER_USN].l, headers[HEADER_USN].p);
return 0;
}
/* send notifications to clients that have subscribed */
static void
sendNotifications(int notif_type, const struct device * dev, const struct service * serv)
{
struct reqelem * req;
unsigned int m;
unsigned char rbuf[RESPONSE_BUFFER_SIZE];
unsigned char * rp;
for(req = reqlisthead.lh_first; req; req = req->entries.le_next) {
if(!req->is_notify) continue;
rbuf[0] = '\xff'; /* special code for notifications */
rbuf[1] = (unsigned char)notif_type;
rbuf[2] = 0;
rp = rbuf + 3;
if(dev) {
/* response :
* 1 - Location
* 2 - NT (device/service type)
* 3 - usn */
m = dev->headers[HEADER_LOCATION].l;
CODELENGTH(m, rp);
memcpy(rp, dev->headers[HEADER_LOCATION].p, dev->headers[HEADER_LOCATION].l);
rp += dev->headers[HEADER_LOCATION].l;
m = dev->headers[HEADER_NT].l;
CODELENGTH(m, rp);
memcpy(rp, dev->headers[HEADER_NT].p, dev->headers[HEADER_NT].l);
rp += dev->headers[HEADER_NT].l;
m = dev->headers[HEADER_USN].l;
CODELENGTH(m, rp);
memcpy(rp, dev->headers[HEADER_USN].p, dev->headers[HEADER_USN].l);
rp += dev->headers[HEADER_USN].l;
rbuf[2]++;
}
if(serv) {
/* response :
* 1 - Location
* 2 - NT (device/service type)
* 3 - usn */
m = strlen(serv->location);
CODELENGTH(m, rp);
memcpy(rp, serv->location, m);
rp += m;
m = strlen(serv->st);
CODELENGTH(m, rp);
memcpy(rp, serv->st, m);
rp += m;
m = strlen(serv->usn);
CODELENGTH(m, rp);
memcpy(rp, serv->usn, m);
rp += m;
rbuf[2]++;
}
if(rbuf[2] > 0) {
if(write_or_buffer(req, rbuf, rp - rbuf) < 0) {
syslog(LOG_ERR, "(s=%d) write: %m", req->socket);
/*goto error;*/
}
}
}
}
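/* Added commentary: the notification wire format built above is
 * (assuming CODELENGTH() from codelength.h emits a 7-bits-per-byte
 * variable-length length prefix):
 *   byte 0 : 0xff (notification marker)
 *   byte 1 : notif_type (NOTIF_NEW / NOTIF_UPDATE / NOTIF_REMOVE)
 *   byte 2 : number of entries that follow (0, 1 or 2)
 *   per entry : <len><LOCATION> <len><NT> <len><USN>
 */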
/* SendSSDPMSEARCHResponse() :
* build and send response to M-SEARCH SSDP packets. */
static void
SendSSDPMSEARCHResponse(int s, const struct sockaddr * sockname,
const char * st, size_t st_len, const char * usn,
const char * server, const char * location)
{
int l, n;
char buf[1024];
socklen_t sockname_len;
/*
* follow the guidelines from the document "UPnP Device Architecture 1.0"
* uppercase is recommended.
* DATE: is recommended
* SERVER: OS/ver UPnP/1.0 miniupnpd/1.0
* - check what to put in the 'Cache-Control' header
*
* have a look at the document "UPnP Device Architecture v1.1" */
l = snprintf(buf, sizeof(buf), "HTTP/1.1 200 OK\r\n"
"CACHE-CONTROL: max-age=120\r\n"
/*"DATE: ...\r\n"*/
"ST: %.*s\r\n"
"USN: %s\r\n"
"EXT:\r\n"
"SERVER: %s\r\n"
"LOCATION: %s\r\n"
"OPT: \"http://schemas.upnp.org/upnp/1/0/\"; ns=01\r\n" /* UDA v1.1 */
"01-NLS: %u\r\n" /* same as BOOTID. UDA v1.1 */
"BOOTID.UPNP.ORG: %u\r\n" /* UDA v1.1 */
"CONFIGID.UPNP.ORG: %u\r\n" /* UDA v1.1 */
"\r\n",
(int)st_len, st, usn,
server, location,
upnp_bootid, upnp_bootid, upnp_configid);
#ifdef ENABLE_IPV6
sockname_len = (sockname->sa_family == PF_INET6)
? sizeof(struct sockaddr_in6)
: sizeof(struct sockaddr_in);
#else /* ENABLE_IPV6 */
sockname_len = sizeof(struct sockaddr_in);
#endif /* ENABLE_IPV6 */
n = sendto_or_schedule(s, buf, l, 0, sockname, sockname_len);
if(n < 0) {
syslog(LOG_ERR, "%s: sendto(udp): %m", __func__);
}
}
/* Process M-SEARCH requests */
static void
processMSEARCH(int s, const char * st, size_t st_len,
const struct sockaddr * addr)
{
struct service * serv;
#ifdef ENABLE_IPV6
char buf[64];
#endif /* ENABLE_IPV6 */
if(!st || st_len==0)
return;
#ifdef ENABLE_IPV6
sockaddr_to_string(addr, buf, sizeof(buf));
syslog(LOG_INFO, "SSDP M-SEARCH from %s ST:%.*s",
buf, (int)st_len, st);
#else /* ENABLE_IPV6 */
syslog(LOG_INFO, "SSDP M-SEARCH from %s:%d ST: %.*s",
inet_ntoa(((const struct sockaddr_in *)addr)->sin_addr),
ntohs(((const struct sockaddr_in *)addr)->sin_port),
(int)st_len, st);
#endif /* ENABLE_IPV6 */
if(st_len==8 && (0==memcmp(st, "ssdp:all", 8))) {
/* send a response for all services */
for(serv = servicelisthead.lh_first;
serv;
serv = serv->entries.le_next) {
SendSSDPMSEARCHResponse(s, addr,
serv->st, strlen(serv->st), serv->usn,
serv->server, serv->location);
}
} else if(st_len > 5 && (0==memcmp(st, "uuid:", 5))) {
/* find a matching UUID value */
for(serv = servicelisthead.lh_first;
serv;
serv = serv->entries.le_next) {
if(0 == strncmp(serv->usn, st, st_len)) {
SendSSDPMSEARCHResponse(s, addr,
serv->st, strlen(serv->st), serv->usn,
serv->server, serv->location);
}
}
} else {
size_t l;
int st_ver = 0;
char atoi_buffer[8];
/* remove version at the end of the ST string */
for (l = st_len; l > 0; l--) {
if (st[l-1] == ':') {
memset(atoi_buffer, 0, sizeof(atoi_buffer));
memcpy(atoi_buffer, st + l, MIN((sizeof(atoi_buffer) - 1), st_len - l));
st_ver = atoi(atoi_buffer);
break;
}
}
if (l == 0)
l = st_len;
/* answer for each matching service */
/* From UPnP Device Architecture v1.1 :
* 1.3.2 [...] Updated versions of device and service types
* are REQUIRED to be fully backward compatible with
* previous versions. Devices MUST respond to M-SEARCH
* requests for any supported version. For example, if a
* device implements “urn:schemas-upnp-org:service:xyz:2”,
* it MUST respond to search requests for both that type
* and “urn:schemas-upnp-org:service:xyz:1”. The response
* MUST specify the same version as was contained in the
* search request. [...] */
for(serv = servicelisthead.lh_first;
serv;
serv = serv->entries.le_next) {
if(0 == strncmp(serv->st, st, l)) {
syslog(LOG_DEBUG, "Found matching service : %s %s", serv->st, serv->location);
SendSSDPMSEARCHResponse(s, addr,
st, st_len, serv->usn,
serv->server, serv->location);
}
}
}
}
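/* A minimal sketch (added for illustration) of the version-agnostic
 * matching above: the searched ST is truncated just past its last ':',
 * so a search for version 1 matches a stored version-2 service, and the
 * response echoes the version the client asked for, as UDA v1.1 demands. */
#if 0
static void example_version_match(void)
{
const char * stored = "urn:schemas-upnp-org:service:xyz:2";
const char * searched = "urn:schemas-upnp-org:service:xyz:1";
size_t l = 33; /* length of "urn:schemas-upnp-org:service:xyz:" */
/* strncmp(stored, searched, l) == 0, so a response is sent with
 * ST: urn:schemas-upnp-org:service:xyz:1 */
(void)stored; (void)searched; (void)l;
}
#endif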
/**
* helper function.
* reject any non-ASCII or non-printable character.
*/
static int
containsForbiddenChars(const unsigned char * p, int len)
{
while(len > 0) {
if(*p < ' ' || *p >= '\x7f')
return 1;
p++;
len--;
}
return 0;
}
#define METHOD_MSEARCH 1
#define METHOD_NOTIFY 2
/* ParseSSDPPacket() :
* parse a received SSDP Packet and call
* updateDevice() or removeDevice() as needed
* return value :
* -1 : a device was removed
* 0 : no device removed nor added
* 1 : a device was added. */
static int
ParseSSDPPacket(int s, const char * p, ssize_t n,
const struct sockaddr * addr,
const char * searched_device)
{
const char * linestart;
const char * lineend;
const char * nameend;
const char * valuestart;
struct header headers[3];
int i, r = 0;
int methodlen;
int nts = -1;
int method = -1;
unsigned int lifetime = 180; /* 3 minutes by default */
const char * st = NULL;
int st_len = 0;
/* first check from what subnet is the sender */
if(get_lan_for_peer(addr) == NULL) {
char addr_str[64];
sockaddr_to_string(addr, addr_str, sizeof(addr_str));
syslog(LOG_WARNING, "peer %s is not from a LAN",
addr_str);
return 0;
}
/* do the parsing */
memset(headers, 0, sizeof(headers));
for(methodlen = 0;
methodlen < n && (isalpha(p[methodlen]) || p[methodlen]=='-');
methodlen++);
if(methodlen==8 && 0==memcmp(p, "M-SEARCH", 8))
method = METHOD_MSEARCH;
else if(methodlen==6 && 0==memcmp(p, "NOTIFY", 6))
method = METHOD_NOTIFY;
else if(methodlen==4 && 0==memcmp(p, "HTTP", 4)) {
/* answer to a M-SEARCH => process it as a NOTIFY
* with NTS: ssdp:alive */
method = METHOD_NOTIFY;
nts = NTS_SSDP_ALIVE;
}
linestart = p;
while(linestart < p + n - 2) {
/* start parsing the line : detect line end */
lineend = linestart;
while(lineend < p + n && *lineend != '\n' && *lineend != '\r')
lineend++;
/*printf("line: '%.*s'\n", lineend - linestart, linestart);*/
/* detect name end : ':' character */
nameend = linestart;
while(nameend < lineend && *nameend != ':')
nameend++;
/* detect value */
if(nameend < lineend)
valuestart = nameend + 1;
else
valuestart = nameend;
/* trim spaces */
while(valuestart < lineend && isspace(*valuestart))
valuestart++;
/* suppress leading " if needed */
if(valuestart < lineend && *valuestart=='\"')
valuestart++;
if(nameend > linestart && valuestart < lineend) {
int l = nameend - linestart; /* header name length */
int m = lineend - valuestart; /* header value length */
/* suppress trailing spaces */
while(m>0 && isspace(valuestart[m-1]))
m--;
/* suppress trailing " if needed */
if(m>0 && valuestart[m-1] == '\"')
m--;
i = -1;
/*printf("--%.*s: (%d)%.*s--\n", l, linestart,
m, m, valuestart);*/
if(l==2 && 0==strncasecmp(linestart, "nt", 2))
i = HEADER_NT;
else if(l==3 && 0==strncasecmp(linestart, "usn", 3))
i = HEADER_USN;
else if(l==3 && 0==strncasecmp(linestart, "nts", 3)) {
if(m==10 && 0==strncasecmp(valuestart, "ssdp:alive", 10))
nts = NTS_SSDP_ALIVE;
else if(m==11 && 0==strncasecmp(valuestart, "ssdp:byebye", 11))
nts = NTS_SSDP_BYEBYE;
else if(m==11 && 0==strncasecmp(valuestart, "ssdp:update", 11))
nts = NTS_SSDP_UPDATE;
}
else if(l==8 && 0==strncasecmp(linestart, "location", 8))
i = HEADER_LOCATION;
else if(l==13 && 0==strncasecmp(linestart, "cache-control", 13)) {
/* parse "name1=value1, name_alone, name2=value2" string */
const char * name = valuestart; /* name */
const char * val; /* value */
int rem = m; /* remaining bytes to process */
while(rem > 0) {
val = name;
while(val < name + rem && *val != '=' && *val != ',')
val++;
if(val >= name + rem)
break;
if(*val == '=') {
while(val < name + rem && (*val == '=' || isspace(*val)))
val++;
if(val >= name + rem)
break;
if(0==strncasecmp(name, "max-age", 7))
lifetime = (unsigned int)strtoul(val, 0, 0);
/* move to the next name=value pair */
while(rem > 0 && *name != ',') {
rem--;
name++;
}
/* skip spaces */
while(rem > 0 && (*name == ',' || isspace(*name))) {
rem--;
name++;
}
} else {
rem -= (val - name);
name = val;
while(rem > 0 && (*name == ',' || isspace(*name))) {
rem--;
name++;
}
}
}
/*syslog(LOG_DEBUG, "**%.*s**%u", m, valuestart, lifetime);*/
} else if(l==2 && 0==strncasecmp(linestart, "st", 2)) {
st = valuestart;
st_len = m;
if(method == METHOD_NOTIFY)
i = HEADER_NT; /* it was a M-SEARCH response */
}
if(i>=0) {
headers[i].p = valuestart;
headers[i].l = m;
}
}
linestart = lineend;
while((linestart < p + n) && (*linestart == '\n' || *linestart == '\r'))
linestart++;
}
#if 0
printf("NTS=%d\n", nts);
for(i=0; i<3; i++) {
if(headers[i].p)
printf("%d-'%.*s'\n", i, headers[i].l, headers[i].p);
}
#endif
syslog(LOG_DEBUG,"SSDP request: '%.*s' (%d) %s %s=%.*s",
methodlen, p, method, nts_to_str(nts),
(method==METHOD_NOTIFY)?"nt":"st",
(method==METHOD_NOTIFY)?headers[HEADER_NT].l:st_len,
(method==METHOD_NOTIFY)?headers[HEADER_NT].p:st);
switch(method) {
case METHOD_NOTIFY:
if(nts==NTS_SSDP_ALIVE || nts==NTS_SSDP_UPDATE) {
if(headers[HEADER_NT].p && headers[HEADER_USN].p && headers[HEADER_LOCATION].p) {
/* filter if needed */
if(searched_device &&
0 != memcmp(headers[HEADER_NT].p, searched_device, headers[HEADER_NT].l))
break;
r = updateDevice(headers, time(NULL) + lifetime);
} else {
syslog(LOG_WARNING, "missing header nt=%p usn=%p location=%p",
headers[HEADER_NT].p, headers[HEADER_USN].p,
headers[HEADER_LOCATION].p);
}
} else if(nts==NTS_SSDP_BYEBYE) {
if(headers[HEADER_NT].p && headers[HEADER_USN].p) {
r = removeDevice(headers);
} else {
syslog(LOG_WARNING, "missing header nt=%p usn=%p",
headers[HEADER_NT].p, headers[HEADER_USN].p);
}
}
break;
case METHOD_MSEARCH:
processMSEARCH(s, st, st_len, addr);
break;
default:
{
char addr_str[64];
sockaddr_to_string(addr, addr_str, sizeof(addr_str));
syslog(LOG_WARNING, "method %.*s, don't know what to do (from %s)",
methodlen, p, addr_str);
}
}
return r;
}
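/* A sample NOTIFY packet (added for illustration; all values are made up)
 * showing what the parser above extracts:
 *   NOTIFY * HTTP/1.1
 *   HOST: 239.255.255.250:1900
 *   NT: urn:schemas-upnp-org:service:xyz:1
 *   NTS: ssdp:alive
 *   USN: uuid:0000::urn:schemas-upnp-org:service:xyz:1
 *   LOCATION: http://192.168.1.1:80/desc.xml
 *   CACHE-CONTROL: max-age=1800
 * yields method = METHOD_NOTIFY, nts = NTS_SSDP_ALIVE, lifetime = 1800,
 * and the NT/USN/LOCATION headers passed to updateDevice(). */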
/* OpenUnixSocket()
* open the unix socket and call bind() and listen()
* return -1 in case of error */
static int
OpenUnixSocket(const char * path)
{
struct sockaddr_un addr;
int s;
int rv;
s = socket(AF_UNIX, SOCK_STREAM, 0);
if(s < 0)
{
syslog(LOG_ERR, "socket(AF_UNIX): %m");
return -1;
}
/* unlink the socket pseudo file before binding */
rv = unlink(path);
if(rv < 0 && errno != ENOENT)
{
syslog(LOG_ERR, "unlink(unixsocket, \"%s\"): %m", path);
close(s);
return -1;
}
addr.sun_family = AF_UNIX;
strncpy(addr.sun_path, path, sizeof(addr.sun_path));
if(bind(s, (struct sockaddr *)&addr,
sizeof(struct sockaddr_un)) < 0)
{
syslog(LOG_ERR, "bind(unixsocket, \"%s\"): %m", path);
close(s);
return -1;
}
else if(listen(s, 5) < 0)
{
syslog(LOG_ERR, "listen(unixsocket): %m");
close(s);
return -1;
}
/* Change rights so everyone can communicate with us */
if(chmod(path, 0666) < 0)
{
syslog(LOG_WARNING, "chmod(\"%s\"): %m", path);
}
return s;
}
static ssize_t processRequestSub(struct reqelem * req, const unsigned char * buf, ssize_t n);
/* processRequest() :
* process the request coming from a unix socket */
void processRequest(struct reqelem * req)
{
ssize_t n, r;
unsigned char buf[2048];
const unsigned char * p;
n = read(req->socket, buf, sizeof(buf));
if(n<0) {
if(errno == EINTR || errno == EAGAIN || errno == EWOULDBLOCK)
return; /* try again later */
syslog(LOG_ERR, "(s=%d) processRequest(): read(): %m", req->socket);
goto error;
}
if(n==0) {
syslog(LOG_INFO, "(s=%d) request connection closed", req->socket);
goto error;
}
p = buf;
while (n > 0)
{
r = processRequestSub(req, p, n);
if (r < 0)
goto error;
p += r;
n -= r;
}
return;
error:
close(req->socket);
req->socket = -1;
}
static ssize_t processRequestSub(struct reqelem * req, const unsigned char * buf, ssize_t n)
{
unsigned int l, m;
unsigned int baselen; /* without the version */
const unsigned char * p;
enum request_type type;
struct device * d = devlist;
unsigned char rbuf[RESPONSE_BUFFER_SIZE];
unsigned char * rp;
unsigned char nrep = 0;
time_t t;
struct service * newserv = NULL;
struct service * serv;
t = time(NULL);
type = buf[0];
p = buf + 1;
DECODELENGTH_CHECKLIMIT(l, p, buf + n);
if(l > (unsigned)(buf+n-p)) {
syslog(LOG_WARNING, "bad request (length encoding l=%u n=%u)",
l, (unsigned)n);
goto error;
}
if(l == 0 && type != MINISSDPD_SEARCH_ALL
&& type != MINISSDPD_GET_VERSION && type != MINISSDPD_NOTIF) {
syslog(LOG_WARNING, "bad request (length=0, type=%d)", type);
goto error;
}
syslog(LOG_INFO, "(s=%d) request type=%d str='%.*s'",
req->socket, type, l, p);
switch(type) {
case MINISSDPD_GET_VERSION:
rp = rbuf;
CODELENGTH((sizeof(MINISSDPD_VERSION) - 1), rp);
memcpy(rp, MINISSDPD_VERSION, sizeof(MINISSDPD_VERSION) - 1);
rp += (sizeof(MINISSDPD_VERSION) - 1);
if(write_or_buffer(req, rbuf, rp - rbuf) < 0) {
syslog(LOG_ERR, "(s=%d) write: %m", req->socket);
goto error;
}
p += l;
break;
case MINISSDPD_SEARCH_TYPE: /* request by type */
case MINISSDPD_SEARCH_USN: /* request by USN (unique id) */
case MINISSDPD_SEARCH_ALL: /* everything */
rp = rbuf+1;
/* From UPnP Device Architecture v1.1 :
* 1.3.2 [...] Updated versions of device and service types
* are REQUIRED to be fully backward compatible with
* previous versions. Devices MUST respond to M-SEARCH
* requests for any supported version. For example, if a
* device implements “urn:schemas-upnp-org:service:xyz:2”,
* it MUST respond to search requests for both that type
* and “urn:schemas-upnp-org:service:xyz:1”. The response
* MUST specify the same version as was contained in the
* search request. [...] */
baselen = l; /* remove the version */
while(baselen > 0) {
if(p[baselen-1] == ':')
break;
if(!(p[baselen-1] >= '0' && p[baselen-1] <= '9'))
break;
baselen--;
}
while(d && (nrep < 255)) {
if(d->t < t) {
syslog(LOG_INFO, "outdated device");
} else {
/* test if we can put more responses in the buffer */
if(d->headers[HEADER_LOCATION].l + d->headers[HEADER_NT].l
+ d->headers[HEADER_USN].l + 6
+ (rp - rbuf) >= (int)sizeof(rbuf))
break;
if( (type==MINISSDPD_SEARCH_TYPE && 0==memcmp(d->headers[HEADER_NT].p, p, baselen))
||(type==MINISSDPD_SEARCH_USN && 0==memcmp(d->headers[HEADER_USN].p, p, l))
||(type==MINISSDPD_SEARCH_ALL) ) {
/* response :
* 1 - Location
* 2 - NT (device/service type)
* 3 - usn */
m = d->headers[HEADER_LOCATION].l;
CODELENGTH(m, rp);
memcpy(rp, d->headers[HEADER_LOCATION].p, d->headers[HEADER_LOCATION].l);
rp += d->headers[HEADER_LOCATION].l;
m = d->headers[HEADER_NT].l;
CODELENGTH(m, rp);
memcpy(rp, d->headers[HEADER_NT].p, d->headers[HEADER_NT].l);
rp += d->headers[HEADER_NT].l;
m = d->headers[HEADER_USN].l;
CODELENGTH(m, rp);
memcpy(rp, d->headers[HEADER_USN].p, d->headers[HEADER_USN].l);
rp += d->headers[HEADER_USN].l;
nrep++;
}
}
d = d->next;
}
/* Also look in service list */
for(serv = servicelisthead.lh_first;
serv && (nrep < 255);
serv = serv->entries.le_next) {
/* test if we can put more responses in the buffer */
if(strlen(serv->location) + strlen(serv->st)
+ strlen(serv->usn) + 6 + (rp - rbuf) >= sizeof(rbuf))
break;
if( (type==MINISSDPD_SEARCH_TYPE && 0==strncmp(serv->st, (const char *)p, l))
||(type==MINISSDPD_SEARCH_USN && 0==strncmp(serv->usn, (const char *)p, l))
||(type==MINISSDPD_SEARCH_ALL) ) {
/* response :
* 1 - Location
* 2 - NT (device/service type)
* 3 - usn */
m = strlen(serv->location);
CODELENGTH(m, rp);
memcpy(rp, serv->location, m);
rp += m;
m = strlen(serv->st);
CODELENGTH(m, rp);
memcpy(rp, serv->st, m);
rp += m;
m = strlen(serv->usn);
CODELENGTH(m, rp);
memcpy(rp, serv->usn, m);
rp += m;
nrep++;
}
}
rbuf[0] = nrep;
syslog(LOG_DEBUG, "(s=%d) response : %d device%s",
req->socket, nrep, (nrep > 1) ? "s" : "");
if(write_or_buffer(req, rbuf, rp - rbuf) < 0) {
syslog(LOG_ERR, "(s=%d) write: %m", req->socket);
goto error;
}
p += l;
break;
case MINISSDPD_SUBMIT: /* submit service */
newserv = malloc(sizeof(struct service));
if(!newserv) {
syslog(LOG_ERR, "cannot allocate memory");
goto error;
}
memset(newserv, 0, sizeof(struct service)); /* set pointers to NULL */
if(containsForbiddenChars(p, l)) {
syslog(LOG_ERR, "bad request (st contains forbidden chars)");
goto error;
}
newserv->st = malloc(l + 1);
if(!newserv->st) {
syslog(LOG_ERR, "cannot allocate memory");
goto error;
}
memcpy(newserv->st, p, l);
newserv->st[l] = '\0';
p += l;
if(p >= buf + n) {
syslog(LOG_WARNING, "bad request (missing usn)");
goto error;
}
DECODELENGTH_CHECKLIMIT(l, p, buf + n);
if(l > (unsigned)(buf+n-p)) {
syslog(LOG_WARNING, "bad request (length encoding)");
goto error;
}
if(containsForbiddenChars(p, l)) {
syslog(LOG_ERR, "bad request (usn contains forbidden chars)");
goto error;
}
syslog(LOG_INFO, "usn='%.*s'", l, p);
newserv->usn = malloc(l + 1);
if(!newserv->usn) {
syslog(LOG_ERR, "cannot allocate memory");
goto error;
}
memcpy(newserv->usn, p, l);
newserv->usn[l] = '\0';
p += l;
DECODELENGTH_CHECKLIMIT(l, p, buf + n);
if(l > (unsigned)(buf+n-p)) {
syslog(LOG_WARNING, "bad request (length encoding)");
goto error;
}
if(containsForbiddenChars(p, l)) {
syslog(LOG_ERR, "bad request (server contains forbidden chars)");
goto error;
}
syslog(LOG_INFO, "server='%.*s'", l, p);
newserv->server = malloc(l + 1);
if(!newserv->server) {
syslog(LOG_ERR, "cannot allocate memory");
goto error;
}
memcpy(newserv->server, p, l);
newserv->server[l] = '\0';
p += l;
DECODELENGTH_CHECKLIMIT(l, p, buf + n);
if(l > (unsigned)(buf+n-p)) {
syslog(LOG_WARNING, "bad request (length encoding)");
goto error;
}
if(containsForbiddenChars(p, l)) {
syslog(LOG_ERR, "bad request (location contains forbidden chars)");
goto error;
}
syslog(LOG_INFO, "location='%.*s'", l, p);
newserv->location = malloc(l + 1);
if(!newserv->location) {
syslog(LOG_ERR, "cannot allocate memory");
goto error;
}
memcpy(newserv->location, p, l);
newserv->location[l] = '\0';
p += l;
/* look in service list for duplicate */
for(serv = servicelisthead.lh_first;
serv;
serv = serv->entries.le_next) {
if(0 == strcmp(newserv->usn, serv->usn)
&& 0 == strcmp(newserv->st, serv->st)) {
syslog(LOG_INFO, "Service already in the list. Updating...");
free(newserv->st);
free(newserv->usn);
free(serv->server);
serv->server = newserv->server;
free(serv->location);
serv->location = newserv->location;
free(newserv);
newserv = NULL;
return (p - buf);
}
}
/* Inserting new service */
LIST_INSERT_HEAD(&servicelisthead, newserv, entries);
sendNotifications(NOTIF_NEW, NULL, newserv);
newserv = NULL;
break;
case MINISSDPD_NOTIF: /* switch socket to notify */
rbuf[0] = '\0';
if(write_or_buffer(req, rbuf, 1) < 0) {
syslog(LOG_ERR, "(s=%d) write: %m", req->socket);
goto error;
}
req->is_notify = 1;
p += l;
break;
default:
syslog(LOG_WARNING, "Unknown request type %d", type);
rbuf[0] = '\0';
if(write_or_buffer(req, rbuf, 1) < 0) {
syslog(LOG_ERR, "(s=%d) write: %m", req->socket);
goto error;
}
}
return (p - buf);
error:
if(newserv) {
free(newserv->st);
free(newserv->usn);
free(newserv->server);
free(newserv->location);
free(newserv);
newserv = NULL;
}
return -1;
}
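/* Added commentary: the client request format handled above is, as read
 * from the code (lengths use the codelength.h variable-length encoding):
 *   byte 0 : request type (enum request_type)
 *   then   : <len><string>, the search string or service type
 * MINISSDPD_SUBMIT carries three further length-prefixed strings
 * (usn, server, location); MINISSDPD_GET_VERSION, _SEARCH_ALL and _NOTIF
 * accept an empty first string. */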
static volatile sig_atomic_t quitting = 0;
/* SIGTERM signal handler */
static void
sigterm(int sig)
{
(void)sig;
/*int save_errno = errno;*/
/*signal(sig, SIG_IGN);*/
#if 0
/* calling syslog() is forbidden in a signal handler according to
* signal(3) */
syslog(LOG_NOTICE, "received signal %d, good-bye", sig);
#endif
quitting = 1;
/*errno = save_errno;*/
}
#define PORT 1900
#define XSTR(s) STR(s)
#define STR(s) #s
#define UPNP_MCAST_ADDR "239.255.255.250"
/* for IPv6 */
#define UPNP_MCAST_LL_ADDR "FF02::C" /* link-local */
#define UPNP_MCAST_SL_ADDR "FF05::C" /* site-local */
/* send the M-SEARCH request for devices
* either all devices (third argument is NULL or "*") or a specific one */
static void ssdpDiscover(int s, int ipv6, const char * search)
{
static const char MSearchMsgFmt[] =
"M-SEARCH * HTTP/1.1\r\n"
"HOST: %s:" XSTR(PORT) "\r\n"
"ST: %s\r\n"
"MAN: \"ssdp:discover\"\r\n"
"MX: %u\r\n"
"\r\n";
char bufr[512];
int n;
int mx = 3;
int linklocal = 1;
struct sockaddr_storage sockudp_w;
{
n = snprintf(bufr, sizeof(bufr),
MSearchMsgFmt,
ipv6 ?
(linklocal ? "[" UPNP_MCAST_LL_ADDR "]" : "[" UPNP_MCAST_SL_ADDR "]")
: UPNP_MCAST_ADDR,
(search ? search : "ssdp:all"), mx);
memset(&sockudp_w, 0, sizeof(struct sockaddr_storage));
if(ipv6) {
struct sockaddr_in6 * p = (struct sockaddr_in6 *)&sockudp_w;
p->sin6_family = AF_INET6;
p->sin6_port = htons(PORT);
inet_pton(AF_INET6,
linklocal ? UPNP_MCAST_LL_ADDR : UPNP_MCAST_SL_ADDR,
&(p->sin6_addr));
} else {
struct sockaddr_in * p = (struct sockaddr_in *)&sockudp_w;
p->sin_family = AF_INET;
p->sin_port = htons(PORT);
p->sin_addr.s_addr = inet_addr(UPNP_MCAST_ADDR);
}
n = sendto_or_schedule(s, bufr, n, 0, (const struct sockaddr *)&sockudp_w,
ipv6 ? sizeof(struct sockaddr_in6) : sizeof(struct sockaddr_in));
if (n < 0) {
syslog(LOG_ERR, "%s: sendto: %m", __func__);
}
}
}
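/* Added commentary: with search == NULL over IPv4, the message built
 * above expands to:
 *   M-SEARCH * HTTP/1.1
 *   HOST: 239.255.255.250:1900
 *   ST: ssdp:all
 *   MAN: "ssdp:discover"
 *   MX: 3
 */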
/* main(): program entry point */
int main(int argc, char * * argv)
{
int ret = 0;
int pid;
struct sigaction sa;
char buf[1500];
ssize_t n;
int s_ssdp = -1; /* udp socket receiving ssdp packets */
#ifdef ENABLE_IPV6
int s_ssdp6 = -1; /* udp socket receiving ssdp packets IPv6*/
#else /* ENABLE_IPV6 */
#define s_ssdp6 (-1)
#endif /* ENABLE_IPV6 */
int s_unix = -1; /* unix socket communicating with clients */
int s_ifacewatch = -1; /* socket to receive Route / network interface config changes */
struct reqelem * req;
struct reqelem * reqnext;
fd_set readfds;
fd_set writefds;
struct timeval now;
int max_fd;
struct lan_addr_s * lan_addr;
int i;
const char * sockpath = "/var/run/minissdpd.sock";
const char * pidfilename = "/var/run/minissdpd.pid";
int debug_flag = 0;
#ifdef ENABLE_IPV6
int ipv6 = 0;
#endif /* ENABLE_IPV6 */
int deltadev = 0;
struct sockaddr_in sendername;
socklen_t sendername_len;
#ifdef ENABLE_IPV6
struct sockaddr_in6 sendername6;
socklen_t sendername6_len;
#endif /* ENABLE_IPV6 */
unsigned char ttl = 2; /* UDA says it should default to 2 */
const char * searched_device = NULL; /* if not NULL, search/filter a specific device type */
LIST_INIT(&reqlisthead);
LIST_INIT(&servicelisthead);
LIST_INIT(&lan_addrs);
/* process command line */
for(i=1; i<argc; i++)
{
if(0==strcmp(argv[i], "-d"))
debug_flag = 1;
#ifdef ENABLE_IPV6
else if(0==strcmp(argv[i], "-6"))
ipv6 = 1;
#endif /* ENABLE_IPV6 */
else {
if((i + 1) >= argc) {
fprintf(stderr, "option %s needs an argument.\n", argv[i]);
break;
}
if(0==strcmp(argv[i], "-i")) {
lan_addr = malloc(sizeof(struct lan_addr_s));
if(lan_addr == NULL) {
fprintf(stderr, "malloc(%d) FAILED\n", (int)sizeof(struct lan_addr_s));
break;
}
if(parselanaddr(lan_addr, argv[++i]) != 0) {
fprintf(stderr, "can't parse \"%s\" as a valid address or interface name\n", argv[i]);
free(lan_addr);
} else {
LIST_INSERT_HEAD(&lan_addrs, lan_addr, list);
}
} else if(0==strcmp(argv[i], "-s"))
sockpath = argv[++i];
else if(0==strcmp(argv[i], "-p"))
pidfilename = argv[++i];
else if(0==strcmp(argv[i], "-t"))
ttl = (unsigned char)atoi(argv[++i]);
else if(0==strcmp(argv[i], "-f"))
searched_device = argv[++i];
else
fprintf(stderr, "unknown commandline option %s.\n", argv[i]);
}
}
if(lan_addrs.lh_first == NULL)
{
fprintf(stderr,
"Usage: %s [-d] "
#ifdef ENABLE_IPV6
"[-6] "
#endif /* ENABLE_IPV6 */
"[-s socket] [-p pidfile] [-t TTL] "
"[-f device] "
"-i <interface> [-i <interface2>] ...\n",
argv[0]);
fprintf(stderr,
"\n <interface> is either an IPv4 address with mask such as\n"
" 192.168.1.42/255.255.255.0, or an interface name such as eth0.\n");
fprintf(stderr,
"\n By default, socket will be open as %s\n"
" and pid written to file %s\n",
sockpath, pidfilename);
return 1;
}
/* open log */
openlog("minissdpd",
LOG_CONS|LOG_PID|(debug_flag?LOG_PERROR:0),
LOG_MINISSDPD);
if(!debug_flag) /* speed things up and ignore LOG_INFO and LOG_DEBUG */
setlogmask(LOG_UPTO(LOG_NOTICE));
if(checkforrunning(pidfilename) < 0)
{
syslog(LOG_ERR, "MiniSSDPd is already running. EXITING");
return 1;
}
upnp_bootid = (unsigned int)time(NULL);
/* set signal handlers */
memset(&sa, 0, sizeof(struct sigaction));
sa.sa_handler = sigterm;
if(sigaction(SIGTERM, &sa, NULL))
{
syslog(LOG_ERR, "Failed to set SIGTERM handler. EXITING");
ret = 1;
goto quit;
}
if(sigaction(SIGINT, &sa, NULL))
{
syslog(LOG_ERR, "Failed to set SIGINT handler. EXITING");
ret = 1;
goto quit;
}
/* open route/interface config changes socket */
s_ifacewatch = OpenAndConfInterfaceWatchSocket();
/* open UDP socket(s) for receiving SSDP packets */
s_ssdp = OpenAndConfSSDPReceiveSocket(0, ttl);
if(s_ssdp < 0)
{
syslog(LOG_ERR, "Cannot open socket for receiving SSDP messages, exiting");
ret = 1;
goto quit;
}
#ifdef ENABLE_IPV6
if(ipv6) {
s_ssdp6 = OpenAndConfSSDPReceiveSocket(1, ttl);
if(s_ssdp6 < 0)
{
syslog(LOG_ERR, "Cannot open socket for receiving SSDP messages (IPv6), exiting");
ret = 1;
goto quit;
}
}
#endif /* ENABLE_IPV6 */
/* Open Unix socket to communicate with other programs on
* the same machine */
s_unix = OpenUnixSocket(sockpath);
if(s_unix < 0)
{
syslog(LOG_ERR, "Cannot open unix socket for communicating with clients. Exiting");
ret = 1;
goto quit;
}
/* drop privileges */
#if 0
/* if we drop privileges, how to unlink(/var/run/minissdpd.sock) ? */
if(getuid() == 0) {
struct passwd * user;
struct group * group;
user = getpwnam("nobody");
if(!user) {
syslog(LOG_ERR, "getpwnam(\"%s\") : %m", "nobody");
ret = 1;
goto quit;
}
group = getgrnam("nogroup");
if(!group) {
syslog(LOG_ERR, "getgrnam(\"%s\") : %m", "nogroup");
ret = 1;
goto quit;
}
if(setgid(group->gr_gid) < 0) {
syslog(LOG_ERR, "setgit(%d) : %m", group->gr_gid);
ret = 1;
goto quit;
}
if(setuid(user->pw_uid) < 0) {
syslog(LOG_ERR, "setuid(%d) : %m", user->pw_uid);
ret = 1;
goto quit;
}
}
#endif
/* daemonize or in any case get pid ! */
if(debug_flag)
pid = getpid();
else {
#ifdef USE_DAEMON
if(daemon(0, 0) < 0)
perror("daemon()");
pid = getpid();
#else /* USE_DAEMON */
pid = daemonize();
#endif /* USE_DAEMON */
}
writepidfile(pidfilename, pid);
/* send M-SEARCH ssdp:all Requests */
if(s_ssdp >= 0)
ssdpDiscover(s_ssdp, 0, searched_device);
if(s_ssdp6 >= 0)
ssdpDiscover(s_ssdp6, 1, searched_device);
/* Main loop */
while(!quitting) {
/* fill readfds fd_set */
FD_ZERO(&readfds);
FD_ZERO(&writefds);
FD_SET(s_unix, &readfds);
max_fd = s_unix;
if(s_ssdp >= 0) {
FD_SET(s_ssdp, &readfds);
SET_MAX(max_fd, s_ssdp);
}
#ifdef ENABLE_IPV6
if(s_ssdp6 >= 0) {
FD_SET(s_ssdp6, &readfds);
SET_MAX(max_fd, s_ssdp6);
}
#endif /* ENABLE_IPV6 */
if(s_ifacewatch >= 0) {
FD_SET(s_ifacewatch, &readfds);
SET_MAX(max_fd, s_ifacewatch);
}
for(req = reqlisthead.lh_first; req; req = req->entries.le_next) {
if(req->socket >= 0) {
FD_SET(req->socket, &readfds);
SET_MAX(max_fd, req->socket);
}
if(req->output_buffer_len > 0) {
FD_SET(req->socket, &writefds);
SET_MAX(max_fd, req->socket);
}
}
gettimeofday(&now, NULL);
i = get_sendto_fds(&writefds, &max_fd, &now);
/* select call */
if(select(max_fd + 1, &readfds, &writefds, 0, 0) < 0) {
if(errno != EINTR) {
syslog(LOG_ERR, "select: %m");
break; /* quit */
}
continue; /* try again */
}
if(try_sendto(&writefds) < 0) {
syslog(LOG_ERR, "try_sendto: %m");
break;
}
#ifdef ENABLE_IPV6
if((s_ssdp6 >= 0) && FD_ISSET(s_ssdp6, &readfds))
{
sendername6_len = sizeof(struct sockaddr_in6);
n = recvfrom(s_ssdp6, buf, sizeof(buf), 0,
(struct sockaddr *)&sendername6, &sendername6_len);
if(n<0)
{
/* EAGAIN, EWOULDBLOCK, EINTR : silently ignore (try again next time)
* other errors : log to LOG_ERR */
if(errno != EAGAIN && errno != EWOULDBLOCK && errno != EINTR)
syslog(LOG_ERR, "recvfrom: %m");
}
else
{
/* Parse and process the packet received */
/*printf("%.*s", n, buf);*/
i = ParseSSDPPacket(s_ssdp6, buf, n,
(struct sockaddr *)&sendername6, searched_device);
syslog(LOG_DEBUG, "** i=%d deltadev=%d **", i, deltadev);
if(i==0 || (i*deltadev < 0))
{
if(deltadev > 0)
syslog(LOG_NOTICE, "%d new devices added", deltadev);
else if(deltadev < 0)
syslog(LOG_NOTICE, "%d devices removed (good-bye!)", -deltadev);
deltadev = i;
}
else if((i*deltadev) >= 0)
{
deltadev += i;
}
}
}
#endif /* ENABLE_IPV6 */
if((s_ssdp >= 0) && FD_ISSET(s_ssdp, &readfds))
{
sendername_len = sizeof(struct sockaddr_in);
n = recvfrom(s_ssdp, buf, sizeof(buf), 0,
(struct sockaddr *)&sendername, &sendername_len);
if(n<0)
{
/* EAGAIN, EWOULDBLOCK, EINTR : silently ignore (try again next time)
* other errors : log to LOG_ERR */
if(errno != EAGAIN && errno != EWOULDBLOCK && errno != EINTR)
syslog(LOG_ERR, "recvfrom: %m");
}
else
{
/* Parse and process the packet received */
/*printf("%.*s", n, buf);*/
i = ParseSSDPPacket(s_ssdp, buf, n,
(struct sockaddr *)&sendername, searched_device);
syslog(LOG_DEBUG, "** i=%d deltadev=%d **", i, deltadev);
if(i==0 || (i*deltadev < 0))
{
if(deltadev > 0)
syslog(LOG_NOTICE, "%d new devices added", deltadev);
else if(deltadev < 0)
syslog(LOG_NOTICE, "%d devices removed (good-bye!)", -deltadev);
deltadev = i;
}
else if((i*deltadev) >= 0)
{
deltadev += i;
}
}
}
/* processing unix socket requests */
for(req = reqlisthead.lh_first; req;) {
reqnext = req->entries.le_next;
if((req->socket >= 0) && FD_ISSET(req->socket, &readfds)) {
processRequest(req);
}
if((req->socket >= 0) && FD_ISSET(req->socket, &writefds)) {
write_buffer(req);
}
if(req->socket < 0) {
LIST_REMOVE(req, entries);
free(req->output_buffer);
free(req);
}
req = reqnext;
}
/* processing new requests */
if(FD_ISSET(s_unix, &readfds))
{
struct reqelem * tmp;
int s = accept(s_unix, NULL, NULL);
if(s < 0) {
syslog(LOG_ERR, "accept(s_unix): %m");
} else {
syslog(LOG_INFO, "(s=%d) new request connection", s);
if(!set_non_blocking(s))
syslog(LOG_WARNING, "Failed to set new socket non blocking : %m");
tmp = malloc(sizeof(struct reqelem));
if(!tmp) {
syslog(LOG_ERR, "cannot allocate memory for request");
close(s);
} else {
memset(tmp, 0, sizeof(struct reqelem));
tmp->socket = s;
LIST_INSERT_HEAD(&reqlisthead, tmp, entries);
}
}
}
/* processing route/network interface config changes */
if((s_ifacewatch >= 0) && FD_ISSET(s_ifacewatch, &readfds)) {
ProcessInterfaceWatch(s_ifacewatch, s_ssdp, s_ssdp6);
}
}
syslog(LOG_DEBUG, "quitting...");
finalize_sendto();
/* closing and cleaning everything */
quit:
if(s_ssdp >= 0) {
close(s_ssdp);
s_ssdp = -1;
}
#ifdef ENABLE_IPV6
if(s_ssdp6 >= 0) {
close(s_ssdp6);
s_ssdp6 = -1;
}
#endif /* ENABLE_IPV6 */
if(s_unix >= 0) {
close(s_unix);
s_unix = -1;
if(unlink(sockpath) < 0)
syslog(LOG_ERR, "unlink(%s): %m", sockpath);
}
if(s_ifacewatch >= 0) {
close(s_ifacewatch);
s_ifacewatch = -1;
}
/* empty LAN interface/address list */
while(lan_addrs.lh_first != NULL) {
lan_addr = lan_addrs.lh_first;
LIST_REMOVE(lan_addrs.lh_first, list);
free(lan_addr);
}
/* empty device list */
while(devlist != NULL) {
struct device * next = devlist->next;
free(devlist);
devlist = next;
}
/* empty service list */
while(servicelisthead.lh_first != NULL) {
struct service * serv = servicelisthead.lh_first;
LIST_REMOVE(servicelisthead.lh_first, entries);
free(serv->st);
free(serv->usn);
free(serv->server);
free(serv->location);
free(serv);
}
if(unlink(pidfilename) < 0)
syslog(LOG_ERR, "unlink(%s): %m", pidfilename);
closelog();
return ret;
}
| ./CrossVul/dataset_final_sorted/CWE-416/c/bad_859_0 |
crossvul-cpp_data_bad_2437_0 | /*
* common UDP/RAW code
* Linux INET implementation
*
* Authors:
* Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/types.h>
#include <linux/module.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <net/ip.h>
#include <net/sock.h>
#include <net/route.h>
#include <net/tcp_states.h>
int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
struct inet_sock *inet = inet_sk(sk);
struct sockaddr_in *usin = (struct sockaddr_in *) uaddr;
struct flowi4 *fl4;
struct rtable *rt;
__be32 saddr;
int oif;
int err;
if (addr_len < sizeof(*usin))
return -EINVAL;
if (usin->sin_family != AF_INET)
return -EAFNOSUPPORT;
sk_dst_reset(sk);
lock_sock(sk);
oif = sk->sk_bound_dev_if;
saddr = inet->inet_saddr;
if (ipv4_is_multicast(usin->sin_addr.s_addr)) {
if (!oif)
oif = inet->mc_index;
if (!saddr)
saddr = inet->mc_addr;
}
fl4 = &inet->cork.fl.u.ip4;
rt = ip_route_connect(fl4, usin->sin_addr.s_addr, saddr,
RT_CONN_FLAGS(sk), oif,
sk->sk_protocol,
inet->inet_sport, usin->sin_port, sk);
if (IS_ERR(rt)) {
err = PTR_ERR(rt);
if (err == -ENETUNREACH)
IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
goto out;
}
if ((rt->rt_flags & RTCF_BROADCAST) && !sock_flag(sk, SOCK_BROADCAST)) {
ip_rt_put(rt);
err = -EACCES;
goto out;
}
if (!inet->inet_saddr)
inet->inet_saddr = fl4->saddr; /* Update source address */
if (!inet->inet_rcv_saddr) {
inet->inet_rcv_saddr = fl4->saddr;
if (sk->sk_prot->rehash)
sk->sk_prot->rehash(sk);
}
inet->inet_daddr = fl4->daddr;
inet->inet_dport = usin->sin_port;
sk->sk_state = TCP_ESTABLISHED;
inet->inet_id = jiffies;
sk_dst_set(sk, &rt->dst);
err = 0;
out:
release_sock(sk);
return err;
}
EXPORT_SYMBOL(ip4_datagram_connect);
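/* A hypothetical userspace counterpart (added for illustration, not part
 * of this kernel file): connect() on a UDP socket ends up in
 * ip4_datagram_connect() above, pinning the peer address/port and caching
 * a route so later send()/recv() calls need no per-packet destination. */
#if 0
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
static int example_udp_connect(void)
{
int s = socket(AF_INET, SOCK_DGRAM, 0);
struct sockaddr_in dst;
memset(&dst, 0, sizeof(dst));
dst.sin_family = AF_INET;
dst.sin_port = htons(53);
dst.sin_addr.s_addr = inet_addr("192.0.2.1"); /* TEST-NET address */
return connect(s, (struct sockaddr *)&dst, sizeof(dst));
}
#endif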
void ip4_datagram_release_cb(struct sock *sk)
{
const struct inet_sock *inet = inet_sk(sk);
const struct ip_options_rcu *inet_opt;
__be32 daddr = inet->inet_daddr;
struct flowi4 fl4;
struct rtable *rt;
if (! __sk_dst_get(sk) || __sk_dst_check(sk, 0))
return;
rcu_read_lock();
inet_opt = rcu_dereference(inet->inet_opt);
if (inet_opt && inet_opt->opt.srr)
daddr = inet_opt->opt.faddr;
rt = ip_route_output_ports(sock_net(sk), &fl4, sk, daddr,
inet->inet_saddr, inet->inet_dport,
inet->inet_sport, sk->sk_protocol,
RT_CONN_FLAGS(sk), sk->sk_bound_dev_if);
if (!IS_ERR(rt))
__sk_dst_set(sk, &rt->dst);
rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(ip4_datagram_release_cb);
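/* Added commentary: the callback above re-resolves the route only when
 * the socket's cached dst has been invalidated, and honours an IP
 * source-route option (opt.srr) by directing the lookup at the first hop
 * (opt.faddr) rather than the final inet_daddr. */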
| ./CrossVul/dataset_final_sorted/CWE-416/c/bad_2437_0 |
crossvul-cpp_data_good_3348_1 | /*
Copyright (c) 2013-2014. The YARA Authors. All Rights Reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define _GNU_SOURCE
#include <string.h>
#include <assert.h>
#include <time.h>
#include <math.h>
#include <yara/arena.h>
#include <yara/endian.h>
#include <yara/exec.h>
#include <yara/limits.h>
#include <yara/error.h>
#include <yara/object.h>
#include <yara/modules.h>
#include <yara/re.h>
#include <yara/strutils.h>
#include <yara/utils.h>
#include <yara/mem.h>
#include <yara.h>
#define MEM_SIZE MAX_LOOP_NESTING * LOOP_LOCAL_VARS
#define push(x) \
if (sp < stack_size) \
{ \
stack[sp++] = (x); \
} \
else \
{ \
result = ERROR_EXEC_STACK_OVERFLOW; \
stop = TRUE; \
break; \
} \
#define pop(x) x = stack[--sp]
#define is_undef(x) IS_UNDEFINED((x).i)
#define ensure_defined(x) \
if (is_undef(x)) \
{ \
r1.i = UNDEFINED; \
push(r1); \
break; \
}
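/* Added commentary: the interpreter below is a plain stack machine.
 * Opcodes take operands with pop() and return results with push(), which
 * is overflow-checked against the stack size read from
 * YR_CONFIG_STACK_SIZE; ensure_defined() makes an operation yield
 * UNDEFINED as soon as one operand is undefined. */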
#define little_endian_uint8_t(x) (x)
#define little_endian_int8_t(x) (x)
#define little_endian_uint16_t(x) yr_le16toh(x)
#define little_endian_int16_t(x) yr_le16toh(x)
#define little_endian_uint32_t(x) yr_le32toh(x)
#define little_endian_int32_t(x) yr_le32toh(x)
#define big_endian_uint8_t(x) (x)
#define big_endian_int8_t(x) (x)
#define big_endian_uint16_t(x) yr_be16toh(x)
#define big_endian_int16_t(x) yr_be16toh(x)
#define big_endian_uint32_t(x) yr_be32toh(x)
#define big_endian_int32_t(x) yr_be32toh(x)
#define function_read(type, endianess) \
int64_t read_##type##_##endianess(YR_MEMORY_BLOCK_ITERATOR* iterator, size_t offset) \
{ \
YR_MEMORY_BLOCK* block = iterator->first(iterator); \
while (block != NULL) \
{ \
if (offset >= block->base && \
block->size >= sizeof(type) && \
offset <= block->base + block->size - sizeof(type)) \
{ \
type result; \
uint8_t* data = block->fetch_data(block); \
if (data == NULL) \
return UNDEFINED; \
result = *(type *)(data + offset - block->base); \
result = endianess##_##type(result); \
return result; \
} \
block = iterator->next(iterator); \
} \
return UNDEFINED; \
};
function_read(uint8_t, little_endian)
function_read(uint16_t, little_endian)
function_read(uint32_t, little_endian)
function_read(int8_t, little_endian)
function_read(int16_t, little_endian)
function_read(int32_t, little_endian)
function_read(uint8_t, big_endian)
function_read(uint16_t, big_endian)
function_read(uint32_t, big_endian)
function_read(int8_t, big_endian)
function_read(int16_t, big_endian)
function_read(int32_t, big_endian)
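/* Added commentary: each function_read() instantiation above expands to a
 * block-aware reader; for example read_uint16_t_little_endian(iterator,
 * offset) walks the scanned memory blocks, and when the two bytes at
 * 'offset' fall entirely inside one block it loads them and byte-swaps
 * via yr_le16toh(), otherwise it returns UNDEFINED. These readers back
 * the OP_INT8..OP_UINT32BE opcodes handled later in yr_execute_code(). */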
static uint8_t* jmp_if(
int condition,
uint8_t* ip)
{
uint8_t* result;
if (condition)
{
result = *(uint8_t**)(ip + 1);
// ip will be incremented at the end of the execution loop,
// decrement it here to compensate.
result--;
}
else
{
result = ip + sizeof(uint64_t);
}
return result;
}
int yr_execute_code(
YR_RULES* rules,
YR_SCAN_CONTEXT* context,
int timeout,
time_t start_time)
{
int64_t mem[MEM_SIZE];
int32_t sp = 0;
uint8_t* ip = rules->code_start;
YR_VALUE args[MAX_FUNCTION_ARGS];
YR_VALUE *stack;
YR_VALUE r1;
YR_VALUE r2;
YR_VALUE r3;
#ifdef PROFILING_ENABLED
YR_RULE* current_rule = NULL;
#endif
YR_RULE* rule;
YR_MATCH* match;
YR_OBJECT_FUNCTION* function;
YR_OBJECT** obj_ptr;
YR_ARENA* obj_arena;
char* identifier;
char* args_fmt;
int i;
int found;
int count;
int result = ERROR_SUCCESS;
int stop = FALSE;
int cycle = 0;
int tidx = context->tidx;
int stack_size;
#ifdef PROFILING_ENABLED
clock_t start = clock();
#endif
yr_get_configuration(YR_CONFIG_STACK_SIZE, (void*) &stack_size);
stack = (YR_VALUE*) yr_malloc(stack_size * sizeof(YR_VALUE));
if (stack == NULL)
return ERROR_INSUFFICIENT_MEMORY;
FAIL_ON_ERROR_WITH_CLEANUP(
yr_arena_create(1024, 0, &obj_arena),
yr_free(stack));
while(!stop)
{
switch(*ip)
{
case OP_NOP:
break;
case OP_HALT:
assert(sp == 0); // When HALT is reached the stack should be empty.
stop = TRUE;
break;
case OP_PUSH:
r1.i = *(uint64_t*)(ip + 1);
ip += sizeof(uint64_t);
push(r1);
break;
case OP_POP:
pop(r1);
break;
case OP_CLEAR_M:
r1.i = *(uint64_t*)(ip + 1);
ip += sizeof(uint64_t);
mem[r1.i] = 0;
break;
case OP_ADD_M:
r1.i = *(uint64_t*)(ip + 1);
ip += sizeof(uint64_t);
pop(r2);
if (!is_undef(r2))
mem[r1.i] += r2.i;
break;
case OP_INCR_M:
r1.i = *(uint64_t*)(ip + 1);
ip += sizeof(uint64_t);
mem[r1.i]++;
break;
case OP_PUSH_M:
r1.i = *(uint64_t*)(ip + 1);
ip += sizeof(uint64_t);
r1.i = mem[r1.i];
push(r1);
break;
case OP_POP_M:
r1.i = *(uint64_t*)(ip + 1);
ip += sizeof(uint64_t);
pop(r2);
mem[r1.i] = r2.i;
break;
case OP_SWAPUNDEF:
r1.i = *(uint64_t*)(ip + 1);
ip += sizeof(uint64_t);
pop(r2);
if (is_undef(r2))
{
r1.i = mem[r1.i];
push(r1);
}
else
{
push(r2);
}
break;
case OP_JNUNDEF:
pop(r1);
push(r1);
ip = jmp_if(!is_undef(r1), ip);
break;
case OP_JLE:
pop(r2);
pop(r1);
push(r1);
push(r2);
ip = jmp_if(r1.i <= r2.i, ip);
break;
case OP_JTRUE:
pop(r1);
push(r1);
ip = jmp_if(!is_undef(r1) && r1.i, ip);
break;
case OP_JFALSE:
pop(r1);
push(r1);
ip = jmp_if(is_undef(r1) || !r1.i, ip);
break;
case OP_AND:
pop(r2);
pop(r1);
if (is_undef(r1) || is_undef(r2))
r1.i = 0;
else
r1.i = r1.i && r2.i;
push(r1);
break;
case OP_OR:
pop(r2);
pop(r1);
if (is_undef(r1))
{
push(r2);
}
else if (is_undef(r2))
{
push(r1);
}
else
{
r1.i = r1.i || r2.i;
push(r1);
}
break;
case OP_NOT:
pop(r1);
if (is_undef(r1))
r1.i = UNDEFINED;
else
r1.i = !r1.i;
push(r1);
break;
case OP_MOD:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
if (r2.i != 0)
r1.i = r1.i % r2.i;
else
r1.i = UNDEFINED;
push(r1);
break;
case OP_SHR:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.i >> r2.i;
push(r1);
break;
case OP_SHL:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.i << r2.i;
push(r1);
break;
case OP_BITWISE_NOT:
pop(r1);
ensure_defined(r1);
r1.i = ~r1.i;
push(r1);
break;
case OP_BITWISE_AND:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.i & r2.i;
push(r1);
break;
case OP_BITWISE_OR:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.i | r2.i;
push(r1);
break;
case OP_BITWISE_XOR:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.i ^ r2.i;
push(r1);
break;
case OP_PUSH_RULE:
rule = *(YR_RULE**)(ip + 1);
ip += sizeof(uint64_t);
r1.i = rule->t_flags[tidx] & RULE_TFLAGS_MATCH ? 1 : 0;
push(r1);
break;
case OP_INIT_RULE:
#ifdef PROFILING_ENABLED
current_rule = *(YR_RULE**)(ip + 1);
#endif
ip += sizeof(uint64_t);
break;
case OP_MATCH_RULE:
pop(r1);
rule = *(YR_RULE**)(ip + 1);
ip += sizeof(uint64_t);
if (!is_undef(r1) && r1.i)
rule->t_flags[tidx] |= RULE_TFLAGS_MATCH;
else if (RULE_IS_GLOBAL(rule))
rule->ns->t_flags[tidx] |= NAMESPACE_TFLAGS_UNSATISFIED_GLOBAL;
#ifdef PROFILING_ENABLED
rule->clock_ticks += clock() - start;
start = clock();
#endif
assert(sp == 0);
break;
case OP_OBJ_LOAD:
identifier = *(char**)(ip + 1);
ip += sizeof(uint64_t);
r1.o = (YR_OBJECT*) yr_hash_table_lookup(
context->objects_table,
identifier,
NULL);
assert(r1.o != NULL);
push(r1);
break;
case OP_OBJ_FIELD:
identifier = *(char**)(ip + 1);
ip += sizeof(uint64_t);
pop(r1);
ensure_defined(r1);
r1.o = yr_object_lookup_field(r1.o, identifier);
assert(r1.o != NULL);
push(r1);
break;
case OP_OBJ_VALUE:
pop(r1);
ensure_defined(r1);
switch(r1.o->type)
{
case OBJECT_TYPE_INTEGER:
r1.i = ((YR_OBJECT_INTEGER*) r1.o)->value;
break;
case OBJECT_TYPE_FLOAT:
if (isnan(((YR_OBJECT_DOUBLE*) r1.o)->value))
r1.i = UNDEFINED;
else
r1.d = ((YR_OBJECT_DOUBLE*) r1.o)->value;
break;
case OBJECT_TYPE_STRING:
if (((YR_OBJECT_STRING*) r1.o)->value == NULL)
r1.i = UNDEFINED;
else
r1.p = ((YR_OBJECT_STRING*) r1.o)->value;
break;
default:
assert(FALSE);
}
push(r1);
break;
case OP_INDEX_ARRAY:
pop(r1); // index
pop(r2); // array
ensure_defined(r1);
ensure_defined(r2);
assert(r2.o->type == OBJECT_TYPE_ARRAY);
r1.o = yr_object_array_get_item(r2.o, 0, (int) r1.i);
if (r1.o == NULL)
r1.i = UNDEFINED;
push(r1);
break;
case OP_LOOKUP_DICT:
pop(r1); // key
pop(r2); // dictionary
ensure_defined(r1);
ensure_defined(r2);
assert(r2.o->type == OBJECT_TYPE_DICTIONARY);
r1.o = yr_object_dict_get_item(
r2.o, 0, r1.ss->c_string);
if (r1.o == NULL)
r1.i = UNDEFINED;
push(r1);
break;
case OP_CALL:
args_fmt = *(char**)(ip + 1);
ip += sizeof(uint64_t);
i = (int) strlen(args_fmt);
count = 0;
// pop arguments from stack and copy them to args array
while (i > 0)
{
pop(r1);
if (is_undef(r1)) // count the number of undefined args
count++;
args[i - 1] = r1;
i--;
}
pop(r2);
ensure_defined(r2);
if (count > 0)
{
// if there are undefined args, result for function call
// is undefined as well.
r1.i = UNDEFINED;
push(r1);
break;
}
function = (YR_OBJECT_FUNCTION*) r2.o;
result = ERROR_INTERNAL_FATAL_ERROR;
for (i = 0; i < MAX_OVERLOADED_FUNCTIONS; i++)
{
if (function->prototypes[i].arguments_fmt == NULL)
break;
if (strcmp(function->prototypes[i].arguments_fmt, args_fmt) == 0)
{
result = function->prototypes[i].code(args, context, function);
break;
}
}
// if i == MAX_OVERLOADED_FUNCTIONS at this point no matching
// prototype was found, but this shouldn't happen.
assert(i < MAX_OVERLOADED_FUNCTIONS);
// make a copy of the returned object and push the copy into the stack
// function->return_obj can't be pushed because it can change in
// subsequent calls to the same function.
if (result == ERROR_SUCCESS)
result = yr_object_copy(function->return_obj, &r1.o);
// a pointer to the copied object is stored in an arena in order to
// free the object before exiting yr_execute_code
if (result == ERROR_SUCCESS)
result = yr_arena_write_data(obj_arena, &r1.o, sizeof(r1.o), NULL);
stop = (result != ERROR_SUCCESS);
push(r1);
break;
case OP_FOUND:
pop(r1);
r1.i = r1.s->matches[tidx].tail != NULL ? 1 : 0;
push(r1);
break;
case OP_FOUND_AT:
pop(r2);
pop(r1);
if (is_undef(r1))
{
r1.i = 0;
push(r1);
break;
}
match = r2.s->matches[tidx].head;
r3.i = FALSE;
while (match != NULL)
{
if (r1.i == match->base + match->offset)
{
r3.i = TRUE;
break;
}
if (r1.i < match->base + match->offset)
break;
match = match->next;
}
push(r3);
break;
case OP_FOUND_IN:
pop(r3);
pop(r2);
pop(r1);
ensure_defined(r1);
ensure_defined(r2);
match = r3.s->matches[tidx].head;
r3.i = FALSE;
while (match != NULL && !r3.i)
{
if (match->base + match->offset >= r1.i &&
match->base + match->offset <= r2.i)
{
r3.i = TRUE;
}
if (match->base + match->offset > r2.i)
break;
match = match->next;
}
push(r3);
break;
case OP_COUNT:
pop(r1);
r1.i = r1.s->matches[tidx].count;
push(r1);
break;
case OP_OFFSET:
pop(r2);
pop(r1);
ensure_defined(r1);
match = r2.s->matches[tidx].head;
i = 1;
r3.i = UNDEFINED;
while (match != NULL && r3.i == UNDEFINED)
{
if (r1.i == i)
r3.i = match->base + match->offset;
i++;
match = match->next;
}
push(r3);
break;
case OP_LENGTH:
pop(r2);
pop(r1);
ensure_defined(r1);
match = r2.s->matches[tidx].head;
i = 1;
r3.i = UNDEFINED;
while (match != NULL && r3.i == UNDEFINED)
{
if (r1.i == i)
r3.i = match->match_length;
i++;
match = match->next;
}
push(r3);
break;
case OP_OF:
found = 0;
count = 0;
pop(r1);
while (!is_undef(r1))
{
if (r1.s->matches[tidx].tail != NULL)
found++;
count++;
pop(r1);
}
pop(r2);
if (is_undef(r2))
r1.i = found >= count ? 1 : 0;
else
r1.i = found >= r2.i ? 1 : 0;
push(r1);
break;
case OP_FILESIZE:
r1.i = context->file_size;
push(r1);
break;
case OP_ENTRYPOINT:
r1.i = context->entry_point;
push(r1);
break;
case OP_INT8:
pop(r1);
r1.i = read_int8_t_little_endian(context->iterator, (size_t) r1.i);
push(r1);
break;
case OP_INT16:
pop(r1);
r1.i = read_int16_t_little_endian(context->iterator, (size_t) r1.i);
push(r1);
break;
case OP_INT32:
pop(r1);
r1.i = read_int32_t_little_endian(context->iterator, (size_t) r1.i);
push(r1);
break;
case OP_UINT8:
pop(r1);
r1.i = read_uint8_t_little_endian(context->iterator, (size_t) r1.i);
push(r1);
break;
case OP_UINT16:
pop(r1);
r1.i = read_uint16_t_little_endian(context->iterator, (size_t) r1.i);
push(r1);
break;
case OP_UINT32:
pop(r1);
r1.i = read_uint32_t_little_endian(context->iterator, (size_t) r1.i);
push(r1);
break;
case OP_INT8BE:
pop(r1);
r1.i = read_int8_t_big_endian(context->iterator, (size_t) r1.i);
push(r1);
break;
case OP_INT16BE:
pop(r1);
r1.i = read_int16_t_big_endian(context->iterator, (size_t) r1.i);
push(r1);
break;
case OP_INT32BE:
pop(r1);
r1.i = read_int32_t_big_endian(context->iterator, (size_t) r1.i);
push(r1);
break;
case OP_UINT8BE:
pop(r1);
r1.i = read_uint8_t_big_endian(context->iterator, (size_t) r1.i);
push(r1);
break;
case OP_UINT16BE:
pop(r1);
r1.i = read_uint16_t_big_endian(context->iterator, (size_t) r1.i);
push(r1);
break;
case OP_UINT32BE:
pop(r1);
r1.i = read_uint32_t_big_endian(context->iterator, (size_t) r1.i);
push(r1);
break;
case OP_CONTAINS:
pop(r2);
pop(r1);
ensure_defined(r1);
ensure_defined(r2);
r1.i = memmem(r1.ss->c_string, r1.ss->length,
r2.ss->c_string, r2.ss->length) != NULL;
push(r1);
break;
case OP_IMPORT:
r1.i = *(uint64_t*)(ip + 1);
ip += sizeof(uint64_t);
result = yr_modules_load((char*) r1.p, context);
if (result != ERROR_SUCCESS)
stop = TRUE;
break;
case OP_MATCHES:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
if (r1.ss->length == 0)
{
r1.i = FALSE;
push(r1);
break;
}
result = yr_re_exec(
(uint8_t*) r2.re->code,
(uint8_t*) r1.ss->c_string,
r1.ss->length,
0,
r2.re->flags | RE_FLAGS_SCAN,
NULL,
NULL,
&found);
if (result != ERROR_SUCCESS)
stop = TRUE;
r1.i = found >= 0;
push(r1);
break;
case OP_INT_TO_DBL:
r1.i = *(uint64_t*)(ip + 1);
ip += sizeof(uint64_t);
r2 = stack[sp - r1.i];
if (is_undef(r2))
stack[sp - r1.i].i = UNDEFINED;
else
stack[sp - r1.i].d = (double) r2.i;
break;
case OP_STR_TO_BOOL:
pop(r1);
ensure_defined(r1);
r1.i = r1.ss->length > 0;
push(r1);
break;
case OP_INT_EQ:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.i == r2.i;
push(r1);
break;
case OP_INT_NEQ:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.i != r2.i;
push(r1);
break;
case OP_INT_LT:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.i < r2.i;
push(r1);
break;
case OP_INT_GT:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.i > r2.i;
push(r1);
break;
case OP_INT_LE:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.i <= r2.i;
push(r1);
break;
case OP_INT_GE:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.i >= r2.i;
push(r1);
break;
case OP_INT_ADD:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.i + r2.i;
push(r1);
break;
case OP_INT_SUB:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.i - r2.i;
push(r1);
break;
case OP_INT_MUL:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.i * r2.i;
push(r1);
break;
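/* Division is the one arithmetic opcode with an extra guard: a zero
divisor yields UNDEFINED instead of faulting, so a rule that divides
by zero simply evaluates as undefined. */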
case OP_INT_DIV:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
if (r2.i != 0)
r1.i = r1.i / r2.i;
else
r1.i = UNDEFINED;
push(r1);
break;
case OP_INT_MINUS:
pop(r1);
ensure_defined(r1);
r1.i = -r1.i;
push(r1);
break;
case OP_DBL_LT:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.d < r2.d;
push(r1);
break;
case OP_DBL_GT:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.d > r2.d;
push(r1);
break;
case OP_DBL_LE:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.d <= r2.d;
push(r1);
break;
case OP_DBL_GE:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.d >= r2.d;
push(r1);
break;
case OP_DBL_EQ:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.d == r2.d;
push(r1);
break;
case OP_DBL_NEQ:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.d != r2.d;
push(r1);
break;
case OP_DBL_ADD:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.d = r1.d + r2.d;
push(r1);
break;
case OP_DBL_SUB:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.d = r1.d - r2.d;
push(r1);
break;
case OP_DBL_MUL:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.d = r1.d * r2.d;
push(r1);
break;
case OP_DBL_DIV:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.d = r1.d / r2.d;
push(r1);
break;
case OP_DBL_MINUS:
pop(r1);
ensure_defined(r1);
r1.d = -r1.d;
push(r1);
break;
case OP_STR_EQ:
case OP_STR_NEQ:
case OP_STR_LT:
case OP_STR_LE:
case OP_STR_GT:
case OP_STR_GE:
pop(r2);
pop(r1);
ensure_defined(r1);
ensure_defined(r2);
switch(*ip)
{
case OP_STR_EQ:
r1.i = (sized_string_cmp(r1.ss, r2.ss) == 0);
break;
case OP_STR_NEQ:
r1.i = (sized_string_cmp(r1.ss, r2.ss) != 0);
break;
case OP_STR_LT:
r1.i = (sized_string_cmp(r1.ss, r2.ss) < 0);
break;
case OP_STR_LE:
r1.i = (sized_string_cmp(r1.ss, r2.ss) <= 0);
break;
case OP_STR_GT:
r1.i = (sized_string_cmp(r1.ss, r2.ss) > 0);
break;
case OP_STR_GE:
r1.i = (sized_string_cmp(r1.ss, r2.ss) >= 0);
break;
}
push(r1);
break;
default:
// Unknown instruction, this shouldn't happen.
assert(FALSE);
}
if (timeout > 0) // timeout == 0 means no timeout
{
// Check for timeout every 10 instruction cycles.
if (++cycle == 10)
{
if (difftime(time(NULL), start_time) > timeout)
{
#ifdef PROFILING_ENABLED
assert(current_rule != NULL);
current_rule->clock_ticks += clock() - start;
#endif
result = ERROR_SCAN_TIMEOUT;
stop = TRUE;
}
cycle = 0;
}
}
ip++;
}
obj_ptr = (YR_OBJECT**) yr_arena_base_address(obj_arena);
while (obj_ptr != NULL)
{
yr_object_destroy(*obj_ptr);
obj_ptr = (YR_OBJECT**) yr_arena_next_address(
obj_arena, obj_ptr, sizeof(YR_OBJECT*));
}
yr_arena_destroy(obj_arena);
yr_modules_unload_all(context);
yr_free(stack);
return result;
}
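/* The cleanup above walks every YR_OBJECT* stored in obj_arena and
destroys each exactly once, only after the bytecode loop has finished;
destroying them mid-run while opcode handlers can still dereference
them is what a use-after-free here would look like. A minimal sketch of
the same destroy-on-exit pattern over a plain array (hypothetical
names, not YARA's arena API): */
#if 0 /* illustration only */
#include <stdlib.h>

typedef void (*obj_destroy_fn)(void* obj);

typedef struct {
  void** items;
  size_t count;
} obj_registry;

static void obj_registry_destroy(obj_registry* r, obj_destroy_fn destroy)
{
  size_t i;
  for (i = 0; i < r->count; i++)
    destroy(r->items[i]);  /* each object destroyed once, after execution */
  free(r->items);
  r->items = NULL;
  r->count = 0;
}
#endif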
| ./CrossVul/dataset_final_sorted/CWE-416/c/good_3348_1 |
crossvul-cpp_data_good_3188_0 | /*
* Copyright (C) 2015 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
* Copyright (C) 2007-2008 Sourcefire, Inc.
*
* Authors: Alberto Wu
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
* MA 02110-1301, USA.
*/
#if HAVE_CONFIG_H
#include "clamav-config.h"
#endif
#include "clamav.h"
#include "cltypes.h"
#include "others.h"
#include "execs.h"
#include "wwunpack.h"
#if HAVE_STRING_H
#include <string.h>
#endif
#define RESEED \
if (CLI_ISCONTAINED(compd, szd, ccur, 4)) { \
bt = cli_readint32(ccur); \
ccur+=4; \
} else { \
cli_dbgmsg("WWPack: Out of bits\n"); \
error=1; \
} \
bc = 32;
#define BIT \
bits = bt>>31; \
bt<<=1; \
if(!--bc) { \
RESEED; \
}
#define BITS(N) \
bits = bt>>(32-(N)); \
if (bc>=(N)) { \
bc -= (N); \
bt<<=(N); \
if (!bc) { \
RESEED; \
} \
} else { \
if (CLI_ISCONTAINED(compd, szd, ccur, 4)) { \
bt = cli_readint32(ccur); \
ccur+=4; \
bc += 32 - (N); \
bits |= bt>>(bc); \
bt <<= (32-bc); \
} else { \
cli_dbgmsg("WWPack: Out of bits\n"); \
error=1; \
} \
}
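/* BIT/BITS above maintain a 32-bit window `bt` holding the next bits of
input MSB-first, with `bc` bits still unread; BITS(N) also handles a
request that straddles a word boundary by splicing in a freshly read
word. A function-style equivalent, to make the bookkeeping explicit
(hypothetical helpers, not ClamAV code -- the macros inline this logic
so they can set `error` and use the caller's locals directly): */
#if 0 /* illustration only; call br_reseed() once before reading */
#include <stdint.h>

typedef struct {
  const uint8_t *cur;  /* next unread 32-bit word of compressed input */
  const uint8_t *end;
  uint32_t window;     /* bt: current bit window, MSB first */
  unsigned avail;      /* bc: bits still unread in the window */
  int error;
} bitreader;

static uint32_t br_read_le32(const uint8_t *p)
{
  return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
         ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

static void br_reseed(bitreader *br)  /* RESEED */
{
  if (br->end - br->cur >= 4) {
    br->window = br_read_le32(br->cur);
    br->cur += 4;
  } else {
    br->error = 1;  /* "Out of bits" */
  }
  br->avail = 32;
}

static uint32_t br_bits(bitreader *br, unsigned n)  /* BITS(n), 1 <= n <= 31 */
{
  uint32_t bits = br->window >> (32 - n);  /* low bits of window are zero */
  if (br->avail >= n) {
    br->avail -= n;
    br->window <<= n;
    if (!br->avail)
      br_reseed(br);
  } else if (br->end - br->cur >= 4) {  /* request straddles a word */
    br->window = br_read_le32(br->cur);
    br->cur += 4;
    br->avail += 32 - n;  /* bits already delivered from the old word */
    bits |= br->window >> br->avail;
    br->window <<= (32 - br->avail);
  } else {
    br->error = 1;
  }
  return bits;
}
#endif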
int wwunpack(uint8_t *exe, uint32_t exesz, uint8_t *wwsect, struct cli_exe_section *sects, uint16_t scount, uint32_t pe, int desc) {
uint8_t *structs = wwsect + 0x2a1, *compd, *ccur, *unpd, *ucur, bc;
uint32_t src, srcend, szd, bt, bits;
int error=0, i;
cli_dbgmsg("in wwunpack\n");
while (1) {
if (!CLI_ISCONTAINED(wwsect, sects[scount].rsz, structs, 17)) {
cli_dbgmsg("WWPack: Array of structs out of section\n");
break;
}
src = sects[scount].rva - cli_readint32(structs); /* src delta / dst delta - not used / dwords / end of src */
structs+=8;
szd = cli_readint32(structs) * 4;
structs+=4;
srcend = cli_readint32(structs);
structs+=4;
unpd = ucur = exe+src+srcend+4-szd;
if (!szd || !CLI_ISCONTAINED(exe, exesz, unpd, szd)) {
cli_dbgmsg("WWPack: Compressed data out of file\n");
break;
}
cli_dbgmsg("WWP: src: %x, szd: %x, srcend: %x - %x\n", src, szd, srcend, srcend+4-szd);
if (!(compd = cli_malloc(szd))) {
cli_dbgmsg("WWPack: Unable to allocate memory for compd\n");
break;
}
memcpy(compd, unpd, szd);
memset(unpd, -1, szd); /* FIXME: poison the in-place output area (its contents were just copied to compd) */
ccur=compd;
RESEED;
while(!error) {
uint32_t backbytes, backsize;
uint8_t saved;
BIT;
if (!bits) { /* BYTE copy */
if(ccur-compd>=szd || !CLI_ISCONTAINED(exe, exesz, ucur, 1))
error=1;
else
*ucur++=*ccur++;
continue;
}
BITS(2);
if(bits==3) { /* WORD backcopy */
uint8_t shifted, subbed = 31;
BITS(2);
shifted = bits + 5;
if(bits>=2) {
shifted++;
subbed += 0x80;
}
backbytes = (1<<shifted)-subbed; /* 1h, 21h, 61h, 161h */
BITS(shifted); /* 5, 6, 8, 9 */
if(error || bits == 0x1ff) break;
backbytes+=bits;
if(!CLI_ISCONTAINED(exe, exesz, ucur, 2) || !CLI_ISCONTAINED(exe, exesz, ucur-backbytes, 2)) {
error=1;
} else {
ucur[0]=*(ucur-backbytes);
ucur[1]=*(ucur-backbytes+1);
ucur+=2;
}
continue;
}
/* BLOCK backcopy */
saved = bits; /* cmp al, 1 / pushf */
BITS(3);
if (bits<6) {
backbytes = bits;
switch(bits) {
case 4: /* 10,11 */
backbytes++;
/* fall through */
case 3: /* 8,9 */
BIT;
backbytes+=bits;
/* fall through */
case 0: case 1: case 2: /* 5,6,7 */
backbytes+=5;
break;
case 5: /* 12 */
backbytes=12;
break;
}
BITS(backbytes);
bits+=(1<<backbytes)-31;
} else if(bits==6) {
BITS(0x0e);
bits+=0x1fe1;
} else {
BITS(0x0f);
bits+=0x5fe1;
}
backbytes = bits;
/* popf / jb */
if (!saved) {
BIT;
if(!bits) {
BIT;
bits+=5;
} else {
BITS(3);
if(bits) {
bits+=6;
} else {
BITS(4);
if(bits) {
bits+=13;
} else {
uint8_t cnt = 4;
uint16_t shifted = 0x0d;
do {
if(cnt==7) { cnt = 0x0e; shifted = 0; break; }
shifted=((shifted+2)<<1)-1;
BIT;
cnt++;
} while(!bits);
BITS(cnt);
bits+=shifted;
}
}
}
backsize = bits;
} else {
backsize = saved+2;
}
if(!CLI_ISCONTAINED(exe, exesz, ucur, backsize) || !CLI_ISCONTAINED(exe, exesz, ucur-backbytes, backsize)) error=1;
else while(backsize--) {
*ucur=*(ucur-backbytes);
ucur++;
}
}
free(compd);
if(error) {
cli_dbgmsg("WWPack: decompression error\n");
break;
}
if (!*structs++) break; /* error is always 0 here: the block above already broke on error */
}
if(!error) {
/* account for the full width of every access below: two bytes at
pe+6/pe+7 and 4-byte accesses at pe+0x14, pe+0x28 and pe+0x50 */
if (pe+8 > exesz || pe+0x28+4 > exesz ||
pe+0x50+4 > exesz || pe+0x14+4 > exesz)
return CL_EFORMAT;
exe[pe+6]=(uint8_t)scount;
exe[pe+7]=(uint8_t)(scount>>8);
if (!CLI_ISCONTAINED(wwsect, sects[scount].rsz, wwsect+0x295, 4) ||
!CLI_ISCONTAINED(wwsect, sects[scount].rsz, wwsect+0x295+sects[scount].rva, 4) ||
!CLI_ISCONTAINED(wwsect, sects[scount].rsz, wwsect+0x295+sects[scount].rva+0x299, 4)) {
cli_dbgmsg("WWPack: unpack memory address out of bounds.\n");
return CL_EFORMAT;
}
cli_writeint32(&exe[pe+0x28], cli_readint32(wwsect+0x295)+sects[scount].rva+0x299);
cli_writeint32(&exe[pe+0x50], cli_readint32(&exe[pe+0x50])-sects[scount].vsz);
structs = &exe[(0xffff&cli_readint32(&exe[pe+0x14]))+pe+0x18];
for(i=0 ; i<scount ; i++) {
if (!CLI_ISCONTAINED(exe, exesz, structs, 0x28)) {
cli_dbgmsg("WWPack: structs pointer out of bounds\n");
return CL_EFORMAT;
}
cli_writeint32(structs+8, sects[i].vsz);
cli_writeint32(structs+12, sects[i].rva);
cli_writeint32(structs+16, sects[i].vsz);
cli_writeint32(structs+20, sects[i].rva);
structs+=0x28;
}
if (!CLI_ISCONTAINED(exe, exesz, structs, 0x28)) {
cli_dbgmsg("WWPack: structs pointer out of bounds\n");
return CL_EFORMAT;
}
memset(structs, 0, 0x28);
error = (uint32_t)cli_writen(desc, exe, exesz)!=exesz;
}
return error;
}
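/* wwunpack() validates every pointer through CLI_ISCONTAINED before it
reads or writes. The macro itself lives elsewhere in ClamAV; a sketch of
an equivalent, overflow-safe containment check: */
#if 0 /* illustration only */
#include <stddef.h>
#include <stdint.h>

/* Nonzero iff [small, small + small_size) lies fully inside
   [big, big + big_size); comparisons are arranged so no arithmetic
   can overflow past the end of the big buffer. */
static int sketch_is_contained(const uint8_t *big, size_t big_size,
                               const uint8_t *small, size_t small_size)
{
  return big_size > 0 && small_size > 0 &&
         small_size <= big_size &&
         small >= big &&
         (size_t)(small - big) <= big_size - small_size;
}
#endif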
| ./CrossVul/dataset_final_sorted/CWE-416/c/good_3188_0 |
crossvul-cpp_data_bad_5171_1 | /*
+----------------------------------------------------------------------+
| PHP Version 5 |
+----------------------------------------------------------------------+
| Copyright (c) 1997-2015 The PHP Group |
+----------------------------------------------------------------------+
| This source file is subject to version 3.01 of the PHP license, |
| that is bundled with this package in the file LICENSE, and is |
| available through the world-wide-web at the following url: |
| http://www.php.net/license/3_01.txt |
| If you did not receive a copy of the PHP license and are unable to |
| obtain it through the world-wide-web, please send a note to |
| license@php.net so we can mail you a copy immediately. |
+----------------------------------------------------------------------+
| Authors: Marcus Boerger <helly@php.net> |
+----------------------------------------------------------------------+
*/
/* $Id$ */
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif
#include "php.h"
#include "php_ini.h"
#include "ext/standard/info.h"
#include "ext/standard/php_var.h"
#include "ext/standard/php_smart_str.h"
#include "zend_interfaces.h"
#include "zend_exceptions.h"
#include "php_spl.h"
#include "spl_functions.h"
#include "spl_engine.h"
#include "spl_iterators.h"
#include "spl_array.h"
#include "spl_exceptions.h"
zend_object_handlers spl_handler_ArrayObject;
PHPAPI zend_class_entry *spl_ce_ArrayObject;
zend_object_handlers spl_handler_ArrayIterator;
PHPAPI zend_class_entry *spl_ce_ArrayIterator;
PHPAPI zend_class_entry *spl_ce_RecursiveArrayIterator;
#define SPL_ARRAY_STD_PROP_LIST 0x00000001
#define SPL_ARRAY_ARRAY_AS_PROPS 0x00000002
#define SPL_ARRAY_CHILD_ARRAYS_ONLY 0x00000004
#define SPL_ARRAY_OVERLOADED_REWIND 0x00010000
#define SPL_ARRAY_OVERLOADED_VALID 0x00020000
#define SPL_ARRAY_OVERLOADED_KEY 0x00040000
#define SPL_ARRAY_OVERLOADED_CURRENT 0x00080000
#define SPL_ARRAY_OVERLOADED_NEXT 0x00100000
#define SPL_ARRAY_IS_REF 0x01000000
#define SPL_ARRAY_IS_SELF 0x02000000
#define SPL_ARRAY_USE_OTHER 0x04000000
#define SPL_ARRAY_INT_MASK 0xFFFF0000
#define SPL_ARRAY_CLONE_MASK 0x0300FFFF
#define SPL_ARRAY_METHOD_NO_ARG 0
#define SPL_ARRAY_METHOD_USE_ARG 1
#define SPL_ARRAY_METHOD_MAY_USER_ARG 2
typedef struct _spl_array_object {
zend_object std;
zval *array;
zval *retval;
HashPosition pos;
ulong pos_h;
int ar_flags;
int is_self;
zend_function *fptr_offset_get;
zend_function *fptr_offset_set;
zend_function *fptr_offset_has;
zend_function *fptr_offset_del;
zend_function *fptr_count;
zend_class_entry* ce_get_iterator;
HashTable *debug_info;
unsigned char nApplyCount;
} spl_array_object;
static inline HashTable *spl_array_get_hash_table(spl_array_object* intern, int check_std_props TSRMLS_DC) { /* {{{ */
if ((intern->ar_flags & SPL_ARRAY_IS_SELF) != 0) {
if (!intern->std.properties) {
rebuild_object_properties(&intern->std);
}
return intern->std.properties;
} else if ((intern->ar_flags & SPL_ARRAY_USE_OTHER) && (check_std_props == 0 || (intern->ar_flags & SPL_ARRAY_STD_PROP_LIST) == 0) && Z_TYPE_P(intern->array) == IS_OBJECT) {
spl_array_object *other = (spl_array_object*)zend_object_store_get_object(intern->array TSRMLS_CC);
return spl_array_get_hash_table(other, check_std_props TSRMLS_CC);
} else if ((intern->ar_flags & ((check_std_props ? SPL_ARRAY_STD_PROP_LIST : 0) | SPL_ARRAY_IS_SELF)) != 0) {
if (!intern->std.properties) {
rebuild_object_properties(&intern->std);
}
return intern->std.properties;
} else {
return HASH_OF(intern->array);
}
} /* }}} */
static void spl_array_rewind(spl_array_object *intern TSRMLS_DC);
static void spl_array_update_pos(spl_array_object* intern) /* {{{ */
{
Bucket *pos = intern->pos;
if (pos != NULL) {
intern->pos_h = pos->h;
}
} /* }}} */
static void spl_array_set_pos(spl_array_object* intern, HashPosition pos) /* {{{ */
{
intern->pos = pos;
spl_array_update_pos(intern);
} /* }}} */
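/* pos_h caches the hash of the bucket `pos` points at. If the wrapped
array is modified behind the object's back, the cached Bucket* may have
been freed; the verify functions below re-derive the bucket chain from
pos_h and walk it, accepting `pos` only if it is still a live bucket.
On failure the position is rewound instead of being dereferenced. */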
SPL_API int spl_hash_verify_pos_ex(spl_array_object * intern, HashTable * ht TSRMLS_DC) /* {{{ */
{
Bucket *p;
/* IS_CONSISTENT(ht);*/
/* HASH_PROTECT_RECURSION(ht);*/
p = ht->arBuckets[intern->pos_h & ht->nTableMask];
while (p != NULL) {
if (p == intern->pos) {
return SUCCESS;
}
p = p->pNext;
}
/* HASH_UNPROTECT_RECURSION(ht); */
spl_array_rewind(intern TSRMLS_CC);
return FAILURE;
} /* }}} */
SPL_API int spl_hash_verify_pos(spl_array_object * intern TSRMLS_DC) /* {{{ */
{
HashTable *ht = spl_array_get_hash_table(intern, 0 TSRMLS_CC);
return spl_hash_verify_pos_ex(intern, ht TSRMLS_CC);
}
/* }}} */
/* {{{ spl_array_object_free_storage */
static void spl_array_object_free_storage(void *object TSRMLS_DC)
{
spl_array_object *intern = (spl_array_object *)object;
zend_object_std_dtor(&intern->std TSRMLS_CC);
zval_ptr_dtor(&intern->array);
zval_ptr_dtor(&intern->retval);
if (intern->debug_info != NULL) {
zend_hash_destroy(intern->debug_info);
efree(intern->debug_info);
}
efree(object);
}
/* }}} */
zend_object_iterator *spl_array_get_iterator(zend_class_entry *ce, zval *object, int by_ref TSRMLS_DC);
/* {{{ spl_array_object_new_ex */
static zend_object_value spl_array_object_new_ex(zend_class_entry *class_type, spl_array_object **obj, zval *orig, int clone_orig TSRMLS_DC)
{
zend_object_value retval = {0};
spl_array_object *intern;
zval *tmp;
zend_class_entry * parent = class_type;
int inherited = 0;
intern = emalloc(sizeof(spl_array_object));
memset(intern, 0, sizeof(spl_array_object));
*obj = intern;
ALLOC_INIT_ZVAL(intern->retval);
zend_object_std_init(&intern->std, class_type TSRMLS_CC);
object_properties_init(&intern->std, class_type);
intern->ar_flags = 0;
intern->debug_info = NULL;
intern->ce_get_iterator = spl_ce_ArrayIterator;
if (orig) {
spl_array_object *other = (spl_array_object*)zend_object_store_get_object(orig TSRMLS_CC);
intern->ar_flags &= ~ SPL_ARRAY_CLONE_MASK;
intern->ar_flags |= (other->ar_flags & SPL_ARRAY_CLONE_MASK);
intern->ce_get_iterator = other->ce_get_iterator;
if (clone_orig) {
intern->array = other->array;
if (Z_OBJ_HT_P(orig) == &spl_handler_ArrayObject) {
MAKE_STD_ZVAL(intern->array);
array_init(intern->array);
zend_hash_copy(HASH_OF(intern->array), HASH_OF(other->array), (copy_ctor_func_t) zval_add_ref, &tmp, sizeof(zval*));
}
if (Z_OBJ_HT_P(orig) == &spl_handler_ArrayIterator) {
Z_ADDREF_P(other->array);
}
} else {
intern->array = orig;
Z_ADDREF_P(intern->array);
intern->ar_flags |= SPL_ARRAY_IS_REF | SPL_ARRAY_USE_OTHER;
}
} else {
MAKE_STD_ZVAL(intern->array);
array_init(intern->array);
intern->ar_flags &= ~SPL_ARRAY_IS_REF;
}
retval.handle = zend_objects_store_put(intern, (zend_objects_store_dtor_t)zend_objects_destroy_object, (zend_objects_free_object_storage_t) spl_array_object_free_storage, NULL TSRMLS_CC);
while (parent) {
if (parent == spl_ce_ArrayIterator || parent == spl_ce_RecursiveArrayIterator) {
retval.handlers = &spl_handler_ArrayIterator;
class_type->get_iterator = spl_array_get_iterator;
break;
} else if (parent == spl_ce_ArrayObject) {
retval.handlers = &spl_handler_ArrayObject;
break;
}
parent = parent->parent;
inherited = 1;
}
if (!parent) { /* this must never happen */
php_error_docref(NULL TSRMLS_CC, E_COMPILE_ERROR, "Internal compiler error, Class is not child of ArrayObject or ArrayIterator");
}
if (inherited) {
zend_hash_find(&class_type->function_table, "offsetget", sizeof("offsetget"), (void **) &intern->fptr_offset_get);
if (intern->fptr_offset_get->common.scope == parent) {
intern->fptr_offset_get = NULL;
}
zend_hash_find(&class_type->function_table, "offsetset", sizeof("offsetset"), (void **) &intern->fptr_offset_set);
if (intern->fptr_offset_set->common.scope == parent) {
intern->fptr_offset_set = NULL;
}
zend_hash_find(&class_type->function_table, "offsetexists", sizeof("offsetexists"), (void **) &intern->fptr_offset_has);
if (intern->fptr_offset_has->common.scope == parent) {
intern->fptr_offset_has = NULL;
}
zend_hash_find(&class_type->function_table, "offsetunset", sizeof("offsetunset"), (void **) &intern->fptr_offset_del);
if (intern->fptr_offset_del->common.scope == parent) {
intern->fptr_offset_del = NULL;
}
zend_hash_find(&class_type->function_table, "count", sizeof("count"), (void **) &intern->fptr_count);
if (intern->fptr_count->common.scope == parent) {
intern->fptr_count = NULL;
}
}
/* Cache the iterator methods if the class is ArrayIterator or derived. */
/* zf_current doubles as the "cache populated" sentinel, since current() is the one handler that is always required. */
if (retval.handlers == &spl_handler_ArrayIterator) {
if (!class_type->iterator_funcs.zf_current) {
zend_hash_find(&class_type->function_table, "rewind", sizeof("rewind"), (void **) &class_type->iterator_funcs.zf_rewind);
zend_hash_find(&class_type->function_table, "valid", sizeof("valid"), (void **) &class_type->iterator_funcs.zf_valid);
zend_hash_find(&class_type->function_table, "key", sizeof("key"), (void **) &class_type->iterator_funcs.zf_key);
zend_hash_find(&class_type->function_table, "current", sizeof("current"), (void **) &class_type->iterator_funcs.zf_current);
zend_hash_find(&class_type->function_table, "next", sizeof("next"), (void **) &class_type->iterator_funcs.zf_next);
}
if (inherited) {
if (class_type->iterator_funcs.zf_rewind->common.scope != parent) intern->ar_flags |= SPL_ARRAY_OVERLOADED_REWIND;
if (class_type->iterator_funcs.zf_valid->common.scope != parent) intern->ar_flags |= SPL_ARRAY_OVERLOADED_VALID;
if (class_type->iterator_funcs.zf_key->common.scope != parent) intern->ar_flags |= SPL_ARRAY_OVERLOADED_KEY;
if (class_type->iterator_funcs.zf_current->common.scope != parent) intern->ar_flags |= SPL_ARRAY_OVERLOADED_CURRENT;
if (class_type->iterator_funcs.zf_next->common.scope != parent) intern->ar_flags |= SPL_ARRAY_OVERLOADED_NEXT;
}
}
spl_array_rewind(intern TSRMLS_CC);
return retval;
}
/* }}} */
/* {{{ spl_array_object_new */
static zend_object_value spl_array_object_new(zend_class_entry *class_type TSRMLS_DC)
{
spl_array_object *tmp;
return spl_array_object_new_ex(class_type, &tmp, NULL, 0 TSRMLS_CC);
}
/* }}} */
/* {{{ spl_array_object_clone */
static zend_object_value spl_array_object_clone(zval *zobject TSRMLS_DC)
{
zend_object_value new_obj_val;
zend_object *old_object;
zend_object *new_object;
zend_object_handle handle = Z_OBJ_HANDLE_P(zobject);
spl_array_object *intern;
old_object = zend_objects_get_address(zobject TSRMLS_CC);
new_obj_val = spl_array_object_new_ex(old_object->ce, &intern, zobject, 1 TSRMLS_CC);
new_object = &intern->std;
zend_objects_clone_members(new_object, new_obj_val, old_object, handle TSRMLS_CC);
return new_obj_val;
}
/* }}} */
static zval **spl_array_get_dimension_ptr_ptr(int check_inherited, zval *object, zval *offset, int type TSRMLS_DC) /* {{{ */
{
spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);
zval **retval;
long index;
HashTable *ht = spl_array_get_hash_table(intern, 0 TSRMLS_CC);
if (!offset) {
return &EG(uninitialized_zval_ptr);
}
if ((type == BP_VAR_W || type == BP_VAR_RW) && (ht->nApplyCount > 0)) {
zend_error(E_WARNING, "Modification of ArrayObject during sorting is prohibited");
return &EG(error_zval_ptr);
}
switch(Z_TYPE_P(offset)) {
case IS_NULL:
Z_STRVAL_P(offset) = "";
Z_STRLEN_P(offset) = 0;
/* fall through: treat a null offset as the empty-string key */
case IS_STRING:
if (zend_symtable_find(ht, Z_STRVAL_P(offset), Z_STRLEN_P(offset)+1, (void **) &retval) == FAILURE) {
switch (type) {
case BP_VAR_R:
zend_error(E_NOTICE, "Undefined index: %s", Z_STRVAL_P(offset));
case BP_VAR_UNSET:
case BP_VAR_IS:
retval = &EG(uninitialized_zval_ptr);
break;
case BP_VAR_RW:
zend_error(E_NOTICE,"Undefined index: %s", Z_STRVAL_P(offset));
case BP_VAR_W: {
zval *value;
ALLOC_INIT_ZVAL(value);
zend_symtable_update(ht, Z_STRVAL_P(offset), Z_STRLEN_P(offset)+1, (void**)&value, sizeof(void*), (void **)&retval);
}
}
}
return retval;
case IS_RESOURCE:
zend_error(E_STRICT, "Resource ID#%ld used as offset, casting to integer (%ld)", Z_LVAL_P(offset), Z_LVAL_P(offset));
/* fall through: a resource offset is used via its numeric ID */
case IS_DOUBLE:
case IS_BOOL:
case IS_LONG:
if (offset->type == IS_DOUBLE) {
index = (long)Z_DVAL_P(offset);
} else {
index = Z_LVAL_P(offset);
}
if (zend_hash_index_find(ht, index, (void **) &retval) == FAILURE) {
switch (type) {
case BP_VAR_R:
zend_error(E_NOTICE, "Undefined offset: %ld", index);
case BP_VAR_UNSET:
case BP_VAR_IS:
retval = &EG(uninitialized_zval_ptr);
break;
case BP_VAR_RW:
zend_error(E_NOTICE, "Undefined offset: %ld", index);
case BP_VAR_W: {
zval *value;
ALLOC_INIT_ZVAL(value);
zend_hash_index_update(ht, index, (void**)&value, sizeof(void*), (void **)&retval);
}
}
}
return retval;
default:
zend_error(E_WARNING, "Illegal offset type");
return (type == BP_VAR_W || type == BP_VAR_RW) ?
&EG(error_zval_ptr) : &EG(uninitialized_zval_ptr);
}
} /* }}} */
static zval *spl_array_read_dimension_ex(int check_inherited, zval *object, zval *offset, int type TSRMLS_DC) /* {{{ */
{
zval **ret;
if (check_inherited) {
spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);
if (intern->fptr_offset_get) {
zval *rv;
if (!offset) {
ALLOC_INIT_ZVAL(offset);
} else {
SEPARATE_ARG_IF_REF(offset);
}
zend_call_method_with_1_params(&object, Z_OBJCE_P(object), &intern->fptr_offset_get, "offsetGet", &rv, offset);
zval_ptr_dtor(&offset);
if (rv) {
zval_ptr_dtor(&intern->retval);
MAKE_STD_ZVAL(intern->retval);
ZVAL_ZVAL(intern->retval, rv, 1, 1);
return intern->retval;
}
return EG(uninitialized_zval_ptr);
}
}
ret = spl_array_get_dimension_ptr_ptr(check_inherited, object, offset, type TSRMLS_CC);
/* When in a write context,
* ZE has to be fooled into thinking this is in a reference set
* by separating (if necessary) and returning as an is_ref=1 zval (even if refcount == 1) */
if ((type == BP_VAR_W || type == BP_VAR_RW || type == BP_VAR_UNSET) && !Z_ISREF_PP(ret) && ret != &EG(uninitialized_zval_ptr)) {
if (Z_REFCOUNT_PP(ret) > 1) {
zval *newval;
/* Separate */
MAKE_STD_ZVAL(newval);
*newval = **ret;
zval_copy_ctor(newval);
Z_SET_REFCOUNT_P(newval, 1);
/* Replace */
Z_DELREF_PP(ret);
*ret = newval;
}
Z_SET_ISREF_PP(ret);
}
return *ret;
} /* }}} */
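/* The separation above is the stock PHP 5 copy-on-write dance, pulled
out here as a standalone sketch (same Zend macros; hypothetical helper
name, error handling elided): */
#if 0 /* illustration only */
static void sketch_separate_and_ref(zval **slot)
{
  if (Z_REFCOUNT_PP(slot) > 1 && !Z_ISREF_PP(slot)) {
    zval *copy;
    MAKE_STD_ZVAL(copy);
    *copy = **slot;            /* shallow copy of the container... */
    zval_copy_ctor(copy);      /* ...then deep-copy the held data */
    Z_SET_REFCOUNT_P(copy, 1);
    Z_DELREF_PP(slot);         /* the old zval keeps its other owners */
    *slot = copy;
  }
  Z_SET_ISREF_PP(slot);        /* writes through *slot now stick */
}
#endif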
static zval *spl_array_read_dimension(zval *object, zval *offset, int type TSRMLS_DC) /* {{{ */
{
return spl_array_read_dimension_ex(1, object, offset, type TSRMLS_CC);
} /* }}} */
static void spl_array_write_dimension_ex(int check_inherited, zval *object, zval *offset, zval *value TSRMLS_DC) /* {{{ */
{
spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);
long index;
HashTable *ht;
if (check_inherited && intern->fptr_offset_set) {
if (!offset) {
ALLOC_INIT_ZVAL(offset);
} else {
SEPARATE_ARG_IF_REF(offset);
}
zend_call_method_with_2_params(&object, Z_OBJCE_P(object), &intern->fptr_offset_set, "offsetSet", NULL, offset, value);
zval_ptr_dtor(&offset);
return;
}
if (!offset) {
ht = spl_array_get_hash_table(intern, 0 TSRMLS_CC);
if (ht->nApplyCount > 0) {
zend_error(E_WARNING, "Modification of ArrayObject during sorting is prohibited");
return;
}
Z_ADDREF_P(value);
zend_hash_next_index_insert(ht, (void**)&value, sizeof(void*), NULL);
return;
}
switch(Z_TYPE_P(offset)) {
case IS_STRING:
ht = spl_array_get_hash_table(intern, 0 TSRMLS_CC);
if (ht->nApplyCount > 0) {
zend_error(E_WARNING, "Modification of ArrayObject during sorting is prohibited");
return;
}
Z_ADDREF_P(value);
zend_symtable_update(ht, Z_STRVAL_P(offset), Z_STRLEN_P(offset)+1, (void**)&value, sizeof(void*), NULL);
return;
case IS_DOUBLE:
case IS_RESOURCE:
case IS_BOOL:
case IS_LONG:
ht = spl_array_get_hash_table(intern, 0 TSRMLS_CC);
if (ht->nApplyCount > 0) {
zend_error(E_WARNING, "Modification of ArrayObject during sorting is prohibited");
return;
}
if (offset->type == IS_DOUBLE) {
index = (long)Z_DVAL_P(offset);
} else {
index = Z_LVAL_P(offset);
}
Z_ADDREF_P(value);
zend_hash_index_update(ht, index, (void**)&value, sizeof(void*), NULL);
return;
case IS_NULL:
ht = spl_array_get_hash_table(intern, 0 TSRMLS_CC);
if (ht->nApplyCount > 0) {
zend_error(E_WARNING, "Modification of ArrayObject during sorting is prohibited");
return;
}
Z_ADDREF_P(value);
zend_hash_next_index_insert(ht, (void**)&value, sizeof(void*), NULL);
return;
default:
zend_error(E_WARNING, "Illegal offset type");
return;
}
} /* }}} */
static void spl_array_write_dimension(zval *object, zval *offset, zval *value TSRMLS_DC) /* {{{ */
{
spl_array_write_dimension_ex(1, object, offset, value TSRMLS_CC);
} /* }}} */
static void spl_array_unset_dimension_ex(int check_inherited, zval *object, zval *offset TSRMLS_DC) /* {{{ */
{
spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);
long index;
HashTable *ht;
if (check_inherited && intern->fptr_offset_del) {
SEPARATE_ARG_IF_REF(offset);
zend_call_method_with_1_params(&object, Z_OBJCE_P(object), &intern->fptr_offset_del, "offsetUnset", NULL, offset);
zval_ptr_dtor(&offset);
return;
}
switch(Z_TYPE_P(offset)) {
case IS_STRING:
ht = spl_array_get_hash_table(intern, 0 TSRMLS_CC);
if (ht->nApplyCount > 0) {
zend_error(E_WARNING, "Modification of ArrayObject during sorting is prohibited");
return;
}
if (ht == &EG(symbol_table)) {
if (zend_delete_global_variable(Z_STRVAL_P(offset), Z_STRLEN_P(offset) TSRMLS_CC)) {
zend_error(E_NOTICE,"Undefined index: %s", Z_STRVAL_P(offset));
}
} else {
if (zend_symtable_del(ht, Z_STRVAL_P(offset), Z_STRLEN_P(offset)+1) == FAILURE) {
zend_error(E_NOTICE,"Undefined index: %s", Z_STRVAL_P(offset));
} else {
spl_array_object *obj = intern;
while (1) {
if ((obj->ar_flags & SPL_ARRAY_IS_SELF) != 0) {
break;
} else if (Z_TYPE_P(obj->array) == IS_OBJECT) {
if ((obj->ar_flags & SPL_ARRAY_USE_OTHER) == 0) {
obj = (spl_array_object*)zend_object_store_get_object(obj->array TSRMLS_CC);
break;
} else {
obj = (spl_array_object*)zend_object_store_get_object(obj->array TSRMLS_CC);
}
} else {
obj = NULL;
break;
}
}
if (obj) {
zend_property_info *property_info = zend_get_property_info(obj->std.ce, offset, 1 TSRMLS_CC);
if (property_info &&
(property_info->flags & ZEND_ACC_STATIC) == 0 &&
property_info->offset >= 0) {
obj->std.properties_table[property_info->offset] = NULL;
}
}
}
}
break;
case IS_DOUBLE:
case IS_RESOURCE:
case IS_BOOL:
case IS_LONG:
if (offset->type == IS_DOUBLE) {
index = (long)Z_DVAL_P(offset);
} else {
index = Z_LVAL_P(offset);
}
ht = spl_array_get_hash_table(intern, 0 TSRMLS_CC);
if (ht->nApplyCount > 0) {
zend_error(E_WARNING, "Modification of ArrayObject during sorting is prohibited");
return;
}
if (zend_hash_index_del(ht, index) == FAILURE) {
zend_error(E_NOTICE,"Undefined offset: %ld", Z_LVAL_P(offset));
}
break;
default:
zend_error(E_WARNING, "Illegal offset type");
return;
}
spl_hash_verify_pos(intern TSRMLS_CC); /* call rewind on FAILURE */
} /* }}} */
static void spl_array_unset_dimension(zval *object, zval *offset TSRMLS_DC) /* {{{ */
{
spl_array_unset_dimension_ex(1, object, offset TSRMLS_CC);
} /* }}} */
static int spl_array_has_dimension_ex(int check_inherited, zval *object, zval *offset, int check_empty TSRMLS_DC) /* {{{ */
{
spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);
long index;
zval *rv, **tmp;
if (check_inherited && intern->fptr_offset_has) {
SEPARATE_ARG_IF_REF(offset);
zend_call_method_with_1_params(&object, Z_OBJCE_P(object), &intern->fptr_offset_has, "offsetExists", &rv, offset);
zval_ptr_dtor(&offset);
if (rv && zend_is_true(rv)) {
zval_ptr_dtor(&rv);
return 1;
}
if (rv) {
zval_ptr_dtor(&rv);
}
return 0;
}
switch(Z_TYPE_P(offset)) {
case IS_STRING:
{
HashTable *ht = spl_array_get_hash_table(intern, 0 TSRMLS_CC);
if (zend_symtable_find(ht, Z_STRVAL_P(offset), Z_STRLEN_P(offset)+1, (void **) &tmp) != FAILURE) {
switch (check_empty) {
case 0:
return Z_TYPE_PP(tmp) != IS_NULL;
case 2:
return 1;
default:
return zend_is_true(*tmp);
}
}
}
return 0;
case IS_DOUBLE:
case IS_RESOURCE:
case IS_BOOL:
case IS_LONG:
{
HashTable *ht = spl_array_get_hash_table(intern, 0 TSRMLS_CC);
if (offset->type == IS_DOUBLE) {
index = (long)Z_DVAL_P(offset);
} else {
index = Z_LVAL_P(offset);
}
if (zend_hash_index_find(ht, index, (void **)&tmp) != FAILURE) {
switch (check_empty) {
case 0:
return Z_TYPE_PP(tmp) != IS_NULL;
case 2:
return 1;
default:
return zend_is_true(*tmp);
}
}
return 0;
}
default:
zend_error(E_WARNING, "Illegal offset type");
}
return 0;
} /* }}} */
static int spl_array_has_dimension(zval *object, zval *offset, int check_empty TSRMLS_DC) /* {{{ */
{
return spl_array_has_dimension_ex(1, object, offset, check_empty TSRMLS_CC);
} /* }}} */
/* {{{ spl_array_object_verify_pos_ex */
static inline int spl_array_object_verify_pos_ex(spl_array_object *object, HashTable *ht, const char *msg_prefix TSRMLS_DC)
{
if (!ht) {
php_error_docref(NULL TSRMLS_CC, E_NOTICE, "%sArray was modified outside object and is no longer an array", msg_prefix);
return FAILURE;
}
if (object->pos && (object->ar_flags & SPL_ARRAY_IS_REF) && spl_hash_verify_pos_ex(object, ht TSRMLS_CC) == FAILURE) {
php_error_docref(NULL TSRMLS_CC, E_NOTICE, "%sArray was modified outside object and internal position is no longer valid", msg_prefix);
return FAILURE;
}
return SUCCESS;
} /* }}} */
/* {{{ spl_array_object_verify_pos */
static inline int spl_array_object_verify_pos(spl_array_object *object, HashTable *ht TSRMLS_DC)
{
return spl_array_object_verify_pos_ex(object, ht, "" TSRMLS_CC);
} /* }}} */
/* {{{ proto bool ArrayObject::offsetExists(mixed $index)
proto bool ArrayIterator::offsetExists(mixed $index)
Returns whether the requested $index exists. */
SPL_METHOD(Array, offsetExists)
{
zval *index;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "z", &index) == FAILURE) {
return;
}
RETURN_BOOL(spl_array_has_dimension_ex(0, getThis(), index, 2 TSRMLS_CC));
} /* }}} */
/* {{{ proto mixed ArrayObject::offsetGet(mixed $index)
proto mixed ArrayIterator::offsetGet(mixed $index)
Returns the value at the specified $index. */
SPL_METHOD(Array, offsetGet)
{
zval *index, *value;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "z", &index) == FAILURE) {
return;
}
value = spl_array_read_dimension_ex(0, getThis(), index, BP_VAR_R TSRMLS_CC);
RETURN_ZVAL(value, 1, 0);
} /* }}} */
/* {{{ proto void ArrayObject::offsetSet(mixed $index, mixed $newval)
proto void ArrayIterator::offsetSet(mixed $index, mixed $newval)
Sets the value at the specified $index to $newval. */
SPL_METHOD(Array, offsetSet)
{
zval *index, *value;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "zz", &index, &value) == FAILURE) {
return;
}
spl_array_write_dimension_ex(0, getThis(), index, value TSRMLS_CC);
} /* }}} */
void spl_array_iterator_append(zval *object, zval *append_value TSRMLS_DC) /* {{{ */
{
spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);
HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC);
if (!aht) {
php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Array was modified outside object and is no longer an array");
return;
}
if (Z_TYPE_P(intern->array) == IS_OBJECT) {
php_error_docref(NULL TSRMLS_CC, E_RECOVERABLE_ERROR, "Cannot append properties to objects, use %s::offsetSet() instead", Z_OBJCE_P(object)->name);
return;
}
spl_array_write_dimension(object, NULL, append_value TSRMLS_CC);
if (!intern->pos) {
spl_array_set_pos(intern, aht->pListTail);
}
} /* }}} */
/* {{{ proto void ArrayObject::append(mixed $newval)
proto void ArrayIterator::append(mixed $newval)
Appends the value (cannot be called for objects). */
SPL_METHOD(Array, append)
{
zval *value;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "z", &value) == FAILURE) {
return;
}
spl_array_iterator_append(getThis(), value TSRMLS_CC);
} /* }}} */
/* {{{ proto void ArrayObject::offsetUnset(mixed $index)
proto void ArrayIterator::offsetUnset(mixed $index)
Unsets the value at the specified $index. */
SPL_METHOD(Array, offsetUnset)
{
zval *index;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "z", &index) == FAILURE) {
return;
}
spl_array_unset_dimension_ex(0, getThis(), index TSRMLS_CC);
} /* }}} */
/* {{{ proto array ArrayObject::getArrayCopy()
proto array ArrayIterator::getArrayCopy()
Return a copy of the contained array */
SPL_METHOD(Array, getArrayCopy)
{
zval *object = getThis(), *tmp;
spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);
array_init(return_value);
zend_hash_copy(HASH_OF(return_value), spl_array_get_hash_table(intern, 0 TSRMLS_CC), (copy_ctor_func_t) zval_add_ref, &tmp, sizeof(zval*));
} /* }}} */
static HashTable *spl_array_get_properties(zval *object TSRMLS_DC) /* {{{ */
{
spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);
HashTable *result;
if (intern->nApplyCount > 1) {
php_error_docref(NULL TSRMLS_CC, E_ERROR, "Nesting level too deep - recursive dependency?");
}
intern->nApplyCount++;
result = spl_array_get_hash_table(intern, 1 TSRMLS_CC);
intern->nApplyCount--;
return result;
} /* }}} */
static HashTable* spl_array_get_debug_info(zval *obj, int *is_temp TSRMLS_DC) /* {{{ */
{
spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(obj TSRMLS_CC);
zval *tmp, *storage;
int name_len;
char *zname;
zend_class_entry *base;
*is_temp = 0;
if (!intern->std.properties) {
rebuild_object_properties(&intern->std);
}
if (HASH_OF(intern->array) == intern->std.properties) {
return intern->std.properties;
} else {
if (intern->debug_info == NULL) {
ALLOC_HASHTABLE(intern->debug_info);
ZEND_INIT_SYMTABLE_EX(intern->debug_info, zend_hash_num_elements(intern->std.properties) + 1, 0);
}
if (intern->debug_info->nApplyCount == 0) {
zend_hash_clean(intern->debug_info);
zend_hash_copy(intern->debug_info, intern->std.properties, (copy_ctor_func_t) zval_add_ref, (void *) &tmp, sizeof(zval *));
storage = intern->array;
zval_add_ref(&storage);
base = (Z_OBJ_HT_P(obj) == &spl_handler_ArrayIterator) ? spl_ce_ArrayIterator : spl_ce_ArrayObject;
zname = spl_gen_private_prop_name(base, "storage", sizeof("storage")-1, &name_len TSRMLS_CC);
zend_symtable_update(intern->debug_info, zname, name_len+1, &storage, sizeof(zval *), NULL);
efree(zname);
}
return intern->debug_info;
}
}
/* }}} */
static zval *spl_array_read_property(zval *object, zval *member, int type, const zend_literal *key TSRMLS_DC) /* {{{ */
{
spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);
if ((intern->ar_flags & SPL_ARRAY_ARRAY_AS_PROPS) != 0
&& !std_object_handlers.has_property(object, member, 2, key TSRMLS_CC)) {
return spl_array_read_dimension(object, member, type TSRMLS_CC);
}
return std_object_handlers.read_property(object, member, type, key TSRMLS_CC);
} /* }}} */
static void spl_array_write_property(zval *object, zval *member, zval *value, const zend_literal *key TSRMLS_DC) /* {{{ */
{
spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);
if ((intern->ar_flags & SPL_ARRAY_ARRAY_AS_PROPS) != 0
&& !std_object_handlers.has_property(object, member, 2, key TSRMLS_CC)) {
spl_array_write_dimension(object, member, value TSRMLS_CC);
return;
}
std_object_handlers.write_property(object, member, value, key TSRMLS_CC);
} /* }}} */
static zval **spl_array_get_property_ptr_ptr(zval *object, zval *member, int type, const zend_literal *key TSRMLS_DC) /* {{{ */
{
spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);
if ((intern->ar_flags & SPL_ARRAY_ARRAY_AS_PROPS) != 0
&& !std_object_handlers.has_property(object, member, 2, key TSRMLS_CC)) {
return spl_array_get_dimension_ptr_ptr(1, object, member, type TSRMLS_CC);
}
return std_object_handlers.get_property_ptr_ptr(object, member, type, key TSRMLS_CC);
} /* }}} */
static int spl_array_has_property(zval *object, zval *member, int has_set_exists, const zend_literal *key TSRMLS_DC) /* {{{ */
{
spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);
if ((intern->ar_flags & SPL_ARRAY_ARRAY_AS_PROPS) != 0
&& !std_object_handlers.has_property(object, member, 2, key TSRMLS_CC)) {
return spl_array_has_dimension(object, member, has_set_exists TSRMLS_CC);
}
return std_object_handlers.has_property(object, member, has_set_exists, key TSRMLS_CC);
} /* }}} */
static void spl_array_unset_property(zval *object, zval *member, const zend_literal *key TSRMLS_DC) /* {{{ */
{
spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);
if ((intern->ar_flags & SPL_ARRAY_ARRAY_AS_PROPS) != 0
&& !std_object_handlers.has_property(object, member, 2, key TSRMLS_CC)) {
spl_array_unset_dimension(object, member TSRMLS_CC);
spl_array_rewind(intern TSRMLS_CC); /* because deletion might invalidate position */
return;
}
std_object_handlers.unset_property(object, member, key TSRMLS_CC);
} /* }}} */
static int spl_array_compare_objects(zval *o1, zval *o2 TSRMLS_DC) /* {{{ */
{
HashTable *ht1,
*ht2;
spl_array_object *intern1,
*intern2;
int result = 0;
zval temp_zv;
intern1 = (spl_array_object*)zend_object_store_get_object(o1 TSRMLS_CC);
intern2 = (spl_array_object*)zend_object_store_get_object(o2 TSRMLS_CC);
ht1 = spl_array_get_hash_table(intern1, 0 TSRMLS_CC);
ht2 = spl_array_get_hash_table(intern2, 0 TSRMLS_CC);
zend_compare_symbol_tables(&temp_zv, ht1, ht2 TSRMLS_CC);
result = (int)Z_LVAL(temp_zv);
/* if we just compared std.properties, don't do it again */
if (result == 0 &&
!(ht1 == intern1->std.properties && ht2 == intern2->std.properties)) {
result = std_object_handlers.compare_objects(o1, o2 TSRMLS_CC);
}
return result;
} /* }}} */
static int spl_array_skip_protected(spl_array_object *intern, HashTable *aht TSRMLS_DC) /* {{{ */
{
char *string_key;
uint string_length;
ulong num_key;
if (Z_TYPE_P(intern->array) == IS_OBJECT) {
do {
if (zend_hash_get_current_key_ex(aht, &string_key, &string_length, &num_key, 0, &intern->pos) == HASH_KEY_IS_STRING) {
/* zend_hash_get_current_key_ex() should never set
* string_length to 0 when returning HASH_KEY_IS_STRING, but we
* may as well be defensive and consider that successful.
* Beyond that, we're looking for protected keys (which will
* have a null byte at string_key[0]), but want to avoid
* skipping completely empty keys (which will also have the
* null byte, but a string_length of 1). */
if (!string_length || string_key[0] || string_length == 1) {
return SUCCESS;
}
} else {
return SUCCESS;
}
if (zend_hash_has_more_elements_ex(aht, &intern->pos) != SUCCESS) {
return FAILURE;
}
zend_hash_move_forward_ex(aht, &intern->pos);
spl_array_update_pos(intern);
} while (1);
}
return FAILURE;
} /* }}} */
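/* Layout of the mangled keys this loop filters out (PHP 5 convention):
*   protected $p     ->  "\0*\0p"
*   private Foo::$q  ->  "\0Foo\0q"
* A public key never starts with '\0', and the lone "\0" empty key
* (string_length == 1) is deliberately kept, per the comment above. */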
static int spl_array_next_no_verify(spl_array_object *intern, HashTable *aht TSRMLS_DC) /* {{{ */
{
zend_hash_move_forward_ex(aht, &intern->pos);
spl_array_update_pos(intern);
if (Z_TYPE_P(intern->array) == IS_OBJECT) {
return spl_array_skip_protected(intern, aht TSRMLS_CC);
} else {
return zend_hash_has_more_elements_ex(aht, &intern->pos);
}
} /* }}} */
static int spl_array_next_ex(spl_array_object *intern, HashTable *aht TSRMLS_DC) /* {{{ */
{
if ((intern->ar_flags & SPL_ARRAY_IS_REF) && spl_hash_verify_pos_ex(intern, aht TSRMLS_CC) == FAILURE) {
php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Array was modified outside object and internal position is no longer valid");
return FAILURE;
}
return spl_array_next_no_verify(intern, aht TSRMLS_CC);
} /* }}} */
static int spl_array_next(spl_array_object *intern TSRMLS_DC) /* {{{ */
{
HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC);
return spl_array_next_ex(intern, aht TSRMLS_CC);
} /* }}} */
/* define an overloaded iterator structure */
typedef struct {
zend_user_iterator intern;
spl_array_object *object;
} spl_array_it;
static void spl_array_it_dtor(zend_object_iterator *iter TSRMLS_DC) /* {{{ */
{
spl_array_it *iterator = (spl_array_it *)iter;
zend_user_it_invalidate_current(iter TSRMLS_CC);
zval_ptr_dtor((zval**)&iterator->intern.it.data);
efree(iterator);
}
/* }}} */
static int spl_array_it_valid(zend_object_iterator *iter TSRMLS_DC) /* {{{ */
{
spl_array_it *iterator = (spl_array_it *)iter;
spl_array_object *object = iterator->object;
HashTable *aht = spl_array_get_hash_table(object, 0 TSRMLS_CC);
if (object->ar_flags & SPL_ARRAY_OVERLOADED_VALID) {
return zend_user_it_valid(iter TSRMLS_CC);
} else {
if (spl_array_object_verify_pos_ex(object, aht, "ArrayIterator::valid(): " TSRMLS_CC) == FAILURE) {
return FAILURE;
}
return zend_hash_has_more_elements_ex(aht, &object->pos);
}
}
/* }}} */
static void spl_array_it_get_current_data(zend_object_iterator *iter, zval ***data TSRMLS_DC) /* {{{ */
{
spl_array_it *iterator = (spl_array_it *)iter;
spl_array_object *object = iterator->object;
HashTable *aht = spl_array_get_hash_table(object, 0 TSRMLS_CC);
if (object->ar_flags & SPL_ARRAY_OVERLOADED_CURRENT) {
zend_user_it_get_current_data(iter, data TSRMLS_CC);
} else {
if (zend_hash_get_current_data_ex(aht, (void**)data, &object->pos) == FAILURE) {
*data = NULL;
}
}
}
/* }}} */
static void spl_array_it_get_current_key(zend_object_iterator *iter, zval *key TSRMLS_DC) /* {{{ */
{
spl_array_it *iterator = (spl_array_it *)iter;
spl_array_object *object = iterator->object;
HashTable *aht = spl_array_get_hash_table(object, 0 TSRMLS_CC);
if (object->ar_flags & SPL_ARRAY_OVERLOADED_KEY) {
zend_user_it_get_current_key(iter, key TSRMLS_CC);
} else {
if (spl_array_object_verify_pos_ex(object, aht, "ArrayIterator::current(): " TSRMLS_CC) == FAILURE) {
ZVAL_NULL(key);
} else {
zend_hash_get_current_key_zval_ex(aht, key, &object->pos);
}
}
}
/* }}} */
static void spl_array_it_move_forward(zend_object_iterator *iter TSRMLS_DC) /* {{{ */
{
spl_array_it *iterator = (spl_array_it *)iter;
spl_array_object *object = iterator->object;
HashTable *aht = spl_array_get_hash_table(object, 0 TSRMLS_CC);
if (object->ar_flags & SPL_ARRAY_OVERLOADED_NEXT) {
zend_user_it_move_forward(iter TSRMLS_CC);
} else {
zend_user_it_invalidate_current(iter TSRMLS_CC);
if (!aht) {
php_error_docref(NULL TSRMLS_CC, E_NOTICE, "ArrayIterator::current(): Array was modified outside object and is no longer an array");
return;
}
if ((object->ar_flags & SPL_ARRAY_IS_REF) && spl_hash_verify_pos_ex(object, aht TSRMLS_CC) == FAILURE) {
php_error_docref(NULL TSRMLS_CC, E_NOTICE, "ArrayIterator::next(): Array was modified outside object and internal position is no longer valid");
} else {
spl_array_next_no_verify(object, aht TSRMLS_CC);
}
}
}
/* }}} */
static void spl_array_rewind_ex(spl_array_object *intern, HashTable *aht TSRMLS_DC) /* {{{ */
{
zend_hash_internal_pointer_reset_ex(aht, &intern->pos);
spl_array_update_pos(intern);
spl_array_skip_protected(intern, aht TSRMLS_CC);
} /* }}} */
static void spl_array_rewind(spl_array_object *intern TSRMLS_DC) /* {{{ */
{
HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC);
if (!aht) {
php_error_docref(NULL TSRMLS_CC, E_NOTICE, "ArrayIterator::rewind(): Array was modified outside object and is no longer an array");
return;
}
spl_array_rewind_ex(intern, aht TSRMLS_CC);
}
/* }}} */
static void spl_array_it_rewind(zend_object_iterator *iter TSRMLS_DC) /* {{{ */
{
spl_array_it *iterator = (spl_array_it *)iter;
spl_array_object *object = iterator->object;
if (object->ar_flags & SPL_ARRAY_OVERLOADED_REWIND) {
zend_user_it_rewind(iter TSRMLS_CC);
} else {
zend_user_it_invalidate_current(iter TSRMLS_CC);
spl_array_rewind(object TSRMLS_CC);
}
}
/* }}} */
/* {{{ spl_array_set_array */
static void spl_array_set_array(zval *object, spl_array_object *intern, zval **array, long ar_flags, int just_array TSRMLS_DC) {
if (Z_TYPE_PP(array) == IS_ARRAY) {
SEPARATE_ZVAL_IF_NOT_REF(array);
}
if (Z_TYPE_PP(array) == IS_OBJECT && (Z_OBJ_HT_PP(array) == &spl_handler_ArrayObject || Z_OBJ_HT_PP(array) == &spl_handler_ArrayIterator)) {
zval_ptr_dtor(&intern->array);
if (just_array) {
spl_array_object *other = (spl_array_object*)zend_object_store_get_object(*array TSRMLS_CC);
ar_flags = other->ar_flags & ~SPL_ARRAY_INT_MASK;
}
ar_flags |= SPL_ARRAY_USE_OTHER;
intern->array = *array;
} else {
if (Z_TYPE_PP(array) != IS_OBJECT && Z_TYPE_PP(array) != IS_ARRAY) {
zend_throw_exception(spl_ce_InvalidArgumentException, "Passed variable is not an array or object, using empty array instead", 0 TSRMLS_CC);
return;
}
zval_ptr_dtor(&intern->array);
intern->array = *array;
}
if (object == *array) {
intern->ar_flags |= SPL_ARRAY_IS_SELF;
intern->ar_flags &= ~SPL_ARRAY_USE_OTHER;
} else {
intern->ar_flags &= ~SPL_ARRAY_IS_SELF;
}
intern->ar_flags |= ar_flags;
Z_ADDREF_P(intern->array);
if (Z_TYPE_PP(array) == IS_OBJECT) {
zend_object_get_properties_t handler = Z_OBJ_HANDLER_PP(array, get_properties);
if ((handler != std_object_handlers.get_properties && handler != spl_array_get_properties)
|| !spl_array_get_hash_table(intern, 0 TSRMLS_CC)) {
zend_throw_exception_ex(spl_ce_InvalidArgumentException, 0 TSRMLS_CC, "Overloaded object of type %s is not compatible with %s", Z_OBJCE_PP(array)->name, intern->std.ce->name);
}
}
spl_array_rewind(intern TSRMLS_CC);
}
/* }}} */
/* iterator handler table */
zend_object_iterator_funcs spl_array_it_funcs = {
spl_array_it_dtor,
spl_array_it_valid,
spl_array_it_get_current_data,
spl_array_it_get_current_key,
spl_array_it_move_forward,
spl_array_it_rewind
};
zend_object_iterator *spl_array_get_iterator(zend_class_entry *ce, zval *object, int by_ref TSRMLS_DC) /* {{{ */
{
spl_array_it *iterator;
spl_array_object *array_object = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);
if (by_ref && (array_object->ar_flags & SPL_ARRAY_OVERLOADED_CURRENT)) {
zend_error(E_ERROR, "An iterator cannot be used with foreach by reference");
}
iterator = emalloc(sizeof(spl_array_it));
Z_ADDREF_P(object);
iterator->intern.it.data = (void*)object;
iterator->intern.it.funcs = &spl_array_it_funcs;
iterator->intern.ce = ce;
iterator->intern.value = NULL;
iterator->object = array_object;
return (zend_object_iterator*)iterator;
}
/* }}} */
/* {{{ proto void ArrayObject::__construct(array|object ar = array() [, int flags = 0 [, string iterator_class = "ArrayIterator"]])
proto void ArrayIterator::__construct(array|object ar = array() [, int flags = 0])
Constructs a new array object or iterator from an array or object. */
SPL_METHOD(Array, __construct)
{
zval *object = getThis();
spl_array_object *intern;
zval **array;
long ar_flags = 0;
zend_class_entry *ce_get_iterator = spl_ce_Iterator;
zend_error_handling error_handling;
if (ZEND_NUM_ARGS() == 0) {
return; /* nothing to do */
}
zend_replace_error_handling(EH_THROW, spl_ce_InvalidArgumentException, &error_handling TSRMLS_CC);
intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "Z|lC", &array, &ar_flags, &ce_get_iterator) == FAILURE) {
zend_restore_error_handling(&error_handling TSRMLS_CC);
return;
}
if (ZEND_NUM_ARGS() > 2) {
intern->ce_get_iterator = ce_get_iterator;
}
ar_flags &= ~SPL_ARRAY_INT_MASK;
spl_array_set_array(object, intern, array, ar_flags, ZEND_NUM_ARGS() == 1 TSRMLS_CC);
zend_restore_error_handling(&error_handling TSRMLS_CC);
}
/* }}} */
/* {{{ proto void ArrayObject::setIteratorClass(string iterator_class)
Set the class used in getIterator. */
SPL_METHOD(Array, setIteratorClass)
{
zval *object = getThis();
spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);
zend_class_entry * ce_get_iterator = spl_ce_Iterator;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "C", &ce_get_iterator) == FAILURE) {
return;
}
intern->ce_get_iterator = ce_get_iterator;
}
/* }}} */
/* {{{ proto string ArrayObject::getIteratorClass()
Get the class used in getIterator. */
SPL_METHOD(Array, getIteratorClass)
{
zval *object = getThis();
spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);
if (zend_parse_parameters_none() == FAILURE) {
return;
}
RETURN_STRING(intern->ce_get_iterator->name, 1);
}
/* }}} */
/* {{{ proto int ArrayObject::getFlags()
Get flags */
SPL_METHOD(Array, getFlags)
{
zval *object = getThis();
spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);
if (zend_parse_parameters_none() == FAILURE) {
return;
}
RETURN_LONG(intern->ar_flags & ~SPL_ARRAY_INT_MASK);
}
/* }}} */
/* {{{ proto void ArrayObject::setFlags(int flags)
Set flags */
SPL_METHOD(Array, setFlags)
{
zval *object = getThis();
spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);
long ar_flags = 0;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "l", &ar_flags) == FAILURE) {
return;
}
intern->ar_flags = (intern->ar_flags & SPL_ARRAY_INT_MASK) | (ar_flags & ~SPL_ARRAY_INT_MASK);
}
/* }}} */
/* {{{ proto Array|Object ArrayObject::exchangeArray(Array|Object ar = array())
Replace the referenced array or object with a new one and return the old one (right now copy - to be changed) */
SPL_METHOD(Array, exchangeArray)
{
zval *object = getThis(), *tmp, **array;
spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);
array_init(return_value);
zend_hash_copy(HASH_OF(return_value), spl_array_get_hash_table(intern, 0 TSRMLS_CC), (copy_ctor_func_t) zval_add_ref, &tmp, sizeof(zval*));
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "Z", &array) == FAILURE) {
return;
}
spl_array_set_array(object, intern, array, 0L, 1 TSRMLS_CC);
}
/* }}} */
/* {{{ proto ArrayIterator ArrayObject::getIterator()
Create a new iterator from a ArrayObject instance */
SPL_METHOD(Array, getIterator)
{
zval *object = getThis();
spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);
spl_array_object *iterator;
HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC);
if (zend_parse_parameters_none() == FAILURE) {
return;
}
if (!aht) {
php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Array was modified outside object and is no longer an array");
return;
}
return_value->type = IS_OBJECT;
return_value->value.obj = spl_array_object_new_ex(intern->ce_get_iterator, &iterator, object, 0 TSRMLS_CC);
Z_SET_REFCOUNT_P(return_value, 1);
Z_SET_ISREF_P(return_value);
}
/* }}} */
/* {{{ proto void ArrayIterator::rewind()
Rewind array back to the start */
SPL_METHOD(Array, rewind)
{
zval *object = getThis();
spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);
if (zend_parse_parameters_none() == FAILURE) {
return;
}
spl_array_rewind(intern TSRMLS_CC);
}
/* }}} */
/* {{{ proto void ArrayIterator::seek(int $position)
Seek to position. */
SPL_METHOD(Array, seek)
{
long opos, position;
zval *object = getThis();
spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);
HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC);
int result;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "l", &position) == FAILURE) {
return;
}
if (!aht) {
php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Array was modified outside object and is no longer an array");
return;
}
opos = position;
if (position >= 0) { /* negative values are not supported */
spl_array_rewind(intern TSRMLS_CC);
result = SUCCESS;
while (position-- > 0 && (result = spl_array_next(intern TSRMLS_CC)) == SUCCESS);
if (result == SUCCESS && zend_hash_has_more_elements_ex(aht, &intern->pos) == SUCCESS) {
return; /* ok */
}
}
zend_throw_exception_ex(spl_ce_OutOfBoundsException, 0 TSRMLS_CC, "Seek position %ld is out of range", opos);
} /* }}} */
static int spl_array_object_count_elements_helper(spl_array_object *intern, long *count TSRMLS_DC) /* {{{ */
{
HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC);
HashPosition pos;
if (!aht) {
php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Array was modified outside object and is no longer an array");
*count = 0;
return FAILURE;
}
if (Z_TYPE_P(intern->array) == IS_OBJECT) {
/* Store 'pos' first: the functions called below modify the internal
* position and do not accept an external position parameter. */
pos = intern->pos;
*count = 0;
spl_array_rewind(intern TSRMLS_CC);
while(intern->pos && spl_array_next(intern TSRMLS_CC) == SUCCESS) {
(*count)++;
}
spl_array_set_pos(intern, pos);
return SUCCESS;
} else {
*count = zend_hash_num_elements(aht);
return SUCCESS;
}
} /* }}} */
int spl_array_object_count_elements(zval *object, long *count TSRMLS_DC) /* {{{ */
{
spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);
if (intern->fptr_count) {
zval *rv;
zend_call_method_with_0_params(&object, intern->std.ce, &intern->fptr_count, "count", &rv);
if (rv) {
zval_ptr_dtor(&intern->retval);
MAKE_STD_ZVAL(intern->retval);
ZVAL_ZVAL(intern->retval, rv, 1, 1);
convert_to_long(intern->retval);
*count = (long) Z_LVAL_P(intern->retval);
return SUCCESS;
}
*count = 0;
return FAILURE;
}
return spl_array_object_count_elements_helper(intern, count TSRMLS_CC);
} /* }}} */
/* {{{ proto int ArrayObject::count()
proto int ArrayIterator::count()
Return the number of elements in the Iterator. */
SPL_METHOD(Array, count)
{
long count;
spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(getThis() TSRMLS_CC);
if (zend_parse_parameters_none() == FAILURE) {
return;
}
spl_array_object_count_elements_helper(intern, &count TSRMLS_CC);
RETURN_LONG(count);
} /* }}} */
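/* spl_array_method() below wraps a built-in array function: it dresses
the object's HashTable in a temporary IS_ARRAY zval, bumps nApplyCount
so the write paths above refuse reentrant modification while the user
callback runs (see the "during sorting" warnings), then resets the
zval's type to IS_NULL before destruction so freeing the temporary
cannot free the table it borrowed. */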
static void spl_array_method(INTERNAL_FUNCTION_PARAMETERS, char *fname, int fname_len, int use_arg) /* {{{ */
{
spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(getThis() TSRMLS_CC);
HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC);
zval *tmp, *arg = NULL;
zval *retval_ptr = NULL;
MAKE_STD_ZVAL(tmp);
Z_TYPE_P(tmp) = IS_ARRAY;
Z_ARRVAL_P(tmp) = aht;
if (!use_arg) {
aht->nApplyCount++;
zend_call_method(NULL, NULL, NULL, fname, fname_len, &retval_ptr, 1, tmp, NULL TSRMLS_CC);
aht->nApplyCount--;
} else if (use_arg == SPL_ARRAY_METHOD_MAY_USER_ARG) {
if (zend_parse_parameters_ex(ZEND_PARSE_PARAMS_QUIET, ZEND_NUM_ARGS() TSRMLS_CC, "|z", &arg) == FAILURE) {
Z_TYPE_P(tmp) = IS_NULL;
zval_ptr_dtor(&tmp);
zend_throw_exception(spl_ce_BadMethodCallException, "Function expects one argument at most", 0 TSRMLS_CC);
return;
}
aht->nApplyCount++;
zend_call_method(NULL, NULL, NULL, fname, fname_len, &retval_ptr, arg? 2 : 1, tmp, arg TSRMLS_CC);
aht->nApplyCount--;
} else {
if (ZEND_NUM_ARGS() != 1 || zend_parse_parameters_ex(ZEND_PARSE_PARAMS_QUIET, ZEND_NUM_ARGS() TSRMLS_CC, "z", &arg) == FAILURE) {
Z_TYPE_P(tmp) = IS_NULL;
zval_ptr_dtor(&tmp);
zend_throw_exception(spl_ce_BadMethodCallException, "Function expects exactly one argument", 0 TSRMLS_CC);
return;
}
aht->nApplyCount++;
zend_call_method(NULL, NULL, NULL, fname, fname_len, &retval_ptr, 2, tmp, arg TSRMLS_CC);
aht->nApplyCount--;
}
Z_TYPE_P(tmp) = IS_NULL; /* we want to destroy the zval, not the hashtable */
zval_ptr_dtor(&tmp);
if (retval_ptr) {
COPY_PZVAL_TO_ZVAL(*return_value, retval_ptr);
}
} /* }}} */
#define SPL_ARRAY_METHOD(cname, fname, use_arg) \
SPL_METHOD(cname, fname) \
{ \
spl_array_method(INTERNAL_FUNCTION_PARAM_PASSTHRU, #fname, sizeof(#fname)-1, use_arg); \
}
/* {{{ proto int ArrayObject::asort([int $sort_flags = SORT_REGULAR ])
proto int ArrayIterator::asort([int $sort_flags = SORT_REGULAR ])
Sort the entries by values. */
SPL_ARRAY_METHOD(Array, asort, SPL_ARRAY_METHOD_MAY_USER_ARG) /* }}} */
/* {{{ proto int ArrayObject::ksort([int $sort_flags = SORT_REGULAR ])
proto int ArrayIterator::ksort([int $sort_flags = SORT_REGULAR ])
Sort the entries by key. */
SPL_ARRAY_METHOD(Array, ksort, SPL_ARRAY_METHOD_MAY_USER_ARG) /* }}} */
/* {{{ proto int ArrayObject::uasort(callback cmp_function)
proto int ArrayIterator::uasort(callback cmp_function)
Sort the entries by values user defined function. */
SPL_ARRAY_METHOD(Array, uasort, SPL_ARRAY_METHOD_USE_ARG) /* }}} */
/* {{{ proto int ArrayObject::uksort(callback cmp_function)
proto int ArrayIterator::uksort(callback cmp_function)
Sort the entries by key using user defined function. */
SPL_ARRAY_METHOD(Array, uksort, SPL_ARRAY_METHOD_USE_ARG) /* }}} */
/* {{{ proto int ArrayObject::natsort()
proto int ArrayIterator::natsort()
Sort the entries by values using "natural order" algorithm. */
SPL_ARRAY_METHOD(Array, natsort, SPL_ARRAY_METHOD_NO_ARG) /* }}} */
/* {{{ proto int ArrayObject::natcasesort()
proto int ArrayIterator::natcasesort()
Sort the entries by key using case insensitive "natural order" algorithm. */
SPL_ARRAY_METHOD(Array, natcasesort, SPL_ARRAY_METHOD_NO_ARG) /* }}} */
/* {{{ proto mixed|NULL ArrayIterator::current()
Return current array entry */
SPL_METHOD(Array, current)
{
zval *object = getThis();
spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);
zval **entry;
HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC);
if (zend_parse_parameters_none() == FAILURE) {
return;
}
if (spl_array_object_verify_pos(intern, aht TSRMLS_CC) == FAILURE) {
return;
}
if (zend_hash_get_current_data_ex(aht, (void **) &entry, &intern->pos) == FAILURE) {
return;
}
RETVAL_ZVAL(*entry, 1, 0);
}
/* }}} */
/* {{{ proto mixed|NULL ArrayIterator::key()
Return current array key */
SPL_METHOD(Array, key)
{
if (zend_parse_parameters_none() == FAILURE) {
return;
}
spl_array_iterator_key(getThis(), return_value TSRMLS_CC);
} /* }}} */
void spl_array_iterator_key(zval *object, zval *return_value TSRMLS_DC) /* {{{ */
{
spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);
HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC);
if (spl_array_object_verify_pos(intern, aht TSRMLS_CC) == FAILURE) {
return;
}
zend_hash_get_current_key_zval_ex(aht, return_value, &intern->pos);
}
/* }}} */
/* {{{ proto void ArrayIterator::next()
Move to next entry */
SPL_METHOD(Array, next)
{
zval *object = getThis();
spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);
HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC);
if (zend_parse_parameters_none() == FAILURE) {
return;
}
if (spl_array_object_verify_pos(intern, aht TSRMLS_CC) == FAILURE) {
return;
}
spl_array_next_no_verify(intern, aht TSRMLS_CC);
}
/* }}} */
/* {{{ proto bool ArrayIterator::valid()
Check whether array contains more entries */
SPL_METHOD(Array, valid)
{
zval *object = getThis();
spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);
HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC);
if (zend_parse_parameters_none() == FAILURE) {
return;
}
if (spl_array_object_verify_pos(intern, aht TSRMLS_CC) == FAILURE) {
RETURN_FALSE;
} else {
RETURN_BOOL(zend_hash_has_more_elements_ex(aht, &intern->pos) == SUCCESS);
}
}
/* }}} */
/* {{{ proto bool RecursiveArrayIterator::hasChildren()
Check whether current element has children (e.g. is an array) */
SPL_METHOD(Array, hasChildren)
{
zval *object = getThis(), **entry;
spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);
HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC);
if (zend_parse_parameters_none() == FAILURE) {
return;
}
if (spl_array_object_verify_pos(intern, aht TSRMLS_CC) == FAILURE) {
RETURN_FALSE;
}
if (zend_hash_get_current_data_ex(aht, (void **) &entry, &intern->pos) == FAILURE) {
RETURN_FALSE;
}
RETURN_BOOL(Z_TYPE_PP(entry) == IS_ARRAY || (Z_TYPE_PP(entry) == IS_OBJECT && (intern->ar_flags & SPL_ARRAY_CHILD_ARRAYS_ONLY) == 0));
}
/* }}} */
/* {{{ proto object RecursiveArrayIterator::getChildren()
Create a sub iterator for the current element (same class as $this) */
SPL_METHOD(Array, getChildren)
{
zval *object = getThis(), **entry, *flags;
spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);
HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC);
if (zend_parse_parameters_none() == FAILURE) {
return;
}
if (spl_array_object_verify_pos(intern, aht TSRMLS_CC) == FAILURE) {
return;
}
if (zend_hash_get_current_data_ex(aht, (void **) &entry, &intern->pos) == FAILURE) {
return;
}
if (Z_TYPE_PP(entry) == IS_OBJECT) {
if ((intern->ar_flags & SPL_ARRAY_CHILD_ARRAYS_ONLY) != 0) {
return;
}
if (instanceof_function(Z_OBJCE_PP(entry), Z_OBJCE_P(getThis()) TSRMLS_CC)) {
RETURN_ZVAL(*entry, 1, 0);
}
}
MAKE_STD_ZVAL(flags);
ZVAL_LONG(flags, SPL_ARRAY_USE_OTHER | intern->ar_flags);
spl_instantiate_arg_ex2(Z_OBJCE_P(getThis()), &return_value, 0, *entry, flags TSRMLS_CC);
zval_ptr_dtor(&flags);
}
/* }}} */
/* {{{ proto string ArrayObject::serialize()
Serialize the object */
SPL_METHOD(Array, serialize)
{
zval *object = getThis();
spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);
HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC);
zval members, *pmembers;
php_serialize_data_t var_hash;
smart_str buf = {0};
zval *flags;
if (zend_parse_parameters_none() == FAILURE) {
return;
}
if (!aht) {
php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Array was modified outside object and is no longer an array");
return;
}
PHP_VAR_SERIALIZE_INIT(var_hash);
MAKE_STD_ZVAL(flags);
ZVAL_LONG(flags, (intern->ar_flags & SPL_ARRAY_CLONE_MASK));
/* storage */
smart_str_appendl(&buf, "x:", 2);
php_var_serialize(&buf, &flags, &var_hash TSRMLS_CC);
zval_ptr_dtor(&flags);
if (!(intern->ar_flags & SPL_ARRAY_IS_SELF)) {
php_var_serialize(&buf, &intern->array, &var_hash TSRMLS_CC);
smart_str_appendc(&buf, ';');
}
/* members */
smart_str_appendl(&buf, "m:", 2);
INIT_PZVAL(&members);
if (!intern->std.properties) {
rebuild_object_properties(&intern->std);
}
Z_ARRVAL(members) = intern->std.properties;
Z_TYPE(members) = IS_ARRAY;
pmembers = &members;
php_var_serialize(&buf, &pmembers, &var_hash TSRMLS_CC); /* finishes the string */
/* done */
PHP_VAR_SERIALIZE_DESTROY(var_hash);
if (buf.c) {
RETURN_STRINGL(buf.c, buf.len, 0);
}
RETURN_NULL();
} /* }}} */
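/* Illustrative note (a sketch of the output, derived from the code above):
 * the buffer is "x:" followed by the flags as a serialized long (e.g. "i:0;"),
 * then, unless SPL_ARRAY_IS_SELF is set, the serialized storage plus a ';',
 * then "m:" and the serialized members array. An empty ArrayObject with no
 * dynamic properties would thus serialize to roughly "x:i:0;a:0:{};m:a:0:{}". */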
/* {{{ proto void ArrayObject::unserialize(string serialized)
* unserialize the object
*/
SPL_METHOD(Array, unserialize)
{
spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(getThis() TSRMLS_CC);
char *buf;
int buf_len;
const unsigned char *p, *s;
php_unserialize_data_t var_hash;
zval *pmembers, *pflags = NULL;
HashTable *aht;
long flags;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &buf, &buf_len) == FAILURE) {
return;
}
if (buf_len == 0) {
		zend_throw_exception_ex(spl_ce_UnexpectedValueException, 0 TSRMLS_CC, "Serialized string cannot be empty");
return;
}
aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC);
if (aht->nApplyCount > 0) {
zend_error(E_WARNING, "Modification of ArrayObject during sorting is prohibited");
return;
}
/* storage */
s = p = (const unsigned char*)buf;
PHP_VAR_UNSERIALIZE_INIT(var_hash);
if (*p!= 'x' || *++p != ':') {
goto outexcept;
}
++p;
ALLOC_INIT_ZVAL(pflags);
if (!php_var_unserialize(&pflags, &p, s + buf_len, &var_hash TSRMLS_CC) || Z_TYPE_P(pflags) != IS_LONG) {
goto outexcept;
}
var_push_dtor(&var_hash, &pflags);
--p; /* for ';' */
flags = Z_LVAL_P(pflags);
	/* The flags need to be verified, and we also need to verify that the next
	 * thing we get is ';'. After that we require an 'm' or something else,
	 * where 'm' stands for members and anything else should be an array. If
	 * neither 'a' nor 'm' follows, we have an error. */
if (*p != ';') {
goto outexcept;
}
++p;
if (*p!='m') {
if (*p!='a' && *p!='O' && *p!='C' && *p!='r') {
goto outexcept;
}
intern->ar_flags &= ~SPL_ARRAY_CLONE_MASK;
intern->ar_flags |= flags & SPL_ARRAY_CLONE_MASK;
zval_ptr_dtor(&intern->array);
ALLOC_INIT_ZVAL(intern->array);
if (!php_var_unserialize(&intern->array, &p, s + buf_len, &var_hash TSRMLS_CC)) {
goto outexcept;
}
var_push_dtor(&var_hash, &intern->array);
}
if (*p != ';') {
goto outexcept;
}
++p;
/* members */
if (*p!= 'm' || *++p != ':') {
goto outexcept;
}
++p;
ALLOC_INIT_ZVAL(pmembers);
if (!php_var_unserialize(&pmembers, &p, s + buf_len, &var_hash TSRMLS_CC) || Z_TYPE_P(pmembers) != IS_ARRAY) {
zval_ptr_dtor(&pmembers);
goto outexcept;
}
var_push_dtor(&var_hash, &pmembers);
/* copy members */
if (!intern->std.properties) {
rebuild_object_properties(&intern->std);
}
zend_hash_copy(intern->std.properties, Z_ARRVAL_P(pmembers), (copy_ctor_func_t) zval_add_ref, (void *) NULL, sizeof(zval *));
zval_ptr_dtor(&pmembers);
/* done reading $serialized */
PHP_VAR_UNSERIALIZE_DESTROY(var_hash);
if (pflags) {
zval_ptr_dtor(&pflags);
}
return;
outexcept:
PHP_VAR_UNSERIALIZE_DESTROY(var_hash);
if (pflags) {
zval_ptr_dtor(&pflags);
}
zend_throw_exception_ex(spl_ce_UnexpectedValueException, 0 TSRMLS_CC, "Error at offset %ld of %d bytes", (long)((char*)p - buf), buf_len);
return;
} /* }}} */
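/* Illustrative note (a sketch of the accepted input, derived from the parse
 * above): a well-formed string looks like "x:i:0;a:0:{};m:a:0:{}", i.e. 'x',
 * ':', the flags as a serialized long, ';', optionally the storage (an array
 * 'a', object 'O'/'C' or reference 'r') followed by ';', then "m:" and the
 * members array. */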
/* {{{ arginfo and function table */
ZEND_BEGIN_ARG_INFO(arginfo_array___construct, 0)
ZEND_ARG_INFO(0, array)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_array_offsetGet, 0, 0, 1)
ZEND_ARG_INFO(0, index)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_array_offsetSet, 0, 0, 2)
ZEND_ARG_INFO(0, index)
ZEND_ARG_INFO(0, newval)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_array_append, 0)
ZEND_ARG_INFO(0, value)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_array_seek, 0)
ZEND_ARG_INFO(0, position)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_array_exchangeArray, 0)
ZEND_ARG_INFO(0, array)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_array_setFlags, 0)
ZEND_ARG_INFO(0, flags)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_array_setIteratorClass, 0)
ZEND_ARG_INFO(0, iteratorClass)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_array_uXsort, 0)
ZEND_ARG_INFO(0, cmp_function)
ZEND_END_ARG_INFO();
ZEND_BEGIN_ARG_INFO(arginfo_array_unserialize, 0)
ZEND_ARG_INFO(0, serialized)
ZEND_END_ARG_INFO();
ZEND_BEGIN_ARG_INFO(arginfo_array_void, 0)
ZEND_END_ARG_INFO()
static const zend_function_entry spl_funcs_ArrayObject[] = {
SPL_ME(Array, __construct, arginfo_array___construct, ZEND_ACC_PUBLIC)
SPL_ME(Array, offsetExists, arginfo_array_offsetGet, ZEND_ACC_PUBLIC)
SPL_ME(Array, offsetGet, arginfo_array_offsetGet, ZEND_ACC_PUBLIC)
SPL_ME(Array, offsetSet, arginfo_array_offsetSet, ZEND_ACC_PUBLIC)
SPL_ME(Array, offsetUnset, arginfo_array_offsetGet, ZEND_ACC_PUBLIC)
SPL_ME(Array, append, arginfo_array_append, ZEND_ACC_PUBLIC)
SPL_ME(Array, getArrayCopy, arginfo_array_void, ZEND_ACC_PUBLIC)
SPL_ME(Array, count, arginfo_array_void, ZEND_ACC_PUBLIC)
SPL_ME(Array, getFlags, arginfo_array_void, ZEND_ACC_PUBLIC)
SPL_ME(Array, setFlags, arginfo_array_setFlags, ZEND_ACC_PUBLIC)
SPL_ME(Array, asort, arginfo_array_void, ZEND_ACC_PUBLIC)
SPL_ME(Array, ksort, arginfo_array_void, ZEND_ACC_PUBLIC)
SPL_ME(Array, uasort, arginfo_array_uXsort, ZEND_ACC_PUBLIC)
SPL_ME(Array, uksort, arginfo_array_uXsort, ZEND_ACC_PUBLIC)
SPL_ME(Array, natsort, arginfo_array_void, ZEND_ACC_PUBLIC)
SPL_ME(Array, natcasesort, arginfo_array_void, ZEND_ACC_PUBLIC)
SPL_ME(Array, unserialize, arginfo_array_unserialize, ZEND_ACC_PUBLIC)
SPL_ME(Array, serialize, arginfo_array_void, ZEND_ACC_PUBLIC)
/* ArrayObject specific */
SPL_ME(Array, getIterator, arginfo_array_void, ZEND_ACC_PUBLIC)
SPL_ME(Array, exchangeArray, arginfo_array_exchangeArray, ZEND_ACC_PUBLIC)
SPL_ME(Array, setIteratorClass, arginfo_array_setIteratorClass, ZEND_ACC_PUBLIC)
SPL_ME(Array, getIteratorClass, arginfo_array_void, ZEND_ACC_PUBLIC)
PHP_FE_END
};
static const zend_function_entry spl_funcs_ArrayIterator[] = {
SPL_ME(Array, __construct, arginfo_array___construct, ZEND_ACC_PUBLIC)
SPL_ME(Array, offsetExists, arginfo_array_offsetGet, ZEND_ACC_PUBLIC)
SPL_ME(Array, offsetGet, arginfo_array_offsetGet, ZEND_ACC_PUBLIC)
SPL_ME(Array, offsetSet, arginfo_array_offsetSet, ZEND_ACC_PUBLIC)
SPL_ME(Array, offsetUnset, arginfo_array_offsetGet, ZEND_ACC_PUBLIC)
SPL_ME(Array, append, arginfo_array_append, ZEND_ACC_PUBLIC)
SPL_ME(Array, getArrayCopy, arginfo_array_void, ZEND_ACC_PUBLIC)
SPL_ME(Array, count, arginfo_array_void, ZEND_ACC_PUBLIC)
SPL_ME(Array, getFlags, arginfo_array_void, ZEND_ACC_PUBLIC)
SPL_ME(Array, setFlags, arginfo_array_setFlags, ZEND_ACC_PUBLIC)
SPL_ME(Array, asort, arginfo_array_void, ZEND_ACC_PUBLIC)
SPL_ME(Array, ksort, arginfo_array_void, ZEND_ACC_PUBLIC)
SPL_ME(Array, uasort, arginfo_array_uXsort, ZEND_ACC_PUBLIC)
SPL_ME(Array, uksort, arginfo_array_uXsort, ZEND_ACC_PUBLIC)
SPL_ME(Array, natsort, arginfo_array_void, ZEND_ACC_PUBLIC)
SPL_ME(Array, natcasesort, arginfo_array_void, ZEND_ACC_PUBLIC)
SPL_ME(Array, unserialize, arginfo_array_unserialize, ZEND_ACC_PUBLIC)
SPL_ME(Array, serialize, arginfo_array_void, ZEND_ACC_PUBLIC)
/* ArrayIterator specific */
SPL_ME(Array, rewind, arginfo_array_void, ZEND_ACC_PUBLIC)
SPL_ME(Array, current, arginfo_array_void, ZEND_ACC_PUBLIC)
SPL_ME(Array, key, arginfo_array_void, ZEND_ACC_PUBLIC)
SPL_ME(Array, next, arginfo_array_void, ZEND_ACC_PUBLIC)
SPL_ME(Array, valid, arginfo_array_void, ZEND_ACC_PUBLIC)
SPL_ME(Array, seek, arginfo_array_seek, ZEND_ACC_PUBLIC)
PHP_FE_END
};
static const zend_function_entry spl_funcs_RecursiveArrayIterator[] = {
SPL_ME(Array, hasChildren, arginfo_array_void, ZEND_ACC_PUBLIC)
SPL_ME(Array, getChildren, arginfo_array_void, ZEND_ACC_PUBLIC)
PHP_FE_END
};
/* }}} */
/* {{{ PHP_MINIT_FUNCTION(spl_array) */
PHP_MINIT_FUNCTION(spl_array)
{
REGISTER_SPL_STD_CLASS_EX(ArrayObject, spl_array_object_new, spl_funcs_ArrayObject);
REGISTER_SPL_IMPLEMENTS(ArrayObject, Aggregate);
REGISTER_SPL_IMPLEMENTS(ArrayObject, ArrayAccess);
REGISTER_SPL_IMPLEMENTS(ArrayObject, Serializable);
REGISTER_SPL_IMPLEMENTS(ArrayObject, Countable);
memcpy(&spl_handler_ArrayObject, zend_get_std_object_handlers(), sizeof(zend_object_handlers));
spl_handler_ArrayObject.clone_obj = spl_array_object_clone;
spl_handler_ArrayObject.read_dimension = spl_array_read_dimension;
spl_handler_ArrayObject.write_dimension = spl_array_write_dimension;
spl_handler_ArrayObject.unset_dimension = spl_array_unset_dimension;
spl_handler_ArrayObject.has_dimension = spl_array_has_dimension;
spl_handler_ArrayObject.count_elements = spl_array_object_count_elements;
spl_handler_ArrayObject.get_properties = spl_array_get_properties;
spl_handler_ArrayObject.get_debug_info = spl_array_get_debug_info;
spl_handler_ArrayObject.read_property = spl_array_read_property;
spl_handler_ArrayObject.write_property = spl_array_write_property;
spl_handler_ArrayObject.get_property_ptr_ptr = spl_array_get_property_ptr_ptr;
spl_handler_ArrayObject.has_property = spl_array_has_property;
spl_handler_ArrayObject.unset_property = spl_array_unset_property;
spl_handler_ArrayObject.compare_objects = spl_array_compare_objects;
REGISTER_SPL_STD_CLASS_EX(ArrayIterator, spl_array_object_new, spl_funcs_ArrayIterator);
REGISTER_SPL_IMPLEMENTS(ArrayIterator, Iterator);
REGISTER_SPL_IMPLEMENTS(ArrayIterator, ArrayAccess);
REGISTER_SPL_IMPLEMENTS(ArrayIterator, SeekableIterator);
REGISTER_SPL_IMPLEMENTS(ArrayIterator, Serializable);
REGISTER_SPL_IMPLEMENTS(ArrayIterator, Countable);
memcpy(&spl_handler_ArrayIterator, &spl_handler_ArrayObject, sizeof(zend_object_handlers));
spl_ce_ArrayIterator->get_iterator = spl_array_get_iterator;
REGISTER_SPL_SUB_CLASS_EX(RecursiveArrayIterator, ArrayIterator, spl_array_object_new, spl_funcs_RecursiveArrayIterator);
REGISTER_SPL_IMPLEMENTS(RecursiveArrayIterator, RecursiveIterator);
spl_ce_RecursiveArrayIterator->get_iterator = spl_array_get_iterator;
REGISTER_SPL_CLASS_CONST_LONG(ArrayObject, "STD_PROP_LIST", SPL_ARRAY_STD_PROP_LIST);
REGISTER_SPL_CLASS_CONST_LONG(ArrayObject, "ARRAY_AS_PROPS", SPL_ARRAY_ARRAY_AS_PROPS);
REGISTER_SPL_CLASS_CONST_LONG(ArrayIterator, "STD_PROP_LIST", SPL_ARRAY_STD_PROP_LIST);
REGISTER_SPL_CLASS_CONST_LONG(ArrayIterator, "ARRAY_AS_PROPS", SPL_ARRAY_ARRAY_AS_PROPS);
REGISTER_SPL_CLASS_CONST_LONG(RecursiveArrayIterator, "CHILD_ARRAYS_ONLY", SPL_ARRAY_CHILD_ARRAYS_ONLY);
return SUCCESS;
}
/* }}} */
/*
* Local variables:
* tab-width: 4
* c-basic-offset: 4
* End:
* vim600: fdm=marker
* vim: noet sw=4 ts=4
*/
| ./CrossVul/dataset_final_sorted/CWE-416/c/bad_5171_1 |
crossvul-cpp_data_good_388_4 | // SPDX-License-Identifier: GPL-2.0
/*
* mm/debug.c
*
* mm/ specific debug routines.
*
*/
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/trace_events.h>
#include <linux/memcontrol.h>
#include <trace/events/mmflags.h>
#include <linux/migrate.h>
#include <linux/page_owner.h>
#include "internal.h"
char *migrate_reason_names[MR_TYPES] = {
"compaction",
"memory_failure",
"memory_hotplug",
"syscall_or_cpuset",
"mempolicy_mbind",
"numa_misplaced",
"cma",
};
const struct trace_print_flags pageflag_names[] = {
__def_pageflag_names,
{0, NULL}
};
const struct trace_print_flags gfpflag_names[] = {
__def_gfpflag_names,
{0, NULL}
};
const struct trace_print_flags vmaflag_names[] = {
__def_vmaflag_names,
{0, NULL}
};
void __dump_page(struct page *page, const char *reason)
{
bool page_poisoned = PagePoisoned(page);
int mapcount;
/*
	 * If struct page is poisoned, don't access the Page*() functions, as
	 * that leads to a recursive loop: the Page*() functions check for
	 * poisoned pages and call dump_page() when one is detected.
*/
if (page_poisoned) {
pr_emerg("page:%px is uninitialized and poisoned", page);
goto hex_only;
}
/*
* Avoid VM_BUG_ON() in page_mapcount().
* page->_mapcount space in struct page is used by sl[aou]b pages to
	 * encode their own info.
*/
mapcount = PageSlab(page) ? 0 : page_mapcount(page);
pr_emerg("page:%px count:%d mapcount:%d mapping:%px index:%#lx",
page, page_ref_count(page), mapcount,
page->mapping, page_to_pgoff(page));
if (PageCompound(page))
pr_cont(" compound_mapcount: %d", compound_mapcount(page));
pr_cont("\n");
BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1);
pr_emerg("flags: %#lx(%pGp)\n", page->flags, &page->flags);
hex_only:
print_hex_dump(KERN_ALERT, "raw: ", DUMP_PREFIX_NONE, 32,
sizeof(unsigned long), page,
sizeof(struct page), false);
if (reason)
pr_alert("page dumped because: %s\n", reason);
#ifdef CONFIG_MEMCG
if (!page_poisoned && page->mem_cgroup)
pr_alert("page->mem_cgroup:%px\n", page->mem_cgroup);
#endif
}
void dump_page(struct page *page, const char *reason)
{
__dump_page(page, reason);
dump_page_owner(page);
}
EXPORT_SYMBOL(dump_page);
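/*
 * Minimal usage sketch (hypothetical call site, not part of this file):
 *
 *	if (page_mapcount(page) < 0)
 *		dump_page(page, "negative mapcount");
 */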
#ifdef CONFIG_DEBUG_VM
void dump_vma(const struct vm_area_struct *vma)
{
pr_emerg("vma %px start %px end %px\n"
"next %px prev %px mm %px\n"
"prot %lx anon_vma %px vm_ops %px\n"
"pgoff %lx file %px private_data %px\n"
"flags: %#lx(%pGv)\n",
vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_next,
vma->vm_prev, vma->vm_mm,
(unsigned long)pgprot_val(vma->vm_page_prot),
vma->anon_vma, vma->vm_ops, vma->vm_pgoff,
vma->vm_file, vma->vm_private_data,
vma->vm_flags, &vma->vm_flags);
}
EXPORT_SYMBOL(dump_vma);
void dump_mm(const struct mm_struct *mm)
{
pr_emerg("mm %px mmap %px seqnum %llu task_size %lu\n"
#ifdef CONFIG_MMU
"get_unmapped_area %px\n"
#endif
"mmap_base %lu mmap_legacy_base %lu highest_vm_end %lu\n"
"pgd %px mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n"
"hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n"
"pinned_vm %lx data_vm %lx exec_vm %lx stack_vm %lx\n"
"start_code %lx end_code %lx start_data %lx end_data %lx\n"
"start_brk %lx brk %lx start_stack %lx\n"
"arg_start %lx arg_end %lx env_start %lx env_end %lx\n"
"binfmt %px flags %lx core_state %px\n"
#ifdef CONFIG_AIO
"ioctx_table %px\n"
#endif
#ifdef CONFIG_MEMCG
"owner %px "
#endif
"exe_file %px\n"
#ifdef CONFIG_MMU_NOTIFIER
"mmu_notifier_mm %px\n"
#endif
#ifdef CONFIG_NUMA_BALANCING
"numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n"
#endif
"tlb_flush_pending %d\n"
"def_flags: %#lx(%pGv)\n",
mm, mm->mmap, (long long) mm->vmacache_seqnum, mm->task_size,
#ifdef CONFIG_MMU
mm->get_unmapped_area,
#endif
mm->mmap_base, mm->mmap_legacy_base, mm->highest_vm_end,
mm->pgd, atomic_read(&mm->mm_users),
atomic_read(&mm->mm_count),
mm_pgtables_bytes(mm),
mm->map_count,
mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm,
mm->pinned_vm, mm->data_vm, mm->exec_vm, mm->stack_vm,
mm->start_code, mm->end_code, mm->start_data, mm->end_data,
mm->start_brk, mm->brk, mm->start_stack,
mm->arg_start, mm->arg_end, mm->env_start, mm->env_end,
mm->binfmt, mm->flags, mm->core_state,
#ifdef CONFIG_AIO
mm->ioctx_table,
#endif
#ifdef CONFIG_MEMCG
mm->owner,
#endif
mm->exe_file,
#ifdef CONFIG_MMU_NOTIFIER
mm->mmu_notifier_mm,
#endif
#ifdef CONFIG_NUMA_BALANCING
mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq,
#endif
atomic_read(&mm->tlb_flush_pending),
mm->def_flags, &mm->def_flags
);
}
#endif /* CONFIG_DEBUG_VM */
| ./CrossVul/dataset_final_sorted/CWE-416/c/good_388_4 |
crossvul-cpp_data_bad_1830_0 | /*
* The copyright in this software is being made available under the 2-clauses
* BSD License, included below. This software may be subject to other third
* party and contributor rights, including patent rights, and no such rights
* are granted under this license.
*
* Copyright (c) 2002-2014, Universite catholique de Louvain (UCL), Belgium
* Copyright (c) 2002-2014, Professor Benoit Macq
* Copyright (c) 2001-2003, David Janssens
* Copyright (c) 2002-2003, Yannick Verschueren
* Copyright (c) 2003-2007, Francois-Olivier Devaux
* Copyright (c) 2003-2014, Antonin Descampe
* Copyright (c) 2005, Herve Drolon, FreeImage Team
* Copyright (c) 2008, Jerome Fimes, Communications & Systemes <jerome.fimes@c-s.fr>
* Copyright (c) 2006-2007, Parvatha Elangovan
* Copyright (c) 2010-2011, Kaori Hagihara
* Copyright (c) 2011-2012, Centre National d'Etudes Spatiales (CNES), France
* Copyright (c) 2012, CS Systemes d'Information, France
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS `AS IS'
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "opj_includes.h"
/** @defgroup J2K J2K - JPEG-2000 codestream reader/writer */
/*@{*/
/** @name Local static functions */
/*@{*/
/**
 * Sets up the procedures to do on reading the header. Developers wanting to extend the library can add their own reading procedures.
*/
static OPJ_BOOL opj_j2k_setup_header_reading (opj_j2k_t *p_j2k, opj_event_mgr_t * p_manager);
/**
* The read header procedure.
*/
static OPJ_BOOL opj_j2k_read_header_procedure( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager);
/**
* The default encoding validation procedure without any extension.
*
* @param p_j2k the jpeg2000 codec to validate.
* @param p_stream the input stream to validate.
* @param p_manager the user event manager.
*
* @return true if the parameters are correct.
*/
static OPJ_BOOL opj_j2k_encoding_validation ( opj_j2k_t * p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager );
/**
* The default decoding validation procedure without any extension.
*
* @param p_j2k the jpeg2000 codec to validate.
* @param p_stream the input stream to validate.
* @param p_manager the user event manager.
*
* @return true if the parameters are correct.
*/
static OPJ_BOOL opj_j2k_decoding_validation ( opj_j2k_t * p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager );
/**
 * Sets up the validation, i.e. adds the procedures to launch to make sure the codec parameters
 * are valid. Developers wanting to extend the library can add their own validation procedures.
*/
static OPJ_BOOL opj_j2k_setup_encoding_validation (opj_j2k_t *p_j2k, opj_event_mgr_t * p_manager);
/**
 * Sets up the validation, i.e. adds the procedures to launch to make sure the codec parameters
 * are valid. Developers wanting to extend the library can add their own validation procedures.
*/
static OPJ_BOOL opj_j2k_setup_decoding_validation (opj_j2k_t *p_j2k, opj_event_mgr_t * p_manager);
/**
 * Sets up the procedures to run at the end of the compression. Developers wanting to extend
 * the library can add their own procedures.
*/
static OPJ_BOOL opj_j2k_setup_end_compress (opj_j2k_t *p_j2k, opj_event_mgr_t * p_manager);
/**
* The mct encoding validation procedure.
*
* @param p_j2k the jpeg2000 codec to validate.
* @param p_stream the input stream to validate.
* @param p_manager the user event manager.
*
* @return true if the parameters are correct.
*/
static OPJ_BOOL opj_j2k_mct_validation (opj_j2k_t * p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager );
/**
* Builds the tcd decoder to use to decode tile.
*/
static OPJ_BOOL opj_j2k_build_decoder ( opj_j2k_t * p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager );
/**
* Builds the tcd encoder to use to encode tile.
*/
static OPJ_BOOL opj_j2k_build_encoder ( opj_j2k_t * p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager );
/**
* Creates a tile-coder decoder.
*
* @param p_stream the stream to write data to.
* @param p_j2k J2K codec.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_create_tcd( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager );
/**
 * Executes the given procedures on the given codec.
*
* @param p_procedure_list the list of procedures to execute
* @param p_j2k the jpeg2000 codec to execute the procedures on.
* @param p_stream the stream to execute the procedures on.
* @param p_manager the user manager.
*
* @return true if all the procedures were successfully executed.
*/
static OPJ_BOOL opj_j2k_exec ( opj_j2k_t * p_j2k,
opj_procedure_list_t * p_procedure_list,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager);
/**
* Updates the rates of the tcp.
*
* @param p_stream the stream to write data to.
* @param p_j2k J2K codec.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_update_rates( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager );
/**
* Copies the decoding tile parameters onto all the tile parameters.
 * Also creates the tile decoder.
*/
static OPJ_BOOL opj_j2k_copy_default_tcp_and_create_tcd ( opj_j2k_t * p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager );
/**
* Destroys the memory associated with the decoding of headers.
*/
static OPJ_BOOL opj_j2k_destroy_header_memory ( opj_j2k_t * p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager );
/**
 * Searches the lookup table containing all the markers, their states and actions, and returns the handler associated
* with the marker value.
* @param p_id Marker value to look up
*
* @return the handler associated with the id.
*/
static const struct opj_dec_memory_marker_handler * opj_j2k_get_marker_handler (OPJ_UINT32 p_id);
/**
* Destroys a tile coding parameter structure.
*
* @param p_tcp the tile coding parameter to destroy.
*/
static void opj_j2k_tcp_destroy (opj_tcp_t *p_tcp);
/**
* Destroys the data inside a tile coding parameter structure.
*
* @param p_tcp the tile coding parameter which contain data to destroy.
*/
static void opj_j2k_tcp_data_destroy (opj_tcp_t *p_tcp);
/**
* Destroys a coding parameter structure.
*
* @param p_cp the coding parameter to destroy.
*/
static void opj_j2k_cp_destroy (opj_cp_t *p_cp);
/**
* Writes a SPCod or SPCoc element, i.e. the coding style of a given component of a tile.
*
* @param p_j2k J2K codec.
* @param p_tile_no FIXME DOC
* @param p_comp_no the component number to output.
* @param p_data FIXME DOC
* @param p_header_size FIXME DOC
* @param p_manager the user event manager.
*
* @return FIXME DOC
*/
static OPJ_BOOL opj_j2k_write_SPCod_SPCoc( opj_j2k_t *p_j2k,
OPJ_UINT32 p_tile_no,
OPJ_UINT32 p_comp_no,
OPJ_BYTE * p_data,
OPJ_UINT32 * p_header_size,
opj_event_mgr_t * p_manager );
/**
* Gets the size taken by writing a SPCod or SPCoc for the given tile and component.
*
* @param p_j2k the J2K codec.
* @param p_tile_no the tile index.
 * @param p_comp_no the component being output.
*
* @return the number of bytes taken by the SPCod element.
*/
static OPJ_UINT32 opj_j2k_get_SPCod_SPCoc_size (opj_j2k_t *p_j2k,
OPJ_UINT32 p_tile_no,
OPJ_UINT32 p_comp_no );
/**
* Reads a SPCod or SPCoc element, i.e. the coding style of a given component of a tile.
* @param p_j2k the jpeg2000 codec.
* @param compno FIXME DOC
* @param p_header_data the data contained in the COM box.
* @param p_header_size the size of the data contained in the COM marker.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_SPCod_SPCoc( opj_j2k_t *p_j2k,
OPJ_UINT32 compno,
OPJ_BYTE * p_header_data,
OPJ_UINT32 * p_header_size,
opj_event_mgr_t * p_manager );
/**
* Gets the size taken by writing SQcd or SQcc element, i.e. the quantization values of a band in the QCD or QCC.
*
* @param p_tile_no the tile index.
 * @param p_comp_no the component being output.
* @param p_j2k the J2K codec.
*
* @return the number of bytes taken by the SPCod element.
*/
static OPJ_UINT32 opj_j2k_get_SQcd_SQcc_size ( opj_j2k_t *p_j2k,
OPJ_UINT32 p_tile_no,
OPJ_UINT32 p_comp_no );
/**
* Writes a SQcd or SQcc element, i.e. the quantization values of a band in the QCD or QCC.
*
* @param p_tile_no the tile to output.
* @param p_comp_no the component number to output.
* @param p_data the data buffer.
* @param p_header_size pointer to the size of the data buffer, it is changed by the function.
* @param p_j2k J2K codec.
* @param p_manager the user event manager.
*
*/
static OPJ_BOOL opj_j2k_write_SQcd_SQcc(opj_j2k_t *p_j2k,
OPJ_UINT32 p_tile_no,
OPJ_UINT32 p_comp_no,
OPJ_BYTE * p_data,
OPJ_UINT32 * p_header_size,
opj_event_mgr_t * p_manager);
/**
* Updates the Tile Length Marker.
*/
static void opj_j2k_update_tlm ( opj_j2k_t * p_j2k, OPJ_UINT32 p_tile_part_size);
/**
* Reads a SQcd or SQcc element, i.e. the quantization values of a band in the QCD or QCC.
*
* @param p_j2k J2K codec.
* @param compno the component number to output.
* @param p_header_data the data buffer.
* @param p_header_size pointer to the size of the data buffer, it is changed by the function.
* @param p_manager the user event manager.
*
*/
static OPJ_BOOL opj_j2k_read_SQcd_SQcc( opj_j2k_t *p_j2k,
OPJ_UINT32 compno,
OPJ_BYTE * p_header_data,
OPJ_UINT32 * p_header_size,
opj_event_mgr_t * p_manager );
/**
* Copies the tile component parameters of all the component from the first tile component.
*
* @param p_j2k the J2k codec.
*/
static void opj_j2k_copy_tile_component_parameters( opj_j2k_t *p_j2k );
/**
* Copies the tile quantization parameters of all the component from the first tile component.
*
* @param p_j2k the J2k codec.
*/
static void opj_j2k_copy_tile_quantization_parameters( opj_j2k_t *p_j2k );
/**
* Reads the tiles.
*/
static OPJ_BOOL opj_j2k_decode_tiles ( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager);
static OPJ_BOOL opj_j2k_pre_write_tile ( opj_j2k_t * p_j2k,
OPJ_UINT32 p_tile_index,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager );
static OPJ_BOOL opj_j2k_update_image_data (opj_tcd_t * p_tcd, OPJ_BYTE * p_data, opj_image_t* p_output_image);
static void opj_get_tile_dimensions(opj_image_t * l_image,
opj_tcd_tilecomp_t * l_tilec,
opj_image_comp_t * l_img_comp,
OPJ_UINT32* l_size_comp,
OPJ_UINT32* l_width,
OPJ_UINT32* l_height,
OPJ_UINT32* l_offset_x,
OPJ_UINT32* l_offset_y,
OPJ_UINT32* l_image_width,
OPJ_UINT32* l_stride,
OPJ_UINT32* l_tile_offset);
static void opj_j2k_get_tile_data (opj_tcd_t * p_tcd, OPJ_BYTE * p_data);
static OPJ_BOOL opj_j2k_post_write_tile (opj_j2k_t * p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager );
/**
 * Sets up the procedures to do on writing the header.
* Developers wanting to extend the library can add their own writing procedures.
*/
static OPJ_BOOL opj_j2k_setup_header_writing (opj_j2k_t *p_j2k, opj_event_mgr_t * p_manager);
static OPJ_BOOL opj_j2k_write_first_tile_part( opj_j2k_t *p_j2k,
OPJ_BYTE * p_data,
OPJ_UINT32 * p_data_written,
OPJ_UINT32 p_total_data_size,
opj_stream_private_t *p_stream,
struct opj_event_mgr * p_manager );
static OPJ_BOOL opj_j2k_write_all_tile_parts( opj_j2k_t *p_j2k,
OPJ_BYTE * p_data,
OPJ_UINT32 * p_data_written,
OPJ_UINT32 p_total_data_size,
opj_stream_private_t *p_stream,
struct opj_event_mgr * p_manager );
/**
* Gets the offset of the header.
*
* @param p_stream the stream to write data to.
* @param p_j2k J2K codec.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_get_end_header( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager );
static OPJ_BOOL opj_j2k_allocate_tile_element_cstr_index(opj_j2k_t *p_j2k);
/*
* -----------------------------------------------------------------------
* -----------------------------------------------------------------------
* -----------------------------------------------------------------------
*/
/**
* Writes the SOC marker (Start Of Codestream)
*
* @param p_stream the stream to write data to.
* @param p_j2k J2K codec.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_write_soc( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager );
/**
* Reads a SOC marker (Start of Codestream)
* @param p_j2k the jpeg2000 file codec.
* @param p_stream XXX needs data
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_soc( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager );
/**
* Writes the SIZ marker (image and tile size)
*
* @param p_j2k J2K codec.
* @param p_stream the stream to write data to.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_write_siz( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager );
/**
* Reads a SIZ marker (image and tile size)
* @param p_j2k the jpeg2000 file codec.
* @param p_header_data the data contained in the SIZ box.
* @param p_header_size the size of the data contained in the SIZ marker.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_siz(opj_j2k_t *p_j2k,
OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
opj_event_mgr_t * p_manager);
/**
* Writes the COM marker (comment)
*
* @param p_stream the stream to write data to.
* @param p_j2k J2K codec.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_write_com( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager );
/**
* Reads a COM marker (comments)
* @param p_j2k the jpeg2000 file codec.
* @param p_header_data the data contained in the COM box.
* @param p_header_size the size of the data contained in the COM marker.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_com ( opj_j2k_t *p_j2k,
OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
opj_event_mgr_t * p_manager );
/**
* Writes the COD marker (Coding style default)
*
* @param p_stream the stream to write data to.
* @param p_j2k J2K codec.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_write_cod( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager );
/**
 * Reads a COD marker (Coding Style defaults)
* @param p_header_data the data contained in the COD box.
* @param p_j2k the jpeg2000 codec.
* @param p_header_size the size of the data contained in the COD marker.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_cod ( opj_j2k_t *p_j2k,
OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
opj_event_mgr_t * p_manager);
#if 0
/**
* Writes the COC marker (Coding style component)
*
* @param p_j2k J2K codec.
* @param p_comp_no the index of the component to output.
* @param p_stream the stream to write data to.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_write_coc( opj_j2k_t *p_j2k,
OPJ_UINT32 p_comp_no,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager );
#endif
#if 0
/**
* Writes the COC marker (Coding style component)
*
* @param p_j2k J2K codec.
* @param p_comp_no the index of the component to output.
* @param p_data FIXME DOC
* @param p_data_written FIXME DOC
* @param p_manager the user event manager.
*/
static void opj_j2k_write_coc_in_memory(opj_j2k_t *p_j2k,
OPJ_UINT32 p_comp_no,
OPJ_BYTE * p_data,
OPJ_UINT32 * p_data_written,
opj_event_mgr_t * p_manager );
#endif
/**
* Gets the maximum size taken by a coc.
*
* @param p_j2k the jpeg2000 codec to use.
*/
static OPJ_UINT32 opj_j2k_get_max_coc_size(opj_j2k_t *p_j2k);
/**
* Reads a COC marker (Coding Style Component)
* @param p_header_data the data contained in the COC box.
* @param p_j2k the jpeg2000 codec.
* @param p_header_size the size of the data contained in the COC marker.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_coc ( opj_j2k_t *p_j2k,
OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
opj_event_mgr_t * p_manager );
/**
* Writes the QCD marker (quantization default)
*
* @param p_j2k J2K codec.
* @param p_stream the stream to write data to.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_write_qcd( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager );
/**
* Reads a QCD marker (Quantization defaults)
* @param p_header_data the data contained in the QCD box.
* @param p_j2k the jpeg2000 codec.
* @param p_header_size the size of the data contained in the QCD marker.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_qcd ( opj_j2k_t *p_j2k,
OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
opj_event_mgr_t * p_manager );
#if 0
/**
* Writes the QCC marker (quantization component)
*
* @param p_comp_no the index of the component to output.
* @param p_stream the stream to write data to.
* @param p_j2k J2K codec.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_write_qcc( opj_j2k_t *p_j2k,
OPJ_UINT32 p_comp_no,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager );
#endif
#if 0
/**
* Writes the QCC marker (quantization component)
*
* @param p_j2k J2K codec.
* @param p_comp_no the index of the component to output.
* @param p_data FIXME DOC
* @param p_data_written the stream to write data to.
* @param p_manager the user event manager.
*/
static void opj_j2k_write_qcc_in_memory(opj_j2k_t *p_j2k,
OPJ_UINT32 p_comp_no,
OPJ_BYTE * p_data,
OPJ_UINT32 * p_data_written,
opj_event_mgr_t * p_manager );
#endif
/**
* Gets the maximum size taken by a qcc.
*/
static OPJ_UINT32 opj_j2k_get_max_qcc_size (opj_j2k_t *p_j2k);
/**
* Reads a QCC marker (Quantization component)
* @param p_header_data the data contained in the QCC box.
* @param p_j2k the jpeg2000 codec.
* @param p_header_size the size of the data contained in the QCC marker.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_qcc( opj_j2k_t *p_j2k,
OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
opj_event_mgr_t * p_manager);
/**
* Writes the POC marker (Progression Order Change)
*
* @param p_stream the stream to write data to.
* @param p_j2k J2K codec.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_write_poc( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager );
/**
* Writes the POC marker (Progression Order Change)
*
* @param p_j2k J2K codec.
* @param p_data FIXME DOC
* @param p_data_written the stream to write data to.
* @param p_manager the user event manager.
*/
static void opj_j2k_write_poc_in_memory(opj_j2k_t *p_j2k,
OPJ_BYTE * p_data,
OPJ_UINT32 * p_data_written,
opj_event_mgr_t * p_manager );
/**
* Gets the maximum size taken by the writing of a POC.
*/
static OPJ_UINT32 opj_j2k_get_max_poc_size(opj_j2k_t *p_j2k);
/**
* Reads a POC marker (Progression Order Change)
*
* @param p_header_data the data contained in the POC box.
* @param p_j2k the jpeg2000 codec.
* @param p_header_size the size of the data contained in the POC marker.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_poc ( opj_j2k_t *p_j2k,
OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
opj_event_mgr_t * p_manager );
/**
* Gets the maximum size taken by the toc headers of all the tile parts of any given tile.
*/
static OPJ_UINT32 opj_j2k_get_max_toc_size (opj_j2k_t *p_j2k);
/**
* Gets the maximum size taken by the headers of the SOT.
*
* @param p_j2k the jpeg2000 codec to use.
*/
static OPJ_UINT32 opj_j2k_get_specific_header_sizes(opj_j2k_t *p_j2k);
/**
* Reads a CRG marker (Component registration)
*
* @param p_header_data the data contained in the TLM box.
* @param p_j2k the jpeg2000 codec.
* @param p_header_size the size of the data contained in the TLM marker.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_crg ( opj_j2k_t *p_j2k,
OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
opj_event_mgr_t * p_manager );
/**
* Reads a TLM marker (Tile Length Marker)
*
* @param p_header_data the data contained in the TLM box.
* @param p_j2k the jpeg2000 codec.
* @param p_header_size the size of the data contained in the TLM marker.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_tlm ( opj_j2k_t *p_j2k,
OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
opj_event_mgr_t * p_manager);
/**
* Writes the updated tlm.
*
* @param p_stream the stream to write data to.
* @param p_j2k J2K codec.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_write_updated_tlm( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager );
/**
* Reads a PLM marker (Packet length, main header marker)
*
* @param p_header_data the data contained in the TLM box.
* @param p_j2k the jpeg2000 codec.
* @param p_header_size the size of the data contained in the TLM marker.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_plm ( opj_j2k_t *p_j2k,
OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
opj_event_mgr_t * p_manager);
/**
* Reads a PLT marker (Packet length, tile-part header)
*
* @param p_header_data the data contained in the PLT box.
* @param p_j2k the jpeg2000 codec.
* @param p_header_size the size of the data contained in the PLT marker.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_plt ( opj_j2k_t *p_j2k,
OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
opj_event_mgr_t * p_manager );
/**
* Reads a PPM marker (Packed headers, main header)
*
* @param p_header_data the data contained in the POC box.
* @param p_j2k the jpeg2000 codec.
* @param p_header_size the size of the data contained in the POC marker.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_ppm (
opj_j2k_t *p_j2k,
OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
opj_event_mgr_t * p_manager );
/**
* Merges all PPM markers read (Packed headers, main header)
*
* @param p_cp main coding parameters.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_merge_ppm ( opj_cp_t *p_cp, opj_event_mgr_t * p_manager );
/**
* Reads a PPT marker (Packed packet headers, tile-part header)
*
* @param p_header_data the data contained in the PPT box.
* @param p_j2k the jpeg2000 codec.
* @param p_header_size the size of the data contained in the PPT marker.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_ppt ( opj_j2k_t *p_j2k,
OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
opj_event_mgr_t * p_manager );
/**
* Merges all PPT markers read (Packed headers, tile-part header)
*
* @param p_tcp the tile.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_merge_ppt ( opj_tcp_t *p_tcp,
opj_event_mgr_t * p_manager );
/**
* Writes the TLM marker (Tile Length Marker)
*
* @param p_stream the stream to write data to.
* @param p_j2k J2K codec.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_write_tlm( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager );
/**
* Writes the SOT marker (Start of tile-part)
*
* @param p_j2k J2K codec.
* @param p_data FIXME DOC
* @param p_data_written FIXME DOC
* @param p_stream the stream to write data to.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_write_sot( opj_j2k_t *p_j2k,
OPJ_BYTE * p_data,
OPJ_UINT32 * p_data_written,
const opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager );
/**
* Reads values from a SOT marker (Start of tile-part)
*
 * The j2k decoder state is not affected. No side effects, no checks except for p_header_size.
*
* @param p_header_data the data contained in the SOT marker.
* @param p_header_size the size of the data contained in the SOT marker.
* @param p_tile_no Isot.
* @param p_tot_len Psot.
* @param p_current_part TPsot.
* @param p_num_parts TNsot.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_get_sot_values(OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
OPJ_UINT32* p_tile_no,
OPJ_UINT32* p_tot_len,
OPJ_UINT32* p_current_part,
OPJ_UINT32* p_num_parts,
opj_event_mgr_t * p_manager );
/**
* Reads a SOT marker (Start of tile-part)
*
* @param p_header_data the data contained in the SOT marker.
* @param p_j2k the jpeg2000 codec.
* @param p_header_size the size of the data contained in the PPT marker.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_sot ( opj_j2k_t *p_j2k,
OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
opj_event_mgr_t * p_manager );
/**
* Writes the SOD marker (Start of data)
*
* @param p_j2k J2K codec.
* @param p_tile_coder FIXME DOC
* @param p_data FIXME DOC
* @param p_data_written FIXME DOC
* @param p_total_data_size FIXME DOC
* @param p_stream the stream to write data to.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_write_sod( opj_j2k_t *p_j2k,
opj_tcd_t * p_tile_coder,
OPJ_BYTE * p_data,
OPJ_UINT32 * p_data_written,
OPJ_UINT32 p_total_data_size,
const opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager );
/**
* Reads a SOD marker (Start Of Data)
*
* @param p_j2k the jpeg2000 codec.
* @param p_stream FIXME DOC
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_sod( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager );
static void opj_j2k_update_tlm (opj_j2k_t * p_j2k, OPJ_UINT32 p_tile_part_size )
{
	opj_write_bytes(p_j2k->m_specific_param.m_encoder.m_tlm_sot_offsets_current,p_j2k->m_current_tile_number,1);            /* Ttlm (tile index) */
	++p_j2k->m_specific_param.m_encoder.m_tlm_sot_offsets_current;
	opj_write_bytes(p_j2k->m_specific_param.m_encoder.m_tlm_sot_offsets_current,p_tile_part_size,4);                        /* Ptlm (tile-part length) */
p_j2k->m_specific_param.m_encoder.m_tlm_sot_offsets_current += 4;
}
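/* Note: each call to this function advances the TLM cursor by 5 bytes in
 * total (one record per tile part: 1-byte Ttlm, 4-byte Ptlm); the backing
 * buffer is presumably sized from the tile-part count computed by
 * opj_j2k_calculate_tp(). */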
/**
* Writes the RGN marker (Region Of Interest)
*
* @param p_tile_no the tile to output
* @param p_comp_no the component to output
* @param nb_comps the number of components
* @param p_stream the stream to write data to.
* @param p_j2k J2K codec.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_write_rgn( opj_j2k_t *p_j2k,
OPJ_UINT32 p_tile_no,
OPJ_UINT32 p_comp_no,
OPJ_UINT32 nb_comps,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager );
/**
* Reads a RGN marker (Region Of Interest)
*
* @param p_header_data the data contained in the POC box.
* @param p_j2k the jpeg2000 codec.
* @param p_header_size the size of the data contained in the POC marker.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_rgn (opj_j2k_t *p_j2k,
OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
opj_event_mgr_t * p_manager );
/**
* Writes the EOC marker (End of Codestream)
*
* @param p_stream the stream to write data to.
* @param p_j2k J2K codec.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_write_eoc( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager );
#if 0
/**
* Reads a EOC marker (End Of Codestream)
*
* @param p_j2k the jpeg2000 codec.
* @param p_stream FIXME DOC
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_eoc ( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager );
#endif
/**
* Writes the CBD-MCT-MCC-MCO markers (Multi components transform)
*
* @param p_stream the stream to write data to.
* @param p_j2k J2K codec.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_write_mct_data_group( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager );
/**
* Inits the Info
*
* @param p_stream the stream to write data to.
* @param p_j2k J2K codec.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_init_info( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager );
/**
Add main header marker information
@param cstr_index Codestream information structure
@param type marker type
@param pos byte offset of marker segment
@param len length of marker segment
*/
static OPJ_BOOL opj_j2k_add_mhmarker(opj_codestream_index_t *cstr_index, OPJ_UINT32 type, OPJ_OFF_T pos, OPJ_UINT32 len) ;
/**
Add tile header marker information
@param tileno tile index number
@param cstr_index Codestream information structure
@param type marker type
@param pos byte offset of marker segment
@param len length of marker segment
*/
static OPJ_BOOL opj_j2k_add_tlmarker(OPJ_UINT32 tileno, opj_codestream_index_t *cstr_index, OPJ_UINT32 type, OPJ_OFF_T pos, OPJ_UINT32 len);
/**
* Reads an unknown marker
*
* @param p_j2k the jpeg2000 codec.
* @param p_stream the stream object to read from.
* @param output_marker FIXME DOC
* @param p_manager the user event manager.
*
* @return true if the marker could be deduced.
*/
static OPJ_BOOL opj_j2k_read_unk( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
OPJ_UINT32 *output_marker,
opj_event_mgr_t * p_manager );
/**
* Writes the MCT marker (Multiple Component Transform)
*
* @param p_j2k J2K codec.
* @param p_mct_record FIXME DOC
* @param p_stream the stream to write data to.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_write_mct_record( opj_j2k_t *p_j2k,
opj_mct_data_t * p_mct_record,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager );
/**
* Reads a MCT marker (Multiple Component Transform)
*
* @param p_header_data the data contained in the MCT box.
* @param p_j2k the jpeg2000 codec.
* @param p_header_size the size of the data contained in the MCT marker.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_mct ( opj_j2k_t *p_j2k,
OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
opj_event_mgr_t * p_manager );
/**
* Writes the MCC marker (Multiple Component Collection)
*
* @param p_j2k J2K codec.
* @param p_mcc_record FIXME DOC
* @param p_stream the stream to write data to.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_write_mcc_record( opj_j2k_t *p_j2k,
opj_simple_mcc_decorrelation_data_t * p_mcc_record,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager );
/**
* Reads a MCC marker (Multiple Component Collection)
*
* @param p_header_data the data contained in the MCC box.
* @param p_j2k the jpeg2000 codec.
* @param p_header_size the size of the data contained in the MCC marker.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_mcc ( opj_j2k_t *p_j2k,
OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
opj_event_mgr_t * p_manager );
/**
* Writes the MCO marker (Multiple component transformation ordering)
*
* @param p_stream the stream to write data to.
* @param p_j2k J2K codec.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_write_mco( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager );
/**
* Reads a MCO marker (Multiple Component Transform Ordering)
*
* @param p_header_data the data contained in the MCO box.
* @param p_j2k the jpeg2000 codec.
* @param p_header_size the size of the data contained in the MCO marker.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_mco ( opj_j2k_t *p_j2k,
OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
opj_event_mgr_t * p_manager );
static OPJ_BOOL opj_j2k_add_mct(opj_tcp_t * p_tcp, opj_image_t * p_image, OPJ_UINT32 p_index);
static void opj_j2k_read_int16_to_float (const void * p_src_data, void * p_dest_data, OPJ_UINT32 p_nb_elem);
static void opj_j2k_read_int32_to_float (const void * p_src_data, void * p_dest_data, OPJ_UINT32 p_nb_elem);
static void opj_j2k_read_float32_to_float (const void * p_src_data, void * p_dest_data, OPJ_UINT32 p_nb_elem);
static void opj_j2k_read_float64_to_float (const void * p_src_data, void * p_dest_data, OPJ_UINT32 p_nb_elem);
static void opj_j2k_read_int16_to_int32 (const void * p_src_data, void * p_dest_data, OPJ_UINT32 p_nb_elem);
static void opj_j2k_read_int32_to_int32 (const void * p_src_data, void * p_dest_data, OPJ_UINT32 p_nb_elem);
static void opj_j2k_read_float32_to_int32 (const void * p_src_data, void * p_dest_data, OPJ_UINT32 p_nb_elem);
static void opj_j2k_read_float64_to_int32 (const void * p_src_data, void * p_dest_data, OPJ_UINT32 p_nb_elem);
static void opj_j2k_write_float_to_int16 (const void * p_src_data, void * p_dest_data, OPJ_UINT32 p_nb_elem);
static void opj_j2k_write_float_to_int32 (const void * p_src_data, void * p_dest_data, OPJ_UINT32 p_nb_elem);
static void opj_j2k_write_float_to_float (const void * p_src_data, void * p_dest_data, OPJ_UINT32 p_nb_elem);
static void opj_j2k_write_float_to_float64 (const void * p_src_data, void * p_dest_data, OPJ_UINT32 p_nb_elem);
/**
* Ends the encoding, i.e. frees memory.
*
* @param p_stream the stream to write data to.
* @param p_j2k J2K codec.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_end_encoding( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager );
/**
* Writes the CBD marker (Component bit depth definition)
*
* @param p_stream the stream to write data to.
* @param p_j2k J2K codec.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_write_cbd( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager );
/**
* Reads a CBD marker (Component bit depth definition)
* @param p_header_data the data contained in the CBD box.
* @param p_j2k the jpeg2000 codec.
* @param p_header_size the size of the data contained in the CBD marker.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_cbd ( opj_j2k_t *p_j2k,
OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
opj_event_mgr_t * p_manager);
#if 0
/**
* Writes COC marker for each component.
*
* @param p_stream the stream to write data to.
* @param p_j2k J2K codec.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_write_all_coc( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager );
#endif
#if 0
/**
* Writes QCC marker for each component.
*
* @param p_stream the stream to write data to.
* @param p_j2k J2K codec.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_write_all_qcc( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager );
#endif
/**
* Writes regions of interests.
*
* @param p_stream the stream to write data to.
* @param p_j2k J2K codec.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_write_regions( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager );
/**
* Writes EPC ????
*
* @param p_stream the stream to write data to.
* @param p_j2k J2K codec.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_write_epc( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager );
/**
 * Checks the progression order change values. Tells whether the pocs given as input are valid.
 * A clear error message is output when they are not.
*
* @param p_pocs the progression order changes.
* @param p_nb_pocs the number of progression order changes.
* @param p_nb_resolutions the number of resolutions.
* @param numcomps the number of components
* @param numlayers the number of layers.
* @param p_manager the user event manager.
*
* @return true if the pocs are valid.
*/
static OPJ_BOOL opj_j2k_check_poc_val( const opj_poc_t *p_pocs,
OPJ_UINT32 p_nb_pocs,
OPJ_UINT32 p_nb_resolutions,
OPJ_UINT32 numcomps,
OPJ_UINT32 numlayers,
opj_event_mgr_t * p_manager);
/**
* Gets the number of tile parts used for the given change of progression (if any) and the given tile.
*
* @param cp the coding parameters.
* @param pino the offset of the given poc (i.e. its position in the coding parameter).
* @param tileno the given tile.
*
* @return the number of tile parts.
*/
static OPJ_UINT32 opj_j2k_get_num_tp( opj_cp_t *cp, OPJ_UINT32 pino, OPJ_UINT32 tileno);
/**
* Calculates the total number of tile parts needed by the encoder to
 * encode such an image. If not enough memory is available, the function returns false.
*
* @param p_nb_tiles pointer that will hold the number of tile parts.
* @param cp the coding parameters for the image.
* @param image the image to encode.
* @param p_j2k the p_j2k encoder.
* @param p_manager the user event manager.
*
 * @return true if the function was successful, false otherwise.
*/
static OPJ_BOOL opj_j2k_calculate_tp( opj_j2k_t *p_j2k,
opj_cp_t *cp,
OPJ_UINT32 * p_nb_tiles,
opj_image_t *image,
opj_event_mgr_t * p_manager);
static void opj_j2k_dump_MH_info(opj_j2k_t* p_j2k, FILE* out_stream);
static void opj_j2k_dump_MH_index(opj_j2k_t* p_j2k, FILE* out_stream);
static opj_codestream_index_t* opj_j2k_create_cstr_index(void);
static OPJ_FLOAT32 opj_j2k_get_tp_stride (opj_tcp_t * p_tcp);
static OPJ_FLOAT32 opj_j2k_get_default_stride (opj_tcp_t * p_tcp);
static int opj_j2k_initialise_4K_poc(opj_poc_t *POC, int numres);
static void opj_j2k_set_cinema_parameters(opj_cparameters_t *parameters, opj_image_t *image, opj_event_mgr_t *p_manager);
static OPJ_BOOL opj_j2k_is_cinema_compliant(opj_image_t *image, OPJ_UINT16 rsiz, opj_event_mgr_t *p_manager);
/**
* Checks for invalid number of tile-parts in SOT marker (TPsot==TNsot). See issue 254.
*
* @param p_stream the stream to read data from.
* @param tile_no tile number we're looking for.
 * @param p_correction_needed output value; if true, the non-conformant codestream needs TNsot correction.
* @param p_manager the user event manager.
*
 * @return true if the function was successful, false otherwise.
*/
static OPJ_BOOL opj_j2k_need_nb_tile_parts_correction(opj_stream_private_t *p_stream, OPJ_UINT32 tile_no, OPJ_BOOL* p_correction_needed, opj_event_mgr_t * p_manager );
/*@}*/
/*@}*/
/* ----------------------------------------------------------------------- */
typedef struct j2k_prog_order{
OPJ_PROG_ORDER enum_prog;
char str_prog[5];
}j2k_prog_order_t;
static j2k_prog_order_t j2k_prog_order_list[] = {
{OPJ_CPRL, "CPRL"},
{OPJ_LRCP, "LRCP"},
{OPJ_PCRL, "PCRL"},
{OPJ_RLCP, "RLCP"},
{OPJ_RPCL, "RPCL"},
{(OPJ_PROG_ORDER)-1, ""}
};
/**
* FIXME DOC
*/
static const OPJ_UINT32 MCT_ELEMENT_SIZE [] =
{
2,
4,
4,
8
};
typedef void (* opj_j2k_mct_function) (const void * p_src_data, void * p_dest_data, OPJ_UINT32 p_nb_elem);
static const opj_j2k_mct_function j2k_mct_read_functions_to_float [] =
{
opj_j2k_read_int16_to_float,
opj_j2k_read_int32_to_float,
opj_j2k_read_float32_to_float,
opj_j2k_read_float64_to_float
};
static const opj_j2k_mct_function j2k_mct_read_functions_to_int32 [] =
{
opj_j2k_read_int16_to_int32,
opj_j2k_read_int32_to_int32,
opj_j2k_read_float32_to_int32,
opj_j2k_read_float64_to_int32
};
static const opj_j2k_mct_function j2k_mct_write_functions_from_float [] =
{
opj_j2k_write_float_to_int16,
opj_j2k_write_float_to_int32,
opj_j2k_write_float_to_float,
opj_j2k_write_float_to_float64
};
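/* Illustrative sketch (not compiled in): the element type stored in an MCT
 * marker indexes MCT_ELEMENT_SIZE and the three conversion tables above in
 * lockstep. The locals below (l_elem_type, l_nb_elem, l_src, l_dest) are
 * hypothetical and only show how the tables are meant to line up. */
#if 0
{
        OPJ_UINT32 l_elem_type = 1;     /* index 1 -> 32-bit integer elements */
        OPJ_UINT32 l_nb_elem = 4;
        OPJ_BYTE l_src[4 * 4];          /* l_nb_elem * MCT_ELEMENT_SIZE[1] bytes */
        OPJ_FLOAT32 l_dest[4];
        /* the function at the same index consumes MCT_ELEMENT_SIZE[l_elem_type]
           bytes per element and widens each one to OPJ_FLOAT32 */
        j2k_mct_read_functions_to_float[l_elem_type](l_src, l_dest, l_nb_elem);
}
#endif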
typedef struct opj_dec_memory_marker_handler
{
/** marker value */
OPJ_UINT32 id;
/** value of the state when the marker can appear */
OPJ_UINT32 states;
/** action linked to the marker */
OPJ_BOOL (*handler) ( opj_j2k_t *p_j2k,
OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
opj_event_mgr_t * p_manager );
}
opj_dec_memory_marker_handler_t;
static const opj_dec_memory_marker_handler_t j2k_memory_marker_handler_tab [] =
{
{J2K_MS_SOT, J2K_STATE_MH | J2K_STATE_TPHSOT, opj_j2k_read_sot},
{J2K_MS_COD, J2K_STATE_MH | J2K_STATE_TPH, opj_j2k_read_cod},
{J2K_MS_COC, J2K_STATE_MH | J2K_STATE_TPH, opj_j2k_read_coc},
{J2K_MS_RGN, J2K_STATE_MH | J2K_STATE_TPH, opj_j2k_read_rgn},
{J2K_MS_QCD, J2K_STATE_MH | J2K_STATE_TPH, opj_j2k_read_qcd},
{J2K_MS_QCC, J2K_STATE_MH | J2K_STATE_TPH, opj_j2k_read_qcc},
{J2K_MS_POC, J2K_STATE_MH | J2K_STATE_TPH, opj_j2k_read_poc},
{J2K_MS_SIZ, J2K_STATE_MHSIZ, opj_j2k_read_siz},
{J2K_MS_TLM, J2K_STATE_MH, opj_j2k_read_tlm},
{J2K_MS_PLM, J2K_STATE_MH, opj_j2k_read_plm},
{J2K_MS_PLT, J2K_STATE_TPH, opj_j2k_read_plt},
{J2K_MS_PPM, J2K_STATE_MH, opj_j2k_read_ppm},
{J2K_MS_PPT, J2K_STATE_TPH, opj_j2k_read_ppt},
{J2K_MS_SOP, 0, 0},
{J2K_MS_CRG, J2K_STATE_MH, opj_j2k_read_crg},
{J2K_MS_COM, J2K_STATE_MH | J2K_STATE_TPH, opj_j2k_read_com},
{J2K_MS_MCT, J2K_STATE_MH | J2K_STATE_TPH, opj_j2k_read_mct},
{J2K_MS_CBD, J2K_STATE_MH , opj_j2k_read_cbd},
{J2K_MS_MCC, J2K_STATE_MH | J2K_STATE_TPH, opj_j2k_read_mcc},
{J2K_MS_MCO, J2K_STATE_MH | J2K_STATE_TPH, opj_j2k_read_mco},
#ifdef USE_JPWL
#ifdef TODO_MS /* remove these functions which are not compatible with the v2 API */
{J2K_MS_EPC, J2K_STATE_MH | J2K_STATE_TPH, j2k_read_epc},
{J2K_MS_EPB, J2K_STATE_MH | J2K_STATE_TPH, j2k_read_epb},
{J2K_MS_ESD, J2K_STATE_MH | J2K_STATE_TPH, j2k_read_esd},
{J2K_MS_RED, J2K_STATE_MH | J2K_STATE_TPH, j2k_read_red},
#endif
#endif /* USE_JPWL */
#ifdef USE_JPSEC
{J2K_MS_SEC, J2K_DEC_STATE_MH, j2k_read_sec},
  {J2K_MS_INSEC, 0, j2k_read_insec}, /* comma needed so the J2K_MS_UNK entry below remains a valid initializer when USE_JPSEC is defined */
#endif /* USE_JPSEC */
{J2K_MS_UNK, J2K_STATE_MH | J2K_STATE_TPH, 0}/*opj_j2k_read_unk is directly used*/
};
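/* Minimal sketch (not compiled in) of the intended lookup on the table above:
 * find the entry for a marker id, then check that the current decoder state
 * is allowed by its 'states' mask before invoking the handler. The locals
 * l_marker and l_state are hypothetical. */
#if 0
{
        const opj_dec_memory_marker_handler_t *l_entry = j2k_memory_marker_handler_tab;
        OPJ_UINT32 l_marker = J2K_MS_COD;
        OPJ_UINT32 l_state = J2K_STATE_MH;
        while (l_entry->id != J2K_MS_UNK && l_entry->id != l_marker) {
                ++l_entry;
        }
        if ((l_entry->states & l_state) == 0) {
                /* marker not allowed in the current state -> codestream error */
        } else if (l_entry->handler != 0) {
                /* l_entry->handler(p_j2k, p_header_data, p_header_size, p_manager); */
        }
}
#endif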
static void opj_j2k_read_int16_to_float (const void * p_src_data, void * p_dest_data, OPJ_UINT32 p_nb_elem)
{
OPJ_BYTE * l_src_data = (OPJ_BYTE *) p_src_data;
OPJ_FLOAT32 * l_dest_data = (OPJ_FLOAT32 *) p_dest_data;
OPJ_UINT32 i;
OPJ_UINT32 l_temp;
for (i=0;i<p_nb_elem;++i) {
opj_read_bytes(l_src_data,&l_temp,2);
l_src_data+=sizeof(OPJ_INT16);
*(l_dest_data++) = (OPJ_FLOAT32) l_temp;
}
}
static void opj_j2k_read_int32_to_float (const void * p_src_data, void * p_dest_data, OPJ_UINT32 p_nb_elem)
{
OPJ_BYTE * l_src_data = (OPJ_BYTE *) p_src_data;
OPJ_FLOAT32 * l_dest_data = (OPJ_FLOAT32 *) p_dest_data;
OPJ_UINT32 i;
OPJ_UINT32 l_temp;
for (i=0;i<p_nb_elem;++i) {
opj_read_bytes(l_src_data,&l_temp,4);
l_src_data+=sizeof(OPJ_INT32);
*(l_dest_data++) = (OPJ_FLOAT32) l_temp;
}
}
static void opj_j2k_read_float32_to_float (const void * p_src_data, void * p_dest_data, OPJ_UINT32 p_nb_elem)
{
OPJ_BYTE * l_src_data = (OPJ_BYTE *) p_src_data;
OPJ_FLOAT32 * l_dest_data = (OPJ_FLOAT32 *) p_dest_data;
OPJ_UINT32 i;
OPJ_FLOAT32 l_temp;
for (i=0;i<p_nb_elem;++i) {
opj_read_float(l_src_data,&l_temp);
l_src_data+=sizeof(OPJ_FLOAT32);
*(l_dest_data++) = l_temp;
}
}
static void opj_j2k_read_float64_to_float (const void * p_src_data, void * p_dest_data, OPJ_UINT32 p_nb_elem)
{
OPJ_BYTE * l_src_data = (OPJ_BYTE *) p_src_data;
OPJ_FLOAT32 * l_dest_data = (OPJ_FLOAT32 *) p_dest_data;
OPJ_UINT32 i;
OPJ_FLOAT64 l_temp;
for (i=0;i<p_nb_elem;++i) {
opj_read_double(l_src_data,&l_temp);
l_src_data+=sizeof(OPJ_FLOAT64);
*(l_dest_data++) = (OPJ_FLOAT32) l_temp;
}
}
static void opj_j2k_read_int16_to_int32 (const void * p_src_data, void * p_dest_data, OPJ_UINT32 p_nb_elem)
{
OPJ_BYTE * l_src_data = (OPJ_BYTE *) p_src_data;
OPJ_INT32 * l_dest_data = (OPJ_INT32 *) p_dest_data;
OPJ_UINT32 i;
OPJ_UINT32 l_temp;
for (i=0;i<p_nb_elem;++i) {
opj_read_bytes(l_src_data,&l_temp,2);
l_src_data+=sizeof(OPJ_INT16);
*(l_dest_data++) = (OPJ_INT32) l_temp;
}
}
static void opj_j2k_read_int32_to_int32 (const void * p_src_data, void * p_dest_data, OPJ_UINT32 p_nb_elem)
{
OPJ_BYTE * l_src_data = (OPJ_BYTE *) p_src_data;
OPJ_INT32 * l_dest_data = (OPJ_INT32 *) p_dest_data;
OPJ_UINT32 i;
OPJ_UINT32 l_temp;
for (i=0;i<p_nb_elem;++i) {
opj_read_bytes(l_src_data,&l_temp,4);
l_src_data+=sizeof(OPJ_INT32);
*(l_dest_data++) = (OPJ_INT32) l_temp;
}
}
static void opj_j2k_read_float32_to_int32 (const void * p_src_data, void * p_dest_data, OPJ_UINT32 p_nb_elem)
{
OPJ_BYTE * l_src_data = (OPJ_BYTE *) p_src_data;
OPJ_INT32 * l_dest_data = (OPJ_INT32 *) p_dest_data;
OPJ_UINT32 i;
OPJ_FLOAT32 l_temp;
for (i=0;i<p_nb_elem;++i) {
opj_read_float(l_src_data,&l_temp);
l_src_data+=sizeof(OPJ_FLOAT32);
*(l_dest_data++) = (OPJ_INT32) l_temp;
}
}
static void opj_j2k_read_float64_to_int32 (const void * p_src_data, void * p_dest_data, OPJ_UINT32 p_nb_elem)
{
OPJ_BYTE * l_src_data = (OPJ_BYTE *) p_src_data;
OPJ_INT32 * l_dest_data = (OPJ_INT32 *) p_dest_data;
OPJ_UINT32 i;
OPJ_FLOAT64 l_temp;
for (i=0;i<p_nb_elem;++i) {
opj_read_double(l_src_data,&l_temp);
l_src_data+=sizeof(OPJ_FLOAT64);
*(l_dest_data++) = (OPJ_INT32) l_temp;
}
}
static void opj_j2k_write_float_to_int16 (const void * p_src_data, void * p_dest_data, OPJ_UINT32 p_nb_elem)
{
OPJ_BYTE * l_dest_data = (OPJ_BYTE *) p_dest_data;
OPJ_FLOAT32 * l_src_data = (OPJ_FLOAT32 *) p_src_data;
OPJ_UINT32 i;
OPJ_UINT32 l_temp;
for (i=0;i<p_nb_elem;++i) {
l_temp = (OPJ_UINT32) *(l_src_data++);
opj_write_bytes(l_dest_data,l_temp,sizeof(OPJ_INT16));
l_dest_data+=sizeof(OPJ_INT16);
}
}
static void opj_j2k_write_float_to_int32 (const void * p_src_data, void * p_dest_data, OPJ_UINT32 p_nb_elem)
{
OPJ_BYTE * l_dest_data = (OPJ_BYTE *) p_dest_data;
OPJ_FLOAT32 * l_src_data = (OPJ_FLOAT32 *) p_src_data;
OPJ_UINT32 i;
OPJ_UINT32 l_temp;
for (i=0;i<p_nb_elem;++i) {
l_temp = (OPJ_UINT32) *(l_src_data++);
opj_write_bytes(l_dest_data,l_temp,sizeof(OPJ_INT32));
l_dest_data+=sizeof(OPJ_INT32);
}
}
static void opj_j2k_write_float_to_float (const void * p_src_data, void * p_dest_data, OPJ_UINT32 p_nb_elem)
{
OPJ_BYTE * l_dest_data = (OPJ_BYTE *) p_dest_data;
OPJ_FLOAT32 * l_src_data = (OPJ_FLOAT32 *) p_src_data;
OPJ_UINT32 i;
OPJ_FLOAT32 l_temp;
for (i=0;i<p_nb_elem;++i) {
l_temp = (OPJ_FLOAT32) *(l_src_data++);
opj_write_float(l_dest_data,l_temp);
l_dest_data+=sizeof(OPJ_FLOAT32);
}
}
static void opj_j2k_write_float_to_float64 (const void * p_src_data, void * p_dest_data, OPJ_UINT32 p_nb_elem)
{
OPJ_BYTE * l_dest_data = (OPJ_BYTE *) p_dest_data;
OPJ_FLOAT32 * l_src_data = (OPJ_FLOAT32 *) p_src_data;
OPJ_UINT32 i;
OPJ_FLOAT64 l_temp;
for (i=0;i<p_nb_elem;++i) {
l_temp = (OPJ_FLOAT64) *(l_src_data++);
opj_write_double(l_dest_data,l_temp);
l_dest_data+=sizeof(OPJ_FLOAT64);
}
}
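/* The conversion routines above all follow one pattern: walk p_nb_elem
 * elements, translating between the serialized big-endian representation
 * (2, 4 or 8 bytes per element, cf. MCT_ELEMENT_SIZE) and the OPJ_INT32 or
 * OPJ_FLOAT32 working type used in memory. */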
char *opj_j2k_convert_progression_order(OPJ_PROG_ORDER prg_order){
j2k_prog_order_t *po;
for(po = j2k_prog_order_list; po->enum_prog != -1; po++ ){
if(po->enum_prog == prg_order){
return po->str_prog;
}
}
return po->str_prog;
}
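/* Usage sketch (not compiled in): the function above maps the enum value to
 * its 4-letter codestream string; unknown values fall through to the
 * sentinel entry and yield an empty string. */
#if 0
{
        const char *l_str = opj_j2k_convert_progression_order(OPJ_LRCP);
        assert(strcmp(l_str, "LRCP") == 0);
}
#endif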
static OPJ_BOOL opj_j2k_check_poc_val( const opj_poc_t *p_pocs,
OPJ_UINT32 p_nb_pocs,
OPJ_UINT32 p_nb_resolutions,
OPJ_UINT32 p_num_comps,
OPJ_UINT32 p_num_layers,
opj_event_mgr_t * p_manager)
{
OPJ_UINT32* packet_array;
OPJ_UINT32 index , resno, compno, layno;
OPJ_UINT32 i;
OPJ_UINT32 step_c = 1;
OPJ_UINT32 step_r = p_num_comps * step_c;
OPJ_UINT32 step_l = p_nb_resolutions * step_r;
OPJ_BOOL loss = OPJ_FALSE;
OPJ_UINT32 layno0 = 0;
packet_array = (OPJ_UINT32*) opj_calloc(step_l * p_num_layers, sizeof(OPJ_UINT32));
if (packet_array == 00) {
opj_event_msg(p_manager , EVT_ERROR, "Not enough memory for checking the poc values.\n");
return OPJ_FALSE;
}
if (p_nb_pocs == 0) {
opj_free(packet_array);
return OPJ_TRUE;
}
index = step_r * p_pocs->resno0;
/* take each resolution for each poc */
for (resno = p_pocs->resno0 ; resno < p_pocs->resno1 ; ++resno)
{
OPJ_UINT32 res_index = index + p_pocs->compno0 * step_c;
/* take each comp of each resolution for each poc */
for (compno = p_pocs->compno0 ; compno < p_pocs->compno1 ; ++compno) {
OPJ_UINT32 comp_index = res_index + layno0 * step_l;
/* and finally take each layer of each res of ... */
for (layno = layno0; layno < p_pocs->layno1 ; ++layno) {
/*index = step_r * resno + step_c * compno + step_l * layno;*/
packet_array[comp_index] = 1;
comp_index += step_l;
}
res_index += step_c;
}
index += step_r;
}
++p_pocs;
/* iterate through all the pocs */
for (i = 1; i < p_nb_pocs ; ++i) {
OPJ_UINT32 l_last_layno1 = (p_pocs-1)->layno1 ;
layno0 = (p_pocs->layno1 > l_last_layno1)? l_last_layno1 : 0;
index = step_r * p_pocs->resno0;
/* take each resolution for each poc */
for (resno = p_pocs->resno0 ; resno < p_pocs->resno1 ; ++resno) {
OPJ_UINT32 res_index = index + p_pocs->compno0 * step_c;
/* take each comp of each resolution for each poc */
for (compno = p_pocs->compno0 ; compno < p_pocs->compno1 ; ++compno) {
OPJ_UINT32 comp_index = res_index + layno0 * step_l;
/* and finally take each layer of each res of ... */
for (layno = layno0; layno < p_pocs->layno1 ; ++layno) {
/*index = step_r * resno + step_c * compno + step_l * layno;*/
packet_array[comp_index] = 1;
comp_index += step_l;
}
res_index += step_c;
}
index += step_r;
}
++p_pocs;
}
index = 0;
for (layno = 0; layno < p_num_layers ; ++layno) {
for (resno = 0; resno < p_nb_resolutions; ++resno) {
for (compno = 0; compno < p_num_comps; ++compno) {
loss |= (packet_array[index]!=1);
/*index = step_r * resno + step_c * compno + step_l * layno;*/
index += step_c;
}
}
}
if (loss) {
                opj_event_msg(p_manager , EVT_ERROR, "Missing packets: possible loss of data\n");
}
opj_free(packet_array);
return !loss;
}
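/* Addressing note for opj_j2k_check_poc_val(): the packet_array cell for a
 * (layer, resolution, component) triple lives at
 *     index = compno * step_c + resno * step_r + layno * step_l
 * with step_c = 1, step_r = numcomps and step_l = numcomps * numresolutions.
 * The final verification loop walks the array with component varying fastest,
 * so its plain index += step_c visits every cell exactly once. */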
/* ----------------------------------------------------------------------- */
static OPJ_UINT32 opj_j2k_get_num_tp(opj_cp_t *cp, OPJ_UINT32 pino, OPJ_UINT32 tileno)
{
const OPJ_CHAR *prog = 00;
OPJ_INT32 i;
OPJ_UINT32 tpnum = 1;
opj_tcp_t *tcp = 00;
opj_poc_t * l_current_poc = 00;
/* preconditions */
assert(tileno < (cp->tw * cp->th));
assert(pino < (cp->tcps[tileno].numpocs + 1));
/* get the given tile coding parameter */
tcp = &cp->tcps[tileno];
assert(tcp != 00);
l_current_poc = &(tcp->pocs[pino]);
assert(l_current_poc != 0);
/* get the progression order as a character string */
prog = opj_j2k_convert_progression_order(tcp->prg);
assert(strlen(prog) > 0);
if (cp->m_specific_param.m_enc.m_tp_on == 1) {
for (i=0;i<4;++i) {
switch (prog[i])
{
/* component wise */
case 'C':
tpnum *= l_current_poc->compE;
break;
/* resolution wise */
case 'R':
tpnum *= l_current_poc->resE;
break;
/* precinct wise */
case 'P':
tpnum *= l_current_poc->prcE;
break;
/* layer wise */
case 'L':
tpnum *= l_current_poc->layE;
break;
}
                        /* should we split here? */
if ( cp->m_specific_param.m_enc.m_tp_flag == prog[i] ) {
cp->m_specific_param.m_enc.m_tp_pos=i;
break;
}
}
}
else {
tpnum=1;
}
return tpnum;
}
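/* Worked example for opj_j2k_get_num_tp(): with progression "LRCP" and
 * m_tp_flag == 'R', the loop multiplies in layE (for 'L') and resE (for 'R')
 * and then breaks at the split point, so the tile uses layE * resE
 * tile-parts. */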
static OPJ_BOOL opj_j2k_calculate_tp( opj_j2k_t *p_j2k,
opj_cp_t *cp,
OPJ_UINT32 * p_nb_tiles,
opj_image_t *image,
opj_event_mgr_t * p_manager
)
{
OPJ_UINT32 pino,tileno;
OPJ_UINT32 l_nb_tiles;
opj_tcp_t *tcp;
/* preconditions */
assert(p_nb_tiles != 00);
assert(cp != 00);
assert(image != 00);
assert(p_j2k != 00);
assert(p_manager != 00);
l_nb_tiles = cp->tw * cp->th;
* p_nb_tiles = 0;
tcp = cp->tcps;
/* INDEX >> */
/* TODO mergeV2: check this part which use cstr_info */
/*if (p_j2k->cstr_info) {
opj_tile_info_t * l_info_tile_ptr = p_j2k->cstr_info->tile;
for (tileno = 0; tileno < l_nb_tiles; ++tileno) {
OPJ_UINT32 cur_totnum_tp = 0;
opj_pi_update_encoding_parameters(image,cp,tileno);
for (pino = 0; pino <= tcp->numpocs; ++pino)
{
OPJ_UINT32 tp_num = opj_j2k_get_num_tp(cp,pino,tileno);
*p_nb_tiles = *p_nb_tiles + tp_num;
cur_totnum_tp += tp_num;
}
tcp->m_nb_tile_parts = cur_totnum_tp;
l_info_tile_ptr->tp = (opj_tp_info_t *) opj_malloc(cur_totnum_tp * sizeof(opj_tp_info_t));
if (l_info_tile_ptr->tp == 00) {
return OPJ_FALSE;
}
memset(l_info_tile_ptr->tp,0,cur_totnum_tp * sizeof(opj_tp_info_t));
l_info_tile_ptr->num_tps = cur_totnum_tp;
++l_info_tile_ptr;
++tcp;
}
}
else */{
for (tileno = 0; tileno < l_nb_tiles; ++tileno) {
OPJ_UINT32 cur_totnum_tp = 0;
opj_pi_update_encoding_parameters(image,cp,tileno);
for (pino = 0; pino <= tcp->numpocs; ++pino) {
OPJ_UINT32 tp_num = opj_j2k_get_num_tp(cp,pino,tileno);
*p_nb_tiles = *p_nb_tiles + tp_num;
cur_totnum_tp += tp_num;
}
tcp->m_nb_tile_parts = cur_totnum_tp;
++tcp;
}
}
return OPJ_TRUE;
}
static OPJ_BOOL opj_j2k_write_soc( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager )
{
/* 2 bytes will be written */
OPJ_BYTE * l_start_stream = 00;
/* preconditions */
assert(p_stream != 00);
assert(p_j2k != 00);
assert(p_manager != 00);
l_start_stream = p_j2k->m_specific_param.m_encoder.m_header_tile_data;
/* write SOC identifier */
opj_write_bytes(l_start_stream,J2K_MS_SOC,2);
if (opj_stream_write_data(p_stream,l_start_stream,2,p_manager) != 2) {
return OPJ_FALSE;
}
/* UniPG>> */
#ifdef USE_JPWL
/* update markers struct */
/*
OPJ_BOOL res = j2k_add_marker(p_j2k->cstr_info, J2K_MS_SOC, p_stream_tell(p_stream) - 2, 2);
*/
assert( 0 && "TODO" );
#endif /* USE_JPWL */
/* <<UniPG */
return OPJ_TRUE;
}
/**
* Reads a SOC marker (Start of Codestream)
* @param p_j2k the jpeg2000 file codec.
* @param p_stream FIXME DOC
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_soc( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager
)
{
OPJ_BYTE l_data [2];
OPJ_UINT32 l_marker;
/* preconditions */
assert(p_j2k != 00);
assert(p_manager != 00);
assert(p_stream != 00);
if (opj_stream_read_data(p_stream,l_data,2,p_manager) != 2) {
return OPJ_FALSE;
}
opj_read_bytes(l_data,&l_marker,2);
if (l_marker != J2K_MS_SOC) {
return OPJ_FALSE;
}
/* Next marker should be a SIZ marker in the main header */
p_j2k->m_specific_param.m_decoder.m_state = J2K_STATE_MHSIZ;
/* FIXME move it in a index structure included in p_j2k*/
p_j2k->cstr_index->main_head_start = opj_stream_tell(p_stream) - 2;
opj_event_msg(p_manager, EVT_INFO, "Start to read j2k main header (%d).\n", p_j2k->cstr_index->main_head_start);
/* Add the marker to the codestream index*/
if (OPJ_FALSE == opj_j2k_add_mhmarker(p_j2k->cstr_index, J2K_MS_SOC, p_j2k->cstr_index->main_head_start, 2)) {
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to add mh marker\n");
return OPJ_FALSE;
}
return OPJ_TRUE;
}
static OPJ_BOOL opj_j2k_write_siz( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager )
{
OPJ_UINT32 i;
OPJ_UINT32 l_size_len;
OPJ_BYTE * l_current_ptr;
opj_image_t * l_image = 00;
opj_cp_t *cp = 00;
opj_image_comp_t * l_img_comp = 00;
/* preconditions */
assert(p_stream != 00);
assert(p_j2k != 00);
assert(p_manager != 00);
l_image = p_j2k->m_private_image;
cp = &(p_j2k->m_cp);
l_size_len = 40 + 3 * l_image->numcomps;
l_img_comp = l_image->comps;
if (l_size_len > p_j2k->m_specific_param.m_encoder.m_header_tile_data_size) {
OPJ_BYTE *new_header_tile_data = (OPJ_BYTE *) opj_realloc(p_j2k->m_specific_param.m_encoder.m_header_tile_data, l_size_len);
if (! new_header_tile_data) {
opj_free(p_j2k->m_specific_param.m_encoder.m_header_tile_data);
p_j2k->m_specific_param.m_encoder.m_header_tile_data = NULL;
p_j2k->m_specific_param.m_encoder.m_header_tile_data_size = 0;
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory for the SIZ marker\n");
return OPJ_FALSE;
}
p_j2k->m_specific_param.m_encoder.m_header_tile_data = new_header_tile_data;
p_j2k->m_specific_param.m_encoder.m_header_tile_data_size = l_size_len;
}
l_current_ptr = p_j2k->m_specific_param.m_encoder.m_header_tile_data;
/* write SOC identifier */
opj_write_bytes(l_current_ptr,J2K_MS_SIZ,2); /* SIZ */
l_current_ptr+=2;
opj_write_bytes(l_current_ptr,l_size_len-2,2); /* L_SIZ */
l_current_ptr+=2;
opj_write_bytes(l_current_ptr, cp->rsiz, 2); /* Rsiz (capabilities) */
l_current_ptr+=2;
opj_write_bytes(l_current_ptr, l_image->x1, 4); /* Xsiz */
l_current_ptr+=4;
opj_write_bytes(l_current_ptr, l_image->y1, 4); /* Ysiz */
l_current_ptr+=4;
opj_write_bytes(l_current_ptr, l_image->x0, 4); /* X0siz */
l_current_ptr+=4;
opj_write_bytes(l_current_ptr, l_image->y0, 4); /* Y0siz */
l_current_ptr+=4;
opj_write_bytes(l_current_ptr, cp->tdx, 4); /* XTsiz */
l_current_ptr+=4;
opj_write_bytes(l_current_ptr, cp->tdy, 4); /* YTsiz */
l_current_ptr+=4;
opj_write_bytes(l_current_ptr, cp->tx0, 4); /* XT0siz */
l_current_ptr+=4;
opj_write_bytes(l_current_ptr, cp->ty0, 4); /* YT0siz */
l_current_ptr+=4;
opj_write_bytes(l_current_ptr, l_image->numcomps, 2); /* Csiz */
l_current_ptr+=2;
for (i = 0; i < l_image->numcomps; ++i) {
/* TODO here with MCT ? */
opj_write_bytes(l_current_ptr, l_img_comp->prec - 1 + (l_img_comp->sgnd << 7), 1); /* Ssiz_i */
++l_current_ptr;
opj_write_bytes(l_current_ptr, l_img_comp->dx, 1); /* XRsiz_i */
++l_current_ptr;
opj_write_bytes(l_current_ptr, l_img_comp->dy, 1); /* YRsiz_i */
++l_current_ptr;
++l_img_comp;
}
if (opj_stream_write_data(p_stream,p_j2k->m_specific_param.m_encoder.m_header_tile_data,l_size_len,p_manager) != l_size_len) {
return OPJ_FALSE;
}
return OPJ_TRUE;
}
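/* Size note: an SIZ segment is Lsiz = 38 + 3 * Csiz bytes (Lsiz counts its
 * own two bytes); l_size_len above also includes the 2-byte SIZ marker
 * itself, hence 40 + 3 * numcomps. */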
/**
* Reads a SIZ marker (image and tile size)
* @param p_j2k the jpeg2000 file codec.
* @param p_header_data the data contained in the SIZ box.
* @param p_header_size the size of the data contained in the SIZ marker.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_siz(opj_j2k_t *p_j2k,
OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
opj_event_mgr_t * p_manager
)
{
OPJ_UINT32 i;
OPJ_UINT32 l_nb_comp;
OPJ_UINT32 l_nb_comp_remain;
OPJ_UINT32 l_remaining_size;
OPJ_UINT32 l_nb_tiles;
OPJ_UINT32 l_tmp, l_tx1, l_ty1;
opj_image_t *l_image = 00;
opj_cp_t *l_cp = 00;
opj_image_comp_t * l_img_comp = 00;
opj_tcp_t * l_current_tile_param = 00;
/* preconditions */
assert(p_j2k != 00);
assert(p_manager != 00);
assert(p_header_data != 00);
l_image = p_j2k->m_private_image;
l_cp = &(p_j2k->m_cp);
/* minimum size == 39 - 3 (= minimum component parameter) */
if (p_header_size < 36) {
opj_event_msg(p_manager, EVT_ERROR, "Error with SIZ marker size\n");
return OPJ_FALSE;
}
l_remaining_size = p_header_size - 36;
l_nb_comp = l_remaining_size / 3;
l_nb_comp_remain = l_remaining_size % 3;
if (l_nb_comp_remain != 0){
opj_event_msg(p_manager, EVT_ERROR, "Error with SIZ marker size\n");
return OPJ_FALSE;
}
opj_read_bytes(p_header_data,&l_tmp ,2); /* Rsiz (capabilities) */
p_header_data+=2;
l_cp->rsiz = (OPJ_UINT16) l_tmp;
opj_read_bytes(p_header_data, (OPJ_UINT32*) &l_image->x1, 4); /* Xsiz */
p_header_data+=4;
opj_read_bytes(p_header_data, (OPJ_UINT32*) &l_image->y1, 4); /* Ysiz */
p_header_data+=4;
opj_read_bytes(p_header_data, (OPJ_UINT32*) &l_image->x0, 4); /* X0siz */
p_header_data+=4;
opj_read_bytes(p_header_data, (OPJ_UINT32*) &l_image->y0, 4); /* Y0siz */
p_header_data+=4;
opj_read_bytes(p_header_data, (OPJ_UINT32*) &l_cp->tdx, 4); /* XTsiz */
p_header_data+=4;
opj_read_bytes(p_header_data, (OPJ_UINT32*) &l_cp->tdy, 4); /* YTsiz */
p_header_data+=4;
opj_read_bytes(p_header_data, (OPJ_UINT32*) &l_cp->tx0, 4); /* XT0siz */
p_header_data+=4;
opj_read_bytes(p_header_data, (OPJ_UINT32*) &l_cp->ty0, 4); /* YT0siz */
p_header_data+=4;
opj_read_bytes(p_header_data, (OPJ_UINT32*) &l_tmp, 2); /* Csiz */
p_header_data+=2;
if (l_tmp < 16385)
l_image->numcomps = (OPJ_UINT16) l_tmp;
else {
opj_event_msg(p_manager, EVT_ERROR, "Error with SIZ marker: number of component is illegal -> %d\n", l_tmp);
return OPJ_FALSE;
}
if (l_image->numcomps != l_nb_comp) {
opj_event_msg(p_manager, EVT_ERROR, "Error with SIZ marker: number of component is not compatible with the remaining number of parameters ( %d vs %d)\n", l_image->numcomps, l_nb_comp);
return OPJ_FALSE;
}
/* testcase 4035.pdf.SIGSEGV.d8b.3375 */
/* testcase issue427-null-image-size.jp2 */
if ((l_image->x0 >= l_image->x1) || (l_image->y0 >= l_image->y1)) {
opj_event_msg(p_manager, EVT_ERROR, "Error with SIZ marker: negative or zero image size (%d x %d)\n", l_image->x1 - l_image->x0, l_image->y1 - l_image->y0);
return OPJ_FALSE;
}
/* testcase 2539.pdf.SIGFPE.706.1712 (also 3622.pdf.SIGFPE.706.2916 and 4008.pdf.SIGFPE.706.3345 and maybe more) */
if (!(l_cp->tdx * l_cp->tdy)) {
opj_event_msg(p_manager, EVT_ERROR, "Error with SIZ marker: invalid tile size (tdx: %d, tdy: %d)\n", l_cp->tdx, l_cp->tdy);
return OPJ_FALSE;
}
/* testcase 1610.pdf.SIGSEGV.59c.681 */
if (((OPJ_UINT64)l_image->x1) * ((OPJ_UINT64)l_image->y1) != (l_image->x1 * l_image->y1)) {
opj_event_msg(p_manager, EVT_ERROR, "Prevent buffer overflow (x1: %d, y1: %d)\n", l_image->x1, l_image->y1);
return OPJ_FALSE;
}
/* testcase issue427-illegal-tile-offset.jp2 */
l_tx1 = opj_uint_adds(l_cp->tx0, l_cp->tdx); /* manage overflow */
l_ty1 = opj_uint_adds(l_cp->ty0, l_cp->tdy); /* manage overflow */
if ((l_cp->tx0 > l_image->x0) || (l_cp->ty0 > l_image->y0) || (l_tx1 <= l_image->x0) || (l_ty1 <= l_image->y0) ) {
opj_event_msg(p_manager, EVT_ERROR, "Error with SIZ marker: illegal tile offset\n");
return OPJ_FALSE;
}
#ifdef USE_JPWL
if (l_cp->correct) {
/* if JPWL is on, we check whether TX errors have damaged
                the SIZ parameters too much */
if (!(l_image->x1 * l_image->y1)) {
opj_event_msg(p_manager, EVT_ERROR,
"JPWL: bad image size (%d x %d)\n",
l_image->x1, l_image->y1);
                        if (!JPWL_ASSUME || JPWL_ASSUME) { /* note: always true, so a bad image size is always fatal */
opj_event_msg(p_manager, EVT_ERROR, "JPWL: giving up\n");
return OPJ_FALSE;
}
}
                /* FIXME: already checked earlier in this function, so why keep this piece of code? Required by the norm?
if (l_image->numcomps != ((len - 38) / 3)) {
opj_event_msg(p_manager, JPWL_ASSUME ? EVT_WARNING : EVT_ERROR,
"JPWL: Csiz is %d => space in SIZ only for %d comps.!!!\n",
l_image->numcomps, ((len - 38) / 3));
if (!JPWL_ASSUME) {
opj_event_msg(p_manager, EVT_ERROR, "JPWL: giving up\n");
return OPJ_FALSE;
}
*/ /* we try to correct */
/* opj_event_msg(p_manager, EVT_WARNING, "- trying to adjust this\n");
if (l_image->numcomps < ((len - 38) / 3)) {
len = 38 + 3 * l_image->numcomps;
opj_event_msg(p_manager, EVT_WARNING, "- setting Lsiz to %d => HYPOTHESIS!!!\n",
len);
} else {
l_image->numcomps = ((len - 38) / 3);
opj_event_msg(p_manager, EVT_WARNING, "- setting Csiz to %d => HYPOTHESIS!!!\n",
l_image->numcomps);
}
}
*/
                /* update components number in the jpwl_exp_comps field */
l_cp->exp_comps = l_image->numcomps;
}
#endif /* USE_JPWL */
/* Allocate the resulting image components */
l_image->comps = (opj_image_comp_t*) opj_calloc(l_image->numcomps, sizeof(opj_image_comp_t));
if (l_image->comps == 00){
l_image->numcomps = 0;
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to take in charge SIZ marker\n");
return OPJ_FALSE;
}
l_img_comp = l_image->comps;
/* Read the component information */
for (i = 0; i < l_image->numcomps; ++i){
OPJ_UINT32 tmp;
opj_read_bytes(p_header_data,&tmp,1); /* Ssiz_i */
++p_header_data;
l_img_comp->prec = (tmp & 0x7f) + 1;
l_img_comp->sgnd = tmp >> 7;
opj_read_bytes(p_header_data,&tmp,1); /* XRsiz_i */
++p_header_data;
l_img_comp->dx = (OPJ_UINT32)tmp; /* should be between 1 and 255 */
opj_read_bytes(p_header_data,&tmp,1); /* YRsiz_i */
++p_header_data;
l_img_comp->dy = (OPJ_UINT32)tmp; /* should be between 1 and 255 */
if( l_img_comp->dx < 1 || l_img_comp->dx > 255 ||
l_img_comp->dy < 1 || l_img_comp->dy > 255 ) {
opj_event_msg(p_manager, EVT_ERROR,
"Invalid values for comp = %d : dx=%u dy=%u\n (should be between 1 and 255 according the JPEG2000 norm)",
i, l_img_comp->dx, l_img_comp->dy);
return OPJ_FALSE;
}
#ifdef USE_JPWL
if (l_cp->correct) {
/* if JPWL is on, we check whether TX errors have damaged
                        the SIZ parameters too much, again */
if (!(l_image->comps[i].dx * l_image->comps[i].dy)) {
opj_event_msg(p_manager, JPWL_ASSUME ? EVT_WARNING : EVT_ERROR,
"JPWL: bad XRsiz_%d/YRsiz_%d (%d x %d)\n",
i, i, l_image->comps[i].dx, l_image->comps[i].dy);
if (!JPWL_ASSUME) {
opj_event_msg(p_manager, EVT_ERROR, "JPWL: giving up\n");
return OPJ_FALSE;
}
/* we try to correct */
opj_event_msg(p_manager, EVT_WARNING, "- trying to adjust them\n");
if (!l_image->comps[i].dx) {
l_image->comps[i].dx = 1;
opj_event_msg(p_manager, EVT_WARNING, "- setting XRsiz_%d to %d => HYPOTHESIS!!!\n",
i, l_image->comps[i].dx);
}
if (!l_image->comps[i].dy) {
l_image->comps[i].dy = 1;
opj_event_msg(p_manager, EVT_WARNING, "- setting YRsiz_%d to %d => HYPOTHESIS!!!\n",
i, l_image->comps[i].dy);
}
}
}
#endif /* USE_JPWL */
                l_img_comp->resno_decoded = 0; /* number of resolutions decoded */
l_img_comp->factor = l_cp->m_specific_param.m_dec.m_reduce; /* reducing factor per component */
++l_img_comp;
}
/* Compute the number of tiles */
l_cp->tw = (OPJ_UINT32)opj_int_ceildiv((OPJ_INT32)(l_image->x1 - l_cp->tx0), (OPJ_INT32)l_cp->tdx);
l_cp->th = (OPJ_UINT32)opj_int_ceildiv((OPJ_INT32)(l_image->y1 - l_cp->ty0), (OPJ_INT32)l_cp->tdy);
/* Check that the number of tiles is valid */
if (l_cp->tw == 0 || l_cp->th == 0 || l_cp->tw > 65535 / l_cp->th) {
opj_event_msg( p_manager, EVT_ERROR,
"Invalid number of tiles : %u x %u (maximum fixed by jpeg2000 norm is 65535 tiles)\n",
l_cp->tw, l_cp->th);
return OPJ_FALSE;
}
l_nb_tiles = l_cp->tw * l_cp->th;
/* Define the tiles which will be decoded */
if (p_j2k->m_specific_param.m_decoder.m_discard_tiles) {
p_j2k->m_specific_param.m_decoder.m_start_tile_x = (p_j2k->m_specific_param.m_decoder.m_start_tile_x - l_cp->tx0) / l_cp->tdx;
p_j2k->m_specific_param.m_decoder.m_start_tile_y = (p_j2k->m_specific_param.m_decoder.m_start_tile_y - l_cp->ty0) / l_cp->tdy;
p_j2k->m_specific_param.m_decoder.m_end_tile_x = (OPJ_UINT32)opj_int_ceildiv((OPJ_INT32)(p_j2k->m_specific_param.m_decoder.m_end_tile_x - l_cp->tx0), (OPJ_INT32)l_cp->tdx);
p_j2k->m_specific_param.m_decoder.m_end_tile_y = (OPJ_UINT32)opj_int_ceildiv((OPJ_INT32)(p_j2k->m_specific_param.m_decoder.m_end_tile_y - l_cp->ty0), (OPJ_INT32)l_cp->tdy);
}
else {
p_j2k->m_specific_param.m_decoder.m_start_tile_x = 0;
p_j2k->m_specific_param.m_decoder.m_start_tile_y = 0;
p_j2k->m_specific_param.m_decoder.m_end_tile_x = l_cp->tw;
p_j2k->m_specific_param.m_decoder.m_end_tile_y = l_cp->th;
}
#ifdef USE_JPWL
if (l_cp->correct) {
/* if JPWL is on, we check whether TX errors have damaged
                the SIZ parameters too much */
if ((l_cp->tw < 1) || (l_cp->th < 1) || (l_cp->tw > l_cp->max_tiles) || (l_cp->th > l_cp->max_tiles)) {
opj_event_msg(p_manager, JPWL_ASSUME ? EVT_WARNING : EVT_ERROR,
"JPWL: bad number of tiles (%d x %d)\n",
l_cp->tw, l_cp->th);
if (!JPWL_ASSUME) {
opj_event_msg(p_manager, EVT_ERROR, "JPWL: giving up\n");
return OPJ_FALSE;
}
/* we try to correct */
opj_event_msg(p_manager, EVT_WARNING, "- trying to adjust them\n");
if (l_cp->tw < 1) {
l_cp->tw= 1;
opj_event_msg(p_manager, EVT_WARNING, "- setting %d tiles in x => HYPOTHESIS!!!\n",
l_cp->tw);
}
if (l_cp->tw > l_cp->max_tiles) {
l_cp->tw= 1;
opj_event_msg(p_manager, EVT_WARNING, "- too large x, increase expectance of %d\n"
"- setting %d tiles in x => HYPOTHESIS!!!\n",
l_cp->max_tiles, l_cp->tw);
}
if (l_cp->th < 1) {
l_cp->th= 1;
opj_event_msg(p_manager, EVT_WARNING, "- setting %d tiles in y => HYPOTHESIS!!!\n",
l_cp->th);
}
if (l_cp->th > l_cp->max_tiles) {
l_cp->th= 1;
opj_event_msg(p_manager, EVT_WARNING, "- too large y, increase expectance of %d to continue\n",
"- setting %d tiles in y => HYPOTHESIS!!!\n",
l_cp->max_tiles, l_cp->th);
}
}
}
#endif /* USE_JPWL */
/* memory allocations */
l_cp->tcps = (opj_tcp_t*) opj_calloc(l_nb_tiles, sizeof(opj_tcp_t));
if (l_cp->tcps == 00) {
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to take in charge SIZ marker\n");
return OPJ_FALSE;
}
#ifdef USE_JPWL
if (l_cp->correct) {
if (!l_cp->tcps) {
opj_event_msg(p_manager, JPWL_ASSUME ? EVT_WARNING : EVT_ERROR,
"JPWL: could not alloc tcps field of cp\n");
                        if (!JPWL_ASSUME || JPWL_ASSUME) { /* note: always true, so this allocation failure is always fatal */
opj_event_msg(p_manager, EVT_ERROR, "JPWL: giving up\n");
return OPJ_FALSE;
}
}
}
#endif /* USE_JPWL */
p_j2k->m_specific_param.m_decoder.m_default_tcp->tccps =
(opj_tccp_t*) opj_calloc(l_image->numcomps, sizeof(opj_tccp_t));
if(p_j2k->m_specific_param.m_decoder.m_default_tcp->tccps == 00) {
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to take in charge SIZ marker\n");
return OPJ_FALSE;
}
p_j2k->m_specific_param.m_decoder.m_default_tcp->m_mct_records =
(opj_mct_data_t*)opj_calloc(OPJ_J2K_MCT_DEFAULT_NB_RECORDS ,sizeof(opj_mct_data_t));
if (! p_j2k->m_specific_param.m_decoder.m_default_tcp->m_mct_records) {
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to take in charge SIZ marker\n");
return OPJ_FALSE;
}
p_j2k->m_specific_param.m_decoder.m_default_tcp->m_nb_max_mct_records = OPJ_J2K_MCT_DEFAULT_NB_RECORDS;
p_j2k->m_specific_param.m_decoder.m_default_tcp->m_mcc_records =
(opj_simple_mcc_decorrelation_data_t*)
opj_calloc(OPJ_J2K_MCC_DEFAULT_NB_RECORDS, sizeof(opj_simple_mcc_decorrelation_data_t));
if (! p_j2k->m_specific_param.m_decoder.m_default_tcp->m_mcc_records) {
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to take in charge SIZ marker\n");
return OPJ_FALSE;
}
p_j2k->m_specific_param.m_decoder.m_default_tcp->m_nb_max_mcc_records = OPJ_J2K_MCC_DEFAULT_NB_RECORDS;
/* set up default dc level shift */
for (i=0;i<l_image->numcomps;++i) {
if (! l_image->comps[i].sgnd) {
p_j2k->m_specific_param.m_decoder.m_default_tcp->tccps[i].m_dc_level_shift = 1 << (l_image->comps[i].prec - 1);
}
}
l_current_tile_param = l_cp->tcps;
for (i = 0; i < l_nb_tiles; ++i) {
l_current_tile_param->tccps = (opj_tccp_t*) opj_calloc(l_image->numcomps, sizeof(opj_tccp_t));
if (l_current_tile_param->tccps == 00) {
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to take in charge SIZ marker\n");
return OPJ_FALSE;
}
++l_current_tile_param;
}
p_j2k->m_specific_param.m_decoder.m_state = J2K_STATE_MH; /* FIXME J2K_DEC_STATE_MH; */
opj_image_comp_header_update(l_image,l_cp);
return OPJ_TRUE;
}
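/* Worked example for the tile grid computed above: an image with x1 = 1000,
 * tx0 = 0 and tdx = 256 gives tw = ceildiv(1000, 256) = 4 tile columns; th
 * follows the same formula in y, and tw * th tcps entries are then
 * allocated. */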
static OPJ_BOOL opj_j2k_write_com( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager
)
{
OPJ_UINT32 l_comment_size;
OPJ_UINT32 l_total_com_size;
const OPJ_CHAR *l_comment;
OPJ_BYTE * l_current_ptr = 00;
/* preconditions */
assert(p_j2k != 00);
assert(p_stream != 00);
assert(p_manager != 00);
l_comment = p_j2k->m_cp.comment;
l_comment_size = (OPJ_UINT32)strlen(l_comment);
l_total_com_size = l_comment_size + 6;
if (l_total_com_size > p_j2k->m_specific_param.m_encoder.m_header_tile_data_size) {
OPJ_BYTE *new_header_tile_data = (OPJ_BYTE *) opj_realloc(p_j2k->m_specific_param.m_encoder.m_header_tile_data, l_total_com_size);
if (! new_header_tile_data) {
opj_free(p_j2k->m_specific_param.m_encoder.m_header_tile_data);
p_j2k->m_specific_param.m_encoder.m_header_tile_data = NULL;
p_j2k->m_specific_param.m_encoder.m_header_tile_data_size = 0;
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to write the COM marker\n");
return OPJ_FALSE;
}
p_j2k->m_specific_param.m_encoder.m_header_tile_data = new_header_tile_data;
p_j2k->m_specific_param.m_encoder.m_header_tile_data_size = l_total_com_size;
}
l_current_ptr = p_j2k->m_specific_param.m_encoder.m_header_tile_data;
opj_write_bytes(l_current_ptr,J2K_MS_COM , 2); /* COM */
l_current_ptr+=2;
opj_write_bytes(l_current_ptr,l_total_com_size - 2 , 2); /* L_COM */
l_current_ptr+=2;
opj_write_bytes(l_current_ptr,1 , 2); /* General use (IS 8859-15:1999 (Latin) values) */
l_current_ptr+=2;
memcpy( l_current_ptr,l_comment,l_comment_size);
if (opj_stream_write_data(p_stream,p_j2k->m_specific_param.m_encoder.m_header_tile_data,l_total_com_size,p_manager) != l_total_com_size) {
return OPJ_FALSE;
}
return OPJ_TRUE;
}
/**
* Reads a COM marker (comments)
* @param p_j2k the jpeg2000 file codec.
* @param p_header_data the data contained in the COM box.
* @param p_header_size the size of the data contained in the COM marker.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_com ( opj_j2k_t *p_j2k,
OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
opj_event_mgr_t * p_manager
)
{
/* preconditions */
assert(p_j2k != 00);
assert(p_manager != 00);
assert(p_header_data != 00);
(void)p_header_size;
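        /* the COM payload is informational only and is not needed for
           decoding, so it is simply skipped here */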
return OPJ_TRUE;
}
static OPJ_BOOL opj_j2k_write_cod( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager )
{
opj_cp_t *l_cp = 00;
opj_tcp_t *l_tcp = 00;
OPJ_UINT32 l_code_size,l_remaining_size;
OPJ_BYTE * l_current_data = 00;
/* preconditions */
assert(p_j2k != 00);
assert(p_manager != 00);
assert(p_stream != 00);
l_cp = &(p_j2k->m_cp);
l_tcp = &l_cp->tcps[p_j2k->m_current_tile_number];
l_code_size = 9 + opj_j2k_get_SPCod_SPCoc_size(p_j2k,p_j2k->m_current_tile_number,0);
l_remaining_size = l_code_size;
if (l_code_size > p_j2k->m_specific_param.m_encoder.m_header_tile_data_size) {
OPJ_BYTE *new_header_tile_data = (OPJ_BYTE *) opj_realloc(p_j2k->m_specific_param.m_encoder.m_header_tile_data, l_code_size);
if (! new_header_tile_data) {
opj_free(p_j2k->m_specific_param.m_encoder.m_header_tile_data);
p_j2k->m_specific_param.m_encoder.m_header_tile_data = NULL;
p_j2k->m_specific_param.m_encoder.m_header_tile_data_size = 0;
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to write COD marker\n");
return OPJ_FALSE;
}
p_j2k->m_specific_param.m_encoder.m_header_tile_data = new_header_tile_data;
p_j2k->m_specific_param.m_encoder.m_header_tile_data_size = l_code_size;
}
l_current_data = p_j2k->m_specific_param.m_encoder.m_header_tile_data;
opj_write_bytes(l_current_data,J2K_MS_COD,2); /* COD */
l_current_data += 2;
opj_write_bytes(l_current_data,l_code_size-2,2); /* L_COD */
l_current_data += 2;
opj_write_bytes(l_current_data,l_tcp->csty,1); /* Scod */
++l_current_data;
opj_write_bytes(l_current_data,l_tcp->prg,1); /* SGcod (A) */
++l_current_data;
opj_write_bytes(l_current_data,l_tcp->numlayers,2); /* SGcod (B) */
l_current_data+=2;
opj_write_bytes(l_current_data,l_tcp->mct,1); /* SGcod (C) */
++l_current_data;
l_remaining_size -= 9;
if (! opj_j2k_write_SPCod_SPCoc(p_j2k,p_j2k->m_current_tile_number,0,l_current_data,&l_remaining_size,p_manager)) {
opj_event_msg(p_manager, EVT_ERROR, "Error writing COD marker\n");
return OPJ_FALSE;
}
if (l_remaining_size != 0) {
opj_event_msg(p_manager, EVT_ERROR, "Error writing COD marker\n");
return OPJ_FALSE;
}
if (opj_stream_write_data(p_stream,p_j2k->m_specific_param.m_encoder.m_header_tile_data,l_code_size,p_manager) != l_code_size) {
return OPJ_FALSE;
}
return OPJ_TRUE;
}
/**
 * Reads a COD marker (Coding Style defaults)
* @param p_header_data the data contained in the COD box.
* @param p_j2k the jpeg2000 codec.
* @param p_header_size the size of the data contained in the COD marker.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_cod ( opj_j2k_t *p_j2k,
OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
opj_event_mgr_t * p_manager
)
{
/* loop */
OPJ_UINT32 i;
OPJ_UINT32 l_tmp;
opj_cp_t *l_cp = 00;
opj_tcp_t *l_tcp = 00;
opj_image_t *l_image = 00;
/* preconditions */
assert(p_header_data != 00);
assert(p_j2k != 00);
assert(p_manager != 00);
l_image = p_j2k->m_private_image;
l_cp = &(p_j2k->m_cp);
/* If we are in the first tile-part header of the current tile */
l_tcp = (p_j2k->m_specific_param.m_decoder.m_state == J2K_STATE_TPH) ?
&l_cp->tcps[p_j2k->m_current_tile_number] :
p_j2k->m_specific_param.m_decoder.m_default_tcp;
/* Only one COD per tile */
if (l_tcp->cod) {
opj_event_msg(p_manager, EVT_ERROR, "COD marker already read. No more than one COD marker per tile.\n");
return OPJ_FALSE;
}
l_tcp->cod = 1;
/* Make sure room is sufficient */
if (p_header_size < 5) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading COD marker\n");
return OPJ_FALSE;
}
opj_read_bytes(p_header_data,&l_tcp->csty,1); /* Scod */
++p_header_data;
/* Make sure we know how to decode this */
if ((l_tcp->csty & ~(OPJ_UINT32)(J2K_CP_CSTY_PRT | J2K_CP_CSTY_SOP | J2K_CP_CSTY_EPH)) != 0U) {
opj_event_msg(p_manager, EVT_ERROR, "Unknown Scod value in COD marker\n");
return OPJ_FALSE;
}
opj_read_bytes(p_header_data,&l_tmp,1); /* SGcod (A) */
++p_header_data;
l_tcp->prg = (OPJ_PROG_ORDER) l_tmp;
/* Make sure progression order is valid */
if (l_tcp->prg > OPJ_CPRL ) {
opj_event_msg(p_manager, EVT_ERROR, "Unknown progression order in COD marker\n");
l_tcp->prg = OPJ_PROG_UNKNOWN;
}
opj_read_bytes(p_header_data,&l_tcp->numlayers,2); /* SGcod (B) */
p_header_data+=2;
if ((l_tcp->numlayers < 1U) || (l_tcp->numlayers > 65535U)) {
opj_event_msg(p_manager, EVT_ERROR, "Invalid number of layers in COD marker : %d not in range [1-65535]\n", l_tcp->numlayers);
return OPJ_FALSE;
}
        /* If the user didn't set the number of layers to decode, take the max specified in the codestream. */
if (l_cp->m_specific_param.m_dec.m_layer) {
l_tcp->num_layers_to_decode = l_cp->m_specific_param.m_dec.m_layer;
}
else {
l_tcp->num_layers_to_decode = l_tcp->numlayers;
}
opj_read_bytes(p_header_data,&l_tcp->mct,1); /* SGcod (C) */
++p_header_data;
p_header_size -= 5;
for (i = 0; i < l_image->numcomps; ++i) {
l_tcp->tccps[i].csty = l_tcp->csty & J2K_CCP_CSTY_PRT;
}
if (! opj_j2k_read_SPCod_SPCoc(p_j2k,0,p_header_data,&p_header_size,p_manager)) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading COD marker\n");
return OPJ_FALSE;
}
if (p_header_size != 0) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading COD marker\n");
return OPJ_FALSE;
}
/* Apply the coding style to other components of the current tile or the m_default_tcp*/
opj_j2k_copy_tile_component_parameters(p_j2k);
/* Index */
#ifdef WIP_REMOVE_MSD
if (p_j2k->cstr_info) {
/*opj_codestream_info_t *l_cstr_info = p_j2k->cstr_info;*/
p_j2k->cstr_info->prog = l_tcp->prg;
p_j2k->cstr_info->numlayers = l_tcp->numlayers;
p_j2k->cstr_info->numdecompos = (OPJ_INT32*) opj_malloc(l_image->numcomps * sizeof(OPJ_UINT32));
for (i = 0; i < l_image->numcomps; ++i) {
p_j2k->cstr_info->numdecompos[i] = l_tcp->tccps[i].numresolutions - 1;
}
}
#endif
return OPJ_TRUE;
}
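/* Layout note for opj_j2k_read_cod(): the fixed part parsed above is Scod
 * (1 byte) plus SGcod = progression order (1) + numlayers (2) + mct (1),
 * i.e. the 5 bytes subtracted from p_header_size before the per-component
 * SPCod/SPCoc data is read. */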
#if 0
static OPJ_BOOL opj_j2k_write_coc( opj_j2k_t *p_j2k,
OPJ_UINT32 p_comp_no,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager )
{
OPJ_UINT32 l_coc_size,l_remaining_size;
OPJ_UINT32 l_comp_room;
/* preconditions */
assert(p_j2k != 00);
assert(p_manager != 00);
assert(p_stream != 00);
l_comp_room = (p_j2k->m_private_image->numcomps <= 256) ? 1 : 2;
l_coc_size = 5 + l_comp_room + opj_j2k_get_SPCod_SPCoc_size(p_j2k,p_j2k->m_current_tile_number,p_comp_no);
if (l_coc_size > p_j2k->m_specific_param.m_encoder.m_header_tile_data_size) {
OPJ_BYTE *new_header_tile_data;
/*p_j2k->m_specific_param.m_encoder.m_header_tile_data
= (OPJ_BYTE*)opj_realloc(
p_j2k->m_specific_param.m_encoder.m_header_tile_data,
l_coc_size);*/
new_header_tile_data = (OPJ_BYTE *) opj_realloc(p_j2k->m_specific_param.m_encoder.m_header_tile_data, l_coc_size);
if (! new_header_tile_data) {
opj_free(p_j2k->m_specific_param.m_encoder.m_header_tile_data);
p_j2k->m_specific_param.m_encoder.m_header_tile_data = NULL;
p_j2k->m_specific_param.m_encoder.m_header_tile_data_size = 0;
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to write COC marker\n");
return OPJ_FALSE;
}
p_j2k->m_specific_param.m_encoder.m_header_tile_data = new_header_tile_data;
p_j2k->m_specific_param.m_encoder.m_header_tile_data_size = l_coc_size;
}
opj_j2k_write_coc_in_memory(p_j2k,p_comp_no,p_j2k->m_specific_param.m_encoder.m_header_tile_data,&l_remaining_size,p_manager);
if (opj_stream_write_data(p_stream,p_j2k->m_specific_param.m_encoder.m_header_tile_data,l_coc_size,p_manager) != l_coc_size) {
return OPJ_FALSE;
}
return OPJ_TRUE;
}
#endif
#if 0
static void opj_j2k_write_coc_in_memory( opj_j2k_t *p_j2k,
OPJ_UINT32 p_comp_no,
OPJ_BYTE * p_data,
OPJ_UINT32 * p_data_written,
opj_event_mgr_t * p_manager
)
{
opj_cp_t *l_cp = 00;
opj_tcp_t *l_tcp = 00;
OPJ_UINT32 l_coc_size,l_remaining_size;
OPJ_BYTE * l_current_data = 00;
opj_image_t *l_image = 00;
OPJ_UINT32 l_comp_room;
/* preconditions */
assert(p_j2k != 00);
assert(p_manager != 00);
l_cp = &(p_j2k->m_cp);
l_tcp = &l_cp->tcps[p_j2k->m_current_tile_number];
l_image = p_j2k->m_private_image;
l_comp_room = (l_image->numcomps <= 256) ? 1 : 2;
l_coc_size = 5 + l_comp_room + opj_j2k_get_SPCod_SPCoc_size(p_j2k,p_j2k->m_current_tile_number,p_comp_no);
l_remaining_size = l_coc_size;
l_current_data = p_data;
opj_write_bytes(l_current_data,J2K_MS_COC,2); /* COC */
l_current_data += 2;
opj_write_bytes(l_current_data,l_coc_size-2,2); /* L_COC */
l_current_data += 2;
opj_write_bytes(l_current_data,p_comp_no, l_comp_room); /* Ccoc */
l_current_data+=l_comp_room;
opj_write_bytes(l_current_data, l_tcp->tccps[p_comp_no].csty, 1); /* Scoc */
++l_current_data;
l_remaining_size -= (5 + l_comp_room);
opj_j2k_write_SPCod_SPCoc(p_j2k,p_j2k->m_current_tile_number,0,l_current_data,&l_remaining_size,p_manager);
* p_data_written = l_coc_size;
}
#endif
static OPJ_UINT32 opj_j2k_get_max_coc_size(opj_j2k_t *p_j2k)
{
OPJ_UINT32 i,j;
OPJ_UINT32 l_nb_comp;
OPJ_UINT32 l_nb_tiles;
OPJ_UINT32 l_max = 0;
/* preconditions */
l_nb_tiles = p_j2k->m_cp.tw * p_j2k->m_cp.th ;
l_nb_comp = p_j2k->m_private_image->numcomps;
for (i=0;i<l_nb_tiles;++i) {
for (j=0;j<l_nb_comp;++j) {
l_max = opj_uint_max(l_max,opj_j2k_get_SPCod_SPCoc_size(p_j2k,i,j));
}
}
return 6 + l_max;
}
/**
* Reads a COC marker (Coding Style Component)
* @param p_header_data the data contained in the COC box.
* @param p_j2k the jpeg2000 codec.
* @param p_header_size the size of the data contained in the COC marker.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_coc ( opj_j2k_t *p_j2k,
OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
opj_event_mgr_t * p_manager
)
{
opj_cp_t *l_cp = NULL;
opj_tcp_t *l_tcp = NULL;
opj_image_t *l_image = NULL;
OPJ_UINT32 l_comp_room;
OPJ_UINT32 l_comp_no;
/* preconditions */
assert(p_header_data != 00);
assert(p_j2k != 00);
assert(p_manager != 00);
l_cp = &(p_j2k->m_cp);
l_tcp = (p_j2k->m_specific_param.m_decoder.m_state == J2K_STATE_TPH ) ? /*FIXME J2K_DEC_STATE_TPH*/
&l_cp->tcps[p_j2k->m_current_tile_number] :
p_j2k->m_specific_param.m_decoder.m_default_tcp;
l_image = p_j2k->m_private_image;
l_comp_room = l_image->numcomps <= 256 ? 1 : 2;
/* make sure room is sufficient*/
if (p_header_size < l_comp_room + 1) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading COC marker\n");
return OPJ_FALSE;
}
p_header_size -= l_comp_room + 1;
opj_read_bytes(p_header_data,&l_comp_no,l_comp_room); /* Ccoc */
p_header_data += l_comp_room;
if (l_comp_no >= l_image->numcomps) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading COC marker (bad number of components)\n");
return OPJ_FALSE;
}
opj_read_bytes(p_header_data,&l_tcp->tccps[l_comp_no].csty,1); /* Scoc */
++p_header_data ;
if (! opj_j2k_read_SPCod_SPCoc(p_j2k,l_comp_no,p_header_data,&p_header_size,p_manager)) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading COC marker\n");
return OPJ_FALSE;
}
if (p_header_size != 0) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading COC marker\n");
return OPJ_FALSE;
}
return OPJ_TRUE;
}
static OPJ_BOOL opj_j2k_write_qcd( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager
)
{
OPJ_UINT32 l_qcd_size,l_remaining_size;
OPJ_BYTE * l_current_data = 00;
/* preconditions */
assert(p_j2k != 00);
assert(p_manager != 00);
assert(p_stream != 00);
l_qcd_size = 4 + opj_j2k_get_SQcd_SQcc_size(p_j2k,p_j2k->m_current_tile_number,0);
l_remaining_size = l_qcd_size;
if (l_qcd_size > p_j2k->m_specific_param.m_encoder.m_header_tile_data_size) {
OPJ_BYTE *new_header_tile_data = (OPJ_BYTE *) opj_realloc(p_j2k->m_specific_param.m_encoder.m_header_tile_data, l_qcd_size);
if (! new_header_tile_data) {
opj_free(p_j2k->m_specific_param.m_encoder.m_header_tile_data);
p_j2k->m_specific_param.m_encoder.m_header_tile_data = NULL;
p_j2k->m_specific_param.m_encoder.m_header_tile_data_size = 0;
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to write QCD marker\n");
return OPJ_FALSE;
}
p_j2k->m_specific_param.m_encoder.m_header_tile_data = new_header_tile_data;
p_j2k->m_specific_param.m_encoder.m_header_tile_data_size = l_qcd_size;
}
l_current_data = p_j2k->m_specific_param.m_encoder.m_header_tile_data;
opj_write_bytes(l_current_data,J2K_MS_QCD,2); /* QCD */
l_current_data += 2;
opj_write_bytes(l_current_data,l_qcd_size-2,2); /* L_QCD */
l_current_data += 2;
l_remaining_size -= 4;
if (! opj_j2k_write_SQcd_SQcc(p_j2k,p_j2k->m_current_tile_number,0,l_current_data,&l_remaining_size,p_manager)) {
opj_event_msg(p_manager, EVT_ERROR, "Error writing QCD marker\n");
return OPJ_FALSE;
}
if (l_remaining_size != 0) {
opj_event_msg(p_manager, EVT_ERROR, "Error writing QCD marker\n");
return OPJ_FALSE;
}
if (opj_stream_write_data(p_stream, p_j2k->m_specific_param.m_encoder.m_header_tile_data,l_qcd_size,p_manager) != l_qcd_size) {
return OPJ_FALSE;
}
return OPJ_TRUE;
}
/**
* Reads a QCD marker (Quantization defaults)
* @param p_header_data the data contained in the QCD box.
* @param p_j2k the jpeg2000 codec.
* @param p_header_size the size of the data contained in the QCD marker.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_qcd ( opj_j2k_t *p_j2k,
OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
opj_event_mgr_t * p_manager
)
{
/* preconditions */
assert(p_header_data != 00);
assert(p_j2k != 00);
assert(p_manager != 00);
if (! opj_j2k_read_SQcd_SQcc(p_j2k,0,p_header_data,&p_header_size,p_manager)) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading QCD marker\n");
return OPJ_FALSE;
}
if (p_header_size != 0) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading QCD marker\n");
return OPJ_FALSE;
}
/* Apply the quantization parameters to other components of the current tile or the m_default_tcp */
opj_j2k_copy_tile_quantization_parameters(p_j2k);
return OPJ_TRUE;
}
#if 0
static OPJ_BOOL opj_j2k_write_qcc( opj_j2k_t *p_j2k,
OPJ_UINT32 p_comp_no,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager
)
{
OPJ_UINT32 l_qcc_size,l_remaining_size;
/* preconditions */
assert(p_j2k != 00);
assert(p_manager != 00);
assert(p_stream != 00);
l_qcc_size = 5 + opj_j2k_get_SQcd_SQcc_size(p_j2k,p_j2k->m_current_tile_number,p_comp_no);
l_qcc_size += p_j2k->m_private_image->numcomps <= 256 ? 0:1;
l_remaining_size = l_qcc_size;
if (l_qcc_size > p_j2k->m_specific_param.m_encoder.m_header_tile_data_size) {
OPJ_BYTE *new_header_tile_data = (OPJ_BYTE *) opj_realloc(p_j2k->m_specific_param.m_encoder.m_header_tile_data, l_qcc_size);
if (! new_header_tile_data) {
opj_free(p_j2k->m_specific_param.m_encoder.m_header_tile_data);
p_j2k->m_specific_param.m_encoder.m_header_tile_data = NULL;
p_j2k->m_specific_param.m_encoder.m_header_tile_data_size = 0;
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to write QCC marker\n");
return OPJ_FALSE;
}
p_j2k->m_specific_param.m_encoder.m_header_tile_data = new_header_tile_data;
p_j2k->m_specific_param.m_encoder.m_header_tile_data_size = l_qcc_size;
}
opj_j2k_write_qcc_in_memory(p_j2k,p_comp_no,p_j2k->m_specific_param.m_encoder.m_header_tile_data,&l_remaining_size,p_manager);
if (opj_stream_write_data(p_stream,p_j2k->m_specific_param.m_encoder.m_header_tile_data,l_qcc_size,p_manager) != l_qcc_size) {
return OPJ_FALSE;
}
return OPJ_TRUE;
}
#endif
#if 0
static void opj_j2k_write_qcc_in_memory( opj_j2k_t *p_j2k,
OPJ_UINT32 p_comp_no,
OPJ_BYTE * p_data,
OPJ_UINT32 * p_data_written,
opj_event_mgr_t * p_manager
)
{
OPJ_UINT32 l_qcc_size,l_remaining_size;
OPJ_BYTE * l_current_data = 00;
/* preconditions */
assert(p_j2k != 00);
assert(p_manager != 00);
l_qcc_size = 6 + opj_j2k_get_SQcd_SQcc_size(p_j2k,p_j2k->m_current_tile_number,p_comp_no);
l_remaining_size = l_qcc_size;
l_current_data = p_data;
opj_write_bytes(l_current_data,J2K_MS_QCC,2); /* QCC */
l_current_data += 2;
if (p_j2k->m_private_image->numcomps <= 256) {
--l_qcc_size;
opj_write_bytes(l_current_data,l_qcc_size-2,2); /* L_QCC */
l_current_data += 2;
opj_write_bytes(l_current_data, p_comp_no, 1); /* Cqcc */
++l_current_data;
                /* when a single byte is sufficient, the last allocated byte is unused -> still subtract 6 from the available size */
l_remaining_size -= 6;
}
else {
opj_write_bytes(l_current_data,l_qcc_size-2,2); /* L_QCC */
l_current_data += 2;
opj_write_bytes(l_current_data, p_comp_no, 2); /* Cqcc */
l_current_data+=2;
l_remaining_size -= 6;
}
opj_j2k_write_SQcd_SQcc(p_j2k,p_j2k->m_current_tile_number,p_comp_no,l_current_data,&l_remaining_size,p_manager);
*p_data_written = l_qcc_size;
}
#endif
static OPJ_UINT32 opj_j2k_get_max_qcc_size (opj_j2k_t *p_j2k)
{
return opj_j2k_get_max_coc_size(p_j2k);
}
/**
* Reads a QCC marker (Quantization component)
* @param p_header_data the data contained in the QCC box.
* @param p_j2k the jpeg2000 codec.
* @param p_header_size the size of the data contained in the QCC marker.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_qcc( opj_j2k_t *p_j2k,
OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
opj_event_mgr_t * p_manager
)
{
OPJ_UINT32 l_num_comp,l_comp_no;
/* preconditions */
assert(p_header_data != 00);
assert(p_j2k != 00);
assert(p_manager != 00);
l_num_comp = p_j2k->m_private_image->numcomps;
if (l_num_comp <= 256) {
if (p_header_size < 1) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading QCC marker\n");
return OPJ_FALSE;
}
opj_read_bytes(p_header_data,&l_comp_no,1);
++p_header_data;
--p_header_size;
}
else {
if (p_header_size < 2) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading QCC marker\n");
return OPJ_FALSE;
}
opj_read_bytes(p_header_data,&l_comp_no,2);
p_header_data+=2;
p_header_size-=2;
}
#ifdef USE_JPWL
if (p_j2k->m_cp.correct) {
static OPJ_UINT32 backup_compno = 0;
/* compno is negative or larger than the number of components!!! */
if (/*(l_comp_no < 0) ||*/ (l_comp_no >= l_num_comp)) {
opj_event_msg(p_manager, EVT_ERROR,
"JPWL: bad component number in QCC (%d out of a maximum of %d)\n",
l_comp_no, l_num_comp);
if (!JPWL_ASSUME) {
opj_event_msg(p_manager, EVT_ERROR, "JPWL: giving up\n");
return OPJ_FALSE;
}
/* we try to correct */
l_comp_no = backup_compno % l_num_comp;
opj_event_msg(p_manager, EVT_WARNING, "- trying to adjust this\n"
"- setting component number to %d\n",
l_comp_no);
}
/* keep your private count of tiles */
backup_compno++;
        }
#endif /* USE_JPWL */
if (l_comp_no >= p_j2k->m_private_image->numcomps) {
opj_event_msg(p_manager, EVT_ERROR,
"Invalid component number: %d, regarding the number of components %d\n",
l_comp_no, p_j2k->m_private_image->numcomps);
return OPJ_FALSE;
}
if (! opj_j2k_read_SQcd_SQcc(p_j2k,l_comp_no,p_header_data,&p_header_size,p_manager)) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading QCC marker\n");
return OPJ_FALSE;
}
if (p_header_size != 0) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading QCC marker\n");
return OPJ_FALSE;
}
return OPJ_TRUE;
}
static OPJ_BOOL opj_j2k_write_poc( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager
)
{
OPJ_UINT32 l_nb_comp;
OPJ_UINT32 l_nb_poc;
OPJ_UINT32 l_poc_size;
OPJ_UINT32 l_written_size = 0;
opj_tcp_t *l_tcp = 00;
OPJ_UINT32 l_poc_room;
/* preconditions */
assert(p_j2k != 00);
assert(p_manager != 00);
assert(p_stream != 00);
l_tcp = &p_j2k->m_cp.tcps[p_j2k->m_current_tile_number];
l_nb_comp = p_j2k->m_private_image->numcomps;
l_nb_poc = 1 + l_tcp->numpocs;
if (l_nb_comp <= 256) {
l_poc_room = 1;
}
else {
l_poc_room = 2;
}
l_poc_size = 4 + (5 + 2 * l_poc_room) * l_nb_poc;
if (l_poc_size > p_j2k->m_specific_param.m_encoder.m_header_tile_data_size) {
OPJ_BYTE *new_header_tile_data = (OPJ_BYTE *) opj_realloc(p_j2k->m_specific_param.m_encoder.m_header_tile_data, l_poc_size);
if (! new_header_tile_data) {
opj_free(p_j2k->m_specific_param.m_encoder.m_header_tile_data);
p_j2k->m_specific_param.m_encoder.m_header_tile_data = NULL;
p_j2k->m_specific_param.m_encoder.m_header_tile_data_size = 0;
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to write POC marker\n");
return OPJ_FALSE;
}
p_j2k->m_specific_param.m_encoder.m_header_tile_data = new_header_tile_data;
p_j2k->m_specific_param.m_encoder.m_header_tile_data_size = l_poc_size;
}
opj_j2k_write_poc_in_memory(p_j2k,p_j2k->m_specific_param.m_encoder.m_header_tile_data,&l_written_size,p_manager);
if (opj_stream_write_data(p_stream,p_j2k->m_specific_param.m_encoder.m_header_tile_data,l_poc_size,p_manager) != l_poc_size) {
return OPJ_FALSE;
}
return OPJ_TRUE;
}
static void opj_j2k_write_poc_in_memory( opj_j2k_t *p_j2k,
OPJ_BYTE * p_data,
OPJ_UINT32 * p_data_written,
opj_event_mgr_t * p_manager
)
{
OPJ_UINT32 i;
OPJ_BYTE * l_current_data = 00;
OPJ_UINT32 l_nb_comp;
OPJ_UINT32 l_nb_poc;
OPJ_UINT32 l_poc_size;
opj_image_t *l_image = 00;
opj_tcp_t *l_tcp = 00;
opj_tccp_t *l_tccp = 00;
opj_poc_t *l_current_poc = 00;
OPJ_UINT32 l_poc_room;
/* preconditions */
assert(p_j2k != 00);
assert(p_manager != 00);
l_tcp = &p_j2k->m_cp.tcps[p_j2k->m_current_tile_number];
l_tccp = &l_tcp->tccps[0];
l_image = p_j2k->m_private_image;
l_nb_comp = l_image->numcomps;
l_nb_poc = 1 + l_tcp->numpocs;
if (l_nb_comp <= 256) {
l_poc_room = 1;
}
else {
l_poc_room = 2;
}
l_poc_size = 4 + (5 + 2 * l_poc_room) * l_nb_poc;
l_current_data = p_data;
opj_write_bytes(l_current_data,J2K_MS_POC,2); /* POC */
l_current_data += 2;
opj_write_bytes(l_current_data,l_poc_size-2,2); /* Lpoc */
l_current_data += 2;
l_current_poc = l_tcp->pocs;
for (i = 0; i < l_nb_poc; ++i) {
opj_write_bytes(l_current_data,l_current_poc->resno0,1); /* RSpoc_i */
++l_current_data;
opj_write_bytes(l_current_data,l_current_poc->compno0,l_poc_room); /* CSpoc_i */
l_current_data+=l_poc_room;
opj_write_bytes(l_current_data,l_current_poc->layno1,2); /* LYEpoc_i */
l_current_data+=2;
opj_write_bytes(l_current_data,l_current_poc->resno1,1); /* REpoc_i */
++l_current_data;
opj_write_bytes(l_current_data,l_current_poc->compno1,l_poc_room); /* CEpoc_i */
l_current_data+=l_poc_room;
opj_write_bytes(l_current_data,l_current_poc->prg,1); /* Ppoc_i */
++l_current_data;
                /* change the value of the max layer according to the actual number of layers in the file, components and resolutions */
l_current_poc->layno1 = (OPJ_UINT32)opj_int_min((OPJ_INT32)l_current_poc->layno1, (OPJ_INT32)l_tcp->numlayers);
l_current_poc->resno1 = (OPJ_UINT32)opj_int_min((OPJ_INT32)l_current_poc->resno1, (OPJ_INT32)l_tccp->numresolutions);
l_current_poc->compno1 = (OPJ_UINT32)opj_int_min((OPJ_INT32)l_current_poc->compno1, (OPJ_INT32)l_nb_comp);
++l_current_poc;
}
*p_data_written = l_poc_size;
}
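/* Size note: each progression order change occupies 5 + 2 * l_poc_room bytes
 * (RSpoc, LYEpoc, REpoc and Ppoc plus two component fields), so
 * opj_j2k_get_max_poc_size() below budgets the worst case l_poc_room == 2,
 * i.e. 9 bytes per change on top of the 4-byte marker/length header. */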
static OPJ_UINT32 opj_j2k_get_max_poc_size(opj_j2k_t *p_j2k)
{
opj_tcp_t * l_tcp = 00;
OPJ_UINT32 l_nb_tiles = 0;
OPJ_UINT32 l_max_poc = 0;
OPJ_UINT32 i;
l_tcp = p_j2k->m_cp.tcps;
l_nb_tiles = p_j2k->m_cp.th * p_j2k->m_cp.tw;
for (i=0;i<l_nb_tiles;++i) {
l_max_poc = opj_uint_max(l_max_poc,l_tcp->numpocs);
++l_tcp;
}
++l_max_poc;
return 4 + 9 * l_max_poc;
}
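/* Note: the factor 9 above is the worst-case per-change size 5 + 2 * 2,
i.e. it assumes 2-byte component indices (more than 256 components);
compare the l_poc_room computation in opj_j2k_write_poc_in_memory(). */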
static OPJ_UINT32 opj_j2k_get_max_toc_size (opj_j2k_t *p_j2k)
{
OPJ_UINT32 i;
OPJ_UINT32 l_nb_tiles;
OPJ_UINT32 l_max = 0;
opj_tcp_t * l_tcp = 00;
l_tcp = p_j2k->m_cp.tcps;
l_nb_tiles = p_j2k->m_cp.tw * p_j2k->m_cp.th ;
for (i=0;i<l_nb_tiles;++i) {
l_max = opj_uint_max(l_max,l_tcp->m_nb_tile_parts);
++l_tcp;
}
return 12 * l_max;
}
static OPJ_UINT32 opj_j2k_get_specific_header_sizes(opj_j2k_t *p_j2k)
{
OPJ_UINT32 l_nb_bytes = 0;
OPJ_UINT32 l_nb_comps;
OPJ_UINT32 l_coc_bytes,l_qcc_bytes;
l_nb_comps = p_j2k->m_private_image->numcomps - 1;
l_nb_bytes += opj_j2k_get_max_toc_size(p_j2k);
if (!(OPJ_IS_CINEMA(p_j2k->m_cp.rsiz))) {
l_coc_bytes = opj_j2k_get_max_coc_size(p_j2k);
l_nb_bytes += l_nb_comps * l_coc_bytes;
l_qcc_bytes = opj_j2k_get_max_qcc_size(p_j2k);
l_nb_bytes += l_nb_comps * l_qcc_bytes;
}
l_nb_bytes += opj_j2k_get_max_poc_size(p_j2k);
/*** DEVELOPER CORNER, Add room for your headers ***/
return l_nb_bytes;
}
/**
* Reads a POC marker (Progression Order Change)
*
* @param p_header_data the data contained in the POC box.
* @param p_j2k the jpeg2000 codec.
* @param p_header_size the size of the data contained in the POC marker.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_poc ( opj_j2k_t *p_j2k,
OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
opj_event_mgr_t * p_manager
)
{
OPJ_UINT32 i, l_nb_comp, l_tmp;
opj_image_t * l_image = 00;
OPJ_UINT32 l_old_poc_nb, l_current_poc_nb, l_current_poc_remaining;
OPJ_UINT32 l_chunk_size, l_comp_room;
opj_cp_t *l_cp = 00;
opj_tcp_t *l_tcp = 00;
opj_poc_t *l_current_poc = 00;
/* preconditions */
assert(p_header_data != 00);
assert(p_j2k != 00);
assert(p_manager != 00);
l_image = p_j2k->m_private_image;
l_nb_comp = l_image->numcomps;
if (l_nb_comp <= 256) {
l_comp_room = 1;
}
else {
l_comp_room = 2;
}
l_chunk_size = 5 + 2 * l_comp_room;
l_current_poc_nb = p_header_size / l_chunk_size;
l_current_poc_remaining = p_header_size % l_chunk_size;
if ((l_current_poc_nb == 0) || (l_current_poc_remaining != 0)) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading POC marker\n");
return OPJ_FALSE;
}
l_cp = &(p_j2k->m_cp);
l_tcp = (p_j2k->m_specific_param.m_decoder.m_state == J2K_STATE_TPH) ?
&l_cp->tcps[p_j2k->m_current_tile_number] :
p_j2k->m_specific_param.m_decoder.m_default_tcp;
l_old_poc_nb = l_tcp->POC ? l_tcp->numpocs + 1 : 0;
l_current_poc_nb += l_old_poc_nb;
if (l_current_poc_nb >= 32) {
opj_event_msg(p_manager, EVT_ERROR, "Too many POCs %d\n", l_current_poc_nb);
return OPJ_FALSE;
}
assert(l_current_poc_nb < 32);
/* now poc is in use.*/
l_tcp->POC = 1;
l_current_poc = &l_tcp->pocs[l_old_poc_nb];
for (i = l_old_poc_nb; i < l_current_poc_nb; ++i) {
opj_read_bytes(p_header_data,&(l_current_poc->resno0),1); /* RSpoc_i */
++p_header_data;
opj_read_bytes(p_header_data,&(l_current_poc->compno0),l_comp_room); /* CSpoc_i */
p_header_data+=l_comp_room;
opj_read_bytes(p_header_data,&(l_current_poc->layno1),2); /* LYEpoc_i */
/* make sure layer end is in acceptable bounds */
l_current_poc->layno1 = opj_uint_min(l_current_poc->layno1, l_tcp->numlayers);
p_header_data+=2;
opj_read_bytes(p_header_data,&(l_current_poc->resno1),1); /* REpoc_i */
++p_header_data;
opj_read_bytes(p_header_data,&(l_current_poc->compno1),l_comp_room); /* CEpoc_i */
p_header_data+=l_comp_room;
opj_read_bytes(p_header_data,&l_tmp,1); /* Ppoc_i */
++p_header_data;
l_current_poc->prg = (OPJ_PROG_ORDER) l_tmp;
/* make sure comp is in acceptable bounds */
l_current_poc->compno1 = opj_uint_min(l_current_poc->compno1, l_nb_comp);
++l_current_poc;
}
l_tcp->numpocs = l_current_poc_nb - 1;
return OPJ_TRUE;
}
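/* Worked example of the chunk arithmetic above: for an image with 3
components, l_comp_room = 1 and l_chunk_size = 7, so a POC segment
carrying two progression changes arrives with p_header_size = 14 and
l_current_poc_remaining = 0. */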
/**
* Reads a CRG marker (Component registration)
*
* @param p_header_data the data contained in the CRG box.
* @param p_j2k the jpeg2000 codec.
* @param p_header_size the size of the data contained in the CRG marker.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_crg ( opj_j2k_t *p_j2k,
OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
opj_event_mgr_t * p_manager
)
{
OPJ_UINT32 l_nb_comp;
/* preconditions */
assert(p_header_data != 00);
assert(p_j2k != 00);
assert(p_manager != 00);
l_nb_comp = p_j2k->m_private_image->numcomps;
if (p_header_size != l_nb_comp *4) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading CRG marker\n");
return OPJ_FALSE;
}
/* Not handled at the moment, since only local variables would be set here */
/*
for
(i = 0; i < l_nb_comp; ++i)
{
opj_read_bytes(p_header_data,&l_Xcrg_i,2); // Xcrg_i
p_header_data+=2;
opj_read_bytes(p_header_data,&l_Ycrg_i,2); // Ycrg_i
p_header_data+=2;
}
*/
return OPJ_TRUE;
}
/**
* Reads a TLM marker (Tile Length Marker)
*
* @param p_header_data the data contained in the TLM box.
* @param p_j2k the jpeg2000 codec.
* @param p_header_size the size of the data contained in the TLM marker.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_tlm ( opj_j2k_t *p_j2k,
OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
opj_event_mgr_t * p_manager
)
{
OPJ_UINT32 l_Ztlm, l_Stlm, l_ST, l_SP, l_tot_num_tp_remaining, l_quotient, l_Ptlm_size;
/* preconditions */
assert(p_header_data != 00);
assert(p_j2k != 00);
assert(p_manager != 00);
if (p_header_size < 2) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading TLM marker\n");
return OPJ_FALSE;
}
p_header_size -= 2;
opj_read_bytes(p_header_data,&l_Ztlm,1); /* Ztlm */
++p_header_data;
opj_read_bytes(p_header_data,&l_Stlm,1); /* Stlm */
++p_header_data;
l_ST = ((l_Stlm >> 4) & 0x3);
l_SP = (l_Stlm >> 6) & 0x1;
l_Ptlm_size = (l_SP + 1) * 2;
l_quotient = l_Ptlm_size + l_ST;
l_tot_num_tp_remaining = p_header_size % l_quotient;
if (l_tot_num_tp_remaining != 0) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading TLM marker\n");
return OPJ_FALSE;
}
/* FIXME Not handled at the moment, since only local variables would be set here */
/*
for
(i = 0; i < l_tot_num_tp; ++i)
{
opj_read_bytes(p_header_data,&l_Ttlm_i,l_ST); // Ttlm_i
p_header_data += l_ST;
opj_read_bytes(p_header_data,&l_Ptlm_i,l_Ptlm_size); // Ptlm_i
p_header_data += l_Ptlm_size;
}*/
return OPJ_TRUE;
}
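/* Illustrative sketch (not compiled) of the Stlm bit fields decoded above:
SP sits in bit 6 and ST in bits 4-5. The function name below is
hypothetical. */
#if 0
static OPJ_UINT32 opj_j2k_tlm_entry_size_sketch(void)
{
/* 0x50 is the Stlm value emitted by opj_j2k_write_tlm() below */
OPJ_UINT32 l_Stlm = 0x50;
OPJ_UINT32 l_ST = (l_Stlm >> 4) & 0x3; /* Ttlm on 0, 1 or 2 bytes -> 1 */
OPJ_UINT32 l_SP = (l_Stlm >> 6) & 0x1; /* Ptlm on 2 or 4 bytes -> 4 */
/* each TLM entry then spans 1 + 4 = 5 bytes */
return l_ST + (l_SP + 1) * 2;
}
#endif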
/**
* Reads a PLM marker (Packet length, main header marker)
*
* @param p_header_data the data contained in the PLM box.
* @param p_j2k the jpeg2000 codec.
* @param p_header_size the size of the data contained in the PLM marker.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_plm ( opj_j2k_t *p_j2k,
OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
opj_event_mgr_t * p_manager
)
{
/* preconditions */
assert(p_header_data != 00);
assert(p_j2k != 00);
assert(p_manager != 00);
if (p_header_size < 1) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading PLM marker\n");
return OPJ_FALSE;
}
/* Not handled at the moment, since only local variables would be set here */
/*
opj_read_bytes(p_header_data,&l_Zplm,1); // Zplm
++p_header_data;
--p_header_size;
while
(p_header_size > 0)
{
opj_read_bytes(p_header_data,&l_Nplm,1); // Nplm
++p_header_data;
p_header_size -= (1+l_Nplm);
if
(p_header_size < 0)
{
opj_event_msg(p_manager, EVT_ERROR, "Error reading PLM marker\n");
return false;
}
for
(i = 0; i < l_Nplm; ++i)
{
opj_read_bytes(p_header_data,&l_tmp,1); // Iplm_ij
++p_header_data;
// take only the low seven bits
l_packet_len |= (l_tmp & 0x7f);
if
(l_tmp & 0x80)
{
l_packet_len <<= 7;
}
else
{
// store packet length and proceed to next packet
l_packet_len = 0;
}
}
if
(l_packet_len != 0)
{
opj_event_msg(p_manager, EVT_ERROR, "Error reading PLM marker\n");
return false;
}
}
*/
return OPJ_TRUE;
}
/**
* Reads a PLT marker (Packet length, tile-part header)
*
* @param p_header_data the data contained in the PLT box.
* @param p_j2k the jpeg2000 codec.
* @param p_header_size the size of the data contained in the PLT marker.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_plt ( opj_j2k_t *p_j2k,
OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
opj_event_mgr_t * p_manager
)
{
OPJ_UINT32 l_Zplt, l_tmp, l_packet_len = 0, i;
/* preconditions */
assert(p_header_data != 00);
assert(p_j2k != 00);
assert(p_manager != 00);
if (p_header_size < 1) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading PLT marker\n");
return OPJ_FALSE;
}
opj_read_bytes(p_header_data,&l_Zplt,1); /* Zplt */
++p_header_data;
--p_header_size;
for (i = 0; i < p_header_size; ++i) {
opj_read_bytes(p_header_data,&l_tmp,1); /* Iplt_ij */
++p_header_data;
/* take only the low seven bits */
l_packet_len |= (l_tmp & 0x7f);
if (l_tmp & 0x80) {
l_packet_len <<= 7;
}
else {
/* store packet length and proceed to next packet */
l_packet_len = 0;
}
}
if (l_packet_len != 0) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading PLT marker\n");
return OPJ_FALSE;
}
return OPJ_TRUE;
}
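/* Worked example of the Iplt continuation scheme parsed above: every byte
contributes its low seven bits, and a set high bit means another byte
follows. The pair 0x81 0x23 therefore decodes as (0x01 << 7) | 0x23 =
0xA3 = 163 bytes for that packet. */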
/**
* Reads a PPM marker (Packed packet headers, main header)
*
* @param p_header_data the data contained in the PPM box.
* @param p_j2k the jpeg2000 codec.
* @param p_header_size the size of the data contained in the PPM marker.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_ppm (
opj_j2k_t *p_j2k,
OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
opj_event_mgr_t * p_manager )
{
opj_cp_t *l_cp = 00;
OPJ_UINT32 l_Z_ppm;
/* preconditions */
assert(p_header_data != 00);
assert(p_j2k != 00);
assert(p_manager != 00);
/* We need to have the Z_ppm element + 1 byte of Nppm/Ippm at minimum */
if (p_header_size < 2) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading PPM marker\n");
return OPJ_FALSE;
}
l_cp = &(p_j2k->m_cp);
l_cp->ppm = 1;
opj_read_bytes(p_header_data,&l_Z_ppm,1); /* Z_ppm */
++p_header_data;
--p_header_size;
/* check allocation needed */
if (l_cp->ppm_markers == NULL) { /* first PPM marker */
OPJ_UINT32 l_newCount = l_Z_ppm + 1U; /* can't overflow, l_Z_ppm is UINT8 */
assert(l_cp->ppm_markers_count == 0U);
l_cp->ppm_markers = (opj_ppx *) opj_calloc(l_newCount, sizeof(opj_ppx));
if (l_cp->ppm_markers == NULL) {
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to read PPM marker\n");
return OPJ_FALSE;
}
l_cp->ppm_markers_count = l_newCount;
} else if (l_cp->ppm_markers_count <= l_Z_ppm) {
OPJ_UINT32 l_newCount = l_Z_ppm + 1U; /* can't overflow, l_Z_ppm is UINT8 */
opj_ppx *new_ppm_markers;
new_ppm_markers = (opj_ppx *) opj_realloc(l_cp->ppm_markers, l_newCount * sizeof(opj_ppx));
if (new_ppm_markers == NULL) {
/* clean up to be done on l_cp destruction */
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to read PPM marker\n");
return OPJ_FALSE;
}
l_cp->ppm_markers = new_ppm_markers;
memset(l_cp->ppm_markers + l_cp->ppm_markers_count, 0, (l_newCount - l_cp->ppm_markers_count) * sizeof(opj_ppx));
l_cp->ppm_markers_count = l_newCount;
}
if (l_cp->ppm_markers[l_Z_ppm].m_data != NULL) {
/* clean up to be done on l_cp destruction */
opj_event_msg(p_manager, EVT_ERROR, "Zppm %u already read\n", l_Z_ppm);
return OPJ_FALSE;
}
l_cp->ppm_markers[l_Z_ppm].m_data = opj_malloc(p_header_size);
if (l_cp->ppm_markers[l_Z_ppm].m_data == NULL) {
/* clean up to be done on l_cp destruction */
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to read PPM marker\n");
return OPJ_FALSE;
}
l_cp->ppm_markers[l_Z_ppm].m_data_size = p_header_size;
memcpy(l_cp->ppm_markers[l_Z_ppm].m_data, p_header_data, p_header_size);
return OPJ_TRUE;
}
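/* Note on the allocation strategy above: the Zppm-indexed array grows on
demand because PPM markers may arrive in any order. Reading Zppm = 3
first allocates 4 slots; a later Zppm = 1 simply fills slot 1. Duplicate
Zppm values are rejected, since each slot holds at most one payload. */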
/**
* Merges all PPM markers read (Packed headers, main header)
*
* @param p_cp main coding parameters.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_merge_ppm ( opj_cp_t *p_cp, opj_event_mgr_t * p_manager )
{
OPJ_UINT32 i, l_ppm_data_size, l_N_ppm_remaining;
/* preconditions */
assert(p_cp != 00);
assert(p_manager != 00);
assert(p_cp->ppm_buffer == NULL);
if (p_cp->ppm == 0U) {
return OPJ_TRUE;
}
l_ppm_data_size = 0U;
l_N_ppm_remaining = 0U;
for (i = 0U; i < p_cp->ppm_markers_count; ++i) {
if (p_cp->ppm_markers[i].m_data != NULL) { /* standard doesn't seem to require contiguous Zppm */
OPJ_UINT32 l_N_ppm;
OPJ_UINT32 l_data_size = p_cp->ppm_markers[i].m_data_size;
const OPJ_BYTE* l_data = p_cp->ppm_markers[i].m_data;
if (l_N_ppm_remaining >= l_data_size) {
l_N_ppm_remaining -= l_data_size;
l_data_size = 0U;
} else {
l_data += l_N_ppm_remaining;
l_data_size -= l_N_ppm_remaining;
l_N_ppm_remaining = 0U;
}
if (l_data_size > 0U) {
do
{
/* read Nppm */
if (l_data_size < 4U) {
/* clean up to be done on l_cp destruction */
opj_event_msg(p_manager, EVT_ERROR, "Not enough bytes to read Nppm\n");
return OPJ_FALSE;
}
opj_read_bytes(l_data, &l_N_ppm, 4);
l_data+=4;
l_data_size-=4;
l_ppm_data_size += l_N_ppm; /* cannot overflow: at most 256 markers of at most 65536 bytes each, provided the PPM markers are not corrupted, which is checked elsewhere */
if (l_data_size >= l_N_ppm) {
l_data_size -= l_N_ppm;
l_data += l_N_ppm;
} else {
l_N_ppm_remaining = l_N_ppm - l_data_size;
l_data_size = 0U;
}
} while (l_data_size > 0U);
}
}
}
if (l_N_ppm_remaining != 0U) {
/* clean up to be done on l_cp destruction */
opj_event_msg(p_manager, EVT_ERROR, "Corrupted PPM markers\n");
return OPJ_FALSE;
}
p_cp->ppm_buffer = (OPJ_BYTE *) opj_malloc(l_ppm_data_size);
if (p_cp->ppm_buffer == 00) {
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to read PPM marker\n");
return OPJ_FALSE;
}
p_cp->ppm_len = l_ppm_data_size;
l_ppm_data_size = 0U;
l_N_ppm_remaining = 0U;
for (i = 0U; i < p_cp->ppm_markers_count; ++i) {
if (p_cp->ppm_markers[i].m_data != NULL) { /* standard doesn't seem to require contiguous Zppm */
OPJ_UINT32 l_N_ppm;
OPJ_UINT32 l_data_size = p_cp->ppm_markers[i].m_data_size;
const OPJ_BYTE* l_data = p_cp->ppm_markers[i].m_data;
if (l_N_ppm_remaining >= l_data_size) {
memcpy(p_cp->ppm_buffer + l_ppm_data_size, l_data, l_data_size);
l_ppm_data_size += l_data_size;
l_N_ppm_remaining -= l_data_size;
l_data_size = 0U;
} else {
memcpy(p_cp->ppm_buffer + l_ppm_data_size, l_data, l_N_ppm_remaining);
l_ppm_data_size += l_N_ppm_remaining;
l_data += l_N_ppm_remaining;
l_data_size -= l_N_ppm_remaining;
l_N_ppm_remaining = 0U;
}
if (l_data_size > 0U) {
do
{
/* read Nppm */
if (l_data_size < 4U) {
/* clean up to be done on l_cp destruction */
opj_event_msg(p_manager, EVT_ERROR, "Not enough bytes to read Nppm\n");
return OPJ_FALSE;
}
opj_read_bytes(l_data, &l_N_ppm, 4);
l_data+=4;
l_data_size-=4;
if (l_data_size >= l_N_ppm) {
memcpy(p_cp->ppm_buffer + l_ppm_data_size, l_data, l_N_ppm);
l_ppm_data_size += l_N_ppm;
l_data_size -= l_N_ppm;
l_data += l_N_ppm;
} else {
memcpy(p_cp->ppm_buffer + l_ppm_data_size, l_data, l_data_size);
l_ppm_data_size += l_data_size;
l_N_ppm_remaining = l_N_ppm - l_data_size;
l_data_size = 0U;
}
} while (l_data_size > 0U);
}
opj_free(p_cp->ppm_markers[i].m_data);
p_cp->ppm_markers[i].m_data = NULL;
p_cp->ppm_markers[i].m_data_size = 0U;
}
}
p_cp->ppm_data = p_cp->ppm_buffer;
p_cp->ppm_data_size = p_cp->ppm_len;
p_cp->ppm_markers_count = 0U;
opj_free(p_cp->ppm_markers);
p_cp->ppm_markers = NULL;
return OPJ_TRUE;
}
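/* Worked example of the merge above: with two PPM payloads
[Nppm = 300 | 200 data bytes] and [100 data bytes | Nppm = 8 | 8 data bytes],
the first pass leaves l_N_ppm_remaining = 100 after marker 0, consumes
those 100 bytes at the start of marker 1, then reads the next Nppm, for a
merged buffer of 300 + 8 = 308 bytes of packed packet headers. */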
/**
* Reads a PPT marker (Packed packet headers, tile-part header)
*
* @param p_header_data the data contained in the PPT box.
* @param p_j2k the jpeg2000 codec.
* @param p_header_size the size of the data contained in the PPT marker.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_ppt ( opj_j2k_t *p_j2k,
OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
opj_event_mgr_t * p_manager
)
{
opj_cp_t *l_cp = 00;
opj_tcp_t *l_tcp = 00;
OPJ_UINT32 l_Z_ppt;
/* preconditions */
assert(p_header_data != 00);
assert(p_j2k != 00);
assert(p_manager != 00);
/* We need to have the Z_ppt element + 1 byte of Ippt at minimum */
if (p_header_size < 2) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading PPT marker\n");
return OPJ_FALSE;
}
l_cp = &(p_j2k->m_cp);
if (l_cp->ppm){
opj_event_msg(p_manager, EVT_ERROR, "Error reading PPT marker: packet header have been previously found in the main header (PPM marker).\n");
return OPJ_FALSE;
}
l_tcp = &(l_cp->tcps[p_j2k->m_current_tile_number]);
l_tcp->ppt = 1;
opj_read_bytes(p_header_data,&l_Z_ppt,1); /* Z_ppt */
++p_header_data;
--p_header_size;
/* check allocation needed */
if (l_tcp->ppt_markers == NULL) { /* first PPT marker */
OPJ_UINT32 l_newCount = l_Z_ppt + 1U; /* can't overflow, l_Z_ppt is UINT8 */
assert(l_tcp->ppt_markers_count == 0U);
l_tcp->ppt_markers = (opj_ppx *) opj_calloc(l_newCount, sizeof(opj_ppx));
if (l_tcp->ppt_markers == NULL) {
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to read PPT marker\n");
return OPJ_FALSE;
}
l_tcp->ppt_markers_count = l_newCount;
} else if (l_tcp->ppt_markers_count <= l_Z_ppt) {
OPJ_UINT32 l_newCount = l_Z_ppt + 1U; /* can't overflow, l_Z_ppt is UINT8 */
opj_ppx *new_ppt_markers;
new_ppt_markers = (opj_ppx *) opj_realloc(l_tcp->ppt_markers, l_newCount * sizeof(opj_ppx));
if (new_ppt_markers == NULL) {
/* clean up to be done on l_tcp destruction */
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to read PPT marker\n");
return OPJ_FALSE;
}
l_tcp->ppt_markers = new_ppt_markers;
memset(l_tcp->ppt_markers + l_tcp->ppt_markers_count, 0, (l_newCount - l_tcp->ppt_markers_count) * sizeof(opj_ppx));
l_tcp->ppt_markers_count = l_newCount;
}
if (l_tcp->ppt_markers[l_Z_ppt].m_data != NULL) {
/* clean up to be done on l_tcp destruction */
opj_event_msg(p_manager, EVT_ERROR, "Zppt %u already read\n", l_Z_ppt);
return OPJ_FALSE;
}
l_tcp->ppt_markers[l_Z_ppt].m_data = opj_malloc(p_header_size);
if (l_tcp->ppt_markers[l_Z_ppt].m_data == NULL) {
/* clean up to be done on l_tcp destruction */
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to read PPT marker\n");
return OPJ_FALSE;
}
l_tcp->ppt_markers[l_Z_ppt].m_data_size = p_header_size;
memcpy(l_tcp->ppt_markers[l_Z_ppt].m_data, p_header_data, p_header_size);
return OPJ_TRUE;
}
/**
* Merges all PPT markers read (Packed packet headers, tile-part header)
*
* @param p_tcp the tile.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_merge_ppt(opj_tcp_t *p_tcp, opj_event_mgr_t * p_manager)
{
OPJ_UINT32 i, l_ppt_data_size;
/* preconditions */
assert(p_tcp != 00);
assert(p_manager != 00);
assert(p_tcp->ppt_buffer == NULL);
if (p_tcp->ppt == 0U) {
return OPJ_TRUE;
}
l_ppt_data_size = 0U;
for (i = 0U; i < p_tcp->ppt_markers_count; ++i) {
l_ppt_data_size += p_tcp->ppt_markers[i].m_data_size; /* can't overflow, max 256 markers of max 65536 bytes */
}
p_tcp->ppt_buffer = (OPJ_BYTE *) opj_malloc(l_ppt_data_size);
if (p_tcp->ppt_buffer == 00) {
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to read PPT marker\n");
return OPJ_FALSE;
}
p_tcp->ppt_len = l_ppt_data_size;
l_ppt_data_size = 0U;
for (i = 0U; i < p_tcp->ppt_markers_count; ++i) {
if (p_tcp->ppt_markers[i].m_data != NULL) { /* standard doesn't seem to require contiguous Zppt */
memcpy(p_tcp->ppt_buffer + l_ppt_data_size, p_tcp->ppt_markers[i].m_data, p_tcp->ppt_markers[i].m_data_size);
l_ppt_data_size += p_tcp->ppt_markers[i].m_data_size; /* can't overflow, max 256 markers of max 65536 bytes */
opj_free(p_tcp->ppt_markers[i].m_data);
p_tcp->ppt_markers[i].m_data = NULL;
p_tcp->ppt_markers[i].m_data_size = 0U;
}
}
p_tcp->ppt_markers_count = 0U;
opj_free(p_tcp->ppt_markers);
p_tcp->ppt_markers = NULL;
p_tcp->ppt_data = p_tcp->ppt_buffer;
p_tcp->ppt_data_size = p_tcp->ppt_len;
return OPJ_TRUE;
}
static OPJ_BOOL opj_j2k_write_tlm( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager
)
{
OPJ_BYTE * l_current_data = 00;
OPJ_UINT32 l_tlm_size;
/* preconditions */
assert(p_j2k != 00);
assert(p_manager != 00);
assert(p_stream != 00);
l_tlm_size = 6 + (5*p_j2k->m_specific_param.m_encoder.m_total_tile_parts);
if (l_tlm_size > p_j2k->m_specific_param.m_encoder.m_header_tile_data_size) {
OPJ_BYTE *new_header_tile_data = (OPJ_BYTE *) opj_realloc(p_j2k->m_specific_param.m_encoder.m_header_tile_data, l_tlm_size);
if (! new_header_tile_data) {
opj_free(p_j2k->m_specific_param.m_encoder.m_header_tile_data);
p_j2k->m_specific_param.m_encoder.m_header_tile_data = NULL;
p_j2k->m_specific_param.m_encoder.m_header_tile_data_size = 0;
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to write TLM marker\n");
return OPJ_FALSE;
}
p_j2k->m_specific_param.m_encoder.m_header_tile_data = new_header_tile_data;
p_j2k->m_specific_param.m_encoder.m_header_tile_data_size = l_tlm_size;
}
l_current_data = p_j2k->m_specific_param.m_encoder.m_header_tile_data;
/* change the way data is written to avoid seeking if possible */
/* TODO */
p_j2k->m_specific_param.m_encoder.m_tlm_start = opj_stream_tell(p_stream);
opj_write_bytes(l_current_data,J2K_MS_TLM,2); /* TLM */
l_current_data += 2;
opj_write_bytes(l_current_data,l_tlm_size-2,2); /* Ltlm */
l_current_data += 2;
opj_write_bytes(l_current_data,0,1); /* Ztlm=0*/
++l_current_data;
opj_write_bytes(l_current_data,0x50,1); /* Stlm ST=1(8bits-255 tiles max),SP=1(Ptlm=32bits) */
++l_current_data;
/* leave the 5 * p_j2k->m_specific_param.m_encoder.m_total_tile_parts remaining bytes untouched */
if (opj_stream_write_data(p_stream,p_j2k->m_specific_param.m_encoder.m_header_tile_data,l_tlm_size,p_manager) != l_tlm_size) {
return OPJ_FALSE;
}
return OPJ_TRUE;
}
static OPJ_BOOL opj_j2k_write_sot( opj_j2k_t *p_j2k,
OPJ_BYTE * p_data,
OPJ_UINT32 * p_data_written,
const opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager
)
{
/* preconditions */
assert(p_j2k != 00);
assert(p_manager != 00);
assert(p_stream != 00);
opj_write_bytes(p_data,J2K_MS_SOT,2); /* SOT */
p_data += 2;
opj_write_bytes(p_data,10,2); /* Lsot */
p_data += 2;
opj_write_bytes(p_data, p_j2k->m_current_tile_number,2); /* Isot */
p_data += 2;
/* Psot */
p_data += 4;
opj_write_bytes(p_data, p_j2k->m_specific_param.m_encoder.m_current_tile_part_number,1); /* TPsot */
++p_data;
opj_write_bytes(p_data, p_j2k->m_cp.tcps[p_j2k->m_current_tile_number].m_nb_tile_parts,1); /* TNsot */
++p_data;
/* UniPG>> */
#ifdef USE_JPWL
/* update markers struct */
/*
OPJ_BOOL res = j2k_add_marker(p_j2k->cstr_info, J2K_MS_SOT, p_j2k->sot_start, len + 2);
*/
assert( 0 && "TODO" );
#endif /* USE_JPWL */
* p_data_written = 12;
return OPJ_TRUE;
}
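/* Byte layout of the SOT marker segment written above (12 bytes total):
SOT (2) | Lsot = 10 (2) | Isot (2) | Psot (4) | TPsot (1) | TNsot (1).
Psot is skipped here and left for the caller to fill in once the total
tile-part length is known. */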
static OPJ_BOOL opj_j2k_get_sot_values(OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
OPJ_UINT32* p_tile_no,
OPJ_UINT32* p_tot_len,
OPJ_UINT32* p_current_part,
OPJ_UINT32* p_num_parts,
opj_event_mgr_t * p_manager )
{
/* preconditions */
assert(p_header_data != 00);
assert(p_manager != 00);
/* Size of this marker is fixed = 12 (we have already read marker and its size)*/
if (p_header_size != 8) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading SOT marker\n");
return OPJ_FALSE;
}
opj_read_bytes(p_header_data,p_tile_no,2); /* Isot */
p_header_data+=2;
opj_read_bytes(p_header_data,p_tot_len,4); /* Psot */
p_header_data+=4;
opj_read_bytes(p_header_data,p_current_part,1); /* TPsot */
++p_header_data;
opj_read_bytes(p_header_data,p_num_parts ,1); /* TNsot */
++p_header_data;
return OPJ_TRUE;
}
static OPJ_BOOL opj_j2k_read_sot ( opj_j2k_t *p_j2k,
OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
opj_event_mgr_t * p_manager )
{
opj_cp_t *l_cp = 00;
opj_tcp_t *l_tcp = 00;
OPJ_UINT32 l_tot_len, l_num_parts = 0;
OPJ_UINT32 l_current_part;
OPJ_UINT32 l_tile_x,l_tile_y;
/* preconditions */
assert(p_j2k != 00);
assert(p_manager != 00);
if (! opj_j2k_get_sot_values(p_header_data, p_header_size, &(p_j2k->m_current_tile_number), &l_tot_len, &l_current_part, &l_num_parts, p_manager)) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading SOT marker\n");
return OPJ_FALSE;
}
l_cp = &(p_j2k->m_cp);
/* testcase 2.pdf.SIGFPE.706.1112 */
if (p_j2k->m_current_tile_number >= l_cp->tw * l_cp->th) {
opj_event_msg(p_manager, EVT_ERROR, "Invalid tile number %d\n", p_j2k->m_current_tile_number);
return OPJ_FALSE;
}
l_tcp = &l_cp->tcps[p_j2k->m_current_tile_number];
l_tile_x = p_j2k->m_current_tile_number % l_cp->tw;
l_tile_y = p_j2k->m_current_tile_number / l_cp->tw;
#ifdef USE_JPWL
if (l_cp->correct) {
OPJ_UINT32 tileno = p_j2k->m_current_tile_number;
static OPJ_UINT32 backup_tileno = 0;
/* tileno is negative or larger than the number of tiles!!! */
if (tileno > (l_cp->tw * l_cp->th)) {
opj_event_msg(p_manager, EVT_ERROR,
"JPWL: bad tile number (%d out of a maximum of %d)\n",
tileno, (l_cp->tw * l_cp->th));
if (!JPWL_ASSUME) {
opj_event_msg(p_manager, EVT_ERROR, "JPWL: giving up\n");
return OPJ_FALSE;
}
/* we try to correct */
tileno = backup_tileno;
opj_event_msg(p_manager, EVT_WARNING, "- trying to adjust this\n"
"- setting tile number to %d\n",
tileno);
}
/* keep your private count of tiles */
backup_tileno++;
};
#endif /* USE_JPWL */
/* look for the tile in the list of already processed tiles (in parts). */
/* An optimization is possible here with a more complex data structure and by removing tiles, */
/* since the time taken by this function can only grow over time. */
/* Psot must either be zero or lie in [14, 2^32 - 1] */
if ((l_tot_len != 0) && (l_tot_len < 14))
{
if (l_tot_len == 12) /* MSD: special case for the PHR data, which are read by kakadu */
{
opj_event_msg(p_manager, EVT_WARNING, "Empty SOT marker detected: Psot=%d.\n", l_tot_len);
}
else
{
opj_event_msg(p_manager, EVT_ERROR, "Psot value does not conform to the JPEG 2000 standard: %d.\n", l_tot_len);
return OPJ_FALSE;
}
}
#ifdef USE_JPWL
if (l_cp->correct) {
/* totlen is negative or larger than the bytes left!!! */
if (/*(l_tot_len < 0) ||*/ (l_tot_len > p_header_size ) ) { /* FIXME it seems correct; for info in V1 -> (p_stream_numbytesleft(p_stream) + 8))) { */
opj_event_msg(p_manager, EVT_ERROR,
"JPWL: bad tile byte size (%d bytes against %d bytes left)\n",
l_tot_len, p_header_size ); /* FIXME it seems correct; for info in V1 -> p_stream_numbytesleft(p_stream) + 8); */
if (!JPWL_ASSUME) {
opj_event_msg(p_manager, EVT_ERROR, "JPWL: giving up\n");
return OPJ_FALSE;
}
/* we try to correct */
l_tot_len = 0;
opj_event_msg(p_manager, EVT_WARNING, "- trying to adjust this\n"
"- setting Psot to %d => assuming it is the last tile\n",
l_tot_len);
}
};
#endif /* USE_JPWL */
/* Ref A.4.2: Psot may equal zero if this is the last tile-part of the codestream. */
if (!l_tot_len) {
opj_event_msg(p_manager, EVT_INFO, "Psot value of the current tile-part is equal to zero; "
"assuming it is the last tile-part of the codestream.\n");
p_j2k->m_specific_param.m_decoder.m_last_tile_part = 1;
}
if (l_num_parts != 0) { /* Number of tile-part header is provided by this tile-part header */
l_num_parts += p_j2k->m_specific_param.m_decoder.m_nb_tile_parts_correction;
/* Useful to handle the case of the textGBR.jp2 file, because two values of TNsot are allowed: the correct number of
* tile-parts for that tile, and zero (A.4.2 of 15444-1 : 2002). */
if (l_tcp->m_nb_tile_parts) {
if (l_current_part >= l_tcp->m_nb_tile_parts){
opj_event_msg(p_manager, EVT_ERROR, "In SOT marker, TPSot (%d) is not valid regards to the current "
"number of tile-part (%d), giving up\n", l_current_part, l_tcp->m_nb_tile_parts );
p_j2k->m_specific_param.m_decoder.m_last_tile_part = 1;
return OPJ_FALSE;
}
}
if( l_current_part >= l_num_parts ) {
/* testcase 451.pdf.SIGSEGV.ce9.3723 */
opj_event_msg(p_manager, EVT_ERROR, "In SOT marker, TPSot (%d) is not valid regards to the current "
"number of tile-part (header) (%d), giving up\n", l_current_part, l_num_parts );
p_j2k->m_specific_param.m_decoder.m_last_tile_part = 1;
return OPJ_FALSE;
}
l_tcp->m_nb_tile_parts = l_num_parts;
}
/* If the number of tile-part headers is known, check whether we have just read the last one */
if (l_tcp->m_nb_tile_parts) {
if (l_tcp->m_nb_tile_parts == (l_current_part+1)) {
p_j2k->m_specific_param.m_decoder.m_can_decode = 1; /* Process the last tile-part header*/
}
}
if (!p_j2k->m_specific_param.m_decoder.m_last_tile_part){
/* Keep the size of data to skip after this marker */
p_j2k->m_specific_param.m_decoder.m_sot_length = l_tot_len - 12; /* SOT_marker_size = 12 */
}
else {
/* FIXME: need to be computed from the number of bytes remaining in the codestream */
p_j2k->m_specific_param.m_decoder.m_sot_length = 0;
}
p_j2k->m_specific_param.m_decoder.m_state = J2K_STATE_TPH;
/* Check whether the current tile is outside the area we want to decode, or does not match the requested tile index */
if (p_j2k->m_specific_param.m_decoder.m_tile_ind_to_dec == -1) {
p_j2k->m_specific_param.m_decoder.m_skip_data =
(l_tile_x < p_j2k->m_specific_param.m_decoder.m_start_tile_x)
|| (l_tile_x >= p_j2k->m_specific_param.m_decoder.m_end_tile_x)
|| (l_tile_y < p_j2k->m_specific_param.m_decoder.m_start_tile_y)
|| (l_tile_y >= p_j2k->m_specific_param.m_decoder.m_end_tile_y);
}
else {
assert( p_j2k->m_specific_param.m_decoder.m_tile_ind_to_dec >= 0 );
p_j2k->m_specific_param.m_decoder.m_skip_data =
(p_j2k->m_current_tile_number != (OPJ_UINT32)p_j2k->m_specific_param.m_decoder.m_tile_ind_to_dec);
}
/* Index */
if (p_j2k->cstr_index)
{
assert(p_j2k->cstr_index->tile_index != 00);
p_j2k->cstr_index->tile_index[p_j2k->m_current_tile_number].tileno = p_j2k->m_current_tile_number;
p_j2k->cstr_index->tile_index[p_j2k->m_current_tile_number].current_tpsno = l_current_part;
if (l_num_parts != 0){
p_j2k->cstr_index->tile_index[p_j2k->m_current_tile_number].nb_tps = l_num_parts;
p_j2k->cstr_index->tile_index[p_j2k->m_current_tile_number].current_nb_tps = l_num_parts;
if (!p_j2k->cstr_index->tile_index[p_j2k->m_current_tile_number].tp_index) {
p_j2k->cstr_index->tile_index[p_j2k->m_current_tile_number].tp_index =
(opj_tp_index_t*)opj_calloc(l_num_parts, sizeof(opj_tp_index_t));
if (!p_j2k->cstr_index->tile_index[p_j2k->m_current_tile_number].tp_index) {
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to read SOT marker. Tile index allocation failed\n");
return OPJ_FALSE;
}
}
else {
opj_tp_index_t *new_tp_index = (opj_tp_index_t *) opj_realloc(
p_j2k->cstr_index->tile_index[p_j2k->m_current_tile_number].tp_index, l_num_parts* sizeof(opj_tp_index_t));
if (! new_tp_index) {
opj_free(p_j2k->cstr_index->tile_index[p_j2k->m_current_tile_number].tp_index);
p_j2k->cstr_index->tile_index[p_j2k->m_current_tile_number].tp_index = NULL;
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to read SOT marker. Tile index allocation failed\n");
return OPJ_FALSE;
}
p_j2k->cstr_index->tile_index[p_j2k->m_current_tile_number].tp_index = new_tp_index;
}
}
else {
if (!p_j2k->cstr_index->tile_index[p_j2k->m_current_tile_number].tp_index) {
p_j2k->cstr_index->tile_index[p_j2k->m_current_tile_number].current_nb_tps = 10;
p_j2k->cstr_index->tile_index[p_j2k->m_current_tile_number].tp_index =
(opj_tp_index_t*)opj_calloc( p_j2k->cstr_index->tile_index[p_j2k->m_current_tile_number].current_nb_tps,
sizeof(opj_tp_index_t));
if (!p_j2k->cstr_index->tile_index[p_j2k->m_current_tile_number].tp_index) {
p_j2k->cstr_index->tile_index[p_j2k->m_current_tile_number].current_nb_tps = 0;
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to read SOT marker. Tile index allocation failed\n");
return OPJ_FALSE;
}
}
if ( l_current_part >= p_j2k->cstr_index->tile_index[p_j2k->m_current_tile_number].current_nb_tps ){
opj_tp_index_t *new_tp_index;
p_j2k->cstr_index->tile_index[p_j2k->m_current_tile_number].current_nb_tps = l_current_part + 1;
new_tp_index = (opj_tp_index_t *) opj_realloc(
p_j2k->cstr_index->tile_index[p_j2k->m_current_tile_number].tp_index,
p_j2k->cstr_index->tile_index[p_j2k->m_current_tile_number].current_nb_tps * sizeof(opj_tp_index_t));
if (! new_tp_index) {
opj_free(p_j2k->cstr_index->tile_index[p_j2k->m_current_tile_number].tp_index);
p_j2k->cstr_index->tile_index[p_j2k->m_current_tile_number].tp_index = NULL;
p_j2k->cstr_index->tile_index[p_j2k->m_current_tile_number].current_nb_tps = 0;
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to read SOT marker. Tile index allocation failed\n");
return OPJ_FALSE;
}
p_j2k->cstr_index->tile_index[p_j2k->m_current_tile_number].tp_index = new_tp_index;
}
}
}
/* FIXME move this into a separate method called before reading any SOT, remove the part about the main_end header, and use an index struct inside p_j2k */
/* if (p_j2k->cstr_info) {
if (l_tcp->first) {
if (tileno == 0) {
p_j2k->cstr_info->main_head_end = p_stream_tell(p_stream) - 13;
}
p_j2k->cstr_info->tile[tileno].tileno = tileno;
p_j2k->cstr_info->tile[tileno].start_pos = p_stream_tell(p_stream) - 12;
p_j2k->cstr_info->tile[tileno].end_pos = p_j2k->cstr_info->tile[tileno].start_pos + totlen - 1;
p_j2k->cstr_info->tile[tileno].num_tps = numparts;
if (numparts) {
p_j2k->cstr_info->tile[tileno].tp = (opj_tp_info_t *) opj_malloc(numparts * sizeof(opj_tp_info_t));
}
else {
p_j2k->cstr_info->tile[tileno].tp = (opj_tp_info_t *) opj_malloc(10 * sizeof(opj_tp_info_t)); // Fixme (10)
}
}
else {
p_j2k->cstr_info->tile[tileno].end_pos += totlen;
}
p_j2k->cstr_info->tile[tileno].tp[partno].tp_start_pos = p_stream_tell(p_stream) - 12;
p_j2k->cstr_info->tile[tileno].tp[partno].tp_end_pos =
p_j2k->cstr_info->tile[tileno].tp[partno].tp_start_pos + totlen - 1;
}*/
return OPJ_TRUE;
}
static OPJ_BOOL opj_j2k_write_sod( opj_j2k_t *p_j2k,
opj_tcd_t * p_tile_coder,
OPJ_BYTE * p_data,
OPJ_UINT32 * p_data_written,
OPJ_UINT32 p_total_data_size,
const opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager
)
{
opj_codestream_info_t *l_cstr_info = 00;
OPJ_UINT32 l_remaining_data;
/* preconditions */
assert(p_j2k != 00);
assert(p_manager != 00);
assert(p_stream != 00);
opj_write_bytes(p_data,J2K_MS_SOD,2); /* SOD */
p_data += 2;
/* make room for the EOC marker */
l_remaining_data = p_total_data_size - 4;
/* update tile coder */
p_tile_coder->tp_num = p_j2k->m_specific_param.m_encoder.m_current_poc_tile_part_number ;
p_tile_coder->cur_tp_num = p_j2k->m_specific_param.m_encoder.m_current_tile_part_number;
/* INDEX >> */
/* TODO mergeV2: check this part which use cstr_info */
/*l_cstr_info = p_j2k->cstr_info;
if (l_cstr_info) {
if (!p_j2k->m_specific_param.m_encoder.m_current_tile_part_number ) {
//TODO cstr_info->tile[p_j2k->m_current_tile_number].end_header = p_stream_tell(p_stream) + p_j2k->pos_correction - 1;
l_cstr_info->tile[p_j2k->m_current_tile_number].tileno = p_j2k->m_current_tile_number;
}
else {*/
/*
TODO
if
(cstr_info->tile[p_j2k->m_current_tile_number].packet[cstr_info->packno - 1].end_pos < p_stream_tell(p_stream))
{
cstr_info->tile[p_j2k->m_current_tile_number].packet[cstr_info->packno].start_pos = p_stream_tell(p_stream);
}*/
/*}*/
/* UniPG>> */
#ifdef USE_JPWL
/* update markers struct */
/*OPJ_BOOL res = j2k_add_marker(p_j2k->cstr_info, J2K_MS_SOD, p_j2k->sod_start, 2);
*/
assert( 0 && "TODO" );
#endif /* USE_JPWL */
/* <<UniPG */
/*}*/
/* << INDEX */
if (p_j2k->m_specific_param.m_encoder.m_current_tile_part_number == 0) {
p_tile_coder->tcd_image->tiles->packno = 0;
if (l_cstr_info) {
l_cstr_info->packno = 0;
}
}
*p_data_written = 0;
if (! opj_tcd_encode_tile(p_tile_coder, p_j2k->m_current_tile_number, p_data, p_data_written, l_remaining_data , l_cstr_info)) {
opj_event_msg(p_manager, EVT_ERROR, "Cannot encode tile\n");
return OPJ_FALSE;
}
*p_data_written += 2;
return OPJ_TRUE;
}
static OPJ_BOOL opj_j2k_read_sod (opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager
)
{
OPJ_SIZE_T l_current_read_size;
opj_codestream_index_t * l_cstr_index = 00;
OPJ_BYTE ** l_current_data = 00;
opj_tcp_t * l_tcp = 00;
OPJ_UINT32 * l_tile_len = 00;
OPJ_BOOL l_sot_length_pb_detected = OPJ_FALSE;
/* preconditions */
assert(p_j2k != 00);
assert(p_manager != 00);
assert(p_stream != 00);
l_tcp = &(p_j2k->m_cp.tcps[p_j2k->m_current_tile_number]);
if (p_j2k->m_specific_param.m_decoder.m_last_tile_part) {
/* opj_stream_get_number_byte_left returns OPJ_OFF_T,
* but we are in the last tile part,
* so its result will fit in an OPJ_UINT32 unless we find
* a file with a single tile part of more than 4 GB... */
p_j2k->m_specific_param.m_decoder.m_sot_length = (OPJ_UINT32)(opj_stream_get_number_byte_left(p_stream) - 2);
}
else {
/* Guard against underflowing the OPJ_UINT32 */
if (p_j2k->m_specific_param.m_decoder.m_sot_length >= 2) {
p_j2k->m_specific_param.m_decoder.m_sot_length -= 2;
}
else {
/* MSD: case left empty to support the empty SOT marker (PHR data) */
}
}
l_current_data = &(l_tcp->m_data);
l_tile_len = &l_tcp->m_data_size;
/* Patch to support new PHR data */
if (p_j2k->m_specific_param.m_decoder.m_sot_length) {
/* If we are here, we'll try to read the data after allocation */
/* Check enough bytes left in stream before allocation */
if ((OPJ_OFF_T)p_j2k->m_specific_param.m_decoder.m_sot_length > opj_stream_get_number_byte_left(p_stream)) {
opj_event_msg(p_manager, EVT_ERROR, "Tile part length size inconsistent with stream length\n");
return OPJ_FALSE;
}
if (! *l_current_data) {
/* LH: oddly enough, in this path, l_tile_len != 0.
* TODO: if this were consistent, we could simplify the code to only use realloc(), as realloc(NULL, ...) behaves like malloc(...).
*/
*l_current_data = (OPJ_BYTE*) opj_malloc(p_j2k->m_specific_param.m_decoder.m_sot_length);
}
else {
OPJ_BYTE *l_new_current_data = (OPJ_BYTE *) opj_realloc(*l_current_data, *l_tile_len + p_j2k->m_specific_param.m_decoder.m_sot_length);
if (! l_new_current_data) {
opj_free(*l_current_data);
/* nothing more to do: *l_current_data is set to NULL just below,
we then enter the error path,
and the actual tile_len is only updated (committed) at the end of the
function. */
}
*l_current_data = l_new_current_data;
}
if (*l_current_data == 00) {
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to decode tile\n");
return OPJ_FALSE;
}
}
else {
l_sot_length_pb_detected = OPJ_TRUE;
}
/* Index */
l_cstr_index = p_j2k->cstr_index;
if (l_cstr_index) {
OPJ_OFF_T l_current_pos = opj_stream_tell(p_stream) - 2;
OPJ_UINT32 l_current_tile_part = l_cstr_index->tile_index[p_j2k->m_current_tile_number].current_tpsno;
l_cstr_index->tile_index[p_j2k->m_current_tile_number].tp_index[l_current_tile_part].end_header =
l_current_pos;
l_cstr_index->tile_index[p_j2k->m_current_tile_number].tp_index[l_current_tile_part].end_pos =
l_current_pos + p_j2k->m_specific_param.m_decoder.m_sot_length + 2;
if (OPJ_FALSE == opj_j2k_add_tlmarker(p_j2k->m_current_tile_number,
l_cstr_index,
J2K_MS_SOD,
l_current_pos,
p_j2k->m_specific_param.m_decoder.m_sot_length + 2)) {
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to add tl marker\n");
return OPJ_FALSE;
}
/*l_cstr_index->packno = 0;*/
}
/* Patch to support new PHR data */
if (!l_sot_length_pb_detected) {
l_current_read_size = opj_stream_read_data(
p_stream,
*l_current_data + *l_tile_len,
p_j2k->m_specific_param.m_decoder.m_sot_length,
p_manager);
}
else
{
l_current_read_size = 0;
}
if (l_current_read_size != p_j2k->m_specific_param.m_decoder.m_sot_length) {
p_j2k->m_specific_param.m_decoder.m_state = J2K_STATE_NEOC;
}
else {
p_j2k->m_specific_param.m_decoder.m_state = J2K_STATE_TPHSOT;
}
*l_tile_len += (OPJ_UINT32)l_current_read_size;
return OPJ_TRUE;
}
static OPJ_BOOL opj_j2k_write_rgn(opj_j2k_t *p_j2k,
OPJ_UINT32 p_tile_no,
OPJ_UINT32 p_comp_no,
OPJ_UINT32 nb_comps,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager
)
{
OPJ_BYTE * l_current_data = 00;
OPJ_UINT32 l_rgn_size;
opj_cp_t *l_cp = 00;
opj_tcp_t *l_tcp = 00;
opj_tccp_t *l_tccp = 00;
OPJ_UINT32 l_comp_room;
/* preconditions */
assert(p_j2k != 00);
assert(p_manager != 00);
assert(p_stream != 00);
l_cp = &(p_j2k->m_cp);
l_tcp = &l_cp->tcps[p_tile_no];
l_tccp = &l_tcp->tccps[p_comp_no];
if (nb_comps <= 256) {
l_comp_room = 1;
}
else {
l_comp_room = 2;
}
l_rgn_size = 6 + l_comp_room;
l_current_data = p_j2k->m_specific_param.m_encoder.m_header_tile_data;
opj_write_bytes(l_current_data,J2K_MS_RGN,2); /* RGN */
l_current_data += 2;
opj_write_bytes(l_current_data,l_rgn_size-2,2); /* Lrgn */
l_current_data += 2;
opj_write_bytes(l_current_data,p_comp_no,l_comp_room); /* Crgn */
l_current_data+=l_comp_room;
opj_write_bytes(l_current_data, 0,1); /* Srgn */
++l_current_data;
opj_write_bytes(l_current_data, (OPJ_UINT32)l_tccp->roishift,1); /* SPrgn */
++l_current_data;
if (opj_stream_write_data(p_stream,p_j2k->m_specific_param.m_encoder.m_header_tile_data,l_rgn_size,p_manager) != l_rgn_size) {
return OPJ_FALSE;
}
return OPJ_TRUE;
}
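/* Byte layout of the RGN marker segment written above (6 + l_comp_room
bytes): RGN (2) | Lrgn (2) | Crgn (l_comp_room) | Srgn = 0, i.e. implicit
ROI (1) | SPrgn = roishift (1). */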
static OPJ_BOOL opj_j2k_write_eoc( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager
)
{
/* preconditions */
assert(p_j2k != 00);
assert(p_manager != 00);
assert(p_stream != 00);
opj_write_bytes(p_j2k->m_specific_param.m_encoder.m_header_tile_data,J2K_MS_EOC,2); /* EOC */
/* UniPG>> */
#ifdef USE_JPWL
/* update markers struct */
/*
OPJ_BOOL res = j2k_add_marker(p_j2k->cstr_info, J2K_MS_EOC, p_stream_tell(p_stream) - 2, 2);
*/
#endif /* USE_JPWL */
if ( opj_stream_write_data(p_stream,p_j2k->m_specific_param.m_encoder.m_header_tile_data,2,p_manager) != 2) {
return OPJ_FALSE;
}
if ( ! opj_stream_flush(p_stream,p_manager) ) {
return OPJ_FALSE;
}
return OPJ_TRUE;
}
/**
* Reads a RGN marker (Region Of Interest)
*
* @param p_header_data the data contained in the RGN box.
* @param p_j2k the jpeg2000 codec.
* @param p_header_size the size of the data contained in the RGN marker.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_rgn (opj_j2k_t *p_j2k,
OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
opj_event_mgr_t * p_manager
)
{
OPJ_UINT32 l_nb_comp;
opj_image_t * l_image = 00;
opj_cp_t *l_cp = 00;
opj_tcp_t *l_tcp = 00;
OPJ_UINT32 l_comp_room, l_comp_no, l_roi_sty;
/* preconditions*/
assert(p_header_data != 00);
assert(p_j2k != 00);
assert(p_manager != 00);
l_image = p_j2k->m_private_image;
l_nb_comp = l_image->numcomps;
if (l_nb_comp <= 256) {
l_comp_room = 1;
}
else {
l_comp_room = 2;
}
if (p_header_size != 2 + l_comp_room) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading RGN marker\n");
return OPJ_FALSE;
}
l_cp = &(p_j2k->m_cp);
l_tcp = (p_j2k->m_specific_param.m_decoder.m_state == J2K_STATE_TPH) ?
&l_cp->tcps[p_j2k->m_current_tile_number] :
p_j2k->m_specific_param.m_decoder.m_default_tcp;
opj_read_bytes(p_header_data,&l_comp_no,l_comp_room); /* Crgn */
p_header_data+=l_comp_room;
opj_read_bytes(p_header_data,&l_roi_sty,1); /* Srgn */
++p_header_data;
#ifdef USE_JPWL
if (l_cp->correct) {
/* component number is out of range!!! */
if (l_comp_no >= l_nb_comp) {
opj_event_msg(p_manager, EVT_ERROR,
"JPWL: bad component number in RGN (%d when there are only %d)\n",
l_comp_no, l_nb_comp);
if (!JPWL_ASSUME) {
opj_event_msg(p_manager, EVT_ERROR, "JPWL: giving up\n");
return OPJ_FALSE;
}
}
};
#endif /* USE_JPWL */
/* testcase 3635.pdf.asan.77.2930 */
if (l_comp_no >= l_nb_comp) {
opj_event_msg(p_manager, EVT_ERROR,
"bad component number in RGN (%d when there are only %d)\n",
l_comp_no, l_nb_comp);
return OPJ_FALSE;
}
opj_read_bytes(p_header_data,(OPJ_UINT32 *) (&(l_tcp->tccps[l_comp_no].roishift)),1); /* SPrgn */
++p_header_data;
return OPJ_TRUE;
}
static OPJ_FLOAT32 opj_j2k_get_tp_stride (opj_tcp_t * p_tcp)
{
return (OPJ_FLOAT32) ((p_tcp->m_nb_tile_parts - 1) * 14);
}
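/* The 14-byte stride above is the per-tile-part marker overhead: one SOT
marker segment (12 bytes) plus one SOD marker (2 bytes). Only the parts
beyond the first add this overhead, hence (m_nb_tile_parts - 1). */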
static OPJ_FLOAT32 opj_j2k_get_default_stride (opj_tcp_t * p_tcp)
{
(void)p_tcp;
return 0;
}
static OPJ_BOOL opj_j2k_update_rates( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager )
{
opj_cp_t * l_cp = 00;
opj_image_t * l_image = 00;
opj_tcp_t * l_tcp = 00;
opj_image_comp_t * l_img_comp = 00;
OPJ_UINT32 i,j,k;
OPJ_INT32 l_x0,l_y0,l_x1,l_y1;
OPJ_FLOAT32 * l_rates = 0;
OPJ_FLOAT32 l_sot_remove;
OPJ_UINT32 l_bits_empty, l_size_pixel;
OPJ_UINT32 l_tile_size = 0;
OPJ_UINT32 l_last_res;
OPJ_FLOAT32 (* l_tp_stride_func)(opj_tcp_t *) = 00;
/* preconditions */
assert(p_j2k != 00);
assert(p_manager != 00);
assert(p_stream != 00);
l_cp = &(p_j2k->m_cp);
l_image = p_j2k->m_private_image;
l_tcp = l_cp->tcps;
l_bits_empty = 8 * l_image->comps->dx * l_image->comps->dy;
l_size_pixel = l_image->numcomps * l_image->comps->prec;
l_sot_remove = (OPJ_FLOAT32) opj_stream_tell(p_stream) / (OPJ_FLOAT32)(l_cp->th * l_cp->tw);
if (l_cp->m_specific_param.m_enc.m_tp_on) {
l_tp_stride_func = opj_j2k_get_tp_stride;
}
else {
l_tp_stride_func = opj_j2k_get_default_stride;
}
for (i=0;i<l_cp->th;++i) {
for (j=0;j<l_cp->tw;++j) {
OPJ_FLOAT32 l_offset = (OPJ_FLOAT32)(*l_tp_stride_func)(l_tcp) / (OPJ_FLOAT32)l_tcp->numlayers;
/* 4 borders of the tile rescale on the image if necessary */
l_x0 = opj_int_max((OPJ_INT32)(l_cp->tx0 + j * l_cp->tdx), (OPJ_INT32)l_image->x0);
l_y0 = opj_int_max((OPJ_INT32)(l_cp->ty0 + i * l_cp->tdy), (OPJ_INT32)l_image->y0);
l_x1 = opj_int_min((OPJ_INT32)(l_cp->tx0 + (j + 1) * l_cp->tdx), (OPJ_INT32)l_image->x1);
l_y1 = opj_int_min((OPJ_INT32)(l_cp->ty0 + (i + 1) * l_cp->tdy), (OPJ_INT32)l_image->y1);
l_rates = l_tcp->rates;
/* Modification of the RATE >> */
if (*l_rates) {
*l_rates = (( (OPJ_FLOAT32) (l_size_pixel * (OPJ_UINT32)(l_x1 - l_x0) * (OPJ_UINT32)(l_y1 - l_y0)))
/
((*l_rates) * (OPJ_FLOAT32)l_bits_empty)
)
-
l_offset;
}
++l_rates;
for (k = 1; k < l_tcp->numlayers; ++k) {
if (*l_rates) {
*l_rates = (( (OPJ_FLOAT32) (l_size_pixel * (OPJ_UINT32)(l_x1 - l_x0) * (OPJ_UINT32)(l_y1 - l_y0)))
/
((*l_rates) * (OPJ_FLOAT32)l_bits_empty)
)
-
l_offset;
}
++l_rates;
}
++l_tcp;
}
}
l_tcp = l_cp->tcps;
for (i=0;i<l_cp->th;++i) {
for (j=0;j<l_cp->tw;++j) {
l_rates = l_tcp->rates;
if (*l_rates) {
*l_rates -= l_sot_remove;
if (*l_rates < 30) {
*l_rates = 30;
}
}
++l_rates;
l_last_res = l_tcp->numlayers - 1;
for (k = 1; k < l_last_res; ++k) {
if (*l_rates) {
*l_rates -= l_sot_remove;
if (*l_rates < *(l_rates - 1) + 10) {
*l_rates = (*(l_rates - 1)) + 20;
}
}
++l_rates;
}
if (*l_rates) {
*l_rates -= (l_sot_remove + 2.f);
if (*l_rates < *(l_rates - 1) + 10) {
*l_rates = (*(l_rates - 1)) + 20;
}
}
++l_tcp;
}
}
l_img_comp = l_image->comps;
l_tile_size = 0;
for (i=0;i<l_image->numcomps;++i) {
l_tile_size += ( opj_uint_ceildiv(l_cp->tdx,l_img_comp->dx)
*
opj_uint_ceildiv(l_cp->tdy,l_img_comp->dy)
*
l_img_comp->prec
);
++l_img_comp;
}
l_tile_size = (OPJ_UINT32) (l_tile_size * 0.1625); /* 1.3/8 = 0.1625 */
l_tile_size += opj_j2k_get_specific_header_sizes(p_j2k);
p_j2k->m_specific_param.m_encoder.m_encoded_tile_size = l_tile_size;
p_j2k->m_specific_param.m_encoder.m_encoded_tile_data =
(OPJ_BYTE *) opj_malloc(p_j2k->m_specific_param.m_encoder.m_encoded_tile_size);
if (p_j2k->m_specific_param.m_encoder.m_encoded_tile_data == 00) {
return OPJ_FALSE;
}
if (OPJ_IS_CINEMA(l_cp->rsiz)) {
p_j2k->m_specific_param.m_encoder.m_tlm_sot_offsets_buffer =
(OPJ_BYTE *) opj_malloc(5*p_j2k->m_specific_param.m_encoder.m_total_tile_parts);
if (! p_j2k->m_specific_param.m_encoder.m_tlm_sot_offsets_buffer) {
return OPJ_FALSE;
}
p_j2k->m_specific_param.m_encoder.m_tlm_sot_offsets_current =
p_j2k->m_specific_param.m_encoder.m_tlm_sot_offsets_buffer;
}
return OPJ_TRUE;
}
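/* Worked example of the rate rescaling above, assuming a single 8-bit
component with dx = dy = 1 and a 64x64 tile: l_size_pixel = 8 and
l_bits_empty = 8, so a user rate of 2.0 becomes
(8 * 64 * 64) / (2.0 * 8) - l_offset = 2048 - l_offset, i.e. the target
compressed size of the tile in bytes (raw size divided by the rate). */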
#if 0
static OPJ_BOOL opj_j2k_read_eoc ( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager )
{
OPJ_UINT32 i;
opj_tcd_t * l_tcd = 00;
OPJ_UINT32 l_nb_tiles;
opj_tcp_t * l_tcp = 00;
OPJ_BOOL l_success;
/* preconditions */
assert(p_j2k != 00);
assert(p_manager != 00);
assert(p_stream != 00);
l_nb_tiles = p_j2k->m_cp.th * p_j2k->m_cp.tw;
l_tcp = p_j2k->m_cp.tcps;
l_tcd = opj_tcd_create(OPJ_TRUE);
if (l_tcd == 00) {
opj_event_msg(p_manager, EVT_ERROR, "Cannot decode tile, memory error\n");
return OPJ_FALSE;
}
for (i = 0; i < l_nb_tiles; ++i) {
if (l_tcp->m_data) {
if (! opj_tcd_init_decode_tile(l_tcd, i)) {
opj_tcd_destroy(l_tcd);
opj_event_msg(p_manager, EVT_ERROR, "Cannot decode tile, memory error\n");
return OPJ_FALSE;
}
l_success = opj_tcd_decode_tile(l_tcd, l_tcp->m_data, l_tcp->m_data_size, i, p_j2k->cstr_index);
/* cleanup */
if (! l_success) {
p_j2k->m_specific_param.m_decoder.m_state |= J2K_STATE_ERR;
break;
}
}
opj_j2k_tcp_destroy(l_tcp);
++l_tcp;
}
opj_tcd_destroy(l_tcd);
return OPJ_TRUE;
}
#endif
static OPJ_BOOL opj_j2k_get_end_header(opj_j2k_t *p_j2k,
struct opj_stream_private *p_stream,
struct opj_event_mgr * p_manager )
{
/* preconditions */
assert(p_j2k != 00);
assert(p_manager != 00);
assert(p_stream != 00);
p_j2k->cstr_index->main_head_end = opj_stream_tell(p_stream);
return OPJ_TRUE;
}
static OPJ_BOOL opj_j2k_write_mct_data_group( opj_j2k_t *p_j2k,
struct opj_stream_private *p_stream,
struct opj_event_mgr * p_manager )
{
OPJ_UINT32 i;
opj_simple_mcc_decorrelation_data_t * l_mcc_record;
opj_mct_data_t * l_mct_record;
opj_tcp_t * l_tcp;
/* preconditions */
assert(p_j2k != 00);
assert(p_stream != 00);
assert(p_manager != 00);
if (! opj_j2k_write_cbd(p_j2k,p_stream,p_manager)) {
return OPJ_FALSE;
}
l_tcp = &(p_j2k->m_cp.tcps[p_j2k->m_current_tile_number]);
l_mct_record = l_tcp->m_mct_records;
for (i=0;i<l_tcp->m_nb_mct_records;++i) {
if (! opj_j2k_write_mct_record(p_j2k,l_mct_record,p_stream,p_manager)) {
return OPJ_FALSE;
}
++l_mct_record;
}
l_mcc_record = l_tcp->m_mcc_records;
for (i=0;i<l_tcp->m_nb_mcc_records;++i) {
if (! opj_j2k_write_mcc_record(p_j2k,l_mcc_record,p_stream,p_manager)) {
return OPJ_FALSE;
}
++l_mcc_record;
}
if (! opj_j2k_write_mco(p_j2k,p_stream,p_manager)) {
return OPJ_FALSE;
}
return OPJ_TRUE;
}
#if 0
static OPJ_BOOL opj_j2k_write_all_coc(opj_j2k_t *p_j2k,
struct opj_stream_private *p_stream,
struct opj_event_mgr * p_manager )
{
OPJ_UINT32 compno;
/* preconditions */
assert(p_j2k != 00);
assert(p_manager != 00);
assert(p_stream != 00);
for (compno = 0; compno < p_j2k->m_private_image->numcomps; ++compno)
{
if (! opj_j2k_write_coc(p_j2k,compno,p_stream, p_manager)) {
return OPJ_FALSE;
}
}
return OPJ_TRUE;
}
#endif
#if 0
static OPJ_BOOL opj_j2k_write_all_qcc(opj_j2k_t *p_j2k,
struct opj_stream_private *p_stream,
struct opj_event_mgr * p_manager )
{
OPJ_UINT32 compno;
/* preconditions */
assert(p_j2k != 00);
assert(p_manager != 00);
assert(p_stream != 00);
for (compno = 0; compno < p_j2k->m_private_image->numcomps; ++compno)
{
if (! opj_j2k_write_qcc(p_j2k,compno,p_stream, p_manager)) {
return OPJ_FALSE;
}
}
return OPJ_TRUE;
}
#endif
static OPJ_BOOL opj_j2k_write_regions( opj_j2k_t *p_j2k,
struct opj_stream_private *p_stream,
struct opj_event_mgr * p_manager )
{
OPJ_UINT32 compno;
const opj_tccp_t *l_tccp = 00;
/* preconditions */
assert(p_j2k != 00);
assert(p_manager != 00);
assert(p_stream != 00);
l_tccp = p_j2k->m_cp.tcps->tccps;
for (compno = 0; compno < p_j2k->m_private_image->numcomps; ++compno) {
if (l_tccp->roishift) {
if (! opj_j2k_write_rgn(p_j2k,0,compno,p_j2k->m_private_image->numcomps,p_stream,p_manager)) {
return OPJ_FALSE;
}
}
++l_tccp;
}
return OPJ_TRUE;
}
static OPJ_BOOL opj_j2k_write_epc( opj_j2k_t *p_j2k,
struct opj_stream_private *p_stream,
struct opj_event_mgr * p_manager )
{
opj_codestream_index_t * l_cstr_index = 00;
/* preconditions */
assert(p_j2k != 00);
assert(p_manager != 00);
assert(p_stream != 00);
l_cstr_index = p_j2k->cstr_index;
if (l_cstr_index) {
l_cstr_index->codestream_size = (OPJ_UINT64)opj_stream_tell(p_stream);
/* UniPG>> */
/* The following adjustment is done to adjust the codestream size */
/* if SOD is not at 0 in the buffer. Useful in case of JP2, where */
/* the first bunch of bytes is not in the codestream */
l_cstr_index->codestream_size -= (OPJ_UINT64)l_cstr_index->main_head_start;
/* <<UniPG */
}
#ifdef USE_JPWL
/* preparation of JPWL marker segments */
#if 0
if(cp->epc_on) {
/* encode according to JPWL */
jpwl_encode(p_j2k, p_stream, image);
}
#endif
assert( 0 && "TODO" );
#endif /* USE_JPWL */
return OPJ_TRUE;
}
static OPJ_BOOL opj_j2k_read_unk ( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
OPJ_UINT32 *output_marker,
opj_event_mgr_t * p_manager
)
{
OPJ_UINT32 l_unknown_marker;
const opj_dec_memory_marker_handler_t * l_marker_handler;
OPJ_UINT32 l_size_unk = 2;
/* preconditions*/
assert(p_j2k != 00);
assert(p_manager != 00);
assert(p_stream != 00);
opj_event_msg(p_manager, EVT_WARNING, "Unknown marker\n");
for (;;) {
/* Try to read 2 bytes (the next marker ID) from stream and copy them into the buffer*/
if (opj_stream_read_data(p_stream,p_j2k->m_specific_param.m_decoder.m_header_data,2,p_manager) != 2) {
opj_event_msg(p_manager, EVT_ERROR, "Stream too short\n");
return OPJ_FALSE;
}
/* read 2 bytes as the new marker ID*/
opj_read_bytes(p_j2k->m_specific_param.m_decoder.m_header_data,&l_unknown_marker,2);
if (!(l_unknown_marker < 0xff00)) {
/* Get the marker handler from the marker ID*/
l_marker_handler = opj_j2k_get_marker_handler(l_unknown_marker);
if (!(p_j2k->m_specific_param.m_decoder.m_state & l_marker_handler->states)) {
opj_event_msg(p_manager, EVT_ERROR, "Marker is not compliant with its position\n");
return OPJ_FALSE;
}
else {
if (l_marker_handler->id != J2K_MS_UNK) {
/* Add the marker to the codestream index*/
if (l_marker_handler->id != J2K_MS_SOT)
{
OPJ_BOOL res = opj_j2k_add_mhmarker(p_j2k->cstr_index, J2K_MS_UNK,
(OPJ_UINT32) opj_stream_tell(p_stream) - l_size_unk,
l_size_unk);
if (res == OPJ_FALSE) {
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to add mh marker\n");
return OPJ_FALSE;
}
}
break; /* next marker is known and well located */
}
else
l_size_unk += 2;
}
}
}
*output_marker = l_marker_handler->id ;
return OPJ_TRUE;
}
static OPJ_BOOL opj_j2k_write_mct_record( opj_j2k_t *p_j2k,
opj_mct_data_t * p_mct_record,
struct opj_stream_private *p_stream,
struct opj_event_mgr * p_manager )
{
OPJ_UINT32 l_mct_size;
OPJ_BYTE * l_current_data = 00;
OPJ_UINT32 l_tmp;
/* preconditions */
assert(p_j2k != 00);
assert(p_manager != 00);
assert(p_stream != 00);
l_mct_size = 10 + p_mct_record->m_data_size;
if (l_mct_size > p_j2k->m_specific_param.m_encoder.m_header_tile_data_size) {
OPJ_BYTE *new_header_tile_data = (OPJ_BYTE *) opj_realloc(p_j2k->m_specific_param.m_encoder.m_header_tile_data, l_mct_size);
if (! new_header_tile_data) {
opj_free(p_j2k->m_specific_param.m_encoder.m_header_tile_data);
p_j2k->m_specific_param.m_encoder.m_header_tile_data = NULL;
p_j2k->m_specific_param.m_encoder.m_header_tile_data_size = 0;
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to write MCT marker\n");
return OPJ_FALSE;
}
p_j2k->m_specific_param.m_encoder.m_header_tile_data = new_header_tile_data;
p_j2k->m_specific_param.m_encoder.m_header_tile_data_size = l_mct_size;
}
l_current_data = p_j2k->m_specific_param.m_encoder.m_header_tile_data;
opj_write_bytes(l_current_data,J2K_MS_MCT,2); /* MCT */
l_current_data += 2;
opj_write_bytes(l_current_data,l_mct_size-2,2); /* Lmct */
l_current_data += 2;
opj_write_bytes(l_current_data,0,2); /* Zmct */
l_current_data += 2;
/* only one marker atm */
l_tmp = (p_mct_record->m_index & 0xff) | (p_mct_record->m_array_type << 8) | (p_mct_record->m_element_type << 10);
opj_write_bytes(l_current_data,l_tmp,2);
l_current_data += 2;
opj_write_bytes(l_current_data,0,2); /* Ymct */
l_current_data+=2;
memcpy(l_current_data,p_mct_record->m_data,p_mct_record->m_data_size);
if (opj_stream_write_data(p_stream,p_j2k->m_specific_param.m_encoder.m_header_tile_data,l_mct_size,p_manager) != l_mct_size) {
return OPJ_FALSE;
}
return OPJ_TRUE;
}
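/* The Imct field written above packs three values into 16 bits: bits 0-7
hold the record index, bits 8-9 the array type and bits 10-11 the element
type. For instance index 0, array type 1 (decorrelation) and element
type 3 (double) give (0) | (1 << 8) | (3 << 10) = 0x0D00, matching the
"0000 1101 0000 0000" note in opj_j2k_read_mct() below. */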
/**
* Reads a MCT marker (Multiple Component Transform)
*
* @param p_header_data the data contained in the MCT box.
* @param p_j2k the jpeg2000 codec.
* @param p_header_size the size of the data contained in the MCT marker.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_mct ( opj_j2k_t *p_j2k,
OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
opj_event_mgr_t * p_manager
)
{
OPJ_UINT32 i;
opj_tcp_t *l_tcp = 00;
OPJ_UINT32 l_tmp;
OPJ_UINT32 l_indix;
opj_mct_data_t * l_mct_data;
/* preconditions */
assert(p_header_data != 00);
assert(p_j2k != 00);
l_tcp = p_j2k->m_specific_param.m_decoder.m_state == J2K_STATE_TPH ?
&p_j2k->m_cp.tcps[p_j2k->m_current_tile_number] :
p_j2k->m_specific_param.m_decoder.m_default_tcp;
if (p_header_size < 2) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading MCT marker\n");
return OPJ_FALSE;
}
/* first marker */
opj_read_bytes(p_header_data,&l_tmp,2); /* Zmct */
p_header_data += 2;
if (l_tmp != 0) {
opj_event_msg(p_manager, EVT_WARNING, "Cannot take in charge mct data within multiple MCT records\n");
return OPJ_TRUE;
}
if(p_header_size <= 6) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading MCT marker\n");
return OPJ_FALSE;
}
/* Imct -> no need for other values; take the first. Type "double with decorrelation" is 0x0D00 (0000 1101 0000 0000) */
opj_read_bytes(p_header_data,&l_tmp,2); /* Imct */
p_header_data += 2;
l_indix = l_tmp & 0xff;
l_mct_data = l_tcp->m_mct_records;
for (i=0;i<l_tcp->m_nb_mct_records;++i) {
if (l_mct_data->m_index == l_indix) {
break;
}
++l_mct_data;
}
/* NOT FOUND */
if (i == l_tcp->m_nb_mct_records) {
if (l_tcp->m_nb_mct_records == l_tcp->m_nb_max_mct_records) {
opj_mct_data_t *new_mct_records;
l_tcp->m_nb_max_mct_records += OPJ_J2K_MCT_DEFAULT_NB_RECORDS;
new_mct_records = (opj_mct_data_t *) opj_realloc(l_tcp->m_mct_records, l_tcp->m_nb_max_mct_records * sizeof(opj_mct_data_t));
if (! new_mct_records) {
opj_free(l_tcp->m_mct_records);
l_tcp->m_mct_records = NULL;
l_tcp->m_nb_max_mct_records = 0;
l_tcp->m_nb_mct_records = 0;
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to read MCT marker\n");
return OPJ_FALSE;
}
l_tcp->m_mct_records = new_mct_records;
l_mct_data = l_tcp->m_mct_records + l_tcp->m_nb_mct_records;
memset(l_mct_data ,0,(l_tcp->m_nb_max_mct_records - l_tcp->m_nb_mct_records) * sizeof(opj_mct_data_t));
}
l_mct_data = l_tcp->m_mct_records + l_tcp->m_nb_mct_records;
++l_tcp->m_nb_mct_records;
}
if (l_mct_data->m_data) {
opj_free(l_mct_data->m_data);
l_mct_data->m_data = 00;
}
l_mct_data->m_index = l_indix;
l_mct_data->m_array_type = (J2K_MCT_ARRAY_TYPE)((l_tmp >> 8) & 3);
l_mct_data->m_element_type = (J2K_MCT_ELEMENT_TYPE)((l_tmp >> 10) & 3);
opj_read_bytes(p_header_data,&l_tmp,2); /* Ymct */
p_header_data+=2;
if (l_tmp != 0) {
opj_event_msg(p_manager, EVT_WARNING, "Cannot take in charge multiple MCT markers\n");
return OPJ_TRUE;
}
p_header_size -= 6;
l_mct_data->m_data = (OPJ_BYTE*)opj_malloc(p_header_size);
if (! l_mct_data->m_data) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading MCT marker\n");
return OPJ_FALSE;
}
memcpy(l_mct_data->m_data,p_header_data,p_header_size);
l_mct_data->m_data_size = p_header_size;
return OPJ_TRUE;
}
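/*
 * For reference, a sketch of the MCT payload layout as parsed above.
 * The field widths come from the opj_read_bytes() calls in
 * opj_j2k_read_mct(); p_header_size is the segment length minus the
 * 2-byte Lmct field, so the raw data spans Lmct - 8 bytes:
 *
 *   Zmct (2 bytes)       - record index; only 0 (single record) is handled
 *   Imct (2 bytes)       - bits 0-7 index, bits 8-9 array type, bits 10-11 element type
 *   Ymct (2 bytes)       - last record index; only 0 (single marker) is handled
 *   data (Lmct - 8 bytes) - raw matrix/offset elements, copied into m_data
 */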
static OPJ_BOOL opj_j2k_write_mcc_record( opj_j2k_t *p_j2k,
struct opj_simple_mcc_decorrelation_data * p_mcc_record,
struct opj_stream_private *p_stream,
struct opj_event_mgr * p_manager )
{
OPJ_UINT32 i;
OPJ_UINT32 l_mcc_size;
OPJ_BYTE * l_current_data = 00;
OPJ_UINT32 l_nb_bytes_for_comp;
OPJ_UINT32 l_mask;
OPJ_UINT32 l_tmcc;
/* preconditions */
assert(p_j2k != 00);
assert(p_manager != 00);
assert(p_stream != 00);
if (p_mcc_record->m_nb_comps > 255 ) {
l_nb_bytes_for_comp = 2;
l_mask = 0x8000;
}
else {
l_nb_bytes_for_comp = 1;
l_mask = 0;
}
l_mcc_size = p_mcc_record->m_nb_comps * 2 * l_nb_bytes_for_comp + 19;
if (l_mcc_size > p_j2k->m_specific_param.m_encoder.m_header_tile_data_size)
{
OPJ_BYTE *new_header_tile_data = (OPJ_BYTE *) opj_realloc(p_j2k->m_specific_param.m_encoder.m_header_tile_data, l_mcc_size);
if (! new_header_tile_data) {
opj_free(p_j2k->m_specific_param.m_encoder.m_header_tile_data);
p_j2k->m_specific_param.m_encoder.m_header_tile_data = NULL;
p_j2k->m_specific_param.m_encoder.m_header_tile_data_size = 0;
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to write MCC marker\n");
return OPJ_FALSE;
}
p_j2k->m_specific_param.m_encoder.m_header_tile_data = new_header_tile_data;
p_j2k->m_specific_param.m_encoder.m_header_tile_data_size = l_mcc_size;
}
l_current_data = p_j2k->m_specific_param.m_encoder.m_header_tile_data;
opj_write_bytes(l_current_data,J2K_MS_MCC,2); /* MCC */
l_current_data += 2;
opj_write_bytes(l_current_data,l_mcc_size-2,2); /* Lmcc */
l_current_data += 2;
/* first marker */
opj_write_bytes(l_current_data,0,2); /* Zmcc */
l_current_data += 2;
opj_write_bytes(l_current_data,p_mcc_record->m_index,1); /* Imcc -> no need for other values, take the first */
++l_current_data;
/* only one marker atm */
opj_write_bytes(l_current_data,0,2); /* Ymcc */
l_current_data+=2;
opj_write_bytes(l_current_data,1,2); /* Qmcc -> number of collections -> 1 */
l_current_data+=2;
opj_write_bytes(l_current_data,0x1,1); /* Xmcci type of component transformation -> array based decorrelation */
++l_current_data;
opj_write_bytes(l_current_data,p_mcc_record->m_nb_comps | l_mask,2); /* Nmcci number of input components involved and size for each component offset = 8 bits */
l_current_data+=2;
for (i=0;i<p_mcc_record->m_nb_comps;++i) {
opj_write_bytes(l_current_data,i,l_nb_bytes_for_comp); /* Cmccij Component offset*/
l_current_data+=l_nb_bytes_for_comp;
}
opj_write_bytes(l_current_data,p_mcc_record->m_nb_comps|l_mask,2); /* Mmcci number of output components involved and size for each component offset = 8 bits */
l_current_data+=2;
for (i=0;i<p_mcc_record->m_nb_comps;++i)
{
opj_write_bytes(l_current_data,i,l_nb_bytes_for_comp); /* Wmccij Component offset*/
l_current_data+=l_nb_bytes_for_comp;
}
l_tmcc = ((!p_mcc_record->m_is_irreversible)&1)<<16;
if (p_mcc_record->m_decorrelation_array) {
l_tmcc |= p_mcc_record->m_decorrelation_array->m_index;
}
if (p_mcc_record->m_offset_array) {
l_tmcc |= ((p_mcc_record->m_offset_array->m_index)<<8);
}
opj_write_bytes(l_current_data,l_tmcc,3); /* Tmcci : use MCT defined as number 1 and irreversible array based. */
l_current_data+=3;
if (opj_stream_write_data(p_stream,p_j2k->m_specific_param.m_encoder.m_header_tile_data,l_mcc_size,p_manager) != l_mcc_size) {
return OPJ_FALSE;
}
return OPJ_TRUE;
}
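/*
 * For reference, the 3-byte Tmcci field written above packs three values,
 * mirrored by the reads in opj_j2k_read_mcc() below:
 *
 *   bits  0-7  : index of the decorrelation MCT record (0 = none)
 *   bits  8-15 : index of the offset MCT record (0 = none)
 *   bit   16   : 1 if the transform is reversible, 0 if irreversible
 */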
static OPJ_BOOL opj_j2k_read_mcc ( opj_j2k_t *p_j2k,
OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
opj_event_mgr_t * p_manager )
{
OPJ_UINT32 i,j;
OPJ_UINT32 l_tmp;
OPJ_UINT32 l_indix;
opj_tcp_t * l_tcp;
opj_simple_mcc_decorrelation_data_t * l_mcc_record;
opj_mct_data_t * l_mct_data;
OPJ_UINT32 l_nb_collections;
OPJ_UINT32 l_nb_comps;
OPJ_UINT32 l_nb_bytes_by_comp;
/* preconditions */
assert(p_header_data != 00);
assert(p_j2k != 00);
assert(p_manager != 00);
l_tcp = p_j2k->m_specific_param.m_decoder.m_state == J2K_STATE_TPH ?
&p_j2k->m_cp.tcps[p_j2k->m_current_tile_number] :
p_j2k->m_specific_param.m_decoder.m_default_tcp;
if (p_header_size < 2) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading MCC marker\n");
return OPJ_FALSE;
}
/* first marker */
opj_read_bytes(p_header_data,&l_tmp,2); /* Zmcc */
p_header_data += 2;
if (l_tmp != 0) {
opj_event_msg(p_manager, EVT_WARNING, "Cannot take in charge multiple data spanning\n");
return OPJ_TRUE;
}
if (p_header_size < 7) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading MCC marker\n");
return OPJ_FALSE;
}
opj_read_bytes(p_header_data,&l_indix,1); /* Imcc -> no need for other values, take the first */
++p_header_data;
l_mcc_record = l_tcp->m_mcc_records;
for(i=0;i<l_tcp->m_nb_mcc_records;++i) {
if (l_mcc_record->m_index == l_indix) {
break;
}
++l_mcc_record;
}
/* NOT FOUND */
if (i == l_tcp->m_nb_mcc_records) {
if (l_tcp->m_nb_mcc_records == l_tcp->m_nb_max_mcc_records) {
opj_simple_mcc_decorrelation_data_t *new_mcc_records;
l_tcp->m_nb_max_mcc_records += OPJ_J2K_MCC_DEFAULT_NB_RECORDS;
new_mcc_records = (opj_simple_mcc_decorrelation_data_t *) opj_realloc(
l_tcp->m_mcc_records, l_tcp->m_nb_max_mcc_records * sizeof(opj_simple_mcc_decorrelation_data_t));
if (! new_mcc_records) {
opj_free(l_tcp->m_mcc_records);
l_tcp->m_mcc_records = NULL;
l_tcp->m_nb_max_mcc_records = 0;
l_tcp->m_nb_mcc_records = 0;
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to read MCC marker\n");
return OPJ_FALSE;
}
l_tcp->m_mcc_records = new_mcc_records;
l_mcc_record = l_tcp->m_mcc_records + l_tcp->m_nb_mcc_records;
memset(l_mcc_record,0,(l_tcp->m_nb_max_mcc_records-l_tcp->m_nb_mcc_records) * sizeof(opj_simple_mcc_decorrelation_data_t));
}
l_mcc_record = l_tcp->m_mcc_records + l_tcp->m_nb_mcc_records;
}
l_mcc_record->m_index = l_indix;
/* only one marker atm */
opj_read_bytes(p_header_data,&l_tmp,2); /* Ymcc */
p_header_data+=2;
if (l_tmp != 0) {
opj_event_msg(p_manager, EVT_WARNING, "Cannot take in charge multiple data spanning\n");
return OPJ_TRUE;
}
opj_read_bytes(p_header_data,&l_nb_collections,2); /* Qmcc -> number of collections -> 1 */
p_header_data+=2;
if (l_nb_collections > 1) {
opj_event_msg(p_manager, EVT_WARNING, "Cannot take in charge multiple collections\n");
return OPJ_TRUE;
}
p_header_size -= 7;
for (i=0;i<l_nb_collections;++i) {
if (p_header_size < 3) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading MCC marker\n");
return OPJ_FALSE;
}
opj_read_bytes(p_header_data,&l_tmp,1); /* Xmcci type of component transformation -> array based decorrelation */
++p_header_data;
if (l_tmp != 1) {
opj_event_msg(p_manager, EVT_WARNING, "Cannot take in charge collections other than array decorrelation\n");
return OPJ_TRUE;
}
opj_read_bytes(p_header_data,&l_nb_comps,2);
p_header_data+=2;
p_header_size-=3;
l_nb_bytes_by_comp = 1 + (l_nb_comps>>15);
l_mcc_record->m_nb_comps = l_nb_comps & 0x7fff;
if (p_header_size < (l_nb_bytes_by_comp * l_mcc_record->m_nb_comps + 2)) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading MCC marker\n");
return OPJ_FALSE;
}
p_header_size -= (l_nb_bytes_by_comp * l_mcc_record->m_nb_comps + 2);
for (j=0;j<l_mcc_record->m_nb_comps;++j) {
opj_read_bytes(p_header_data,&l_tmp,l_nb_bytes_by_comp); /* Cmccij Component offset*/
p_header_data+=l_nb_bytes_by_comp;
if (l_tmp != j) {
opj_event_msg(p_manager, EVT_WARNING, "Cannot take in charge collections with indix shuffle\n");
return OPJ_TRUE;
}
}
opj_read_bytes(p_header_data,&l_nb_comps,2);
p_header_data+=2;
l_nb_bytes_by_comp = 1 + (l_nb_comps>>15);
l_nb_comps &= 0x7fff;
if (l_nb_comps != l_mcc_record->m_nb_comps) {
opj_event_msg(p_manager, EVT_WARNING, "Cannot take in charge collections without same number of indixes\n");
return OPJ_TRUE;
}
if (p_header_size < (l_nb_bytes_by_comp * l_mcc_record->m_nb_comps + 3)) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading MCC marker\n");
return OPJ_FALSE;
}
p_header_size -= (l_nb_bytes_by_comp * l_mcc_record->m_nb_comps + 3);
for (j=0;j<l_mcc_record->m_nb_comps;++j) {
opj_read_bytes(p_header_data,&l_tmp,l_nb_bytes_by_comp); /* Wmccij Component offset*/
p_header_data+=l_nb_bytes_by_comp;
if (l_tmp != j) {
opj_event_msg(p_manager, EVT_WARNING, "Cannot take in charge collections with indix shuffle\n");
return OPJ_TRUE;
}
}
opj_read_bytes(p_header_data,&l_tmp,3); /* Tmcci : associated MCT records and reversibility */
p_header_data += 3;
l_mcc_record->m_is_irreversible = ! ((l_tmp>>16) & 1);
l_mcc_record->m_decorrelation_array = 00;
l_mcc_record->m_offset_array = 00;
l_indix = l_tmp & 0xff;
if (l_indix != 0) {
l_mct_data = l_tcp->m_mct_records;
for (j=0;j<l_tcp->m_nb_mct_records;++j) {
if (l_mct_data->m_index == l_indix) {
l_mcc_record->m_decorrelation_array = l_mct_data;
break;
}
++l_mct_data;
}
if (l_mcc_record->m_decorrelation_array == 00) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading MCC marker\n");
return OPJ_FALSE;
}
}
l_indix = (l_tmp >> 8) & 0xff;
if (l_indix != 0) {
l_mct_data = l_tcp->m_mct_records;
for (j=0;j<l_tcp->m_nb_mct_records;++j) {
if (l_mct_data->m_index == l_indix) {
l_mcc_record->m_offset_array = l_mct_data;
break;
}
++l_mct_data;
}
if (l_mcc_record->m_offset_array == 00) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading MCC marker\n");
return OPJ_FALSE;
}
}
}
if (p_header_size != 0) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading MCC marker\n");
return OPJ_FALSE;
}
++l_tcp->m_nb_mcc_records;
return OPJ_TRUE;
}
static OPJ_BOOL opj_j2k_write_mco( opj_j2k_t *p_j2k,
struct opj_stream_private *p_stream,
struct opj_event_mgr * p_manager
)
{
OPJ_BYTE * l_current_data = 00;
OPJ_UINT32 l_mco_size;
opj_tcp_t * l_tcp = 00;
opj_simple_mcc_decorrelation_data_t * l_mcc_record;
OPJ_UINT32 i;
/* preconditions */
assert(p_j2k != 00);
assert(p_manager != 00);
assert(p_stream != 00);
l_tcp =&(p_j2k->m_cp.tcps[p_j2k->m_current_tile_number]);
l_current_data = p_j2k->m_specific_param.m_encoder.m_header_tile_data;
l_mco_size = 5 + l_tcp->m_nb_mcc_records;
if (l_mco_size > p_j2k->m_specific_param.m_encoder.m_header_tile_data_size) {
OPJ_BYTE *new_header_tile_data = (OPJ_BYTE *) opj_realloc(p_j2k->m_specific_param.m_encoder.m_header_tile_data, l_mco_size);
if (! new_header_tile_data) {
opj_free(p_j2k->m_specific_param.m_encoder.m_header_tile_data);
p_j2k->m_specific_param.m_encoder.m_header_tile_data = NULL;
p_j2k->m_specific_param.m_encoder.m_header_tile_data_size = 0;
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to write MCO marker\n");
return OPJ_FALSE;
}
p_j2k->m_specific_param.m_encoder.m_header_tile_data = new_header_tile_data;
p_j2k->m_specific_param.m_encoder.m_header_tile_data_size = l_mco_size;
}
opj_write_bytes(l_current_data,J2K_MS_MCO,2); /* MCO */
l_current_data += 2;
opj_write_bytes(l_current_data,l_mco_size-2,2); /* Lmco */
l_current_data += 2;
opj_write_bytes(l_current_data,l_tcp->m_nb_mcc_records,1); /* Nmco : only one transform stage */
++l_current_data;
l_mcc_record = l_tcp->m_mcc_records;
for (i=0;i<l_tcp->m_nb_mcc_records;++i) {
opj_write_bytes(l_current_data,l_mcc_record->m_index,1);/* Imco -> use the mcc indicated by 1*/
++l_current_data;
++l_mcc_record;
}
if (opj_stream_write_data(p_stream,p_j2k->m_specific_param.m_encoder.m_header_tile_data,l_mco_size,p_manager) != l_mco_size) {
return OPJ_FALSE;
}
return OPJ_TRUE;
}
/**
* Reads a MCO marker (Multiple Component Transform Ordering)
*
* @param p_header_data the data contained in the MCO box.
* @param p_j2k the jpeg2000 codec.
* @param p_header_size the size of the data contained in the MCO marker.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_mco ( opj_j2k_t *p_j2k,
OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
opj_event_mgr_t * p_manager
)
{
OPJ_UINT32 l_tmp, i;
OPJ_UINT32 l_nb_stages;
opj_tcp_t * l_tcp;
opj_tccp_t * l_tccp;
opj_image_t * l_image;
/* preconditions */
assert(p_header_data != 00);
assert(p_j2k != 00);
assert(p_manager != 00);
l_image = p_j2k->m_private_image;
l_tcp = p_j2k->m_specific_param.m_decoder.m_state == J2K_STATE_TPH ?
&p_j2k->m_cp.tcps[p_j2k->m_current_tile_number] :
p_j2k->m_specific_param.m_decoder.m_default_tcp;
if (p_header_size < 1) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading MCO marker\n");
return OPJ_FALSE;
}
opj_read_bytes(p_header_data,&l_nb_stages,1); /* Nmco : only one transform stage */
++p_header_data;
if (l_nb_stages > 1) {
opj_event_msg(p_manager, EVT_WARNING, "Cannot take in charge multiple transformation stages.\n");
return OPJ_TRUE;
}
if (p_header_size != l_nb_stages + 1) {
opj_event_msg(p_manager, EVT_WARNING, "Error reading MCO marker\n");
return OPJ_FALSE;
}
l_tccp = l_tcp->tccps;
for (i=0;i<l_image->numcomps;++i) {
l_tccp->m_dc_level_shift = 0;
++l_tccp;
}
if (l_tcp->m_mct_decoding_matrix) {
opj_free(l_tcp->m_mct_decoding_matrix);
l_tcp->m_mct_decoding_matrix = 00;
}
for (i=0;i<l_nb_stages;++i) {
opj_read_bytes(p_header_data,&l_tmp,1);
++p_header_data;
if (! opj_j2k_add_mct(l_tcp,p_j2k->m_private_image,l_tmp)) {
return OPJ_FALSE;
}
}
return OPJ_TRUE;
}
static OPJ_BOOL opj_j2k_add_mct(opj_tcp_t * p_tcp, opj_image_t * p_image, OPJ_UINT32 p_index)
{
OPJ_UINT32 i;
opj_simple_mcc_decorrelation_data_t * l_mcc_record;
opj_mct_data_t * l_deco_array, * l_offset_array;
OPJ_UINT32 l_data_size,l_mct_size, l_offset_size;
OPJ_UINT32 l_nb_elem;
OPJ_UINT32 * l_offset_data, * l_current_offset_data;
opj_tccp_t * l_tccp;
/* preconditions */
assert(p_tcp != 00);
l_mcc_record = p_tcp->m_mcc_records;
for (i=0;i<p_tcp->m_nb_mcc_records;++i) {
if (l_mcc_record->m_index == p_index) {
break;
}
++l_mcc_record;
}
if (i==p_tcp->m_nb_mcc_records) {
/* element discarded */
return OPJ_TRUE;
}
if (l_mcc_record->m_nb_comps != p_image->numcomps) {
/* number of components does not match the image: not supported */
return OPJ_TRUE;
}
l_deco_array = l_mcc_record->m_decorrelation_array;
if (l_deco_array) {
l_data_size = MCT_ELEMENT_SIZE[l_deco_array->m_element_type] * p_image->numcomps * p_image->numcomps;
if (l_deco_array->m_data_size != l_data_size) {
return OPJ_FALSE;
}
l_nb_elem = p_image->numcomps * p_image->numcomps;
l_mct_size = l_nb_elem * (OPJ_UINT32)sizeof(OPJ_FLOAT32);
p_tcp->m_mct_decoding_matrix = (OPJ_FLOAT32*)opj_malloc(l_mct_size);
if (! p_tcp->m_mct_decoding_matrix ) {
return OPJ_FALSE;
}
j2k_mct_read_functions_to_float[l_deco_array->m_element_type](l_deco_array->m_data,p_tcp->m_mct_decoding_matrix,l_nb_elem);
}
l_offset_array = l_mcc_record->m_offset_array;
if (l_offset_array) {
l_data_size = MCT_ELEMENT_SIZE[l_offset_array->m_element_type] * p_image->numcomps;
if (l_offset_array->m_data_size != l_data_size) {
return OPJ_FALSE;
}
l_nb_elem = p_image->numcomps;
l_offset_size = l_nb_elem * (OPJ_UINT32)sizeof(OPJ_UINT32);
l_offset_data = (OPJ_UINT32*)opj_malloc(l_offset_size);
if (! l_offset_data ) {
return OPJ_FALSE;
}
j2k_mct_read_functions_to_int32[l_offset_array->m_element_type](l_offset_array->m_data,l_offset_data,l_nb_elem);
l_tccp = p_tcp->tccps;
l_current_offset_data = l_offset_data;
for (i=0;i<p_image->numcomps;++i) {
l_tccp->m_dc_level_shift = (OPJ_INT32)*(l_current_offset_data++);
++l_tccp;
}
opj_free(l_offset_data);
}
return OPJ_TRUE;
}
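/*
 * Note: opj_j2k_add_mct() above only accepts records whose payload size
 * matches the expected element count exactly (see the m_data_size checks):
 *   decorrelation array : numcomps * numcomps elements
 *   offset array        : numcomps elements
 * each element being MCT_ELEMENT_SIZE[m_element_type] bytes wide;
 * any mismatch aborts the whole transform setup.
 */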
static OPJ_BOOL opj_j2k_write_cbd( opj_j2k_t *p_j2k,
struct opj_stream_private *p_stream,
struct opj_event_mgr * p_manager )
{
OPJ_UINT32 i;
OPJ_UINT32 l_cbd_size;
OPJ_BYTE * l_current_data = 00;
opj_image_t *l_image = 00;
opj_image_comp_t * l_comp = 00;
/* preconditions */
assert(p_j2k != 00);
assert(p_manager != 00);
assert(p_stream != 00);
l_image = p_j2k->m_private_image;
l_cbd_size = 6 + p_j2k->m_private_image->numcomps;
if (l_cbd_size > p_j2k->m_specific_param.m_encoder.m_header_tile_data_size) {
OPJ_BYTE *new_header_tile_data = (OPJ_BYTE *) opj_realloc(p_j2k->m_specific_param.m_encoder.m_header_tile_data, l_cbd_size);
if (! new_header_tile_data) {
opj_free(p_j2k->m_specific_param.m_encoder.m_header_tile_data);
p_j2k->m_specific_param.m_encoder.m_header_tile_data = NULL;
p_j2k->m_specific_param.m_encoder.m_header_tile_data_size = 0;
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to write CBD marker\n");
return OPJ_FALSE;
}
p_j2k->m_specific_param.m_encoder.m_header_tile_data = new_header_tile_data;
p_j2k->m_specific_param.m_encoder.m_header_tile_data_size = l_cbd_size;
}
l_current_data = p_j2k->m_specific_param.m_encoder.m_header_tile_data;
opj_write_bytes(l_current_data,J2K_MS_CBD,2); /* CBD */
l_current_data += 2;
opj_write_bytes(l_current_data,l_cbd_size-2,2); /* L_CBD */
l_current_data += 2;
opj_write_bytes(l_current_data,l_image->numcomps, 2); /* Ncbd */
l_current_data+=2;
l_comp = l_image->comps;
for (i=0;i<l_image->numcomps;++i) {
opj_write_bytes(l_current_data, (l_comp->sgnd << 7) | (l_comp->prec - 1), 1); /* Component bit depth */
++l_current_data;
++l_comp;
}
if (opj_stream_write_data(p_stream,p_j2k->m_specific_param.m_encoder.m_header_tile_data,l_cbd_size,p_manager) != l_cbd_size) {
return OPJ_FALSE;
}
return OPJ_TRUE;
}
/**
* Reads a CBD marker (Component bit depth definition)
* @param p_header_data the data contained in the CBD box.
* @param p_j2k the jpeg2000 codec.
* @param p_header_size the size of the data contained in the CBD marker.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_cbd ( opj_j2k_t *p_j2k,
OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
opj_event_mgr_t * p_manager
)
{
OPJ_UINT32 l_nb_comp,l_num_comp;
OPJ_UINT32 l_comp_def;
OPJ_UINT32 i;
opj_image_comp_t * l_comp = 00;
/* preconditions */
assert(p_header_data != 00);
assert(p_j2k != 00);
assert(p_manager != 00);
l_num_comp = p_j2k->m_private_image->numcomps;
if (p_header_size != (p_j2k->m_private_image->numcomps + 2)) {
opj_event_msg(p_manager, EVT_ERROR, "Crror reading CBD marker\n");
return OPJ_FALSE;
}
opj_read_bytes(p_header_data,&l_nb_comp,2); /* Ncbd */
p_header_data+=2;
if (l_nb_comp != l_num_comp) {
opj_event_msg(p_manager, EVT_ERROR, "Crror reading CBD marker\n");
return OPJ_FALSE;
}
l_comp = p_j2k->m_private_image->comps;
for (i=0;i<l_num_comp;++i) {
opj_read_bytes(p_header_data,&l_comp_def,1); /* Component bit depth */
++p_header_data;
l_comp->sgnd = (l_comp_def>>7) & 1;
l_comp->prec = (l_comp_def&0x7f) + 1;
++l_comp;
}
return OPJ_TRUE;
}
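/*
 * For reference, each per-component byte of the CBD payload packs the
 * signedness in bit 7 and (precision - 1) in bits 0-6, matching
 * opj_j2k_write_cbd() and the decode above. Two hypothetical values:
 *   12-bit unsigned -> (0 << 7) | (12 - 1) = 0x0B
 *   16-bit signed   -> (1 << 7) | (16 - 1) = 0x8F
 */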
/* ----------------------------------------------------------------------- */
/* J2K / JPT decoder interface */
/* ----------------------------------------------------------------------- */
void opj_j2k_setup_decoder(opj_j2k_t *j2k, opj_dparameters_t *parameters)
{
if(j2k && parameters) {
j2k->m_cp.m_specific_param.m_dec.m_layer = parameters->cp_layer;
j2k->m_cp.m_specific_param.m_dec.m_reduce = parameters->cp_reduce;
#ifdef USE_JPWL
j2k->m_cp.correct = parameters->jpwl_correct;
j2k->m_cp.exp_comps = parameters->jpwl_exp_comps;
j2k->m_cp.max_tiles = parameters->jpwl_max_tiles;
#endif /* USE_JPWL */
}
}
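/*
 * A minimal usage sketch for the setup entry point above (hypothetical
 * caller, not part of this file), using the public parameter helper:
 *
 *   opj_dparameters_t params;
 *   opj_set_default_decoder_parameters(&params);
 *   params.cp_reduce = 1;                // discard the highest resolution level
 *   opj_j2k_setup_decoder(j2k, &params); // j2k from opj_j2k_create_decompress()
 */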
/* ----------------------------------------------------------------------- */
/* J2K encoder interface */
/* ----------------------------------------------------------------------- */
opj_j2k_t* opj_j2k_create_compress(void)
{
opj_j2k_t *l_j2k = (opj_j2k_t*) opj_calloc(1,sizeof(opj_j2k_t));
if (!l_j2k) {
return NULL;
}
l_j2k->m_is_decoder = 0;
l_j2k->m_cp.m_is_decoder = 0;
l_j2k->m_specific_param.m_encoder.m_header_tile_data = (OPJ_BYTE *) opj_malloc(OPJ_J2K_DEFAULT_HEADER_SIZE);
if (! l_j2k->m_specific_param.m_encoder.m_header_tile_data) {
opj_j2k_destroy(l_j2k);
return NULL;
}
l_j2k->m_specific_param.m_encoder.m_header_tile_data_size = OPJ_J2K_DEFAULT_HEADER_SIZE;
/* validation list creation*/
l_j2k->m_validation_list = opj_procedure_list_create();
if (! l_j2k->m_validation_list) {
opj_j2k_destroy(l_j2k);
return NULL;
}
/* execution list creation*/
l_j2k->m_procedure_list = opj_procedure_list_create();
if (! l_j2k->m_procedure_list) {
opj_j2k_destroy(l_j2k);
return NULL;
}
return l_j2k;
}
static int opj_j2k_initialise_4K_poc(opj_poc_t *POC, int numres){
POC[0].tile = 1;
POC[0].resno0 = 0;
POC[0].compno0 = 0;
POC[0].layno1 = 1;
POC[0].resno1 = (OPJ_UINT32)(numres-1);
POC[0].compno1 = 3;
POC[0].prg1 = OPJ_CPRL;
POC[1].tile = 1;
POC[1].resno0 = (OPJ_UINT32)(numres-1);
POC[1].compno0 = 0;
POC[1].layno1 = 1;
POC[1].resno1 = (OPJ_UINT32)numres;
POC[1].compno1 = 3;
POC[1].prg1 = OPJ_CPRL;
return 2;
}
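/*
 * Note on the two progression order changes above: POC[0] covers
 * resolution levels 0..numres-2 (the part a 2K decoder extracts) and
 * POC[1] the final level, both in CPRL order, so the 2K sub-resolution
 * is emitted before the full 4K data. This assumes resno1/compno1 act
 * as exclusive upper bounds in opj_poc_t.
 */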
static void opj_j2k_set_cinema_parameters(opj_cparameters_t *parameters, opj_image_t *image, opj_event_mgr_t *p_manager)
{
/* Configure cinema parameters */
int i;
/* No tiling */
parameters->tile_size_on = OPJ_FALSE;
parameters->cp_tdx=1;
parameters->cp_tdy=1;
/* One tile part for each component */
parameters->tp_flag = 'C';
parameters->tp_on = 1;
/* Tile and Image shall be at (0,0) */
parameters->cp_tx0 = 0;
parameters->cp_ty0 = 0;
parameters->image_offset_x0 = 0;
parameters->image_offset_y0 = 0;
/* Codeblock size= 32*32 */
parameters->cblockw_init = 32;
parameters->cblockh_init = 32;
/* Codeblock style: no mode switch enabled */
parameters->mode = 0;
/* No ROI */
parameters->roi_compno = -1;
/* No subsampling */
parameters->subsampling_dx = 1;
parameters->subsampling_dy = 1;
/* 9-7 transform */
parameters->irreversible = 1;
/* Number of layers */
if (parameters->tcp_numlayers > 1){
opj_event_msg(p_manager, EVT_WARNING,
"JPEG 2000 Profile-3 and 4 (2k/4k dc profile) requires:\n"
"1 single quality layer"
"-> Number of layers forced to 1 (rather than %d)\n"
"-> Rate of the last layer (%3.1f) will be used",
parameters->tcp_numlayers, parameters->tcp_rates[parameters->tcp_numlayers-1]);
parameters->tcp_rates[0] = parameters->tcp_rates[parameters->tcp_numlayers-1];
parameters->tcp_numlayers = 1;
}
/* Resolution levels */
switch (parameters->rsiz){
case OPJ_PROFILE_CINEMA_2K:
if(parameters->numresolution > 6){
opj_event_msg(p_manager, EVT_WARNING,
"JPEG 2000 Profile-3 (2k dc profile) requires:\n"
"Number of decomposition levels <= 5\n"
"-> Number of decomposition levels forced to 5 (rather than %d)\n",
parameters->numresolution-1);
parameters->numresolution = 6;
}
break;
case OPJ_PROFILE_CINEMA_4K:
if(parameters->numresolution < 2){
opj_event_msg(p_manager, EVT_WARNING,
"JPEG 2000 Profile-4 (4k dc profile) requires:\n"
"Number of decomposition levels >= 1 && <= 6\n"
"-> Number of decomposition levels forced to 1 (rather than %d)\n",
parameters->numresolution-1);
parameters->numresolution = 1;
}else if(parameters->numresolution > 7){
opj_event_msg(p_manager, EVT_WARNING,
"JPEG 2000 Profile-4 (4k dc profile) requires:\n"
"Number of decomposition levels >= 1 && <= 6\n"
"-> Number of decomposition levels forced to 6 (rather than %d)\n",
parameters->numresolution-1);
parameters->numresolution = 7;
}
break;
default :
break;
}
/* Precincts */
parameters->csty |= 0x01;
parameters->res_spec = parameters->numresolution-1;
for (i = 0; i<parameters->res_spec; i++) {
parameters->prcw_init[i] = 256;
parameters->prch_init[i] = 256;
}
/* The progression order shall be CPRL */
parameters->prog_order = OPJ_CPRL;
/* Progression order changes for 4K, disallowed for 2K */
if (parameters->rsiz == OPJ_PROFILE_CINEMA_4K) {
parameters->numpocs = (OPJ_UINT32)opj_j2k_initialise_4K_poc(parameters->POC,parameters->numresolution);
} else {
parameters->numpocs = 0;
}
/* Limited bit-rate */
parameters->cp_disto_alloc = 1;
if (parameters->max_cs_size <= 0) {
/* No rate has been given; 24 fps is assumed */
parameters->max_cs_size = OPJ_CINEMA_24_CS;
opj_event_msg(p_manager, EVT_WARNING,
"JPEG 2000 Profile-3 and 4 (2k/4k dc profile) requires:\n"
"Maximum 1302083 compressed bytes @ 24fps\n"
"As no rate has been given, this limit will be used.\n");
} else if (parameters->max_cs_size > OPJ_CINEMA_24_CS) {
opj_event_msg(p_manager, EVT_WARNING,
"JPEG 2000 Profile-3 and 4 (2k/4k dc profile) requires:\n"
"Maximum 1302083 compressed bytes @ 24fps\n"
"-> Specified rate exceeds this limit. Rate will be forced to 1302083 bytes.\n");
parameters->max_cs_size = OPJ_CINEMA_24_CS;
}
if (parameters->max_comp_size <= 0) {
/* No rate has been given; 24 fps is assumed */
parameters->max_comp_size = OPJ_CINEMA_24_COMP;
opj_event_msg(p_manager, EVT_WARNING,
"JPEG 2000 Profile-3 and 4 (2k/4k dc profile) requires:\n"
"Maximum 1041666 compressed bytes @ 24fps\n"
"As no rate has been given, this limit will be used.\n");
} else if (parameters->max_comp_size > OPJ_CINEMA_24_COMP) {
opj_event_msg(p_manager, EVT_WARNING,
"JPEG 2000 Profile-3 and 4 (2k/4k dc profile) requires:\n"
"Maximum 1041666 compressed bytes @ 24fps\n"
"-> Specified rate exceeds this limit. Rate will be forced to 1041666 bytes.\n");
parameters->max_comp_size = OPJ_CINEMA_24_COMP;
}
parameters->tcp_rates[0] = (OPJ_FLOAT32) (image->numcomps * image->comps[0].w * image->comps[0].h * image->comps[0].prec)/
(OPJ_FLOAT32)(((OPJ_UINT32)parameters->max_cs_size) * 8 * image->comps[0].dx * image->comps[0].dy);
}
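/*
 * A worked example of the rate computation above, assuming hypothetical
 * 2K cinema input (3 components, 2048x1080, 12-bit precision, dx = dy = 1)
 * and the default 24 fps budget OPJ_CINEMA_24_CS = 1302083 bytes:
 *
 *   tcp_rates[0] = (3 * 2048 * 1080 * 12) / (1302083 * 8 * 1 * 1)
 *                =  79626240 / 10416664  ~= 7.64
 *
 * i.e. roughly a 7.6:1 compression ratio is requested so one frame fits
 * in the per-codestream byte budget.
 */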
static OPJ_BOOL opj_j2k_is_cinema_compliant(opj_image_t *image, OPJ_UINT16 rsiz, opj_event_mgr_t *p_manager)
{
OPJ_UINT32 i;
/* Number of components */
if (image->numcomps != 3){
opj_event_msg(p_manager, EVT_WARNING,
"JPEG 2000 Profile-3 (2k dc profile) requires:\n"
"3 components"
"-> Number of components of input image (%d) is not compliant\n"
"-> Non-profile-3 codestream will be generated\n",
image->numcomps);
return OPJ_FALSE;
}
/* Bitdepth */
for (i = 0; i < image->numcomps; i++) {
if ((image->comps[i].bpp != 12) || (image->comps[i].sgnd)){
char signed_str[] = "signed";
char unsigned_str[] = "unsigned";
char *tmp_str = image->comps[i].sgnd?signed_str:unsigned_str;
opj_event_msg(p_manager, EVT_WARNING,
"JPEG 2000 Profile-3 (2k dc profile) requires:\n"
"Precision of each component shall be 12 bits unsigned"
"-> At least component %d of input image (%d bits, %s) is not compliant\n"
"-> Non-profile-3 codestream will be generated\n",
i,image->comps[i].bpp, tmp_str);
return OPJ_FALSE;
}
}
/* Image size */
switch (rsiz){
case OPJ_PROFILE_CINEMA_2K:
if ((image->comps[0].w > 2048) || (image->comps[0].h > 1080)){
opj_event_msg(p_manager, EVT_WARNING,
"JPEG 2000 Profile-3 (2k dc profile) requires:\n"
"width <= 2048 and height <= 1080\n"
"-> Input image size %d x %d is not compliant\n"
"-> Non-profile-3 codestream will be generated\n",
image->comps[0].w,image->comps[0].h);
return OPJ_FALSE;
}
break;
case OPJ_PROFILE_CINEMA_4K:
if ((image->comps[0].w > 4096) || (image->comps[0].h > 2160)){
opj_event_msg(p_manager, EVT_WARNING,
"JPEG 2000 Profile-4 (4k dc profile) requires:\n"
"width <= 4096 and height <= 2160\n"
"-> Image size %d x %d is not compliant\n"
"-> Non-profile-4 codestream will be generated\n",
image->comps[0].w,image->comps[0].h);
return OPJ_FALSE;
}
break;
default :
break;
}
return OPJ_TRUE;
}
OPJ_BOOL opj_j2k_setup_encoder( opj_j2k_t *p_j2k,
opj_cparameters_t *parameters,
opj_image_t *image,
opj_event_mgr_t * p_manager)
{
OPJ_UINT32 i, j, tileno, numpocs_tile;
opj_cp_t *cp = 00;
if(!p_j2k || !parameters || ! image) {
return OPJ_FALSE;
}
if ((parameters->numresolution <= 0) || (parameters->numresolution > OPJ_J2K_MAXRLVLS)) {
opj_event_msg(p_manager, EVT_ERROR, "Invalid number of resolutions : %d not in range [1,%d]\n", parameters->numresolution, OPJ_J2K_MAXRLVLS);
return OPJ_FALSE;
}
/* keep a link to cp so that we can destroy it later in j2k_destroy_compress */
cp = &(p_j2k->m_cp);
/* set default values for cp */
cp->tw = 1;
cp->th = 1;
/* FIXME ADE: to be removed once deprecated cp_cinema and cp_rsiz have been removed */
if (parameters->rsiz == OPJ_PROFILE_NONE) { /* consider deprecated fields only if RSIZ has not been set */
OPJ_BOOL deprecated_used = OPJ_FALSE;
switch (parameters->cp_cinema){
case OPJ_CINEMA2K_24:
parameters->rsiz = OPJ_PROFILE_CINEMA_2K;
parameters->max_cs_size = OPJ_CINEMA_24_CS;
parameters->max_comp_size = OPJ_CINEMA_24_COMP;
deprecated_used = OPJ_TRUE;
break;
case OPJ_CINEMA2K_48:
parameters->rsiz = OPJ_PROFILE_CINEMA_2K;
parameters->max_cs_size = OPJ_CINEMA_48_CS;
parameters->max_comp_size = OPJ_CINEMA_48_COMP;
deprecated_used = OPJ_TRUE;
break;
case OPJ_CINEMA4K_24:
parameters->rsiz = OPJ_PROFILE_CINEMA_4K;
parameters->max_cs_size = OPJ_CINEMA_24_CS;
parameters->max_comp_size = OPJ_CINEMA_24_COMP;
deprecated_used = OPJ_TRUE;
break;
case OPJ_OFF:
default:
break;
}
switch (parameters->cp_rsiz){
case OPJ_CINEMA2K:
parameters->rsiz = OPJ_PROFILE_CINEMA_2K;
deprecated_used = OPJ_TRUE;
break;
case OPJ_CINEMA4K:
parameters->rsiz = OPJ_PROFILE_CINEMA_4K;
deprecated_used = OPJ_TRUE;
break;
case OPJ_MCT:
parameters->rsiz = OPJ_PROFILE_PART2 | OPJ_EXTENSION_MCT;
deprecated_used = OPJ_TRUE;
break;
case OPJ_STD_RSIZ:
default:
break;
}
if (deprecated_used) {
opj_event_msg(p_manager, EVT_WARNING,
"Deprecated fields cp_cinema or cp_rsiz are used\n"
"Please consider using only the rsiz field\n"
"See openjpeg.h documentation for more details\n");
}
}
/* see if max_codestream_size does limit input rate */
if (parameters->max_cs_size <= 0) {
if (parameters->tcp_rates[parameters->tcp_numlayers-1] > 0) {
OPJ_FLOAT32 temp_size;
temp_size =(OPJ_FLOAT32)(image->numcomps * image->comps[0].w * image->comps[0].h * image->comps[0].prec)/
(parameters->tcp_rates[parameters->tcp_numlayers-1] * 8 * (OPJ_FLOAT32)image->comps[0].dx * (OPJ_FLOAT32)image->comps[0].dy);
parameters->max_cs_size = (int) floor(temp_size);
} else {
parameters->max_cs_size = 0;
}
} else {
OPJ_FLOAT32 temp_rate;
OPJ_BOOL cap = OPJ_FALSE;
temp_rate = (OPJ_FLOAT32) (image->numcomps * image->comps[0].w * image->comps[0].h * image->comps[0].prec)/
(OPJ_FLOAT32)(((OPJ_UINT32)parameters->max_cs_size) * 8 * image->comps[0].dx * image->comps[0].dy);
for (i = 0; i < (OPJ_UINT32) parameters->tcp_numlayers; i++) {
if (parameters->tcp_rates[i] < temp_rate) {
parameters->tcp_rates[i] = temp_rate;
cap = OPJ_TRUE;
}
}
if (cap) {
opj_event_msg(p_manager, EVT_WARNING,
"The desired maximum codestream size has limited\n"
"at least one of the desired quality layers\n");
}
}
/* Manage profiles and applications and set RSIZ */
/* set cinema parameters if required */
if (OPJ_IS_CINEMA(parameters->rsiz)){
if ((parameters->rsiz == OPJ_PROFILE_CINEMA_S2K)
|| (parameters->rsiz == OPJ_PROFILE_CINEMA_S4K)){
opj_event_msg(p_manager, EVT_WARNING,
"JPEG 2000 Scalable Digital Cinema profiles not yet supported\n");
parameters->rsiz = OPJ_PROFILE_NONE;
} else {
opj_j2k_set_cinema_parameters(parameters,image,p_manager);
if (!opj_j2k_is_cinema_compliant(image,parameters->rsiz,p_manager)) {
parameters->rsiz = OPJ_PROFILE_NONE;
}
}
} else if (OPJ_IS_STORAGE(parameters->rsiz)) {
opj_event_msg(p_manager, EVT_WARNING,
"JPEG 2000 Long Term Storage profile not yet supported\n");
parameters->rsiz = OPJ_PROFILE_NONE;
} else if (OPJ_IS_BROADCAST(parameters->rsiz)) {
opj_event_msg(p_manager, EVT_WARNING,
"JPEG 2000 Broadcast profiles not yet supported\n");
parameters->rsiz = OPJ_PROFILE_NONE;
} else if (OPJ_IS_IMF(parameters->rsiz)) {
opj_event_msg(p_manager, EVT_WARNING,
"JPEG 2000 IMF profiles not yet supported\n");
parameters->rsiz = OPJ_PROFILE_NONE;
} else if (OPJ_IS_PART2(parameters->rsiz)) {
if (parameters->rsiz == ((OPJ_PROFILE_PART2) | (OPJ_EXTENSION_NONE))) {
opj_event_msg(p_manager, EVT_WARNING,
"JPEG 2000 Part-2 profile defined\n"
"but no Part-2 extension enabled.\n"
"Profile set to NONE.\n");
parameters->rsiz = OPJ_PROFILE_NONE;
} else if (parameters->rsiz != ((OPJ_PROFILE_PART2) | (OPJ_EXTENSION_MCT))) {
opj_event_msg(p_manager, EVT_WARNING,
"Unsupported Part-2 extension enabled\n"
"Profile set to NONE.\n");
parameters->rsiz = OPJ_PROFILE_NONE;
}
}
/*
copy user encoding parameters
*/
cp->m_specific_param.m_enc.m_max_comp_size = (OPJ_UINT32)parameters->max_comp_size;
cp->rsiz = parameters->rsiz;
cp->m_specific_param.m_enc.m_disto_alloc = (OPJ_UINT32)parameters->cp_disto_alloc & 1u;
cp->m_specific_param.m_enc.m_fixed_alloc = (OPJ_UINT32)parameters->cp_fixed_alloc & 1u;
cp->m_specific_param.m_enc.m_fixed_quality = (OPJ_UINT32)parameters->cp_fixed_quality & 1u;
/* mod fixed_quality */
if (parameters->cp_fixed_alloc && parameters->cp_matrice) {
size_t array_size = (size_t)parameters->tcp_numlayers * (size_t)parameters->numresolution * 3 * sizeof(OPJ_INT32);
cp->m_specific_param.m_enc.m_matrice = (OPJ_INT32 *) opj_malloc(array_size);
if (!cp->m_specific_param.m_enc.m_matrice) {
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to allocate copy of user encoding parameters matrix \n");
return OPJ_FALSE;
}
memcpy(cp->m_specific_param.m_enc.m_matrice, parameters->cp_matrice, array_size);
}
/* tiles */
cp->tdx = (OPJ_UINT32)parameters->cp_tdx;
cp->tdy = (OPJ_UINT32)parameters->cp_tdy;
/* tile offset */
cp->tx0 = (OPJ_UINT32)parameters->cp_tx0;
cp->ty0 = (OPJ_UINT32)parameters->cp_ty0;
/* comment string */
if(parameters->cp_comment) {
cp->comment = (char*)opj_malloc(strlen(parameters->cp_comment) + 1U);
if(!cp->comment) {
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to allocate copy of comment string\n");
return OPJ_FALSE;
}
strcpy(cp->comment, parameters->cp_comment);
} else {
/* Create default comment for codestream */
const char comment[] = "Created by OpenJPEG version ";
const size_t clen = strlen(comment);
const char *version = opj_version();
/* UniPG>> */
#ifdef USE_JPWL
cp->comment = (char*)opj_malloc(clen+strlen(version)+11);
if(!cp->comment) {
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to allocate comment string\n");
return OPJ_FALSE;
}
sprintf(cp->comment,"%s%s with JPWL", comment, version);
#else
cp->comment = (char*)opj_malloc(clen+strlen(version)+1);
if(!cp->comment) {
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to allocate comment string\n");
return OPJ_FALSE;
}
sprintf(cp->comment,"%s%s", comment, version);
#endif
/* <<UniPG */
}
/*
calculate other encoding parameters
*/
if (parameters->tile_size_on) {
cp->tw = (OPJ_UINT32)opj_int_ceildiv((OPJ_INT32)(image->x1 - cp->tx0), (OPJ_INT32)cp->tdx);
cp->th = (OPJ_UINT32)opj_int_ceildiv((OPJ_INT32)(image->y1 - cp->ty0), (OPJ_INT32)cp->tdy);
} else {
cp->tdx = image->x1 - cp->tx0;
cp->tdy = image->y1 - cp->ty0;
}
if (parameters->tp_on) {
cp->m_specific_param.m_enc.m_tp_flag = (OPJ_BYTE)parameters->tp_flag;
cp->m_specific_param.m_enc.m_tp_on = 1;
}
#ifdef USE_JPWL
/*
calculate JPWL encoding parameters
*/
if (parameters->jpwl_epc_on) {
OPJ_INT32 i;
/* set JPWL on */
cp->epc_on = OPJ_TRUE;
cp->info_on = OPJ_FALSE; /* no informative technique */
/* set EPB on */
if ((parameters->jpwl_hprot_MH > 0) || (parameters->jpwl_hprot_TPH[0] > 0)) {
cp->epb_on = OPJ_TRUE;
cp->hprot_MH = parameters->jpwl_hprot_MH;
for (i = 0; i < JPWL_MAX_NO_TILESPECS; i++) {
cp->hprot_TPH_tileno[i] = parameters->jpwl_hprot_TPH_tileno[i];
cp->hprot_TPH[i] = parameters->jpwl_hprot_TPH[i];
}
/* if tile specs are not specified, copy MH specs */
if (cp->hprot_TPH[0] == -1) {
cp->hprot_TPH_tileno[0] = 0;
cp->hprot_TPH[0] = parameters->jpwl_hprot_MH;
}
for (i = 0; i < JPWL_MAX_NO_PACKSPECS; i++) {
cp->pprot_tileno[i] = parameters->jpwl_pprot_tileno[i];
cp->pprot_packno[i] = parameters->jpwl_pprot_packno[i];
cp->pprot[i] = parameters->jpwl_pprot[i];
}
}
/* set ESD writing */
if ((parameters->jpwl_sens_size == 1) || (parameters->jpwl_sens_size == 2)) {
cp->esd_on = OPJ_TRUE;
cp->sens_size = parameters->jpwl_sens_size;
cp->sens_addr = parameters->jpwl_sens_addr;
cp->sens_range = parameters->jpwl_sens_range;
cp->sens_MH = parameters->jpwl_sens_MH;
for (i = 0; i < JPWL_MAX_NO_TILESPECS; i++) {
cp->sens_TPH_tileno[i] = parameters->jpwl_sens_TPH_tileno[i];
cp->sens_TPH[i] = parameters->jpwl_sens_TPH[i];
}
}
/* always set RED writing to false: we are at the encoder */
cp->red_on = OPJ_FALSE;
} else {
cp->epc_on = OPJ_FALSE;
}
#endif /* USE_JPWL */
/* initialize the multiple tiles */
/* ---------------------------- */
cp->tcps = (opj_tcp_t*) opj_calloc(cp->tw * cp->th, sizeof(opj_tcp_t));
if (!cp->tcps) {
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to allocate tile coding parameters\n");
return OPJ_FALSE;
}
if (parameters->numpocs) {
/* initialisation of POC */
opj_j2k_check_poc_val(parameters->POC,parameters->numpocs, (OPJ_UINT32)parameters->numresolution, image->numcomps, (OPJ_UINT32)parameters->tcp_numlayers, p_manager);
/* TODO MSD use the return value*/
}
for (tileno = 0; tileno < cp->tw * cp->th; tileno++) {
opj_tcp_t *tcp = &cp->tcps[tileno];
tcp->numlayers = (OPJ_UINT32)parameters->tcp_numlayers;
for (j = 0; j < tcp->numlayers; j++) {
if(OPJ_IS_CINEMA(cp->rsiz)){
if (cp->m_specific_param.m_enc.m_fixed_quality) {
tcp->distoratio[j] = parameters->tcp_distoratio[j];
}
tcp->rates[j] = parameters->tcp_rates[j];
}else{
if (cp->m_specific_param.m_enc.m_fixed_quality) { /* add fixed_quality */
tcp->distoratio[j] = parameters->tcp_distoratio[j];
} else {
tcp->rates[j] = parameters->tcp_rates[j];
}
}
}
tcp->csty = (OPJ_UINT32)parameters->csty;
tcp->prg = parameters->prog_order;
tcp->mct = (OPJ_UINT32)parameters->tcp_mct;
numpocs_tile = 0;
tcp->POC = 0;
if (parameters->numpocs) {
/* initialisation of POC */
tcp->POC = 1;
for (i = 0; i < parameters->numpocs; i++) {
if (tileno + 1 == parameters->POC[i].tile ) {
opj_poc_t *tcp_poc = &tcp->pocs[numpocs_tile];
tcp_poc->resno0 = parameters->POC[numpocs_tile].resno0;
tcp_poc->compno0 = parameters->POC[numpocs_tile].compno0;
tcp_poc->layno1 = parameters->POC[numpocs_tile].layno1;
tcp_poc->resno1 = parameters->POC[numpocs_tile].resno1;
tcp_poc->compno1 = parameters->POC[numpocs_tile].compno1;
tcp_poc->prg1 = parameters->POC[numpocs_tile].prg1;
tcp_poc->tile = parameters->POC[numpocs_tile].tile;
numpocs_tile++;
}
}
tcp->numpocs = numpocs_tile - 1;
}else{
tcp->numpocs = 0;
}
tcp->tccps = (opj_tccp_t*) opj_calloc(image->numcomps, sizeof(opj_tccp_t));
if (!tcp->tccps) {
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to allocate tile component coding parameters\n");
return OPJ_FALSE;
}
if (parameters->mct_data) {
OPJ_UINT32 lMctSize = image->numcomps * image->numcomps * (OPJ_UINT32)sizeof(OPJ_FLOAT32);
OPJ_FLOAT32 * lTmpBuf = (OPJ_FLOAT32*)opj_malloc(lMctSize);
OPJ_INT32 * l_dc_shift = (OPJ_INT32 *) ((OPJ_BYTE *) parameters->mct_data + lMctSize);
if (!lTmpBuf) {
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to allocate temp buffer\n");
return OPJ_FALSE;
}
tcp->mct = 2;
tcp->m_mct_coding_matrix = (OPJ_FLOAT32*)opj_malloc(lMctSize);
if (! tcp->m_mct_coding_matrix) {
opj_free(lTmpBuf);
lTmpBuf = NULL;
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to allocate encoder MCT coding matrix \n");
return OPJ_FALSE;
}
memcpy(tcp->m_mct_coding_matrix,parameters->mct_data,lMctSize);
memcpy(lTmpBuf,parameters->mct_data,lMctSize);
tcp->m_mct_decoding_matrix = (OPJ_FLOAT32*)opj_malloc(lMctSize);
if (! tcp->m_mct_decoding_matrix) {
opj_free(lTmpBuf);
lTmpBuf = NULL;
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to allocate encoder MCT decoding matrix \n");
return OPJ_FALSE;
}
if(opj_matrix_inversion_f(lTmpBuf,(tcp->m_mct_decoding_matrix),image->numcomps) == OPJ_FALSE) {
opj_free(lTmpBuf);
lTmpBuf = NULL;
opj_event_msg(p_manager, EVT_ERROR, "Failed to inverse encoder MCT decoding matrix \n");
return OPJ_FALSE;
}
tcp->mct_norms = (OPJ_FLOAT64*)
opj_malloc(image->numcomps * sizeof(OPJ_FLOAT64));
if (! tcp->mct_norms) {
opj_free(lTmpBuf);
lTmpBuf = NULL;
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to allocate encoder MCT norms \n");
return OPJ_FALSE;
}
opj_calculate_norms(tcp->mct_norms,image->numcomps,tcp->m_mct_decoding_matrix);
opj_free(lTmpBuf);
for (i = 0; i < image->numcomps; i++) {
opj_tccp_t *tccp = &tcp->tccps[i];
tccp->m_dc_level_shift = l_dc_shift[i];
}
if (opj_j2k_setup_mct_encoding(tcp,image) == OPJ_FALSE) {
/* free will be handled by opj_j2k_destroy */
opj_event_msg(p_manager, EVT_ERROR, "Failed to setup j2k mct encoding\n");
return OPJ_FALSE;
}
}
else {
if(tcp->mct==1 && image->numcomps >= 3) { /* RGB->YCC MCT is enabled */
if ((image->comps[0].dx != image->comps[1].dx) ||
(image->comps[0].dx != image->comps[2].dx) ||
(image->comps[0].dy != image->comps[1].dy) ||
(image->comps[0].dy != image->comps[2].dy)) {
opj_event_msg(p_manager, EVT_WARNING, "Cannot perform MCT on components with different sizes. Disabling MCT.\n");
tcp->mct = 0;
}
}
for (i = 0; i < image->numcomps; i++) {
opj_tccp_t *tccp = &tcp->tccps[i];
opj_image_comp_t * l_comp = &(image->comps[i]);
if (! l_comp->sgnd) {
tccp->m_dc_level_shift = 1 << (l_comp->prec - 1);
}
}
}
for (i = 0; i < image->numcomps; i++) {
opj_tccp_t *tccp = &tcp->tccps[i];
tccp->csty = parameters->csty & 0x01; /* 0 => one precinct || 1 => custom precinct */
tccp->numresolutions = (OPJ_UINT32)parameters->numresolution;
tccp->cblkw = (OPJ_UINT32)opj_int_floorlog2(parameters->cblockw_init);
tccp->cblkh = (OPJ_UINT32)opj_int_floorlog2(parameters->cblockh_init);
tccp->cblksty = (OPJ_UINT32)parameters->mode;
tccp->qmfbid = parameters->irreversible ? 0 : 1;
tccp->qntsty = parameters->irreversible ? J2K_CCP_QNTSTY_SEQNT : J2K_CCP_QNTSTY_NOQNT;
tccp->numgbits = 2;
if ((OPJ_INT32)i == parameters->roi_compno) {
tccp->roishift = parameters->roi_shift;
} else {
tccp->roishift = 0;
}
if (parameters->csty & J2K_CCP_CSTY_PRT) {
OPJ_INT32 p = 0, it_res;
assert( tccp->numresolutions > 0 );
for (it_res = (OPJ_INT32)tccp->numresolutions - 1; it_res >= 0; it_res--) {
if (p < parameters->res_spec) {
if (parameters->prcw_init[p] < 1) {
tccp->prcw[it_res] = 1;
} else {
tccp->prcw[it_res] = (OPJ_UINT32)opj_int_floorlog2(parameters->prcw_init[p]);
}
if (parameters->prch_init[p] < 1) {
tccp->prch[it_res] = 1;
}else {
tccp->prch[it_res] = (OPJ_UINT32)opj_int_floorlog2(parameters->prch_init[p]);
}
} else {
OPJ_INT32 res_spec = parameters->res_spec;
OPJ_INT32 size_prcw = 0;
OPJ_INT32 size_prch = 0;
assert(res_spec>0); /* issue 189 */
size_prcw = parameters->prcw_init[res_spec - 1] >> (p - (res_spec - 1));
size_prch = parameters->prch_init[res_spec - 1] >> (p - (res_spec - 1));
if (size_prcw < 1) {
tccp->prcw[it_res] = 1;
} else {
tccp->prcw[it_res] = (OPJ_UINT32)opj_int_floorlog2(size_prcw);
}
if (size_prch < 1) {
tccp->prch[it_res] = 1;
} else {
tccp->prch[it_res] = (OPJ_UINT32)opj_int_floorlog2(size_prch);
}
}
p++;
/*printf("\nsize precinct for level %d : %d,%d\n", it_res,tccp->prcw[it_res], tccp->prch[it_res]); */
} /*end for*/
} else {
for (j = 0; j < tccp->numresolutions; j++) {
tccp->prcw[j] = 15;
tccp->prch[j] = 15;
}
}
opj_dwt_calc_explicit_stepsizes(tccp, image->comps[i].prec);
}
}
if (parameters->mct_data) {
opj_free(parameters->mct_data);
parameters->mct_data = 00;
}
return OPJ_TRUE;
}
static OPJ_BOOL opj_j2k_add_mhmarker(opj_codestream_index_t *cstr_index, OPJ_UINT32 type, OPJ_OFF_T pos, OPJ_UINT32 len)
{
assert(cstr_index != 00);
/* expand the list? */
if ((cstr_index->marknum + 1) > cstr_index->maxmarknum) {
opj_marker_info_t *new_marker;
cstr_index->maxmarknum += 100;
new_marker = (opj_marker_info_t *) opj_realloc(cstr_index->marker, cstr_index->maxmarknum *sizeof(opj_marker_info_t));
if (! new_marker) {
opj_free(cstr_index->marker);
cstr_index->marker = NULL;
cstr_index->maxmarknum = 0;
cstr_index->marknum = 0;
/* opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to add mh marker\n"); */
return OPJ_FALSE;
}
cstr_index->marker = new_marker;
}
/* add the marker */
cstr_index->marker[cstr_index->marknum].type = (OPJ_UINT16)type;
cstr_index->marker[cstr_index->marknum].pos = (OPJ_INT32)pos;
cstr_index->marker[cstr_index->marknum].len = (OPJ_INT32)len;
cstr_index->marknum++;
return OPJ_TRUE;
}
static OPJ_BOOL opj_j2k_add_tlmarker(OPJ_UINT32 tileno, opj_codestream_index_t *cstr_index, OPJ_UINT32 type, OPJ_OFF_T pos, OPJ_UINT32 len)
{
assert(cstr_index != 00);
assert(cstr_index->tile_index != 00);
/* expand the list? */
if ((cstr_index->tile_index[tileno].marknum + 1) > cstr_index->tile_index[tileno].maxmarknum) {
opj_marker_info_t *new_marker;
cstr_index->tile_index[tileno].maxmarknum += 100;
new_marker = (opj_marker_info_t *) opj_realloc(
cstr_index->tile_index[tileno].marker,
cstr_index->tile_index[tileno].maxmarknum *sizeof(opj_marker_info_t));
if (! new_marker) {
opj_free(cstr_index->tile_index[tileno].marker);
cstr_index->tile_index[tileno].marker = NULL;
cstr_index->tile_index[tileno].maxmarknum = 0;
cstr_index->tile_index[tileno].marknum = 0;
/* opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to add tl marker\n"); */
return OPJ_FALSE;
}
cstr_index->tile_index[tileno].marker = new_marker;
}
/* add the marker */
cstr_index->tile_index[tileno].marker[cstr_index->tile_index[tileno].marknum].type = (OPJ_UINT16)type;
cstr_index->tile_index[tileno].marker[cstr_index->tile_index[tileno].marknum].pos = (OPJ_INT32)pos;
cstr_index->tile_index[tileno].marker[cstr_index->tile_index[tileno].marknum].len = (OPJ_INT32)len;
cstr_index->tile_index[tileno].marknum++;
if (type == J2K_MS_SOT) {
OPJ_UINT32 l_current_tile_part = cstr_index->tile_index[tileno].current_tpsno;
if (cstr_index->tile_index[tileno].tp_index)
cstr_index->tile_index[tileno].tp_index[l_current_tile_part].start_pos = pos;
}
return OPJ_TRUE;
}
/*
* -----------------------------------------------------------------------
* -----------------------------------------------------------------------
* -----------------------------------------------------------------------
*/
OPJ_BOOL opj_j2k_end_decompress(opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager
)
{
(void)p_j2k;
(void)p_stream;
(void)p_manager;
return OPJ_TRUE;
}
OPJ_BOOL opj_j2k_read_header( opj_stream_private_t *p_stream,
opj_j2k_t* p_j2k,
opj_image_t** p_image,
opj_event_mgr_t* p_manager )
{
/* preconditions */
assert(p_j2k != 00);
assert(p_stream != 00);
assert(p_manager != 00);
/* create an empty image header */
p_j2k->m_private_image = opj_image_create0();
if (! p_j2k->m_private_image) {
return OPJ_FALSE;
}
/* customization of the validation */
if (! opj_j2k_setup_decoding_validation(p_j2k, p_manager)) {
opj_image_destroy(p_j2k->m_private_image);
p_j2k->m_private_image = NULL;
return OPJ_FALSE;
}
/* validation of the parameters codec */
if (! opj_j2k_exec(p_j2k, p_j2k->m_validation_list, p_stream,p_manager)) {
opj_image_destroy(p_j2k->m_private_image);
p_j2k->m_private_image = NULL;
return OPJ_FALSE;
}
/* customization of the header reading */
if (! opj_j2k_setup_header_reading(p_j2k, p_manager)) {
opj_image_destroy(p_j2k->m_private_image);
p_j2k->m_private_image = NULL;
return OPJ_FALSE;
}
/* read header */
if (! opj_j2k_exec (p_j2k,p_j2k->m_procedure_list,p_stream,p_manager)) {
opj_image_destroy(p_j2k->m_private_image);
p_j2k->m_private_image = NULL;
return OPJ_FALSE;
}
*p_image = opj_image_create0();
if (! (*p_image)) {
return OPJ_FALSE;
}
/* Copy codestream image information to the output image */
opj_copy_image_header(p_j2k->m_private_image, *p_image);
/* Allocate and initialize some elements of the codestream index */
if (!opj_j2k_allocate_tile_element_cstr_index(p_j2k)){
return OPJ_FALSE;
}
return OPJ_TRUE;
}
static OPJ_BOOL opj_j2k_setup_header_reading (opj_j2k_t *p_j2k, opj_event_mgr_t * p_manager)
{
/* preconditions*/
assert(p_j2k != 00);
assert(p_manager != 00);
if (! opj_procedure_list_add_procedure(p_j2k->m_procedure_list,(opj_procedure)opj_j2k_read_header_procedure, p_manager)) {
return OPJ_FALSE;
}
/* DEVELOPER CORNER, add your custom procedures */
if (! opj_procedure_list_add_procedure(p_j2k->m_procedure_list,(opj_procedure)opj_j2k_copy_default_tcp_and_create_tcd, p_manager)) {
return OPJ_FALSE;
}
return OPJ_TRUE;
}
static OPJ_BOOL opj_j2k_setup_decoding_validation (opj_j2k_t *p_j2k, opj_event_mgr_t * p_manager)
{
/* preconditions*/
assert(p_j2k != 00);
assert(p_manager != 00);
if (! opj_procedure_list_add_procedure(p_j2k->m_validation_list,(opj_procedure)opj_j2k_build_decoder, p_manager)) {
return OPJ_FALSE;
}
if (! opj_procedure_list_add_procedure(p_j2k->m_validation_list,(opj_procedure)opj_j2k_decoding_validation, p_manager)) {
return OPJ_FALSE;
}
/* DEVELOPER CORNER, add your custom validation procedure */
return OPJ_TRUE;
}
static OPJ_BOOL opj_j2k_mct_validation ( opj_j2k_t * p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager )
{
OPJ_BOOL l_is_valid = OPJ_TRUE;
OPJ_UINT32 i,j;
/* preconditions */
assert(p_j2k != 00);
assert(p_stream != 00);
assert(p_manager != 00);
if ((p_j2k->m_cp.rsiz & 0x8200) == 0x8200) {
OPJ_UINT32 l_nb_tiles = p_j2k->m_cp.th * p_j2k->m_cp.tw;
opj_tcp_t * l_tcp = p_j2k->m_cp.tcps;
for (i=0;i<l_nb_tiles;++i) {
if (l_tcp->mct == 2) {
opj_tccp_t * l_tccp = l_tcp->tccps;
l_is_valid &= (l_tcp->m_mct_coding_matrix != 00);
for (j=0;j<p_j2k->m_private_image->numcomps;++j) {
l_is_valid &= ! (l_tccp->qmfbid & 1);
++l_tccp;
}
}
++l_tcp;
}
}
return l_is_valid;
}
OPJ_BOOL opj_j2k_setup_mct_encoding(opj_tcp_t * p_tcp, opj_image_t * p_image)
{
OPJ_UINT32 i;
OPJ_UINT32 l_indix = 1;
opj_mct_data_t * l_mct_deco_data = 00,* l_mct_offset_data = 00;
opj_simple_mcc_decorrelation_data_t * l_mcc_data;
OPJ_UINT32 l_mct_size,l_nb_elem;
OPJ_FLOAT32 * l_data, * l_current_data;
opj_tccp_t * l_tccp;
/* preconditions */
assert(p_tcp != 00);
if (p_tcp->mct != 2) {
return OPJ_TRUE;
}
if (p_tcp->m_mct_decoding_matrix) {
if (p_tcp->m_nb_mct_records == p_tcp->m_nb_max_mct_records) {
opj_mct_data_t *new_mct_records;
p_tcp->m_nb_max_mct_records += OPJ_J2K_MCT_DEFAULT_NB_RECORDS;
new_mct_records = (opj_mct_data_t *) opj_realloc(p_tcp->m_mct_records, p_tcp->m_nb_max_mct_records * sizeof(opj_mct_data_t));
if (! new_mct_records) {
opj_free(p_tcp->m_mct_records);
p_tcp->m_mct_records = NULL;
p_tcp->m_nb_max_mct_records = 0;
p_tcp->m_nb_mct_records = 0;
/* opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to setup mct encoding\n"); */
return OPJ_FALSE;
}
p_tcp->m_mct_records = new_mct_records;
l_mct_deco_data = p_tcp->m_mct_records + p_tcp->m_nb_mct_records;
memset(l_mct_deco_data ,0,(p_tcp->m_nb_max_mct_records - p_tcp->m_nb_mct_records) * sizeof(opj_mct_data_t));
}
l_mct_deco_data = p_tcp->m_mct_records + p_tcp->m_nb_mct_records;
if (l_mct_deco_data->m_data) {
opj_free(l_mct_deco_data->m_data);
l_mct_deco_data->m_data = 00;
}
l_mct_deco_data->m_index = l_indix++;
l_mct_deco_data->m_array_type = MCT_TYPE_DECORRELATION;
l_mct_deco_data->m_element_type = MCT_TYPE_FLOAT;
l_nb_elem = p_image->numcomps * p_image->numcomps;
l_mct_size = l_nb_elem * MCT_ELEMENT_SIZE[l_mct_deco_data->m_element_type];
l_mct_deco_data->m_data = (OPJ_BYTE*)opj_malloc(l_mct_size );
if (! l_mct_deco_data->m_data) {
return OPJ_FALSE;
}
j2k_mct_write_functions_from_float[l_mct_deco_data->m_element_type](p_tcp->m_mct_decoding_matrix,l_mct_deco_data->m_data,l_nb_elem);
l_mct_deco_data->m_data_size = l_mct_size;
++p_tcp->m_nb_mct_records;
}
if (p_tcp->m_nb_mct_records == p_tcp->m_nb_max_mct_records) {
opj_mct_data_t *new_mct_records;
p_tcp->m_nb_max_mct_records += OPJ_J2K_MCT_DEFAULT_NB_RECORDS;
new_mct_records = (opj_mct_data_t *) opj_realloc(p_tcp->m_mct_records, p_tcp->m_nb_max_mct_records * sizeof(opj_mct_data_t));
if (! new_mct_records) {
opj_free(p_tcp->m_mct_records);
p_tcp->m_mct_records = NULL;
p_tcp->m_nb_max_mct_records = 0;
p_tcp->m_nb_mct_records = 0;
/* opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to setup mct encoding\n"); */
return OPJ_FALSE;
}
p_tcp->m_mct_records = new_mct_records;
l_mct_offset_data = p_tcp->m_mct_records + p_tcp->m_nb_mct_records;
memset(l_mct_offset_data ,0,(p_tcp->m_nb_max_mct_records - p_tcp->m_nb_mct_records) * sizeof(opj_mct_data_t));
if (l_mct_deco_data) {
l_mct_deco_data = l_mct_offset_data - 1;
}
}
l_mct_offset_data = p_tcp->m_mct_records + p_tcp->m_nb_mct_records;
if (l_mct_offset_data->m_data) {
opj_free(l_mct_offset_data->m_data);
l_mct_offset_data->m_data = 00;
}
l_mct_offset_data->m_index = l_indix++;
l_mct_offset_data->m_array_type = MCT_TYPE_OFFSET;
l_mct_offset_data->m_element_type = MCT_TYPE_FLOAT;
l_nb_elem = p_image->numcomps;
l_mct_size = l_nb_elem * MCT_ELEMENT_SIZE[l_mct_offset_data->m_element_type];
l_mct_offset_data->m_data = (OPJ_BYTE*)opj_malloc(l_mct_size );
if (! l_mct_offset_data->m_data) {
return OPJ_FALSE;
}
l_data = (OPJ_FLOAT32*)opj_malloc(l_nb_elem * sizeof(OPJ_FLOAT32));
if (! l_data) {
opj_free(l_mct_offset_data->m_data);
l_mct_offset_data->m_data = 00;
return OPJ_FALSE;
}
l_tccp = p_tcp->tccps;
l_current_data = l_data;
for (i=0;i<l_nb_elem;++i) {
*(l_current_data++) = (OPJ_FLOAT32) (l_tccp->m_dc_level_shift);
++l_tccp;
}
j2k_mct_write_functions_from_float[l_mct_offset_data->m_element_type](l_data,l_mct_offset_data->m_data,l_nb_elem);
opj_free(l_data);
l_mct_offset_data->m_data_size = l_mct_size;
++p_tcp->m_nb_mct_records;
if (p_tcp->m_nb_mcc_records == p_tcp->m_nb_max_mcc_records) {
opj_simple_mcc_decorrelation_data_t *new_mcc_records;
p_tcp->m_nb_max_mcc_records += OPJ_J2K_MCT_DEFAULT_NB_RECORDS;
new_mcc_records = (opj_simple_mcc_decorrelation_data_t *) opj_realloc(
p_tcp->m_mcc_records, p_tcp->m_nb_max_mcc_records * sizeof(opj_simple_mcc_decorrelation_data_t));
if (! new_mcc_records) {
opj_free(p_tcp->m_mcc_records);
p_tcp->m_mcc_records = NULL;
p_tcp->m_nb_max_mcc_records = 0;
p_tcp->m_nb_mcc_records = 0;
/* opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to setup mct encoding\n"); */
return OPJ_FALSE;
}
p_tcp->m_mcc_records = new_mcc_records;
l_mcc_data = p_tcp->m_mcc_records + p_tcp->m_nb_mcc_records;
memset(l_mcc_data ,0,(p_tcp->m_nb_max_mcc_records - p_tcp->m_nb_mcc_records) * sizeof(opj_simple_mcc_decorrelation_data_t));
}
l_mcc_data = p_tcp->m_mcc_records + p_tcp->m_nb_mcc_records;
l_mcc_data->m_decorrelation_array = l_mct_deco_data;
l_mcc_data->m_is_irreversible = 1;
l_mcc_data->m_nb_comps = p_image->numcomps;
l_mcc_data->m_index = l_indix++;
l_mcc_data->m_offset_array = l_mct_offset_data;
++p_tcp->m_nb_mcc_records;
return OPJ_TRUE;
}
static OPJ_BOOL opj_j2k_build_decoder (opj_j2k_t * p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager )
{
/* add here initialization of cp
copy paste of setup_decoder */
(void)p_j2k;
(void)p_stream;
(void)p_manager;
return OPJ_TRUE;
}
static OPJ_BOOL opj_j2k_build_encoder (opj_j2k_t * p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager )
{
/* add here initialization of cp
copy paste of setup_encoder */
(void)p_j2k;
(void)p_stream;
(void)p_manager;
return OPJ_TRUE;
}
static OPJ_BOOL opj_j2k_encoding_validation ( opj_j2k_t * p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager )
{
OPJ_BOOL l_is_valid = OPJ_TRUE;
/* preconditions */
assert(p_j2k != 00);
assert(p_stream != 00);
assert(p_manager != 00);
/* STATE checking */
/* make sure the state is at 0 */
l_is_valid &= (p_j2k->m_specific_param.m_decoder.m_state == J2K_STATE_NONE);
/* POINTER validation */
/* make sure a p_j2k codec is present */
l_is_valid &= (p_j2k->m_procedure_list != 00);
/* make sure a validation list is present */
l_is_valid &= (p_j2k->m_validation_list != 00);
/* ISO 15444-1:2004 states between 1 & 33 (0 -> 32) */
/* 33 (32) would always fail the check below (if a cast to 64bits was done) */
/* FIXME Shall we change OPJ_J2K_MAXRLVLS to 32 ? */
if ((p_j2k->m_cp.tcps->tccps->numresolutions <= 0) || (p_j2k->m_cp.tcps->tccps->numresolutions > 32)) {
opj_event_msg(p_manager, EVT_ERROR, "Number of resolutions is too high in comparison to the size of tiles\n");
return OPJ_FALSE;
}
if ((p_j2k->m_cp.tdx) < (OPJ_UINT32) (1 << (p_j2k->m_cp.tcps->tccps->numresolutions - 1U))) {
opj_event_msg(p_manager, EVT_ERROR, "Number of resolutions is too high in comparison to the size of tiles\n");
return OPJ_FALSE;
}
if ((p_j2k->m_cp.tdy) < (OPJ_UINT32) (1 << (p_j2k->m_cp.tcps->tccps->numresolutions - 1U))) {
opj_event_msg(p_manager, EVT_ERROR, "Number of resolutions is too high in comparison to the size of tiles\n");
return OPJ_FALSE;
}
/* PARAMETER VALIDATION */
return l_is_valid;
}
static OPJ_BOOL opj_j2k_decoding_validation ( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager
)
{
OPJ_BOOL l_is_valid = OPJ_TRUE;
/* preconditions*/
assert(p_j2k != 00);
assert(p_stream != 00);
assert(p_manager != 00);
/* STATE checking */
/* make sure the state is at 0 */
#ifdef TODO_MSD
l_is_valid &= (p_j2k->m_specific_param.m_decoder.m_state == J2K_DEC_STATE_NONE);
#endif
l_is_valid &= (p_j2k->m_specific_param.m_decoder.m_state == 0x0000);
/* POINTER validation */
/* make sure a p_j2k codec is present */
/* make sure a procedure list is present */
l_is_valid &= (p_j2k->m_procedure_list != 00);
/* make sure a validation list is present */
l_is_valid &= (p_j2k->m_validation_list != 00);
/* PARAMETER VALIDATION */
return l_is_valid;
}
static OPJ_BOOL opj_j2k_read_header_procedure( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager)
{
OPJ_UINT32 l_current_marker;
OPJ_UINT32 l_marker_size;
const opj_dec_memory_marker_handler_t * l_marker_handler = 00;
OPJ_BOOL l_has_siz = 0;
OPJ_BOOL l_has_cod = 0;
OPJ_BOOL l_has_qcd = 0;
/* preconditions */
assert(p_stream != 00);
assert(p_j2k != 00);
assert(p_manager != 00);
/* We enter the main header */
p_j2k->m_specific_param.m_decoder.m_state = J2K_STATE_MHSOC;
/* Try to read the SOC marker, the codestream must begin with SOC marker */
if (! opj_j2k_read_soc(p_j2k,p_stream,p_manager)) {
opj_event_msg(p_manager, EVT_ERROR, "Expected a SOC marker \n");
return OPJ_FALSE;
}
/* Try to read 2 bytes (the next marker ID) from stream and copy them into the buffer */
if (opj_stream_read_data(p_stream,p_j2k->m_specific_param.m_decoder.m_header_data,2,p_manager) != 2) {
opj_event_msg(p_manager, EVT_ERROR, "Stream too short\n");
return OPJ_FALSE;
}
/* Read 2 bytes as the new marker ID */
opj_read_bytes(p_j2k->m_specific_param.m_decoder.m_header_data,&l_current_marker,2);
/* Try to read until the SOT is detected */
while (l_current_marker != J2K_MS_SOT) {
/* Check if the current marker ID is valid */
if (l_current_marker < 0xff00) {
opj_event_msg(p_manager, EVT_ERROR, "A marker ID was expected (0xff--) instead of %.8x\n", l_current_marker);
return OPJ_FALSE;
}
/* Get the marker handler from the marker ID */
l_marker_handler = opj_j2k_get_marker_handler(l_current_marker);
/* Manage case where marker is unknown */
if (l_marker_handler->id == J2K_MS_UNK) {
if (! opj_j2k_read_unk(p_j2k, p_stream, &l_current_marker, p_manager)){
opj_event_msg(p_manager, EVT_ERROR, "Unknow marker have been detected and generated error.\n");
return OPJ_FALSE;
}
if (l_current_marker == J2K_MS_SOT)
break; /* SOT marker detected: the main header is completely read */
else /* Get the marker handler from the marker ID */
l_marker_handler = opj_j2k_get_marker_handler(l_current_marker);
}
if (l_marker_handler->id == J2K_MS_SIZ) {
/* Mark required SIZ marker as found */
l_has_siz = 1;
}
if (l_marker_handler->id == J2K_MS_COD) {
/* Mark required COD marker as found */
l_has_cod = 1;
}
if (l_marker_handler->id == J2K_MS_QCD) {
/* Mark required QCD marker as found */
l_has_qcd = 1;
}
/* Check if the marker is known and if it is the right place to find it */
if (! (p_j2k->m_specific_param.m_decoder.m_state & l_marker_handler->states) ) {
opj_event_msg(p_manager, EVT_ERROR, "Marker is not compliant with its position\n");
return OPJ_FALSE;
}
/* Try to read 2 bytes (the marker size) from stream and copy them into the buffer */
if (opj_stream_read_data(p_stream,p_j2k->m_specific_param.m_decoder.m_header_data,2,p_manager) != 2) {
opj_event_msg(p_manager, EVT_ERROR, "Stream too short\n");
return OPJ_FALSE;
}
/* read 2 bytes as the marker size */
opj_read_bytes(p_j2k->m_specific_param.m_decoder.m_header_data,&l_marker_size,2);
l_marker_size -= 2; /* Subtract the size of the marker length field itself, which was already read */
/* Check if the marker size is compatible with the header data size */
if (l_marker_size > p_j2k->m_specific_param.m_decoder.m_header_data_size) {
OPJ_BYTE *new_header_data = (OPJ_BYTE *) opj_realloc(p_j2k->m_specific_param.m_decoder.m_header_data, l_marker_size);
if (! new_header_data) {
opj_free(p_j2k->m_specific_param.m_decoder.m_header_data);
p_j2k->m_specific_param.m_decoder.m_header_data = NULL;
p_j2k->m_specific_param.m_decoder.m_header_data_size = 0;
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to read header\n");
return OPJ_FALSE;
}
p_j2k->m_specific_param.m_decoder.m_header_data = new_header_data;
p_j2k->m_specific_param.m_decoder.m_header_data_size = l_marker_size;
}
/* Try to read the rest of the marker segment from stream and copy them into the buffer */
if (opj_stream_read_data(p_stream,p_j2k->m_specific_param.m_decoder.m_header_data,l_marker_size,p_manager) != l_marker_size) {
opj_event_msg(p_manager, EVT_ERROR, "Stream too short\n");
return OPJ_FALSE;
}
/* Read the marker segment with the correct marker handler */
if (! (*(l_marker_handler->handler))(p_j2k,p_j2k->m_specific_param.m_decoder.m_header_data,l_marker_size,p_manager)) {
opj_event_msg(p_manager, EVT_ERROR, "Marker handler function failed to read the marker segment\n");
return OPJ_FALSE;
}
/* Add the marker to the codestream index*/
if (OPJ_FALSE == opj_j2k_add_mhmarker(
p_j2k->cstr_index,
l_marker_handler->id,
(OPJ_UINT32) opj_stream_tell(p_stream) - l_marker_size - 4,
l_marker_size + 4 )) {
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to add mh marker\n");
return OPJ_FALSE;
}
/* Try to read 2 bytes (the next marker ID) from stream and copy them into the buffer */
if (opj_stream_read_data(p_stream,p_j2k->m_specific_param.m_decoder.m_header_data,2,p_manager) != 2) {
opj_event_msg(p_manager, EVT_ERROR, "Stream too short\n");
return OPJ_FALSE;
}
/* read 2 bytes as the new marker ID */
opj_read_bytes(p_j2k->m_specific_param.m_decoder.m_header_data,&l_current_marker,2);
}
if (l_has_siz == 0) {
opj_event_msg(p_manager, EVT_ERROR, "required SIZ marker not found in main header\n");
return OPJ_FALSE;
}
if (l_has_cod == 0) {
opj_event_msg(p_manager, EVT_ERROR, "required COD marker not found in main header\n");
return OPJ_FALSE;
}
if (l_has_qcd == 0) {
opj_event_msg(p_manager, EVT_ERROR, "required QCD marker not found in main header\n");
return OPJ_FALSE;
}
if (! opj_j2k_merge_ppm(&(p_j2k->m_cp), p_manager)) {
opj_event_msg(p_manager, EVT_ERROR, "Failed to merge PPM data\n");
return OPJ_FALSE;
}
opj_event_msg(p_manager, EVT_INFO, "Main header has been correctly decoded.\n");
/* Position of the last element of the main header */
p_j2k->cstr_index->main_head_end = (OPJ_UINT32) opj_stream_tell(p_stream) - 2;
/* Next step: read a tile-part header */
p_j2k->m_specific_param.m_decoder.m_state = J2K_STATE_TPHSOT;
return OPJ_TRUE;
}
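/* Executes every procedure stored in p_procedure_list on (p_j2k, p_stream,
* p_manager). Because of the && short-circuit below, once one procedure fails
* the remaining ones are not called; the list is cleared in all cases. */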
static OPJ_BOOL opj_j2k_exec ( opj_j2k_t * p_j2k,
opj_procedure_list_t * p_procedure_list,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager )
{
OPJ_BOOL (** l_procedure) (opj_j2k_t * ,opj_stream_private_t *,opj_event_mgr_t *) = 00;
OPJ_BOOL l_result = OPJ_TRUE;
OPJ_UINT32 l_nb_proc, i;
/* preconditions*/
assert(p_procedure_list != 00);
assert(p_j2k != 00);
assert(p_stream != 00);
assert(p_manager != 00);
l_nb_proc = opj_procedure_list_get_nb_procedures(p_procedure_list);
l_procedure = (OPJ_BOOL (**) (opj_j2k_t * ,opj_stream_private_t *,opj_event_mgr_t *)) opj_procedure_list_get_first_procedure(p_procedure_list);
for (i=0;i<l_nb_proc;++i) {
l_result = l_result && ((*l_procedure) (p_j2k,p_stream,p_manager));
++l_procedure;
}
/* and clear the procedure list at the end.*/
opj_procedure_list_clear(p_procedure_list);
return l_result;
}
/* Copies the default tile coding parameters (decoded from the main header)
* into every tile of the grid, deep-copying the MCT decoding matrix and the
* MCT/MCC records, then creates and initializes the tile decoder (m_tcd). */
static OPJ_BOOL opj_j2k_copy_default_tcp_and_create_tcd ( opj_j2k_t * p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager
)
{
opj_tcp_t * l_tcp = 00;
opj_tcp_t * l_default_tcp = 00;
OPJ_UINT32 l_nb_tiles;
OPJ_UINT32 i,j;
opj_tccp_t *l_current_tccp = 00;
OPJ_UINT32 l_tccp_size;
OPJ_UINT32 l_mct_size;
opj_image_t * l_image;
OPJ_UINT32 l_mcc_records_size,l_mct_records_size;
opj_mct_data_t * l_src_mct_rec, *l_dest_mct_rec;
opj_simple_mcc_decorrelation_data_t * l_src_mcc_rec, *l_dest_mcc_rec;
OPJ_UINT32 l_offset;
/* preconditions */
assert(p_j2k != 00);
assert(p_stream != 00);
assert(p_manager != 00);
l_image = p_j2k->m_private_image;
l_nb_tiles = p_j2k->m_cp.th * p_j2k->m_cp.tw;
l_tcp = p_j2k->m_cp.tcps;
l_tccp_size = l_image->numcomps * (OPJ_UINT32)sizeof(opj_tccp_t);
l_default_tcp = p_j2k->m_specific_param.m_decoder.m_default_tcp;
l_mct_size = l_image->numcomps * l_image->numcomps * (OPJ_UINT32)sizeof(OPJ_FLOAT32);
/* For each tile */
for (i=0; i<l_nb_tiles; ++i) {
/* keep the tile-compo coding parameters pointer of the current tile coding parameters*/
l_current_tccp = l_tcp->tccps;
/*Copy default coding parameters into the current tile coding parameters*/
memcpy(l_tcp, l_default_tcp, sizeof(opj_tcp_t));
/* Initialize some values of the current tile coding parameters*/
l_tcp->cod = 0;
l_tcp->ppt = 0;
l_tcp->ppt_data = 00;
/* Remove memory not owned by this tile in case of early error return. */
l_tcp->m_mct_decoding_matrix = 00;
l_tcp->m_nb_max_mct_records = 0;
l_tcp->m_mct_records = 00;
l_tcp->m_nb_max_mcc_records = 0;
l_tcp->m_mcc_records = 00;
/* Reconnect the tile-compo coding parameters pointer to the current tile coding parameters*/
l_tcp->tccps = l_current_tccp;
/* Get the mct_decoding_matrix of the dflt_tile_cp and copy them into the current tile cp*/
if (l_default_tcp->m_mct_decoding_matrix) {
l_tcp->m_mct_decoding_matrix = (OPJ_FLOAT32*)opj_malloc(l_mct_size);
if (! l_tcp->m_mct_decoding_matrix ) {
return OPJ_FALSE;
}
memcpy(l_tcp->m_mct_decoding_matrix,l_default_tcp->m_mct_decoding_matrix,l_mct_size);
}
/* Get the mct_record of the dflt_tile_cp and copy them into the current tile cp*/
l_mct_records_size = l_default_tcp->m_nb_max_mct_records * (OPJ_UINT32)sizeof(opj_mct_data_t);
l_tcp->m_mct_records = (opj_mct_data_t*)opj_malloc(l_mct_records_size);
if (! l_tcp->m_mct_records) {
return OPJ_FALSE;
}
memcpy(l_tcp->m_mct_records, l_default_tcp->m_mct_records,l_mct_records_size);
/* Copy the mct record data from dflt_tile_cp to the current tile*/
l_src_mct_rec = l_default_tcp->m_mct_records;
l_dest_mct_rec = l_tcp->m_mct_records;
for (j=0;j<l_default_tcp->m_nb_mct_records;++j) {
if (l_src_mct_rec->m_data) {
l_dest_mct_rec->m_data = (OPJ_BYTE*) opj_malloc(l_src_mct_rec->m_data_size);
if(! l_dest_mct_rec->m_data) {
return OPJ_FALSE;
}
memcpy(l_dest_mct_rec->m_data,l_src_mct_rec->m_data,l_src_mct_rec->m_data_size);
}
++l_src_mct_rec;
++l_dest_mct_rec;
/* Update with each pass to free exactly what has been allocated on early return. */
l_tcp->m_nb_max_mct_records += 1;
}
/* Get the mcc_record of the dflt_tile_cp and copy them into the current tile cp*/
l_mcc_records_size = l_default_tcp->m_nb_max_mcc_records * (OPJ_UINT32)sizeof(opj_simple_mcc_decorrelation_data_t);
l_tcp->m_mcc_records = (opj_simple_mcc_decorrelation_data_t*) opj_malloc(l_mcc_records_size);
if (! l_tcp->m_mcc_records) {
return OPJ_FALSE;
}
memcpy(l_tcp->m_mcc_records,l_default_tcp->m_mcc_records,l_mcc_records_size);
l_tcp->m_nb_max_mcc_records = l_default_tcp->m_nb_max_mcc_records;
/* Copy the mcc record data from dflt_tile_cp to the current tile*/
l_src_mcc_rec = l_default_tcp->m_mcc_records;
l_dest_mcc_rec = l_tcp->m_mcc_records;
for (j=0;j<l_default_tcp->m_nb_max_mcc_records;++j) {
if (l_src_mcc_rec->m_decorrelation_array) {
l_offset = (OPJ_UINT32)(l_src_mcc_rec->m_decorrelation_array - l_default_tcp->m_mct_records);
l_dest_mcc_rec->m_decorrelation_array = l_tcp->m_mct_records + l_offset;
}
if (l_src_mcc_rec->m_offset_array) {
l_offset = (OPJ_UINT32)(l_src_mcc_rec->m_offset_array - l_default_tcp->m_mct_records);
l_dest_mcc_rec->m_offset_array = l_tcp->m_mct_records + l_offset;
}
++l_src_mcc_rec;
++l_dest_mcc_rec;
}
/* Copy all the dflt_tile_compo_cp to the current tile cp */
memcpy(l_current_tccp,l_default_tcp->tccps,l_tccp_size);
/* Move to next tile cp*/
++l_tcp;
}
/* Create the current tile decoder*/
p_j2k->m_tcd = (opj_tcd_t*)opj_tcd_create(OPJ_TRUE); /* FIXME why a cast ? */
if (! p_j2k->m_tcd ) {
return OPJ_FALSE;
}
if ( !opj_tcd_init(p_j2k->m_tcd, l_image, &(p_j2k->m_cp)) ) {
opj_tcd_destroy(p_j2k->m_tcd);
p_j2k->m_tcd = 00;
opj_event_msg(p_manager, EVT_ERROR, "Cannot decode tile, memory error\n");
return OPJ_FALSE;
}
return OPJ_TRUE;
}
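/* Looks p_id up in j2k_memory_marker_handler_tab and returns the matching
* entry, or the table's terminating entry (id J2K_MS_UNK) when the marker is
* unknown. Typical use (illustrative sketch only; l_data/l_size stand for the
* caller's marker payload buffer and its length):
*   const opj_dec_memory_marker_handler_t *h = opj_j2k_get_marker_handler(l_current_marker);
*   if (h->id != J2K_MS_UNK) { (*h->handler)(p_j2k, l_data, l_size, p_manager); }
*/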
static const opj_dec_memory_marker_handler_t * opj_j2k_get_marker_handler (OPJ_UINT32 p_id)
{
const opj_dec_memory_marker_handler_t *e;
for (e = j2k_memory_marker_handler_tab; e->id != 0; ++e) {
if (e->id == p_id) {
break; /* we found a handler corresponding to the marker ID */
}
}
return e;
}
void opj_j2k_destroy (opj_j2k_t *p_j2k)
{
if (p_j2k == 00) {
return;
}
if (p_j2k->m_is_decoder) {
if (p_j2k->m_specific_param.m_decoder.m_default_tcp != 00) {
opj_j2k_tcp_destroy(p_j2k->m_specific_param.m_decoder.m_default_tcp);
opj_free(p_j2k->m_specific_param.m_decoder.m_default_tcp);
p_j2k->m_specific_param.m_decoder.m_default_tcp = 00;
}
if (p_j2k->m_specific_param.m_decoder.m_header_data != 00) {
opj_free(p_j2k->m_specific_param.m_decoder.m_header_data);
p_j2k->m_specific_param.m_decoder.m_header_data = 00;
p_j2k->m_specific_param.m_decoder.m_header_data_size = 0;
}
}
else {
if (p_j2k->m_specific_param.m_encoder.m_encoded_tile_data) {
opj_free(p_j2k->m_specific_param.m_encoder.m_encoded_tile_data);
p_j2k->m_specific_param.m_encoder.m_encoded_tile_data = 00;
}
if (p_j2k->m_specific_param.m_encoder.m_tlm_sot_offsets_buffer) {
opj_free(p_j2k->m_specific_param.m_encoder.m_tlm_sot_offsets_buffer);
p_j2k->m_specific_param.m_encoder.m_tlm_sot_offsets_buffer = 00;
p_j2k->m_specific_param.m_encoder.m_tlm_sot_offsets_current = 00;
}
if (p_j2k->m_specific_param.m_encoder.m_header_tile_data) {
opj_free(p_j2k->m_specific_param.m_encoder.m_header_tile_data);
p_j2k->m_specific_param.m_encoder.m_header_tile_data = 00;
p_j2k->m_specific_param.m_encoder.m_header_tile_data_size = 0;
}
}
opj_tcd_destroy(p_j2k->m_tcd);
opj_j2k_cp_destroy(&(p_j2k->m_cp));
memset(&(p_j2k->m_cp),0,sizeof(opj_cp_t));
opj_procedure_list_destroy(p_j2k->m_procedure_list);
p_j2k->m_procedure_list = 00;
opj_procedure_list_destroy(p_j2k->m_validation_list);
p_j2k->m_validation_list = 00;
j2k_destroy_cstr_index(p_j2k->cstr_index);
p_j2k->cstr_index = NULL;
opj_image_destroy(p_j2k->m_private_image);
p_j2k->m_private_image = NULL;
opj_image_destroy(p_j2k->m_output_image);
p_j2k->m_output_image = NULL;
opj_free(p_j2k);
}
void j2k_destroy_cstr_index (opj_codestream_index_t *p_cstr_ind)
{
if (p_cstr_ind) {
if (p_cstr_ind->marker) {
opj_free(p_cstr_ind->marker);
p_cstr_ind->marker = NULL;
}
if (p_cstr_ind->tile_index) {
OPJ_UINT32 it_tile = 0;
for (it_tile=0; it_tile < p_cstr_ind->nb_of_tiles; it_tile++) {
if(p_cstr_ind->tile_index[it_tile].packet_index) {
opj_free(p_cstr_ind->tile_index[it_tile].packet_index);
p_cstr_ind->tile_index[it_tile].packet_index = NULL;
}
if(p_cstr_ind->tile_index[it_tile].tp_index){
opj_free(p_cstr_ind->tile_index[it_tile].tp_index);
p_cstr_ind->tile_index[it_tile].tp_index = NULL;
}
if(p_cstr_ind->tile_index[it_tile].marker){
opj_free(p_cstr_ind->tile_index[it_tile].marker);
p_cstr_ind->tile_index[it_tile].marker = NULL;
}
}
opj_free( p_cstr_ind->tile_index);
p_cstr_ind->tile_index = NULL;
}
opj_free(p_cstr_ind);
}
}
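/* Releases everything owned by a tile coding parameter structure: PPT marker
* table and buffer, per-component parameters, MCT coding/decoding matrices,
* MCT and MCC records, norms, and finally the decoded tile data (via
* opj_j2k_tcp_data_destroy). The opj_tcp_t itself is not freed here. */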
static void opj_j2k_tcp_destroy (opj_tcp_t *p_tcp)
{
if (p_tcp == 00) {
return;
}
if (p_tcp->ppt_markers != 00) {
OPJ_UINT32 i;
for (i = 0U; i < p_tcp->ppt_markers_count; ++i) {
if (p_tcp->ppt_markers[i].m_data != NULL) {
opj_free(p_tcp->ppt_markers[i].m_data);
}
}
p_tcp->ppt_markers_count = 0U;
opj_free(p_tcp->ppt_markers);
p_tcp->ppt_markers = NULL;
}
if (p_tcp->ppt_buffer != 00) {
opj_free(p_tcp->ppt_buffer);
p_tcp->ppt_buffer = 00;
}
if (p_tcp->tccps != 00) {
opj_free(p_tcp->tccps);
p_tcp->tccps = 00;
}
if (p_tcp->m_mct_coding_matrix != 00) {
opj_free(p_tcp->m_mct_coding_matrix);
p_tcp->m_mct_coding_matrix = 00;
}
if (p_tcp->m_mct_decoding_matrix != 00) {
opj_free(p_tcp->m_mct_decoding_matrix);
p_tcp->m_mct_decoding_matrix = 00;
}
if (p_tcp->m_mcc_records) {
opj_free(p_tcp->m_mcc_records);
p_tcp->m_mcc_records = 00;
p_tcp->m_nb_max_mcc_records = 0;
p_tcp->m_nb_mcc_records = 0;
}
if (p_tcp->m_mct_records) {
opj_mct_data_t * l_mct_data = p_tcp->m_mct_records;
OPJ_UINT32 i;
for (i=0;i<p_tcp->m_nb_mct_records;++i) {
if (l_mct_data->m_data) {
opj_free(l_mct_data->m_data);
l_mct_data->m_data = 00;
}
++l_mct_data;
}
opj_free(p_tcp->m_mct_records);
p_tcp->m_mct_records = 00;
}
if (p_tcp->mct_norms != 00) {
opj_free(p_tcp->mct_norms);
p_tcp->mct_norms = 00;
}
opj_j2k_tcp_data_destroy(p_tcp);
}
static void opj_j2k_tcp_data_destroy (opj_tcp_t *p_tcp)
{
if (p_tcp->m_data) {
opj_free(p_tcp->m_data);
p_tcp->m_data = NULL;
p_tcp->m_data_size = 0;
}
}
static void opj_j2k_cp_destroy (opj_cp_t *p_cp)
{
OPJ_UINT32 l_nb_tiles;
opj_tcp_t * l_current_tile = 00;
if (p_cp == 00)
{
return;
}
if (p_cp->tcps != 00)
{
OPJ_UINT32 i;
l_current_tile = p_cp->tcps;
l_nb_tiles = p_cp->th * p_cp->tw;
for (i = 0U; i < l_nb_tiles; ++i)
{
opj_j2k_tcp_destroy(l_current_tile);
++l_current_tile;
}
opj_free(p_cp->tcps);
p_cp->tcps = 00;
}
if (p_cp->ppm_markers != 00) {
OPJ_UINT32 i;
for (i = 0U; i < p_cp->ppm_markers_count; ++i) {
if (p_cp->ppm_markers[i].m_data != NULL) {
opj_free(p_cp->ppm_markers[i].m_data);
}
}
p_cp->ppm_markers_count = 0U;
opj_free(p_cp->ppm_markers);
p_cp->ppm_markers = NULL;
}
opj_free(p_cp->ppm_buffer);
p_cp->ppm_buffer = 00;
p_cp->ppm_data = NULL; /* ppm_data belongs to the allocated buffer pointed by ppm_buffer */
opj_free(p_cp->comment);
p_cp->comment = 00;
if (! p_cp->m_is_decoder)
{
opj_free(p_cp->m_specific_param.m_enc.m_matrice);
p_cp->m_specific_param.m_enc.m_matrice = 00;
}
}
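/* Workaround for non-conformant codestreams with TPsot == TNsot (issue 254):
* starting from the current position, walks from SOT to SOT using each Psot
* value until a tile-part of tile_no is found, and sets *p_correction_needed
* when that tile-part's TPsot equals its announced TNsot, meaning the
* recorded number of tile-parts must be incremented by one. The stream
* position is restored before returning. */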
static OPJ_BOOL opj_j2k_need_nb_tile_parts_correction(opj_stream_private_t *p_stream, OPJ_UINT32 tile_no, OPJ_BOOL* p_correction_needed, opj_event_mgr_t * p_manager )
{
OPJ_BYTE l_header_data[10];
OPJ_OFF_T l_stream_pos_backup;
OPJ_UINT32 l_current_marker;
OPJ_UINT32 l_marker_size;
OPJ_UINT32 l_tile_no, l_tot_len, l_current_part, l_num_parts;
/* initialize to no correction needed */
*p_correction_needed = OPJ_FALSE;
l_stream_pos_backup = opj_stream_tell(p_stream);
if (l_stream_pos_backup == -1) {
/* let's do nothing */
return OPJ_TRUE;
}
for (;;) {
/* Try to read 2 bytes (the next marker ID) from stream and copy them into the buffer */
if (opj_stream_read_data(p_stream,l_header_data, 2, p_manager) != 2) {
/* assume all is OK */
if (! opj_stream_seek(p_stream, l_stream_pos_backup, p_manager)) {
return OPJ_FALSE;
}
return OPJ_TRUE;
}
/* Read 2 bytes from buffer as the new marker ID */
opj_read_bytes(l_header_data, &l_current_marker, 2);
if (l_current_marker != J2K_MS_SOT) {
/* assume all is OK */
if (! opj_stream_seek(p_stream, l_stream_pos_backup, p_manager)) {
return OPJ_FALSE;
}
return OPJ_TRUE;
}
/* Try to read 2 bytes (the marker size) from stream and copy them into the buffer */
if (opj_stream_read_data(p_stream, l_header_data, 2, p_manager) != 2) {
opj_event_msg(p_manager, EVT_ERROR, "Stream too short\n");
return OPJ_FALSE;
}
/* Read 2 bytes from the buffer as the marker size */
opj_read_bytes(l_header_data, &l_marker_size, 2);
/* Check marker size for SOT Marker */
if (l_marker_size != 10) {
opj_event_msg(p_manager, EVT_ERROR, "Inconsistent marker size\n");
return OPJ_FALSE;
}
l_marker_size -= 2;
if (opj_stream_read_data(p_stream, l_header_data, l_marker_size, p_manager) != l_marker_size) {
opj_event_msg(p_manager, EVT_ERROR, "Stream too short\n");
return OPJ_FALSE;
}
if (! opj_j2k_get_sot_values(l_header_data, l_marker_size, &l_tile_no, &l_tot_len, &l_current_part, &l_num_parts, p_manager)) {
return OPJ_FALSE;
}
if (l_tile_no == tile_no) {
/* we found what we were looking for */
break;
}
if ((l_tot_len == 0U) || (l_tot_len < 14U)) {
/* last SOT until EOC or invalid Psot value */
/* assume all is OK */
if (! opj_stream_seek(p_stream, l_stream_pos_backup, p_manager)) {
return OPJ_FALSE;
}
return OPJ_TRUE;
}
l_tot_len -= 12U;
/* look for next SOT marker */
if (opj_stream_skip(p_stream, (OPJ_OFF_T)(l_tot_len), p_manager) != (OPJ_OFF_T)(l_tot_len)) {
/* assume all is OK */
if (! opj_stream_seek(p_stream, l_stream_pos_backup, p_manager)) {
return OPJ_FALSE;
}
return OPJ_TRUE;
}
}
/* check for correction */
if (l_current_part == l_num_parts) {
*p_correction_needed = OPJ_TRUE;
}
if (! opj_stream_seek(p_stream, l_stream_pos_backup, p_manager)) {
return OPJ_FALSE;
}
return OPJ_TRUE;
}
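/* Reads tile-part headers for the current tile until it can be decoded or
* EOC is reached: marker segments are read and dispatched up to SOD, recorded
* in the codestream index, then the SOD data is read (or skipped when
* m_skip_data is set). On success the tile index, its bounds, its component
* count and the size needed for the decoded data are reported to the caller. */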
OPJ_BOOL opj_j2k_read_tile_header( opj_j2k_t * p_j2k,
OPJ_UINT32 * p_tile_index,
OPJ_UINT32 * p_data_size,
OPJ_INT32 * p_tile_x0, OPJ_INT32 * p_tile_y0,
OPJ_INT32 * p_tile_x1, OPJ_INT32 * p_tile_y1,
OPJ_UINT32 * p_nb_comps,
OPJ_BOOL * p_go_on,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager )
{
OPJ_UINT32 l_current_marker = J2K_MS_SOT;
OPJ_UINT32 l_marker_size;
const opj_dec_memory_marker_handler_t * l_marker_handler = 00;
opj_tcp_t * l_tcp = NULL;
/* preconditions */
assert(p_stream != 00);
assert(p_j2k != 00);
assert(p_manager != 00);
/* Have we reached the End Of Codestream? */
if (p_j2k->m_specific_param.m_decoder.m_state == J2K_STATE_EOC){
l_current_marker = J2K_MS_EOC;
}
/* We need to encounter a SOT marker (a new tile-part header) */
else if (p_j2k->m_specific_param.m_decoder.m_state != J2K_STATE_TPHSOT){
return OPJ_FALSE;
}
/* Read into the codestream until reaching the EOC or until the tile can be decoded ??? FIXME */
while ( (!p_j2k->m_specific_param.m_decoder.m_can_decode) && (l_current_marker != J2K_MS_EOC) ) {
/* Try to read until the Start Of Data is detected */
while (l_current_marker != J2K_MS_SOD) {
if(opj_stream_get_number_byte_left(p_stream) == 0)
{
p_j2k->m_specific_param.m_decoder.m_state = J2K_STATE_NEOC;
break;
}
/* Try to read 2 bytes (the marker size) from stream and copy them into the buffer */
if (opj_stream_read_data(p_stream,p_j2k->m_specific_param.m_decoder.m_header_data,2,p_manager) != 2) {
opj_event_msg(p_manager, EVT_ERROR, "Stream too short\n");
return OPJ_FALSE;
}
/* Read 2 bytes from the buffer as the marker size */
opj_read_bytes(p_j2k->m_specific_param.m_decoder.m_header_data,&l_marker_size,2);
/* Check marker size (does not include marker ID but includes marker size) */
if (l_marker_size < 2) {
opj_event_msg(p_manager, EVT_ERROR, "Inconsistent marker size\n");
return OPJ_FALSE;
}
/* cf. https://code.google.com/p/openjpeg/issues/detail?id=226 */
if (l_current_marker == 0x8080 && opj_stream_get_number_byte_left(p_stream) == 0) {
p_j2k->m_specific_param.m_decoder.m_state = J2K_STATE_NEOC;
break;
}
/* Why this condition? FIXME */
if (p_j2k->m_specific_param.m_decoder.m_state & J2K_STATE_TPH){
p_j2k->m_specific_param.m_decoder.m_sot_length -= (l_marker_size + 2);
}
l_marker_size -= 2; /* Subtract the size of the marker length field itself, which was already read */
/* Get the marker handler from the marker ID */
l_marker_handler = opj_j2k_get_marker_handler(l_current_marker);
/* Check if the marker is known and if it is the right place to find it */
if (! (p_j2k->m_specific_param.m_decoder.m_state & l_marker_handler->states) ) {
opj_event_msg(p_manager, EVT_ERROR, "Marker is not compliant with its position\n");
return OPJ_FALSE;
}
/* FIXME manage case of unknown marker as in the main header ? */
/* Check if the marker size is compatible with the header data size */
if (l_marker_size > p_j2k->m_specific_param.m_decoder.m_header_data_size) {
OPJ_BYTE *new_header_data = NULL;
/* If we are here, this means we consider this marker as known & we will read it */
/* Check enough bytes left in stream before allocation */
if ((OPJ_OFF_T)l_marker_size > opj_stream_get_number_byte_left(p_stream)) {
opj_event_msg(p_manager, EVT_ERROR, "Marker size inconsistent with stream length\n");
return OPJ_FALSE;
}
new_header_data = (OPJ_BYTE *) opj_realloc(p_j2k->m_specific_param.m_decoder.m_header_data, l_marker_size);
if (! new_header_data) {
opj_free(p_j2k->m_specific_param.m_decoder.m_header_data);
p_j2k->m_specific_param.m_decoder.m_header_data = NULL;
p_j2k->m_specific_param.m_decoder.m_header_data_size = 0;
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to read header\n");
return OPJ_FALSE;
}
p_j2k->m_specific_param.m_decoder.m_header_data = new_header_data;
p_j2k->m_specific_param.m_decoder.m_header_data_size = l_marker_size;
}
/* Try to read the rest of the marker segment from stream and copy them into the buffer */
if (opj_stream_read_data(p_stream,p_j2k->m_specific_param.m_decoder.m_header_data,l_marker_size,p_manager) != l_marker_size) {
opj_event_msg(p_manager, EVT_ERROR, "Stream too short\n");
return OPJ_FALSE;
}
if (!l_marker_handler->handler) {
/* See issue #175 */
opj_event_msg(p_manager, EVT_ERROR, "Not sure how that happened.\n");
return OPJ_FALSE;
}
/* Read the marker segment with the correct marker handler */
if (! (*(l_marker_handler->handler))(p_j2k,p_j2k->m_specific_param.m_decoder.m_header_data,l_marker_size,p_manager)) {
opj_event_msg(p_manager, EVT_ERROR, "Fail to read the current marker segment (%#x)\n", l_current_marker);
return OPJ_FALSE;
}
/* Add the marker to the codestream index*/
if (OPJ_FALSE == opj_j2k_add_tlmarker(p_j2k->m_current_tile_number,
p_j2k->cstr_index,
l_marker_handler->id,
(OPJ_UINT32) opj_stream_tell(p_stream) - l_marker_size - 4,
l_marker_size + 4 )) {
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to add tl marker\n");
return OPJ_FALSE;
}
/* Keep the position of the last SOT marker read */
if ( l_marker_handler->id == J2K_MS_SOT ) {
OPJ_UINT32 sot_pos = (OPJ_UINT32) opj_stream_tell(p_stream) - l_marker_size - 4 ;
if (sot_pos > p_j2k->m_specific_param.m_decoder.m_last_sot_read_pos)
{
p_j2k->m_specific_param.m_decoder.m_last_sot_read_pos = sot_pos;
}
}
if (p_j2k->m_specific_param.m_decoder.m_skip_data) {
/* Skip the rest of the tile part header*/
if (opj_stream_skip(p_stream,p_j2k->m_specific_param.m_decoder.m_sot_length,p_manager) != p_j2k->m_specific_param.m_decoder.m_sot_length) {
opj_event_msg(p_manager, EVT_ERROR, "Stream too short\n");
return OPJ_FALSE;
}
l_current_marker = J2K_MS_SOD; /* Normally we reached a SOD */
}
else {
/* Try to read 2 bytes (the next marker ID) from stream and copy them into the buffer*/
if (opj_stream_read_data(p_stream,p_j2k->m_specific_param.m_decoder.m_header_data,2,p_manager) != 2) {
opj_event_msg(p_manager, EVT_ERROR, "Stream too short\n");
return OPJ_FALSE;
}
/* Read 2 bytes from the buffer as the new marker ID */
opj_read_bytes(p_j2k->m_specific_param.m_decoder.m_header_data,&l_current_marker,2);
}
}
if(opj_stream_get_number_byte_left(p_stream) == 0
&& p_j2k->m_specific_param.m_decoder.m_state == J2K_STATE_NEOC)
break;
/* If we didn't skip data before, we need to read the SOD marker*/
if (! p_j2k->m_specific_param.m_decoder.m_skip_data) {
/* Try to read the SOD marker and skip data ? FIXME */
if (! opj_j2k_read_sod(p_j2k, p_stream, p_manager)) {
return OPJ_FALSE;
}
if (p_j2k->m_specific_param.m_decoder.m_can_decode && !p_j2k->m_specific_param.m_decoder.m_nb_tile_parts_correction_checked) {
/* Issue 254 */
OPJ_BOOL l_correction_needed;
p_j2k->m_specific_param.m_decoder.m_nb_tile_parts_correction_checked = 1;
if(!opj_j2k_need_nb_tile_parts_correction(p_stream, p_j2k->m_current_tile_number, &l_correction_needed, p_manager)) {
opj_event_msg(p_manager, EVT_ERROR, "opj_j2k_apply_nb_tile_parts_correction error\n");
return OPJ_FALSE;
}
if (l_correction_needed) {
OPJ_UINT32 l_nb_tiles = p_j2k->m_cp.tw * p_j2k->m_cp.th;
OPJ_UINT32 l_tile_no;
p_j2k->m_specific_param.m_decoder.m_can_decode = 0;
p_j2k->m_specific_param.m_decoder.m_nb_tile_parts_correction = 1;
/* correct tiles */
for (l_tile_no = 0U; l_tile_no < l_nb_tiles; ++l_tile_no) {
if (p_j2k->m_cp.tcps[l_tile_no].m_nb_tile_parts != 0U) {
p_j2k->m_cp.tcps[l_tile_no].m_nb_tile_parts+=1;
}
}
opj_event_msg(p_manager, EVT_WARNING, "Non conformant codestream TPsot==TNsot.\n");
}
}
if (! p_j2k->m_specific_param.m_decoder.m_can_decode){
/* Try to read 2 bytes (the next marker ID) from stream and copy them into the buffer */
if (opj_stream_read_data(p_stream,p_j2k->m_specific_param.m_decoder.m_header_data,2,p_manager) != 2) {
opj_event_msg(p_manager, EVT_ERROR, "Stream too short\n");
return OPJ_FALSE;
}
/* Read 2 bytes from buffer as the new marker ID */
opj_read_bytes(p_j2k->m_specific_param.m_decoder.m_header_data,&l_current_marker,2);
}
}
else {
/* Indicate we will try to read a new tile-part header*/
p_j2k->m_specific_param.m_decoder.m_skip_data = 0;
p_j2k->m_specific_param.m_decoder.m_can_decode = 0;
p_j2k->m_specific_param.m_decoder.m_state = J2K_STATE_TPHSOT;
/* Try to read 2 bytes (the next marker ID) from stream and copy them into the buffer */
if (opj_stream_read_data(p_stream,p_j2k->m_specific_param.m_decoder.m_header_data,2,p_manager) != 2) {
opj_event_msg(p_manager, EVT_ERROR, "Stream too short\n");
return OPJ_FALSE;
}
/* Read 2 bytes from buffer as the new marker ID */
opj_read_bytes(p_j2k->m_specific_param.m_decoder.m_header_data,&l_current_marker,2);
}
}
/* Current marker is the EOC marker ?*/
if (l_current_marker == J2K_MS_EOC) {
if (p_j2k->m_specific_param.m_decoder.m_state != J2K_STATE_EOC ){
p_j2k->m_current_tile_number = 0;
p_j2k->m_specific_param.m_decoder.m_state = J2K_STATE_EOC;
}
}
/* If the current tile cannot be decoded, skip to the next tile that has data;
* when no such tile is left, tell the caller to stop (p_go_on = FALSE) */
if ( ! p_j2k->m_specific_param.m_decoder.m_can_decode) {
OPJ_UINT32 l_nb_tiles = p_j2k->m_cp.th * p_j2k->m_cp.tw;
l_tcp = p_j2k->m_cp.tcps + p_j2k->m_current_tile_number;
while( (p_j2k->m_current_tile_number < l_nb_tiles) && (l_tcp->m_data == 00) ) {
++p_j2k->m_current_tile_number;
++l_tcp;
}
if (p_j2k->m_current_tile_number == l_nb_tiles) {
*p_go_on = OPJ_FALSE;
return OPJ_TRUE;
}
}
if (! opj_j2k_merge_ppt(p_j2k->m_cp.tcps + p_j2k->m_current_tile_number, p_manager)) {
opj_event_msg(p_manager, EVT_ERROR, "Failed to merge PPT data\n");
return OPJ_FALSE;
}
/*FIXME ???*/
if (! opj_tcd_init_decode_tile(p_j2k->m_tcd, p_j2k->m_current_tile_number, p_manager)) {
opj_event_msg(p_manager, EVT_ERROR, "Cannot decode tile, memory error\n");
return OPJ_FALSE;
}
opj_event_msg(p_manager, EVT_INFO, "Header of tile %d / %d has been read.\n",
p_j2k->m_current_tile_number+1, (p_j2k->m_cp.th * p_j2k->m_cp.tw));
*p_tile_index = p_j2k->m_current_tile_number;
*p_go_on = OPJ_TRUE;
*p_data_size = opj_tcd_get_decoded_tile_size(p_j2k->m_tcd);
*p_tile_x0 = p_j2k->m_tcd->tcd_image->tiles->x0;
*p_tile_y0 = p_j2k->m_tcd->tcd_image->tiles->y0;
*p_tile_x1 = p_j2k->m_tcd->tcd_image->tiles->x1;
*p_tile_y1 = p_j2k->m_tcd->tcd_image->tiles->y1;
*p_nb_comps = p_j2k->m_tcd->tcd_image->tiles->numcomps;
p_j2k->m_specific_param.m_decoder.m_state |= 0x0080;/* FIXME J2K_DEC_STATE_DATA;*/
return OPJ_TRUE;
}
OPJ_BOOL opj_j2k_decode_tile ( opj_j2k_t * p_j2k,
OPJ_UINT32 p_tile_index,
OPJ_BYTE * p_data,
OPJ_UINT32 p_data_size,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager )
{
OPJ_UINT32 l_current_marker;
OPJ_BYTE l_data [2];
opj_tcp_t * l_tcp;
/* preconditions */
assert(p_stream != 00);
assert(p_j2k != 00);
assert(p_manager != 00);
if ( !(p_j2k->m_specific_param.m_decoder.m_state & 0x0080/*FIXME J2K_DEC_STATE_DATA*/)
|| (p_tile_index != p_j2k->m_current_tile_number) ) {
return OPJ_FALSE;
}
l_tcp = &(p_j2k->m_cp.tcps[p_tile_index]);
if (! l_tcp->m_data) {
opj_j2k_tcp_destroy(l_tcp);
return OPJ_FALSE;
}
if (! opj_tcd_decode_tile( p_j2k->m_tcd,
l_tcp->m_data,
l_tcp->m_data_size,
p_tile_index,
p_j2k->cstr_index, p_manager) ) {
opj_j2k_tcp_destroy(l_tcp);
p_j2k->m_specific_param.m_decoder.m_state |= 0x8000;/*FIXME J2K_DEC_STATE_ERR;*/
opj_event_msg(p_manager, EVT_ERROR, "Failed to decode.\n");
return OPJ_FALSE;
}
if (! opj_tcd_update_tile_data(p_j2k->m_tcd,p_data,p_data_size)) {
return OPJ_FALSE;
}
/* To avoid destroying the tcp, which can be useful when we try to decode a tile that was decoded before (cf. j2k_random_tile_access),
* we destroy only the data, which will be re-read in read_tile_header */
/*opj_j2k_tcp_destroy(l_tcp);
p_j2k->m_tcd->tcp = 0;*/
opj_j2k_tcp_data_destroy(l_tcp);
p_j2k->m_specific_param.m_decoder.m_can_decode = 0;
p_j2k->m_specific_param.m_decoder.m_state &= (~ (0x0080u));/* FIXME J2K_DEC_STATE_DATA);*/
if(opj_stream_get_number_byte_left(p_stream) == 0
&& p_j2k->m_specific_param.m_decoder.m_state == J2K_STATE_NEOC){
return OPJ_TRUE;
}
if (p_j2k->m_specific_param.m_decoder.m_state != 0x0100){ /*FIXME J2K_DEC_STATE_EOC)*/
if (opj_stream_read_data(p_stream,l_data,2,p_manager) != 2) {
opj_event_msg(p_manager, EVT_ERROR, "Stream too short\n");
return OPJ_FALSE;
}
opj_read_bytes(l_data,&l_current_marker,2);
if (l_current_marker == J2K_MS_EOC) {
p_j2k->m_current_tile_number = 0;
p_j2k->m_specific_param.m_decoder.m_state = 0x0100;/*FIXME J2K_DEC_STATE_EOC;*/
}
else if (l_current_marker != J2K_MS_SOT)
{
if(opj_stream_get_number_byte_left(p_stream) == 0) {
p_j2k->m_specific_param.m_decoder.m_state = J2K_STATE_NEOC;
opj_event_msg(p_manager, EVT_WARNING, "Stream does not end with EOC\n");
return OPJ_TRUE;
}
opj_event_msg(p_manager, EVT_ERROR, "Stream too short, expected SOT\n");
return OPJ_FALSE;
}
}
return OPJ_TRUE;
}
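/* Copies each decoded tile component from p_data into the matching component
* of p_output_image. The tile rectangle is intersected with the requested
* output area: l_offset_*_src select the part of the source to keep, while
* l_start_*_dest / l_width_dest / l_height_dest locate it in the destination.
* Samples are then copied line by line, widening 8- and 16-bit samples
* (signed or not) to OPJ_INT32. */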
static OPJ_BOOL opj_j2k_update_image_data (opj_tcd_t * p_tcd, OPJ_BYTE * p_data, opj_image_t* p_output_image)
{
OPJ_UINT32 i,j,k = 0;
OPJ_UINT32 l_width_src,l_height_src;
OPJ_UINT32 l_width_dest,l_height_dest;
OPJ_INT32 l_offset_x0_src, l_offset_y0_src, l_offset_x1_src, l_offset_y1_src;
OPJ_SIZE_T l_start_offset_src, l_line_offset_src, l_end_offset_src ;
OPJ_UINT32 l_start_x_dest , l_start_y_dest;
OPJ_UINT32 l_x0_dest, l_y0_dest, l_x1_dest, l_y1_dest;
OPJ_SIZE_T l_start_offset_dest, l_line_offset_dest;
opj_image_comp_t * l_img_comp_src = 00;
opj_image_comp_t * l_img_comp_dest = 00;
opj_tcd_tilecomp_t * l_tilec = 00;
opj_image_t * l_image_src = 00;
OPJ_UINT32 l_size_comp, l_remaining;
OPJ_INT32 * l_dest_ptr;
opj_tcd_resolution_t* l_res= 00;
l_tilec = p_tcd->tcd_image->tiles->comps;
l_image_src = p_tcd->image;
l_img_comp_src = l_image_src->comps;
l_img_comp_dest = p_output_image->comps;
for (i=0; i<l_image_src->numcomps; i++) {
/* Allocate output component buffer if necessary */
if (!l_img_comp_dest->data) {
l_img_comp_dest->data = (OPJ_INT32*) opj_calloc((OPJ_SIZE_T)l_img_comp_dest->w * (OPJ_SIZE_T)l_img_comp_dest->h, sizeof(OPJ_INT32));
if (! l_img_comp_dest->data) {
return OPJ_FALSE;
}
}
/* Copy info from decoded comp image to output image */
l_img_comp_dest->resno_decoded = l_img_comp_src->resno_decoded;
/*-----*/
/* Compute the precision of the output buffer */
l_size_comp = l_img_comp_src->prec >> 3; /*(/ 8)*/
l_remaining = l_img_comp_src->prec & 7; /* (%8) */
l_res = l_tilec->resolutions + l_img_comp_src->resno_decoded;
if (l_remaining) {
++l_size_comp;
}
if (l_size_comp == 3) {
l_size_comp = 4;
}
/*-----*/
/* Current tile component size*/
/*if (i == 0) {
fprintf(stdout, "SRC: l_res_x0=%d, l_res_x1=%d, l_res_y0=%d, l_res_y1=%d\n",
l_res->x0, l_res->x1, l_res->y0, l_res->y1);
}*/
l_width_src = (OPJ_UINT32)(l_res->x1 - l_res->x0);
l_height_src = (OPJ_UINT32)(l_res->y1 - l_res->y0);
/* Border of the current output component*/
l_x0_dest = opj_uint_ceildivpow2(l_img_comp_dest->x0, l_img_comp_dest->factor);
l_y0_dest = opj_uint_ceildivpow2(l_img_comp_dest->y0, l_img_comp_dest->factor);
l_x1_dest = l_x0_dest + l_img_comp_dest->w; /* can't overflow given that image->x1 is uint32 */
l_y1_dest = l_y0_dest + l_img_comp_dest->h;
/*if (i == 0) {
fprintf(stdout, "DEST: l_x0_dest=%d, l_x1_dest=%d, l_y0_dest=%d, l_y1_dest=%d (%d)\n",
l_x0_dest, l_x1_dest, l_y0_dest, l_y1_dest, l_img_comp_dest->factor );
}*/
/*-----*/
/* Compute the area (l_offset_x0_src, l_offset_y0_src, l_offset_x1_src, l_offset_y1_src)
* of the input buffer (decoded tile component) which will be moved
* into the output buffer. Compute the area of the output buffer (l_start_x_dest,
* l_start_y_dest, l_width_dest, l_height_dest) which will be modified
* by this input area.
*/
assert( l_res->x0 >= 0);
assert( l_res->x1 >= 0);
if ( l_x0_dest < (OPJ_UINT32)l_res->x0 ) {
l_start_x_dest = (OPJ_UINT32)l_res->x0 - l_x0_dest;
l_offset_x0_src = 0;
if ( l_x1_dest >= (OPJ_UINT32)l_res->x1 ) {
l_width_dest = l_width_src;
l_offset_x1_src = 0;
}
else {
l_width_dest = l_x1_dest - (OPJ_UINT32)l_res->x0 ;
l_offset_x1_src = (OPJ_INT32)(l_width_src - l_width_dest);
}
}
else {
l_start_x_dest = 0U;
l_offset_x0_src = (OPJ_INT32)l_x0_dest - l_res->x0;
if ( l_x1_dest >= (OPJ_UINT32)l_res->x1 ) {
l_width_dest = l_width_src - (OPJ_UINT32)l_offset_x0_src;
l_offset_x1_src = 0;
}
else {
l_width_dest = l_img_comp_dest->w ;
l_offset_x1_src = l_res->x1 - (OPJ_INT32)l_x1_dest;
}
}
if ( l_y0_dest < (OPJ_UINT32)l_res->y0 ) {
l_start_y_dest = (OPJ_UINT32)l_res->y0 - l_y0_dest;
l_offset_y0_src = 0;
if ( l_y1_dest >= (OPJ_UINT32)l_res->y1 ) {
l_height_dest = l_height_src;
l_offset_y1_src = 0;
}
else {
l_height_dest = l_y1_dest - (OPJ_UINT32)l_res->y0 ;
l_offset_y1_src = (OPJ_INT32)(l_height_src - l_height_dest);
}
}
else {
l_start_y_dest = 0U;
l_offset_y0_src = (OPJ_INT32)l_y0_dest - l_res->y0;
if ( l_y1_dest >= (OPJ_UINT32)l_res->y1 ) {
l_height_dest = l_height_src - (OPJ_UINT32)l_offset_y0_src;
l_offset_y1_src = 0;
}
else {
l_height_dest = l_img_comp_dest->h ;
l_offset_y1_src = l_res->y1 - (OPJ_INT32)l_y1_dest;
}
}
if( (l_offset_x0_src < 0 ) || (l_offset_y0_src < 0 ) || (l_offset_x1_src < 0 ) || (l_offset_y1_src < 0 ) ){
return OPJ_FALSE;
}
/* testcase 2977.pdf.asan.67.2198 */
if ((OPJ_INT32)l_width_dest < 0 || (OPJ_INT32)l_height_dest < 0) {
return OPJ_FALSE;
}
/*-----*/
/* Compute the input buffer offset */
l_start_offset_src = (OPJ_SIZE_T)l_offset_x0_src + (OPJ_SIZE_T)l_offset_y0_src * (OPJ_SIZE_T)l_width_src;
l_line_offset_src = (OPJ_SIZE_T)l_offset_x1_src + (OPJ_SIZE_T)l_offset_x0_src;
l_end_offset_src = (OPJ_SIZE_T)l_offset_y1_src * (OPJ_SIZE_T)l_width_src - (OPJ_SIZE_T)l_offset_x0_src;
/* Compute the output buffer offset */
l_start_offset_dest = (OPJ_SIZE_T)l_start_x_dest + (OPJ_SIZE_T)l_start_y_dest * (OPJ_SIZE_T)l_img_comp_dest->w;
l_line_offset_dest = (OPJ_SIZE_T)l_img_comp_dest->w - (OPJ_SIZE_T)l_width_dest;
/* Move the output buffer to the first place where we will write*/
l_dest_ptr = l_img_comp_dest->data + l_start_offset_dest;
/*if (i == 0) {
fprintf(stdout, "COMPO[%d]:\n",i);
fprintf(stdout, "SRC: l_start_x_src=%d, l_start_y_src=%d, l_width_src=%d, l_height_src=%d\n"
"\t tile offset:%d, %d, %d, %d\n"
"\t buffer offset: %d; %d, %d\n",
l_res->x0, l_res->y0, l_width_src, l_height_src,
l_offset_x0_src, l_offset_y0_src, l_offset_x1_src, l_offset_y1_src,
l_start_offset_src, l_line_offset_src, l_end_offset_src);
fprintf(stdout, "DEST: l_start_x_dest=%d, l_start_y_dest=%d, l_width_dest=%d, l_height_dest=%d\n"
"\t start offset: %d, line offset= %d\n",
l_start_x_dest, l_start_y_dest, l_width_dest, l_height_dest, l_start_offset_dest, l_line_offset_dest);
}*/
switch (l_size_comp) {
case 1:
{
OPJ_CHAR * l_src_ptr = (OPJ_CHAR*) p_data;
l_src_ptr += l_start_offset_src; /* Move to the first place where we will read*/
if (l_img_comp_src->sgnd) {
for (j = 0 ; j < l_height_dest ; ++j) {
for ( k = 0 ; k < l_width_dest ; ++k) {
*(l_dest_ptr++) = (OPJ_INT32) (*(l_src_ptr++)); /* Copy only the data needed for the output image */
}
l_dest_ptr+= l_line_offset_dest; /* Move to the next place where we will write */
l_src_ptr += l_line_offset_src ; /* Move to the next place where we will read */
}
}
else {
for ( j = 0 ; j < l_height_dest ; ++j ) {
for ( k = 0 ; k < l_width_dest ; ++k) {
*(l_dest_ptr++) = (OPJ_INT32) ((*(l_src_ptr++))&0xff);
}
l_dest_ptr+= l_line_offset_dest;
l_src_ptr += l_line_offset_src;
}
}
l_src_ptr += l_end_offset_src; /* Move to the end of this component-part of the input buffer */
p_data = (OPJ_BYTE*) l_src_ptr; /* Keep the current position for the next component-part */
}
break;
case 2:
{
OPJ_INT16 * l_src_ptr = (OPJ_INT16 *) p_data;
l_src_ptr += l_start_offset_src;
if (l_img_comp_src->sgnd) {
for (j=0;j<l_height_dest;++j) {
for (k=0;k<l_width_dest;++k) {
*(l_dest_ptr++) = *(l_src_ptr++);
}
l_dest_ptr+= l_line_offset_dest;
l_src_ptr += l_line_offset_src ;
}
}
else {
for (j=0;j<l_height_dest;++j) {
for (k=0;k<l_width_dest;++k) {
*(l_dest_ptr++) = (*(l_src_ptr++))&0xffff;
}
l_dest_ptr+= l_line_offset_dest;
l_src_ptr += l_line_offset_src ;
}
}
l_src_ptr += l_end_offset_src;
p_data = (OPJ_BYTE*) l_src_ptr;
}
break;
case 4:
{
OPJ_INT32 * l_src_ptr = (OPJ_INT32 *) p_data;
l_src_ptr += l_start_offset_src;
for (j=0;j<l_height_dest;++j) {
for (k=0;k<l_width_dest;++k) {
*(l_dest_ptr++) = (*(l_src_ptr++));
}
l_dest_ptr+= l_line_offset_dest;
l_src_ptr += l_line_offset_src ;
}
l_src_ptr += l_end_offset_src;
p_data = (OPJ_BYTE*) l_src_ptr;
}
break;
}
++l_img_comp_dest;
++l_img_comp_src;
++l_tilec;
}
return OPJ_TRUE;
}
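/* Restricts decoding to the rectangle (p_start_x, p_start_y) x (p_end_x,
* p_end_y). The coordinates are validated (and clamped, with a warning)
* against the image area, converted into a tile range
* (m_start_tile_x/y .. m_end_tile_x/y), and each image component is resized
* accordingly, taking its subsampling factors (dx, dy) and its resolution
* reduction factor into account. */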
OPJ_BOOL opj_j2k_set_decode_area( opj_j2k_t *p_j2k,
opj_image_t* p_image,
OPJ_INT32 p_start_x, OPJ_INT32 p_start_y,
OPJ_INT32 p_end_x, OPJ_INT32 p_end_y,
opj_event_mgr_t * p_manager )
{
opj_cp_t * l_cp = &(p_j2k->m_cp);
opj_image_t * l_image = p_j2k->m_private_image;
OPJ_UINT32 it_comp;
OPJ_INT32 l_comp_x1, l_comp_y1;
opj_image_comp_t* l_img_comp = NULL;
/* Check that the main header has been read */
if (p_j2k->m_specific_param.m_decoder.m_state != J2K_STATE_TPHSOT) { /* FIXME J2K_DEC_STATE_TPHSOT)*/
opj_event_msg(p_manager, EVT_ERROR, "Need to decode the main header before begin to decode the remaining codestream");
return OPJ_FALSE;
}
if ( !p_start_x && !p_start_y && !p_end_x && !p_end_y){
opj_event_msg(p_manager, EVT_INFO, "No decoded area parameters, set the decoded area to the whole image\n");
p_j2k->m_specific_param.m_decoder.m_start_tile_x = 0;
p_j2k->m_specific_param.m_decoder.m_start_tile_y = 0;
p_j2k->m_specific_param.m_decoder.m_end_tile_x = l_cp->tw;
p_j2k->m_specific_param.m_decoder.m_end_tile_y = l_cp->th;
return OPJ_TRUE;
}
/* ----- */
/* Check if the positions provided by the user are correct */
/* Left */
assert(p_start_x >= 0 );
assert(p_start_y >= 0 );
if ((OPJ_UINT32)p_start_x > l_image->x1 ) {
opj_event_msg(p_manager, EVT_ERROR,
"Left position of the decoded area (region_x0=%d) is outside the image area (Xsiz=%d).\n",
p_start_x, l_image->x1);
return OPJ_FALSE;
}
else if ((OPJ_UINT32)p_start_x < l_image->x0){
opj_event_msg(p_manager, EVT_WARNING,
"Left position of the decoded area (region_x0=%d) is outside the image area (XOsiz=%d).\n",
p_start_x, l_image->x0);
p_j2k->m_specific_param.m_decoder.m_start_tile_x = 0;
p_image->x0 = l_image->x0;
}
else {
p_j2k->m_specific_param.m_decoder.m_start_tile_x = ((OPJ_UINT32)p_start_x - l_cp->tx0) / l_cp->tdx;
p_image->x0 = (OPJ_UINT32)p_start_x;
}
/* Up */
if ((OPJ_UINT32)p_start_y > l_image->y1){
opj_event_msg(p_manager, EVT_ERROR,
"Up position of the decoded area (region_y0=%d) is outside the image area (Ysiz=%d).\n",
p_start_y, l_image->y1);
return OPJ_FALSE;
}
else if ((OPJ_UINT32)p_start_y < l_image->y0){
opj_event_msg(p_manager, EVT_WARNING,
"Up position of the decoded area (region_y0=%d) is outside the image area (YOsiz=%d).\n",
p_start_y, l_image->y0);
p_j2k->m_specific_param.m_decoder.m_start_tile_y = 0;
p_image->y0 = l_image->y0;
}
else {
p_j2k->m_specific_param.m_decoder.m_start_tile_y = ((OPJ_UINT32)p_start_y - l_cp->ty0) / l_cp->tdy;
p_image->y0 = (OPJ_UINT32)p_start_y;
}
/* Right */
assert((OPJ_UINT32)p_end_x > 0);
assert((OPJ_UINT32)p_end_y > 0);
if ((OPJ_UINT32)p_end_x < l_image->x0) {
opj_event_msg(p_manager, EVT_ERROR,
"Right position of the decoded area (region_x1=%d) is outside the image area (XOsiz=%d).\n",
p_end_x, l_image->x0);
return OPJ_FALSE;
}
else if ((OPJ_UINT32)p_end_x > l_image->x1) {
opj_event_msg(p_manager, EVT_WARNING,
"Right position of the decoded area (region_x1=%d) is outside the image area (Xsiz=%d).\n",
p_end_x, l_image->x1);
p_j2k->m_specific_param.m_decoder.m_end_tile_x = l_cp->tw;
p_image->x1 = l_image->x1;
}
else {
p_j2k->m_specific_param.m_decoder.m_end_tile_x = (OPJ_UINT32)opj_int_ceildiv(p_end_x - (OPJ_INT32)l_cp->tx0, (OPJ_INT32)l_cp->tdx);
p_image->x1 = (OPJ_UINT32)p_end_x;
}
/* Bottom */
if ((OPJ_UINT32)p_end_y < l_image->y0) {
opj_event_msg(p_manager, EVT_ERROR,
"Bottom position of the decoded area (region_y1=%d) is outside the image area (YOsiz=%d).\n",
p_end_y, l_image->y0);
return OPJ_FALSE;
}
if ((OPJ_UINT32)p_end_y > l_image->y1){
opj_event_msg(p_manager, EVT_WARNING,
"Bottom position of the decoded area (region_y1=%d) is outside the image area (Ysiz=%d).\n",
p_end_y, l_image->y1);
p_j2k->m_specific_param.m_decoder.m_end_tile_y = l_cp->th;
p_image->y1 = l_image->y1;
}
else{
p_j2k->m_specific_param.m_decoder.m_end_tile_y = (OPJ_UINT32)opj_int_ceildiv(p_end_y - (OPJ_INT32)l_cp->ty0, (OPJ_INT32)l_cp->tdy);
p_image->y1 = (OPJ_UINT32)p_end_y;
}
/* ----- */
p_j2k->m_specific_param.m_decoder.m_discard_tiles = 1;
l_img_comp = p_image->comps;
for (it_comp=0; it_comp < p_image->numcomps; ++it_comp)
{
OPJ_INT32 l_h,l_w;
l_img_comp->x0 = (OPJ_UINT32)opj_int_ceildiv((OPJ_INT32)p_image->x0, (OPJ_INT32)l_img_comp->dx);
l_img_comp->y0 = (OPJ_UINT32)opj_int_ceildiv((OPJ_INT32)p_image->y0, (OPJ_INT32)l_img_comp->dy);
l_comp_x1 = opj_int_ceildiv((OPJ_INT32)p_image->x1, (OPJ_INT32)l_img_comp->dx);
l_comp_y1 = opj_int_ceildiv((OPJ_INT32)p_image->y1, (OPJ_INT32)l_img_comp->dy);
l_w = opj_int_ceildivpow2(l_comp_x1, (OPJ_INT32)l_img_comp->factor)
- opj_int_ceildivpow2((OPJ_INT32)l_img_comp->x0, (OPJ_INT32)l_img_comp->factor);
if (l_w < 0){
opj_event_msg(p_manager, EVT_ERROR,
"Size x of the decoded component image is incorrect (comp[%d].w=%d).\n",
it_comp, l_w);
return OPJ_FALSE;
}
l_img_comp->w = (OPJ_UINT32)l_w;
l_h = opj_int_ceildivpow2(l_comp_y1, (OPJ_INT32)l_img_comp->factor)
- opj_int_ceildivpow2((OPJ_INT32)l_img_comp->y0, (OPJ_INT32)l_img_comp->factor);
if (l_h < 0){
opj_event_msg(p_manager, EVT_ERROR,
"Size y of the decoded component image is incorrect (comp[%d].h=%d).\n",
it_comp, l_h);
return OPJ_FALSE;
}
l_img_comp->h = (OPJ_UINT32)l_h;
l_img_comp++;
}
opj_event_msg( p_manager, EVT_INFO,"Setting decoding area to %d,%d,%d,%d\n",
p_image->x0, p_image->y0, p_image->x1, p_image->y1);
return OPJ_TRUE;
}
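/* Allocates a decoder instance and its working structures: the default tile
* coding parameters, a scratch header buffer of OPJ_J2K_DEFAULT_HEADER_SIZE
* bytes, the codestream index and the validation and procedure lists. On any
* allocation failure the partially built object is released with
* opj_j2k_destroy and 00 is returned. */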
opj_j2k_t* opj_j2k_create_decompress(void)
{
opj_j2k_t *l_j2k = (opj_j2k_t*) opj_calloc(1,sizeof(opj_j2k_t));
if (!l_j2k) {
return 00;
}
l_j2k->m_is_decoder = 1;
l_j2k->m_cp.m_is_decoder = 1;
#ifdef OPJ_DISABLE_TPSOT_FIX
l_j2k->m_specific_param.m_decoder.m_nb_tile_parts_correction_checked = 1;
#endif
l_j2k->m_specific_param.m_decoder.m_default_tcp = (opj_tcp_t*) opj_calloc(1,sizeof(opj_tcp_t));
if (!l_j2k->m_specific_param.m_decoder.m_default_tcp) {
opj_j2k_destroy(l_j2k);
return 00;
}
l_j2k->m_specific_param.m_decoder.m_header_data = (OPJ_BYTE *) opj_calloc(1,OPJ_J2K_DEFAULT_HEADER_SIZE);
if (! l_j2k->m_specific_param.m_decoder.m_header_data) {
opj_j2k_destroy(l_j2k);
return 00;
}
l_j2k->m_specific_param.m_decoder.m_header_data_size = OPJ_J2K_DEFAULT_HEADER_SIZE;
l_j2k->m_specific_param.m_decoder.m_tile_ind_to_dec = -1 ;
l_j2k->m_specific_param.m_decoder.m_last_sot_read_pos = 0 ;
/* codestream index creation */
l_j2k->cstr_index = opj_j2k_create_cstr_index();
if (!l_j2k->cstr_index){
opj_j2k_destroy(l_j2k);
return 00;
}
/* validation list creation */
l_j2k->m_validation_list = opj_procedure_list_create();
if (! l_j2k->m_validation_list) {
opj_j2k_destroy(l_j2k);
return 00;
}
/* execution list creation */
l_j2k->m_procedure_list = opj_procedure_list_create();
if (! l_j2k->m_procedure_list) {
opj_j2k_destroy(l_j2k);
return 00;
}
return l_j2k;
}
static opj_codestream_index_t* opj_j2k_create_cstr_index(void)
{
opj_codestream_index_t* cstr_index = (opj_codestream_index_t*)
opj_calloc(1,sizeof(opj_codestream_index_t));
if (!cstr_index)
return NULL;
cstr_index->maxmarknum = 100;
cstr_index->marknum = 0;
cstr_index->marker = (opj_marker_info_t*)
opj_calloc(cstr_index->maxmarknum, sizeof(opj_marker_info_t));
if (!cstr_index->marker) {
opj_free(cstr_index); /* avoid leaking the partially built index */
return NULL;
}
cstr_index->tile_index = NULL;
return cstr_index;
}
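/* Returns the size in bytes of the SPCod/SPCoc parameters of one component:
* 5 fixed bytes (number of decomposition levels, code-block width and height,
* code-block style, transformation), plus one precinct-size byte per
* resolution when custom precincts (J2K_CCP_CSTY_PRT) are in use. */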
static OPJ_UINT32 opj_j2k_get_SPCod_SPCoc_size ( opj_j2k_t *p_j2k,
OPJ_UINT32 p_tile_no,
OPJ_UINT32 p_comp_no )
{
opj_cp_t *l_cp = 00;
opj_tcp_t *l_tcp = 00;
opj_tccp_t *l_tccp = 00;
/* preconditions */
assert(p_j2k != 00);
l_cp = &(p_j2k->m_cp);
l_tcp = &l_cp->tcps[p_tile_no];
l_tccp = &l_tcp->tccps[p_comp_no];
/* preconditions again */
assert(p_tile_no < (l_cp->tw * l_cp->th));
assert(p_comp_no < p_j2k->m_private_image->numcomps);
if (l_tccp->csty & J2K_CCP_CSTY_PRT) {
return 5 + l_tccp->numresolutions;
}
else {
return 5;
}
}
static OPJ_BOOL opj_j2k_write_SPCod_SPCoc( opj_j2k_t *p_j2k,
OPJ_UINT32 p_tile_no,
OPJ_UINT32 p_comp_no,
OPJ_BYTE * p_data,
OPJ_UINT32 * p_header_size,
struct opj_event_mgr * p_manager )
{
OPJ_UINT32 i;
opj_cp_t *l_cp = 00;
opj_tcp_t *l_tcp = 00;
opj_tccp_t *l_tccp = 00;
/* preconditions */
assert(p_j2k != 00);
assert(p_header_size != 00);
assert(p_manager != 00);
assert(p_data != 00);
l_cp = &(p_j2k->m_cp);
l_tcp = &l_cp->tcps[p_tile_no];
l_tccp = &l_tcp->tccps[p_comp_no];
/* preconditions again */
assert(p_tile_no < (l_cp->tw * l_cp->th));
assert(p_comp_no <(p_j2k->m_private_image->numcomps));
if (*p_header_size < 5) {
opj_event_msg(p_manager, EVT_ERROR, "Error writing SPCod SPCoc element\n");
return OPJ_FALSE;
}
opj_write_bytes(p_data,l_tccp->numresolutions - 1, 1); /* SPcoc (D) */
++p_data;
opj_write_bytes(p_data,l_tccp->cblkw - 2, 1); /* SPcoc (E) */
++p_data;
opj_write_bytes(p_data,l_tccp->cblkh - 2, 1); /* SPcoc (F) */
++p_data;
opj_write_bytes(p_data,l_tccp->cblksty, 1); /* SPcoc (G) */
++p_data;
opj_write_bytes(p_data,l_tccp->qmfbid, 1); /* SPcoc (H) */
++p_data;
*p_header_size = *p_header_size - 5;
if (l_tccp->csty & J2K_CCP_CSTY_PRT) {
if (*p_header_size < l_tccp->numresolutions) {
opj_event_msg(p_manager, EVT_ERROR, "Error writing SPCod SPCoc element\n");
return OPJ_FALSE;
}
for (i = 0; i < l_tccp->numresolutions; ++i) {
opj_write_bytes(p_data,l_tccp->prcw[i] + (l_tccp->prch[i] << 4), 1); /* SPcoc (I_i) */
++p_data;
}
*p_header_size = *p_header_size - l_tccp->numresolutions;
}
return OPJ_TRUE;
}
static OPJ_BOOL opj_j2k_read_SPCod_SPCoc( opj_j2k_t *p_j2k,
OPJ_UINT32 compno,
OPJ_BYTE * p_header_data,
OPJ_UINT32 * p_header_size,
opj_event_mgr_t * p_manager)
{
OPJ_UINT32 i, l_tmp;
opj_cp_t *l_cp = NULL;
opj_tcp_t *l_tcp = NULL;
opj_tccp_t *l_tccp = NULL;
OPJ_BYTE * l_current_ptr = NULL;
/* preconditions */
assert(p_j2k != 00);
assert(p_manager != 00);
assert(p_header_data != 00);
l_cp = &(p_j2k->m_cp);
l_tcp = (p_j2k->m_specific_param.m_decoder.m_state == J2K_STATE_TPH) ?
&l_cp->tcps[p_j2k->m_current_tile_number] :
p_j2k->m_specific_param.m_decoder.m_default_tcp;
/* precondition again */
assert(compno < p_j2k->m_private_image->numcomps);
l_tccp = &l_tcp->tccps[compno];
l_current_ptr = p_header_data;
/* make sure room is sufficient */
if (*p_header_size < 5) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading SPCod SPCoc element\n");
return OPJ_FALSE;
}
opj_read_bytes(l_current_ptr, &l_tccp->numresolutions ,1); /* SPcox (D) */
++l_tccp->numresolutions; /* tccp->numresolutions = read() + 1 */
if (l_tccp->numresolutions > OPJ_J2K_MAXRLVLS) {
opj_event_msg(p_manager, EVT_ERROR,
"Invalid value for numresolutions : %d, max value is set in openjpeg.h at %d\n",
l_tccp->numresolutions, OPJ_J2K_MAXRLVLS);
return OPJ_FALSE;
}
++l_current_ptr;
/* If user wants to remove more resolutions than the codestream contains, return error */
if (l_cp->m_specific_param.m_dec.m_reduce >= l_tccp->numresolutions) {
opj_event_msg(p_manager, EVT_ERROR, "Error decoding component %d.\nThe number of resolutions to remove is higher than the number "
"of resolutions of this component\nModify the cp_reduce parameter.\n\n", compno);
p_j2k->m_specific_param.m_decoder.m_state |= 0x8000;/* FIXME J2K_DEC_STATE_ERR;*/
return OPJ_FALSE;
}
opj_read_bytes(l_current_ptr,&l_tccp->cblkw ,1); /* SPcoc (E) */
++l_current_ptr;
l_tccp->cblkw += 2;
opj_read_bytes(l_current_ptr,&l_tccp->cblkh ,1); /* SPcoc (F) */
++l_current_ptr;
l_tccp->cblkh += 2;
if ((l_tccp->cblkw > 10) || (l_tccp->cblkh > 10) || ((l_tccp->cblkw + l_tccp->cblkh) > 12)) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading SPCod SPCoc element, Invalid cblkw/cblkh combination\n");
return OPJ_FALSE;
}
opj_read_bytes(l_current_ptr,&l_tccp->cblksty ,1); /* SPcoc (G) */
++l_current_ptr;
opj_read_bytes(l_current_ptr,&l_tccp->qmfbid ,1); /* SPcoc (H) */
++l_current_ptr;
*p_header_size = *p_header_size - 5;
/* use custom precinct size ? */
if (l_tccp->csty & J2K_CCP_CSTY_PRT) {
if (*p_header_size < l_tccp->numresolutions) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading SPCod SPCoc element\n");
return OPJ_FALSE;
}
for (i = 0; i < l_tccp->numresolutions; ++i) {
opj_read_bytes(l_current_ptr,&l_tmp ,1); /* SPcoc (I_i) */
++l_current_ptr;
/* Precinct exponent 0 is only allowed for lowest resolution level (Table A.21) */
if ((i != 0) && (((l_tmp & 0xf) == 0) || ((l_tmp >> 4) == 0))) {
opj_event_msg(p_manager, EVT_ERROR, "Invalid precinct size\n");
return OPJ_FALSE;
}
l_tccp->prcw[i] = l_tmp & 0xf;
l_tccp->prch[i] = l_tmp >> 4;
}
*p_header_size = *p_header_size - l_tccp->numresolutions;
}
else {
/* set default size for the precinct width and height */
for (i = 0; i < l_tccp->numresolutions; ++i) {
l_tccp->prcw[i] = 15;
l_tccp->prch[i] = 15;
}
}
#ifdef WIP_REMOVE_MSD
/* INDEX >> */
if (p_j2k->cstr_info && compno == 0) {
OPJ_UINT32 l_data_size = l_tccp->numresolutions * sizeof(OPJ_UINT32);
p_j2k->cstr_info->tile[p_j2k->m_current_tile_number].tccp_info[compno].cblkh = l_tccp->cblkh;
p_j2k->cstr_info->tile[p_j2k->m_current_tile_number].tccp_info[compno].cblkw = l_tccp->cblkw;
p_j2k->cstr_info->tile[p_j2k->m_current_tile_number].tccp_info[compno].numresolutions = l_tccp->numresolutions;
p_j2k->cstr_info->tile[p_j2k->m_current_tile_number].tccp_info[compno].cblksty = l_tccp->cblksty;
p_j2k->cstr_info->tile[p_j2k->m_current_tile_number].tccp_info[compno].qmfbid = l_tccp->qmfbid;
memcpy(p_j2k->cstr_info->tile[p_j2k->m_current_tile_number].pdx,l_tccp->prcw, l_data_size);
memcpy(p_j2k->cstr_info->tile[p_j2k->m_current_tile_number].pdy,l_tccp->prch, l_data_size);
}
/* << INDEX */
#endif
return OPJ_TRUE;
}
static void opj_j2k_copy_tile_component_parameters( opj_j2k_t *p_j2k )
{
/* loop */
OPJ_UINT32 i;
opj_cp_t *l_cp = NULL;
opj_tcp_t *l_tcp = NULL;
opj_tccp_t *l_ref_tccp = NULL, *l_copied_tccp = NULL;
OPJ_UINT32 l_prc_size;
/* preconditions */
assert(p_j2k != 00);
l_cp = &(p_j2k->m_cp);
l_tcp = (p_j2k->m_specific_param.m_decoder.m_state == J2K_STATE_TPH) ? /* FIXME J2K_DEC_STATE_TPH*/
&l_cp->tcps[p_j2k->m_current_tile_number] :
p_j2k->m_specific_param.m_decoder.m_default_tcp;
l_ref_tccp = &l_tcp->tccps[0];
l_copied_tccp = l_ref_tccp + 1;
l_prc_size = l_ref_tccp->numresolutions * (OPJ_UINT32)sizeof(OPJ_UINT32);
for (i=1; i<p_j2k->m_private_image->numcomps; ++i) {
l_copied_tccp->numresolutions = l_ref_tccp->numresolutions;
l_copied_tccp->cblkw = l_ref_tccp->cblkw;
l_copied_tccp->cblkh = l_ref_tccp->cblkh;
l_copied_tccp->cblksty = l_ref_tccp->cblksty;
l_copied_tccp->qmfbid = l_ref_tccp->qmfbid;
memcpy(l_copied_tccp->prcw,l_ref_tccp->prcw,l_prc_size);
memcpy(l_copied_tccp->prch,l_ref_tccp->prch,l_prc_size);
++l_copied_tccp;
}
}
static OPJ_UINT32 opj_j2k_get_SQcd_SQcc_size ( opj_j2k_t *p_j2k,
OPJ_UINT32 p_tile_no,
OPJ_UINT32 p_comp_no )
{
OPJ_UINT32 l_num_bands;
opj_cp_t *l_cp = 00;
opj_tcp_t *l_tcp = 00;
opj_tccp_t *l_tccp = 00;
/* preconditions */
assert(p_j2k != 00);
l_cp = &(p_j2k->m_cp);
l_tcp = &l_cp->tcps[p_tile_no];
l_tccp = &l_tcp->tccps[p_comp_no];
/* preconditions again */
assert(p_tile_no < l_cp->tw * l_cp->th);
assert(p_comp_no < p_j2k->m_private_image->numcomps);
l_num_bands = (l_tccp->qntsty == J2K_CCP_QNTSTY_SIQNT) ? 1 : (l_tccp->numresolutions * 3 - 2);
if (l_tccp->qntsty == J2K_CCP_QNTSTY_NOQNT) {
return 1 + l_num_bands;
}
else {
return 1 + 2*l_num_bands;
}
}
static OPJ_BOOL opj_j2k_write_SQcd_SQcc( opj_j2k_t *p_j2k,
OPJ_UINT32 p_tile_no,
OPJ_UINT32 p_comp_no,
OPJ_BYTE * p_data,
OPJ_UINT32 * p_header_size,
struct opj_event_mgr * p_manager )
{
OPJ_UINT32 l_header_size;
OPJ_UINT32 l_band_no, l_num_bands;
OPJ_UINT32 l_expn,l_mant;
opj_cp_t *l_cp = 00;
opj_tcp_t *l_tcp = 00;
opj_tccp_t *l_tccp = 00;
/* preconditions */
assert(p_j2k != 00);
assert(p_header_size != 00);
assert(p_manager != 00);
assert(p_data != 00);
l_cp = &(p_j2k->m_cp);
l_tcp = &l_cp->tcps[p_tile_no];
l_tccp = &l_tcp->tccps[p_comp_no];
/* preconditions again */
assert(p_tile_no < l_cp->tw * l_cp->th);
assert(p_comp_no <p_j2k->m_private_image->numcomps);
l_num_bands = (l_tccp->qntsty == J2K_CCP_QNTSTY_SIQNT) ? 1 : (l_tccp->numresolutions * 3 - 2);
if (l_tccp->qntsty == J2K_CCP_QNTSTY_NOQNT) {
l_header_size = 1 + l_num_bands;
if (*p_header_size < l_header_size) {
opj_event_msg(p_manager, EVT_ERROR, "Error writing SQcd SQcc element\n");
return OPJ_FALSE;
}
opj_write_bytes(p_data,l_tccp->qntsty + (l_tccp->numgbits << 5), 1); /* Sqcx */
++p_data;
for (l_band_no = 0; l_band_no < l_num_bands; ++l_band_no) {
l_expn = (OPJ_UINT32)l_tccp->stepsizes[l_band_no].expn;
opj_write_bytes(p_data, l_expn << 3, 1); /* SPqcx_i */
++p_data;
}
}
else {
l_header_size = 1 + 2*l_num_bands;
if (*p_header_size < l_header_size) {
opj_event_msg(p_manager, EVT_ERROR, "Error writing SQcd SQcc element\n");
return OPJ_FALSE;
}
opj_write_bytes(p_data,l_tccp->qntsty + (l_tccp->numgbits << 5), 1); /* Sqcx */
++p_data;
for (l_band_no = 0; l_band_no < l_num_bands; ++l_band_no) {
l_expn = (OPJ_UINT32)l_tccp->stepsizes[l_band_no].expn;
l_mant = (OPJ_UINT32)l_tccp->stepsizes[l_band_no].mant;
opj_write_bytes(p_data, (l_expn << 11) + l_mant, 2); /* SPqcx_i */
p_data += 2;
}
}
*p_header_size = *p_header_size - l_header_size;
return OPJ_TRUE;
}
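/* Reads the SQcd (from QCD) or SQcc (from QCC) quantization values of one
* component. The Sqcx byte packs the quantization style in its 5 low bits and
* the number of guard bits in its 3 high bits. With no quantization each
* SPqcx_i is one byte holding expn << 3; otherwise each SPqcx_i is two bytes
* holding (expn << 11) | mant. For example (illustrative values only), a
* two-byte SPqcx_i of 0x4800 decodes to expn = 9, mant = 0. */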
static OPJ_BOOL opj_j2k_read_SQcd_SQcc(opj_j2k_t *p_j2k,
OPJ_UINT32 p_comp_no,
OPJ_BYTE* p_header_data,
OPJ_UINT32 * p_header_size,
opj_event_mgr_t * p_manager
)
{
/* loop*/
OPJ_UINT32 l_band_no;
opj_cp_t *l_cp = 00;
opj_tcp_t *l_tcp = 00;
opj_tccp_t *l_tccp = 00;
OPJ_BYTE * l_current_ptr = 00;
OPJ_UINT32 l_tmp, l_num_band;
/* preconditions*/
assert(p_j2k != 00);
assert(p_manager != 00);
assert(p_header_data != 00);
l_cp = &(p_j2k->m_cp);
/* come from tile part header or main header ?*/
l_tcp = (p_j2k->m_specific_param.m_decoder.m_state == J2K_STATE_TPH) ? /*FIXME J2K_DEC_STATE_TPH*/
&l_cp->tcps[p_j2k->m_current_tile_number] :
p_j2k->m_specific_param.m_decoder.m_default_tcp;
/* precondition again*/
assert(p_comp_no < p_j2k->m_private_image->numcomps);
l_tccp = &l_tcp->tccps[p_comp_no];
l_current_ptr = p_header_data;
if (*p_header_size < 1) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading SQcd or SQcc element\n");
return OPJ_FALSE;
}
*p_header_size -= 1;
opj_read_bytes(l_current_ptr, &l_tmp ,1); /* Sqcx */
++l_current_ptr;
l_tccp->qntsty = l_tmp & 0x1f;
l_tccp->numgbits = l_tmp >> 5;
if (l_tccp->qntsty == J2K_CCP_QNTSTY_SIQNT) {
l_num_band = 1;
}
else {
l_num_band = (l_tccp->qntsty == J2K_CCP_QNTSTY_NOQNT) ?
(*p_header_size) :
(*p_header_size) / 2;
if( l_num_band > OPJ_J2K_MAXBANDS ) {
opj_event_msg(p_manager, EVT_WARNING, "While reading CCP_QNTSTY element inside QCD or QCC marker segment, "
"number of subbands (%d) is greater to OPJ_J2K_MAXBANDS (%d). So we limit the number of elements stored to "
"OPJ_J2K_MAXBANDS (%d) and skip the rest. \n", l_num_band, OPJ_J2K_MAXBANDS, OPJ_J2K_MAXBANDS);
/*return OPJ_FALSE;*/
}
}
#ifdef USE_JPWL
if (l_cp->correct) {
/* if JPWL is on, we check whether there are too many subbands */
if (/*(l_num_band < 0) ||*/ (l_num_band >= OPJ_J2K_MAXBANDS)) {
opj_event_msg(p_manager, JPWL_ASSUME ? EVT_WARNING : EVT_ERROR,
"JPWL: bad number of subbands in Sqcx (%d)\n",
l_num_band);
if (!JPWL_ASSUME) {
opj_event_msg(p_manager, EVT_ERROR, "JPWL: giving up\n");
return OPJ_FALSE;
}
/* we try to correct */
l_num_band = 1;
opj_event_msg(p_manager, EVT_WARNING, "- trying to adjust them\n"
"- setting number of bands to %d => HYPOTHESIS!!!\n",
l_num_band);
};
};
#endif /* USE_JPWL */
if (l_tccp->qntsty == J2K_CCP_QNTSTY_NOQNT) {
for (l_band_no = 0; l_band_no < l_num_band; l_band_no++) {
opj_read_bytes(l_current_ptr, &l_tmp ,1); /* SPqcx_i */
++l_current_ptr;
if (l_band_no < OPJ_J2K_MAXBANDS){
l_tccp->stepsizes[l_band_no].expn = (OPJ_INT32)(l_tmp >> 3);
l_tccp->stepsizes[l_band_no].mant = 0;
}
}
*p_header_size = *p_header_size - l_num_band;
}
else {
for (l_band_no = 0; l_band_no < l_num_band; l_band_no++) {
opj_read_bytes(l_current_ptr, &l_tmp ,2); /* SPqcx_i */
l_current_ptr+=2;
if (l_band_no < OPJ_J2K_MAXBANDS){
l_tccp->stepsizes[l_band_no].expn = (OPJ_INT32)(l_tmp >> 11);
l_tccp->stepsizes[l_band_no].mant = l_tmp & 0x7ff;
}
}
*p_header_size = *p_header_size - 2*l_num_band;
}
/* Added by Antonin: if scalar derived (SIQNT) -> compute the remaining stepsizes */
if (l_tccp->qntsty == J2K_CCP_QNTSTY_SIQNT) {
for (l_band_no = 1; l_band_no < OPJ_J2K_MAXBANDS; l_band_no++) {
l_tccp->stepsizes[l_band_no].expn =
((OPJ_INT32)(l_tccp->stepsizes[0].expn) - (OPJ_INT32)((l_band_no - 1) / 3) > 0) ?
(OPJ_INT32)(l_tccp->stepsizes[0].expn) - (OPJ_INT32)((l_band_no - 1) / 3) : 0;
l_tccp->stepsizes[l_band_no].mant = l_tccp->stepsizes[0].mant;
}
}
return OPJ_TRUE;
}
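/* Worked example (editor's illustrative note): with scalar derived
 * quantization only band 0 is transmitted. If stepsizes[0].expn is 12, the
 * loop above yields expn = 12 for bands 1..3, 11 for bands 4..6, 10 for
 * bands 7..9, and so on, clamped at 0, while every mantissa is copied from
 * band 0 -- i.e. the exponent drops by one per resolution level, matching
 * the (l_band_no - 1) / 3 term. */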
static void opj_j2k_copy_tile_quantization_parameters( opj_j2k_t *p_j2k )
{
OPJ_UINT32 i;
opj_cp_t *l_cp = NULL;
opj_tcp_t *l_tcp = NULL;
opj_tccp_t *l_ref_tccp = NULL;
opj_tccp_t *l_copied_tccp = NULL;
OPJ_UINT32 l_size;
/* preconditions */
assert(p_j2k != 00);
l_cp = &(p_j2k->m_cp);
l_tcp = p_j2k->m_specific_param.m_decoder.m_state == J2K_STATE_TPH ?
&l_cp->tcps[p_j2k->m_current_tile_number] :
p_j2k->m_specific_param.m_decoder.m_default_tcp;
l_ref_tccp = &l_tcp->tccps[0];
l_copied_tccp = l_ref_tccp + 1;
l_size = OPJ_J2K_MAXBANDS * sizeof(opj_stepsize_t);
for (i=1;i<p_j2k->m_private_image->numcomps;++i) {
l_copied_tccp->qntsty = l_ref_tccp->qntsty;
l_copied_tccp->numgbits = l_ref_tccp->numgbits;
memcpy(l_copied_tccp->stepsizes,l_ref_tccp->stepsizes,l_size);
++l_copied_tccp;
}
}
static void opj_j2k_dump_tile_info( opj_tcp_t * l_default_tile,OPJ_INT32 numcomps,FILE* out_stream)
{
if (l_default_tile)
{
OPJ_INT32 compno;
fprintf(out_stream, "\t default tile {\n");
fprintf(out_stream, "\t\t csty=%#x\n", l_default_tile->csty);
fprintf(out_stream, "\t\t prg=%#x\n", l_default_tile->prg);
fprintf(out_stream, "\t\t numlayers=%d\n", l_default_tile->numlayers);
fprintf(out_stream, "\t\t mct=%x\n", l_default_tile->mct);
for (compno = 0; compno < numcomps; compno++) {
opj_tccp_t *l_tccp = &(l_default_tile->tccps[compno]);
OPJ_UINT32 resno;
OPJ_INT32 bandno, numbands;
/* coding style*/
fprintf(out_stream, "\t\t comp %d {\n", compno);
fprintf(out_stream, "\t\t\t csty=%#x\n", l_tccp->csty);
fprintf(out_stream, "\t\t\t numresolutions=%d\n", l_tccp->numresolutions);
fprintf(out_stream, "\t\t\t cblkw=2^%d\n", l_tccp->cblkw);
fprintf(out_stream, "\t\t\t cblkh=2^%d\n", l_tccp->cblkh);
fprintf(out_stream, "\t\t\t cblksty=%#x\n", l_tccp->cblksty);
fprintf(out_stream, "\t\t\t qmfbid=%d\n", l_tccp->qmfbid);
fprintf(out_stream, "\t\t\t preccintsize (w,h)=");
for (resno = 0; resno < l_tccp->numresolutions; resno++) {
fprintf(out_stream, "(%d,%d) ", l_tccp->prcw[resno], l_tccp->prch[resno]);
}
fprintf(out_stream, "\n");
/* quantization style*/
fprintf(out_stream, "\t\t\t qntsty=%d\n", l_tccp->qntsty);
fprintf(out_stream, "\t\t\t numgbits=%d\n", l_tccp->numgbits);
fprintf(out_stream, "\t\t\t stepsizes (m,e)=");
numbands = (l_tccp->qntsty == J2K_CCP_QNTSTY_SIQNT) ? 1 : (OPJ_INT32)l_tccp->numresolutions * 3 - 2;
for (bandno = 0; bandno < numbands; bandno++) {
fprintf(out_stream, "(%d,%d) ", l_tccp->stepsizes[bandno].mant,
l_tccp->stepsizes[bandno].expn);
}
fprintf(out_stream, "\n");
/* RGN value*/
fprintf(out_stream, "\t\t\t roishift=%d\n", l_tccp->roishift);
fprintf(out_stream, "\t\t }\n");
} /*end of component of default tile*/
fprintf(out_stream, "\t }\n"); /*end of default tile*/
}
}
void j2k_dump (opj_j2k_t* p_j2k, OPJ_INT32 flag, FILE* out_stream)
{
/* Check if the flag is compatible with j2k file*/
if ( (flag & OPJ_JP2_INFO) || (flag & OPJ_JP2_IND)){
fprintf(out_stream, "Wrong flag\n");
return;
}
/* Dump the image_header */
if (flag & OPJ_IMG_INFO){
if (p_j2k->m_private_image)
j2k_dump_image_header(p_j2k->m_private_image, 0, out_stream);
}
/* Dump the codestream info from main header */
if (flag & OPJ_J2K_MH_INFO){
opj_j2k_dump_MH_info(p_j2k, out_stream);
}
/* Dump all tile/codestream info */
if (flag & OPJ_J2K_TCH_INFO){
OPJ_UINT32 l_nb_tiles = p_j2k->m_cp.th * p_j2k->m_cp.tw;
OPJ_UINT32 i;
opj_tcp_t * l_tcp = p_j2k->m_cp.tcps;
for (i=0;i<l_nb_tiles;++i) {
opj_j2k_dump_tile_info( l_tcp,(OPJ_INT32)p_j2k->m_private_image->numcomps, out_stream);
++l_tcp;
}
}
/* Dump the codestream info of the current tile */
if (flag & OPJ_J2K_TH_INFO){
}
/* Dump the codestream index from main header */
if (flag & OPJ_J2K_MH_IND){
opj_j2k_dump_MH_index(p_j2k, out_stream);
}
/* Dump the codestream index of the current tile */
if (flag & OPJ_J2K_TH_IND){
}
}
static void opj_j2k_dump_MH_index(opj_j2k_t* p_j2k, FILE* out_stream)
{
opj_codestream_index_t* cstr_index = p_j2k->cstr_index;
OPJ_UINT32 it_marker, it_tile, it_tile_part;
fprintf(out_stream, "Codestream index from main header: {\n");
fprintf(out_stream, "\t Main header start position=%" PRIi64 "\n"
"\t Main header end position=%" PRIi64 "\n",
cstr_index->main_head_start, cstr_index->main_head_end);
fprintf(out_stream, "\t Marker list: {\n");
if (cstr_index->marker){
for (it_marker=0; it_marker < cstr_index->marknum ; it_marker++){
fprintf(out_stream, "\t\t type=%#x, pos=%" PRIi64 ", len=%d\n",
cstr_index->marker[it_marker].type,
cstr_index->marker[it_marker].pos,
cstr_index->marker[it_marker].len );
}
}
fprintf(out_stream, "\t }\n");
if (cstr_index->tile_index){
/* Simple test to avoid writing empty information */
OPJ_UINT32 l_acc_nb_of_tile_part = 0;
for (it_tile=0; it_tile < cstr_index->nb_of_tiles ; it_tile++){
l_acc_nb_of_tile_part += cstr_index->tile_index[it_tile].nb_tps;
}
if (l_acc_nb_of_tile_part)
{
fprintf(out_stream, "\t Tile index: {\n");
for (it_tile=0; it_tile < cstr_index->nb_of_tiles ; it_tile++){
OPJ_UINT32 nb_of_tile_part = cstr_index->tile_index[it_tile].nb_tps;
fprintf(out_stream, "\t\t nb of tile-part in tile [%d]=%d\n", it_tile, nb_of_tile_part);
if (cstr_index->tile_index[it_tile].tp_index){
for (it_tile_part =0; it_tile_part < nb_of_tile_part; it_tile_part++){
fprintf(out_stream, "\t\t\t tile-part[%d]: star_pos=%" PRIi64 ", end_header=%" PRIi64 ", end_pos=%" PRIi64 ".\n",
it_tile_part,
cstr_index->tile_index[it_tile].tp_index[it_tile_part].start_pos,
cstr_index->tile_index[it_tile].tp_index[it_tile_part].end_header,
cstr_index->tile_index[it_tile].tp_index[it_tile_part].end_pos);
}
}
if (cstr_index->tile_index[it_tile].marker){
for (it_marker=0; it_marker < cstr_index->tile_index[it_tile].marknum ; it_marker++){
fprintf(out_stream, "\t\t type=%#x, pos=%" PRIi64 ", len=%d\n",
cstr_index->tile_index[it_tile].marker[it_marker].type,
cstr_index->tile_index[it_tile].marker[it_marker].pos,
cstr_index->tile_index[it_tile].marker[it_marker].len );
}
}
}
fprintf(out_stream,"\t }\n");
}
}
fprintf(out_stream,"}\n");
}
static void opj_j2k_dump_MH_info(opj_j2k_t* p_j2k, FILE* out_stream)
{
fprintf(out_stream, "Codestream info from main header: {\n");
fprintf(out_stream, "\t tx0=%d, ty0=%d\n", p_j2k->m_cp.tx0, p_j2k->m_cp.ty0);
fprintf(out_stream, "\t tdx=%d, tdy=%d\n", p_j2k->m_cp.tdx, p_j2k->m_cp.tdy);
fprintf(out_stream, "\t tw=%d, th=%d\n", p_j2k->m_cp.tw, p_j2k->m_cp.th);
opj_j2k_dump_tile_info(p_j2k->m_specific_param.m_decoder.m_default_tcp,(OPJ_INT32)p_j2k->m_private_image->numcomps, out_stream);
fprintf(out_stream, "}\n");
}
void j2k_dump_image_header(opj_image_t* img_header, OPJ_BOOL dev_dump_flag, FILE* out_stream)
{
char tab[2];
if (dev_dump_flag){
fprintf(stdout, "[DEV] Dump an image_header struct {\n");
tab[0] = '\0';
}
else {
fprintf(out_stream, "Image info {\n");
tab[0] = '\t';tab[1] = '\0';
}
fprintf(out_stream, "%s x0=%d, y0=%d\n", tab, img_header->x0, img_header->y0);
fprintf(out_stream, "%s x1=%d, y1=%d\n", tab, img_header->x1, img_header->y1);
fprintf(out_stream, "%s numcomps=%d\n", tab, img_header->numcomps);
if (img_header->comps){
OPJ_UINT32 compno;
for (compno = 0; compno < img_header->numcomps; compno++) {
fprintf(out_stream, "%s\t component %d {\n", tab, compno);
j2k_dump_image_comp_header(&(img_header->comps[compno]), dev_dump_flag, out_stream);
fprintf(out_stream,"%s}\n",tab);
}
}
fprintf(out_stream, "}\n");
}
void j2k_dump_image_comp_header(opj_image_comp_t* comp_header, OPJ_BOOL dev_dump_flag, FILE* out_stream)
{
char tab[3];
if (dev_dump_flag){
fprintf(stdout, "[DEV] Dump an image_comp_header struct {\n");
tab[0] = '\0';
} else {
tab[0] = '\t';tab[1] = '\t';tab[2] = '\0';
}
fprintf(out_stream, "%s dx=%d, dy=%d\n", tab, comp_header->dx, comp_header->dy);
fprintf(out_stream, "%s prec=%d\n", tab, comp_header->prec);
fprintf(out_stream, "%s sgnd=%d\n", tab, comp_header->sgnd);
if (dev_dump_flag)
fprintf(out_stream, "}\n");
}
opj_codestream_info_v2_t* j2k_get_cstr_info(opj_j2k_t* p_j2k)
{
OPJ_UINT32 compno;
OPJ_UINT32 numcomps = p_j2k->m_private_image->numcomps;
opj_tcp_t *l_default_tile;
opj_codestream_info_v2_t* cstr_info = (opj_codestream_info_v2_t*) opj_calloc(1,sizeof(opj_codestream_info_v2_t));
if (!cstr_info)
return NULL;
cstr_info->nbcomps = p_j2k->m_private_image->numcomps;
cstr_info->tx0 = p_j2k->m_cp.tx0;
cstr_info->ty0 = p_j2k->m_cp.ty0;
cstr_info->tdx = p_j2k->m_cp.tdx;
cstr_info->tdy = p_j2k->m_cp.tdy;
cstr_info->tw = p_j2k->m_cp.tw;
cstr_info->th = p_j2k->m_cp.th;
cstr_info->tile_info = NULL; /* Not filled from the main header */
l_default_tile = p_j2k->m_specific_param.m_decoder.m_default_tcp;
cstr_info->m_default_tile_info.csty = l_default_tile->csty;
cstr_info->m_default_tile_info.prg = l_default_tile->prg;
cstr_info->m_default_tile_info.numlayers = l_default_tile->numlayers;
cstr_info->m_default_tile_info.mct = l_default_tile->mct;
cstr_info->m_default_tile_info.tccp_info = (opj_tccp_info_t*) opj_calloc(cstr_info->nbcomps, sizeof(opj_tccp_info_t));
if (!cstr_info->m_default_tile_info.tccp_info)
{
opj_destroy_cstr_info(&cstr_info);
return NULL;
}
for (compno = 0; compno < numcomps; compno++) {
opj_tccp_t *l_tccp = &(l_default_tile->tccps[compno]);
opj_tccp_info_t *l_tccp_info = &(cstr_info->m_default_tile_info.tccp_info[compno]);
OPJ_INT32 bandno, numbands;
/* coding style*/
l_tccp_info->csty = l_tccp->csty;
l_tccp_info->numresolutions = l_tccp->numresolutions;
l_tccp_info->cblkw = l_tccp->cblkw;
l_tccp_info->cblkh = l_tccp->cblkh;
l_tccp_info->cblksty = l_tccp->cblksty;
l_tccp_info->qmfbid = l_tccp->qmfbid;
if (l_tccp->numresolutions < OPJ_J2K_MAXRLVLS)
{
memcpy(l_tccp_info->prch, l_tccp->prch, l_tccp->numresolutions);
memcpy(l_tccp_info->prcw, l_tccp->prcw, l_tccp->numresolutions);
}
/* quantization style*/
l_tccp_info->qntsty = l_tccp->qntsty;
l_tccp_info->numgbits = l_tccp->numgbits;
numbands = (l_tccp->qntsty == J2K_CCP_QNTSTY_SIQNT) ? 1 : (OPJ_INT32)l_tccp->numresolutions * 3 - 2;
if (numbands < OPJ_J2K_MAXBANDS) {
for (bandno = 0; bandno < numbands; bandno++) {
l_tccp_info->stepsizes_mant[bandno] = (OPJ_UINT32)l_tccp->stepsizes[bandno].mant;
l_tccp_info->stepsizes_expn[bandno] = (OPJ_UINT32)l_tccp->stepsizes[bandno].expn;
}
}
/* RGN value*/
l_tccp_info->roishift = l_tccp->roishift;
}
return cstr_info;
}
opj_codestream_index_t* j2k_get_cstr_index(opj_j2k_t* p_j2k)
{
opj_codestream_index_t* l_cstr_index = (opj_codestream_index_t*)
opj_calloc(1,sizeof(opj_codestream_index_t));
if (!l_cstr_index)
return NULL;
l_cstr_index->main_head_start = p_j2k->cstr_index->main_head_start;
l_cstr_index->main_head_end = p_j2k->cstr_index->main_head_end;
l_cstr_index->codestream_size = p_j2k->cstr_index->codestream_size;
l_cstr_index->marknum = p_j2k->cstr_index->marknum;
l_cstr_index->marker = (opj_marker_info_t*)opj_malloc(l_cstr_index->marknum*sizeof(opj_marker_info_t));
if (!l_cstr_index->marker){
opj_free( l_cstr_index);
return NULL;
}
if (p_j2k->cstr_index->marker)
memcpy(l_cstr_index->marker, p_j2k->cstr_index->marker, l_cstr_index->marknum * sizeof(opj_marker_info_t) );
else{
opj_free(l_cstr_index->marker);
l_cstr_index->marker = NULL;
}
l_cstr_index->nb_of_tiles = p_j2k->cstr_index->nb_of_tiles;
l_cstr_index->tile_index = (opj_tile_index_t*)opj_calloc(l_cstr_index->nb_of_tiles, sizeof(opj_tile_index_t) );
if (!l_cstr_index->tile_index){
opj_free( l_cstr_index->marker);
opj_free( l_cstr_index);
return NULL;
}
if (!p_j2k->cstr_index->tile_index){
opj_free(l_cstr_index->tile_index);
l_cstr_index->tile_index = NULL;
}
else {
OPJ_UINT32 it_tile = 0;
for (it_tile = 0; it_tile < l_cstr_index->nb_of_tiles; it_tile++ ){
/* Tile Marker*/
l_cstr_index->tile_index[it_tile].marknum = p_j2k->cstr_index->tile_index[it_tile].marknum;
l_cstr_index->tile_index[it_tile].marker =
(opj_marker_info_t*)opj_malloc(l_cstr_index->tile_index[it_tile].marknum*sizeof(opj_marker_info_t));
if (!l_cstr_index->tile_index[it_tile].marker) {
OPJ_UINT32 it_tile_free;
for (it_tile_free=0; it_tile_free < it_tile; it_tile_free++){
opj_free(l_cstr_index->tile_index[it_tile_free].marker);
}
opj_free( l_cstr_index->tile_index);
opj_free( l_cstr_index->marker);
opj_free( l_cstr_index);
return NULL;
}
if (p_j2k->cstr_index->tile_index[it_tile].marker)
memcpy( l_cstr_index->tile_index[it_tile].marker,
p_j2k->cstr_index->tile_index[it_tile].marker,
l_cstr_index->tile_index[it_tile].marknum * sizeof(opj_marker_info_t) );
else{
opj_free(l_cstr_index->tile_index[it_tile].marker);
l_cstr_index->tile_index[it_tile].marker = NULL;
}
/* Tile part index*/
l_cstr_index->tile_index[it_tile].nb_tps = p_j2k->cstr_index->tile_index[it_tile].nb_tps;
l_cstr_index->tile_index[it_tile].tp_index =
(opj_tp_index_t*)opj_malloc(l_cstr_index->tile_index[it_tile].nb_tps*sizeof(opj_tp_index_t));
if(!l_cstr_index->tile_index[it_tile].tp_index){
OPJ_UINT32 it_tile_free;
for (it_tile_free=0; it_tile_free < it_tile; it_tile_free++){
opj_free(l_cstr_index->tile_index[it_tile_free].marker);
opj_free(l_cstr_index->tile_index[it_tile_free].tp_index);
}
opj_free( l_cstr_index->tile_index);
opj_free( l_cstr_index->marker);
opj_free( l_cstr_index);
return NULL;
}
if (p_j2k->cstr_index->tile_index[it_tile].tp_index){
memcpy( l_cstr_index->tile_index[it_tile].tp_index,
p_j2k->cstr_index->tile_index[it_tile].tp_index,
l_cstr_index->tile_index[it_tile].nb_tps * sizeof(opj_tp_index_t) );
}
else{
opj_free(l_cstr_index->tile_index[it_tile].tp_index);
l_cstr_index->tile_index[it_tile].tp_index = NULL;
}
/* Packet index (NOT USED)*/
l_cstr_index->tile_index[it_tile].nb_packet = 0;
l_cstr_index->tile_index[it_tile].packet_index = NULL;
}
}
return l_cstr_index;
}
static OPJ_BOOL opj_j2k_allocate_tile_element_cstr_index(opj_j2k_t *p_j2k)
{
OPJ_UINT32 it_tile=0;
p_j2k->cstr_index->nb_of_tiles = p_j2k->m_cp.tw * p_j2k->m_cp.th;
p_j2k->cstr_index->tile_index = (opj_tile_index_t*)opj_calloc(p_j2k->cstr_index->nb_of_tiles, sizeof(opj_tile_index_t));
if (!p_j2k->cstr_index->tile_index)
return OPJ_FALSE;
for (it_tile=0; it_tile < p_j2k->cstr_index->nb_of_tiles; it_tile++){
p_j2k->cstr_index->tile_index[it_tile].maxmarknum = 100;
p_j2k->cstr_index->tile_index[it_tile].marknum = 0;
p_j2k->cstr_index->tile_index[it_tile].marker = (opj_marker_info_t*)
opj_calloc(p_j2k->cstr_index->tile_index[it_tile].maxmarknum, sizeof(opj_marker_info_t));
if (!p_j2k->cstr_index->tile_index[it_tile].marker)
return OPJ_FALSE;
}
return OPJ_TRUE;
}
static OPJ_BOOL opj_j2k_decode_tiles ( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager)
{
OPJ_BOOL l_go_on = OPJ_TRUE;
OPJ_UINT32 l_current_tile_no;
OPJ_UINT32 l_data_size,l_max_data_size;
OPJ_INT32 l_tile_x0,l_tile_y0,l_tile_x1,l_tile_y1;
OPJ_UINT32 l_nb_comps;
OPJ_BYTE * l_current_data;
OPJ_UINT32 nr_tiles = 0;
l_current_data = (OPJ_BYTE*)opj_malloc(1000);
if (! l_current_data) {
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to decode tiles\n");
return OPJ_FALSE;
}
l_max_data_size = 1000;
for (;;) {
if (! opj_j2k_read_tile_header( p_j2k,
&l_current_tile_no,
&l_data_size,
&l_tile_x0, &l_tile_y0,
&l_tile_x1, &l_tile_y1,
&l_nb_comps,
&l_go_on,
p_stream,
p_manager)) {
opj_free(l_current_data);
return OPJ_FALSE;
}
if (! l_go_on) {
break;
}
if (l_data_size > l_max_data_size) {
OPJ_BYTE *l_new_current_data = (OPJ_BYTE *) opj_realloc(l_current_data, l_data_size);
if (! l_new_current_data) {
opj_free(l_current_data);
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to decode tile %d/%d\n", l_current_tile_no +1, p_j2k->m_cp.th * p_j2k->m_cp.tw);
return OPJ_FALSE;
}
l_current_data = l_new_current_data;
l_max_data_size = l_data_size;
}
if (! opj_j2k_decode_tile(p_j2k,l_current_tile_no,l_current_data,l_data_size,p_stream,p_manager)) {
opj_free(l_current_data);
opj_event_msg(p_manager, EVT_ERROR, "Failed to decode tile %d/%d\n", l_current_tile_no +1, p_j2k->m_cp.th * p_j2k->m_cp.tw);
return OPJ_FALSE;
}
opj_event_msg(p_manager, EVT_INFO, "Tile %d/%d has been decoded.\n", l_current_tile_no +1, p_j2k->m_cp.th * p_j2k->m_cp.tw);
if (! opj_j2k_update_image_data(p_j2k->m_tcd,l_current_data, p_j2k->m_output_image)) {
opj_free(l_current_data);
return OPJ_FALSE;
}
opj_event_msg(p_manager, EVT_INFO, "Image data has been updated with tile %d.\n\n", l_current_tile_no + 1);
if(opj_stream_get_number_byte_left(p_stream) == 0
&& p_j2k->m_specific_param.m_decoder.m_state == J2K_STATE_NEOC)
break;
if(++nr_tiles == p_j2k->m_cp.th * p_j2k->m_cp.tw)
break;
}
opj_free(l_current_data);
return OPJ_TRUE;
}
/**
* Sets up the procedures to be done when decoding data. Developers wanting to extend the library can add their own reading procedures.
*/
static OPJ_BOOL opj_j2k_setup_decoding (opj_j2k_t *p_j2k, opj_event_mgr_t * p_manager)
{
/* preconditions*/
assert(p_j2k != 00);
assert(p_manager != 00);
if (! opj_procedure_list_add_procedure(p_j2k->m_procedure_list,(opj_procedure)opj_j2k_decode_tiles, p_manager)) {
return OPJ_FALSE;
}
/* DEVELOPER CORNER, add your custom procedures */
return OPJ_TRUE;
}
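/* Minimal sketch of a custom procedure as invited by the DEVELOPER CORNER
 * above. The names below are hypothetical and the snippet is kept inside
 * this comment so the file still builds as-is:
 *
 *   static OPJ_BOOL my_post_decode_hook(opj_j2k_t *p_j2k,
 *                                       opj_stream_private_t *p_stream,
 *                                       opj_event_mgr_t *p_manager)
 *   {
 *       opj_event_msg(p_manager, EVT_INFO, "custom hook reached\n");
 *       return OPJ_TRUE;
 *   }
 *
 * registered the same way as opj_j2k_decode_tiles:
 *
 *   opj_procedure_list_add_procedure(p_j2k->m_procedure_list,
 *       (opj_procedure)my_post_decode_hook, p_manager);
 */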
/*
* Read and decode one tile.
*/
static OPJ_BOOL opj_j2k_decode_one_tile ( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager)
{
OPJ_BOOL l_go_on = OPJ_TRUE;
OPJ_UINT32 l_current_tile_no;
OPJ_UINT32 l_tile_no_to_dec;
OPJ_UINT32 l_data_size,l_max_data_size;
OPJ_INT32 l_tile_x0,l_tile_y0,l_tile_x1,l_tile_y1;
OPJ_UINT32 l_nb_comps;
OPJ_BYTE * l_current_data;
l_current_data = (OPJ_BYTE*)opj_malloc(1000);
if (! l_current_data) {
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to decode one tile\n");
return OPJ_FALSE;
}
l_max_data_size = 1000;
/* Allocate and initialize some elements of the codestream index if not already done */
if( !p_j2k->cstr_index->tile_index)
{
if (!opj_j2k_allocate_tile_element_cstr_index(p_j2k)){
opj_free(l_current_data);
return OPJ_FALSE;
}
}
/* Move into the codestream to the first SOT used to decode the desired tile */
l_tile_no_to_dec = (OPJ_UINT32)p_j2k->m_specific_param.m_decoder.m_tile_ind_to_dec;
if (p_j2k->cstr_index->tile_index)
if(p_j2k->cstr_index->tile_index->tp_index)
{
if ( ! p_j2k->cstr_index->tile_index[l_tile_no_to_dec].nb_tps) {
/* the index for this tile has not been built,
* so move to the last SOT read */
if ( !(opj_stream_read_seek(p_stream, p_j2k->m_specific_param.m_decoder.m_last_sot_read_pos+2, p_manager)) ){
opj_event_msg(p_manager, EVT_ERROR, "Problem with seek function\n");
opj_free(l_current_data);
return OPJ_FALSE;
}
}
else{
if ( !(opj_stream_read_seek(p_stream, p_j2k->cstr_index->tile_index[l_tile_no_to_dec].tp_index[0].start_pos+2, p_manager)) ) {
opj_event_msg(p_manager, EVT_ERROR, "Problem with seek function\n");
opj_free(l_current_data);
return OPJ_FALSE;
}
}
/* Special case if we have previously read the EOC marker (if the previously fetched tile is the last one) */
if(p_j2k->m_specific_param.m_decoder.m_state == J2K_STATE_EOC)
p_j2k->m_specific_param.m_decoder.m_state = J2K_STATE_TPHSOT;
}
for (;;) {
if (! opj_j2k_read_tile_header( p_j2k,
&l_current_tile_no,
&l_data_size,
&l_tile_x0, &l_tile_y0,
&l_tile_x1, &l_tile_y1,
&l_nb_comps,
&l_go_on,
p_stream,
p_manager)) {
opj_free(l_current_data);
return OPJ_FALSE;
}
if (! l_go_on) {
break;
}
if (l_data_size > l_max_data_size) {
OPJ_BYTE *l_new_current_data = (OPJ_BYTE *) opj_realloc(l_current_data, l_data_size);
if (! l_new_current_data) {
opj_free(l_current_data);
l_current_data = NULL;
/* TODO: LH: why does the tile numbering policy used in messages differ from
the one used in opj_j2k_decode_tiles()? */
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to decode tile %d/%d\n", l_current_tile_no, (p_j2k->m_cp.th * p_j2k->m_cp.tw) - 1);
return OPJ_FALSE;
}
l_current_data = l_new_current_data;
l_max_data_size = l_data_size;
}
if (! opj_j2k_decode_tile(p_j2k,l_current_tile_no,l_current_data,l_data_size,p_stream,p_manager)) {
opj_free(l_current_data);
return OPJ_FALSE;
}
opj_event_msg(p_manager, EVT_INFO, "Tile %d/%d has been decoded.\n", l_current_tile_no, (p_j2k->m_cp.th * p_j2k->m_cp.tw) - 1);
if (! opj_j2k_update_image_data(p_j2k->m_tcd,l_current_data, p_j2k->m_output_image)) {
opj_free(l_current_data);
return OPJ_FALSE;
}
opj_event_msg(p_manager, EVT_INFO, "Image data has been updated with tile %d.\n\n", l_current_tile_no);
if(l_current_tile_no == l_tile_no_to_dec)
{
/* move into the codestream to the first SOT (FIXME or not move?) */
if (!(opj_stream_read_seek(p_stream, p_j2k->cstr_index->main_head_end + 2, p_manager) ) ) {
opj_event_msg(p_manager, EVT_ERROR, "Problem with seek function\n");
return OPJ_FALSE;
}
break;
}
else {
opj_event_msg(p_manager, EVT_WARNING, "Tile read, decode and updated is not the desired (%d vs %d).\n", l_current_tile_no, l_tile_no_to_dec);
}
}
opj_free(l_current_data);
return OPJ_TRUE;
}
/**
* Sets up the procedures to be done when decoding one tile. Developers wanting to extend the library can add their own reading procedures.
*/
static OPJ_BOOL opj_j2k_setup_decoding_tile (opj_j2k_t *p_j2k, opj_event_mgr_t * p_manager)
{
/* preconditions*/
assert(p_j2k != 00);
assert(p_manager != 00);
if (! opj_procedure_list_add_procedure(p_j2k->m_procedure_list,(opj_procedure)opj_j2k_decode_one_tile, p_manager)) {
return OPJ_FALSE;
}
/* DEVELOPER CORNER, add your custom procedures */
return OPJ_TRUE;
}
OPJ_BOOL opj_j2k_decode(opj_j2k_t * p_j2k,
opj_stream_private_t * p_stream,
opj_image_t * p_image,
opj_event_mgr_t * p_manager)
{
OPJ_UINT32 compno;
if (!p_image)
return OPJ_FALSE;
p_j2k->m_output_image = opj_image_create0();
if (! (p_j2k->m_output_image)) {
return OPJ_FALSE;
}
opj_copy_image_header(p_image, p_j2k->m_output_image);
/* customization of the decoding */
opj_j2k_setup_decoding(p_j2k, p_manager);
/* Decode the codestream */
if (! opj_j2k_exec (p_j2k,p_j2k->m_procedure_list,p_stream,p_manager)) {
opj_image_destroy(p_j2k->m_private_image);
p_j2k->m_private_image = NULL;
return OPJ_FALSE;
}
/* Move data and copy some information from the codec to the output image */
for (compno = 0; compno < p_image->numcomps; compno++) {
p_image->comps[compno].resno_decoded = p_j2k->m_output_image->comps[compno].resno_decoded;
p_image->comps[compno].data = p_j2k->m_output_image->comps[compno].data;
#if 0
char fn[256];
sprintf( fn, "/tmp/%d.raw", compno );
FILE *debug = fopen( fn, "wb" );
fwrite( p_image->comps[compno].data, sizeof(OPJ_INT32), p_image->comps[compno].w * p_image->comps[compno].h, debug );
fclose( debug );
#endif
p_j2k->m_output_image->comps[compno].data = NULL;
}
return OPJ_TRUE;
}
OPJ_BOOL opj_j2k_get_tile( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_image_t* p_image,
opj_event_mgr_t * p_manager,
OPJ_UINT32 tile_index )
{
OPJ_UINT32 compno;
OPJ_UINT32 l_tile_x, l_tile_y;
opj_image_comp_t* l_img_comp;
if (!p_image) {
opj_event_msg(p_manager, EVT_ERROR, "We need an image previously created.\n");
return OPJ_FALSE;
}
if ( /*(tile_index < 0) &&*/ (tile_index >= p_j2k->m_cp.tw * p_j2k->m_cp.th) ){
opj_event_msg(p_manager, EVT_ERROR, "Tile index provided by the user is incorrect %d (max = %d) \n", tile_index, (p_j2k->m_cp.tw * p_j2k->m_cp.th) - 1);
return OPJ_FALSE;
}
/* Compute the dimension of the desired tile*/
l_tile_x = tile_index % p_j2k->m_cp.tw;
l_tile_y = tile_index / p_j2k->m_cp.tw;
p_image->x0 = l_tile_x * p_j2k->m_cp.tdx + p_j2k->m_cp.tx0;
if (p_image->x0 < p_j2k->m_private_image->x0)
p_image->x0 = p_j2k->m_private_image->x0;
p_image->x1 = (l_tile_x + 1) * p_j2k->m_cp.tdx + p_j2k->m_cp.tx0;
if (p_image->x1 > p_j2k->m_private_image->x1)
p_image->x1 = p_j2k->m_private_image->x1;
p_image->y0 = l_tile_y * p_j2k->m_cp.tdy + p_j2k->m_cp.ty0;
if (p_image->y0 < p_j2k->m_private_image->y0)
p_image->y0 = p_j2k->m_private_image->y0;
p_image->y1 = (l_tile_y + 1) * p_j2k->m_cp.tdy + p_j2k->m_cp.ty0;
if (p_image->y1 > p_j2k->m_private_image->y1)
p_image->y1 = p_j2k->m_private_image->y1;
l_img_comp = p_image->comps;
for (compno=0; compno < p_image->numcomps; ++compno)
{
OPJ_INT32 l_comp_x1, l_comp_y1;
l_img_comp->factor = p_j2k->m_private_image->comps[compno].factor;
l_img_comp->x0 = (OPJ_UINT32)opj_int_ceildiv((OPJ_INT32)p_image->x0, (OPJ_INT32)l_img_comp->dx);
l_img_comp->y0 = (OPJ_UINT32)opj_int_ceildiv((OPJ_INT32)p_image->y0, (OPJ_INT32)l_img_comp->dy);
l_comp_x1 = opj_int_ceildiv((OPJ_INT32)p_image->x1, (OPJ_INT32)l_img_comp->dx);
l_comp_y1 = opj_int_ceildiv((OPJ_INT32)p_image->y1, (OPJ_INT32)l_img_comp->dy);
l_img_comp->w = (OPJ_UINT32)(opj_int_ceildivpow2(l_comp_x1, (OPJ_INT32)l_img_comp->factor) - opj_int_ceildivpow2((OPJ_INT32)l_img_comp->x0, (OPJ_INT32)l_img_comp->factor));
l_img_comp->h = (OPJ_UINT32)(opj_int_ceildivpow2(l_comp_y1, (OPJ_INT32)l_img_comp->factor) - opj_int_ceildivpow2((OPJ_INT32)l_img_comp->y0, (OPJ_INT32)l_img_comp->factor));
l_img_comp++;
}
/* Destroy the previous output image*/
if (p_j2k->m_output_image)
opj_image_destroy(p_j2k->m_output_image);
/* Create the output image from the information previously computed */
p_j2k->m_output_image = opj_image_create0();
if (! (p_j2k->m_output_image)) {
return OPJ_FALSE;
}
opj_copy_image_header(p_image, p_j2k->m_output_image);
p_j2k->m_specific_param.m_decoder.m_tile_ind_to_dec = (OPJ_INT32)tile_index;
/* customization of the decoding */
opj_j2k_setup_decoding_tile(p_j2k, p_manager);
/* Decode the codestream */
if (! opj_j2k_exec (p_j2k,p_j2k->m_procedure_list,p_stream,p_manager)) {
opj_image_destroy(p_j2k->m_private_image);
p_j2k->m_private_image = NULL;
return OPJ_FALSE;
}
/* Move data and copy some information from the codec to the output image */
for (compno = 0; compno < p_image->numcomps; compno++) {
p_image->comps[compno].resno_decoded = p_j2k->m_output_image->comps[compno].resno_decoded;
if (p_image->comps[compno].data)
opj_free(p_image->comps[compno].data);
p_image->comps[compno].data = p_j2k->m_output_image->comps[compno].data;
p_j2k->m_output_image->comps[compno].data = NULL;
}
return OPJ_TRUE;
}
OPJ_BOOL opj_j2k_set_decoded_resolution_factor(opj_j2k_t *p_j2k,
OPJ_UINT32 res_factor,
opj_event_mgr_t * p_manager)
{
OPJ_UINT32 it_comp;
p_j2k->m_cp.m_specific_param.m_dec.m_reduce = res_factor;
if (p_j2k->m_private_image) {
if (p_j2k->m_private_image->comps) {
if (p_j2k->m_specific_param.m_decoder.m_default_tcp) {
if (p_j2k->m_specific_param.m_decoder.m_default_tcp->tccps) {
for (it_comp = 0 ; it_comp < p_j2k->m_private_image->numcomps; it_comp++) {
OPJ_UINT32 max_res = p_j2k->m_specific_param.m_decoder.m_default_tcp->tccps[it_comp].numresolutions;
if ( res_factor >= max_res){
opj_event_msg(p_manager, EVT_ERROR, "Resolution factor is greater than the maximum resolution in the component.\n");
return OPJ_FALSE;
}
p_j2k->m_private_image->comps[it_comp].factor = res_factor;
}
return OPJ_TRUE;
}
}
}
}
return OPJ_FALSE;
}
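/* Worked example (editor's illustrative note): with numresolutions = 6 the
 * accepted res_factor range is 0..5. A res_factor of 2 makes each component
 * decode at its dimensions divided by 2^2, i.e. roughly a quarter of the
 * full width and height. */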
OPJ_BOOL opj_j2k_encode(opj_j2k_t * p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager )
{
OPJ_UINT32 i, j;
OPJ_UINT32 l_nb_tiles;
OPJ_UINT32 l_max_tile_size = 0, l_current_tile_size;
OPJ_BYTE * l_current_data = 00;
opj_tcd_t* p_tcd = 00;
/* preconditions */
assert(p_j2k != 00);
assert(p_stream != 00);
assert(p_manager != 00);
p_tcd = p_j2k->m_tcd;
l_nb_tiles = p_j2k->m_cp.th * p_j2k->m_cp.tw;
for (i=0;i<l_nb_tiles;++i) {
if (! opj_j2k_pre_write_tile(p_j2k,i,p_stream,p_manager)) {
if (l_current_data) {
opj_free(l_current_data);
}
return OPJ_FALSE;
}
/* if we only have one tile, then simply set tile component data equal to image component data */
/* otherwise, allocate the data */
for (j=0;j<p_j2k->m_tcd->image->numcomps;++j) {
opj_tcd_tilecomp_t* l_tilec = p_tcd->tcd_image->tiles->comps + j;
if (l_nb_tiles == 1) {
opj_image_comp_t * l_img_comp = p_tcd->image->comps + j;
l_tilec->data = l_img_comp->data;
l_tilec->ownsData = OPJ_FALSE;
} else {
if(! opj_alloc_tile_component_data(l_tilec)) {
opj_event_msg(p_manager, EVT_ERROR, "Error allocating tile component data." );
if (l_current_data) {
opj_free(l_current_data);
}
return OPJ_FALSE;
}
}
}
l_current_tile_size = opj_tcd_get_encoded_tile_size(p_j2k->m_tcd);
if (l_nb_tiles > 1) {
if (l_current_tile_size > l_max_tile_size) {
OPJ_BYTE *l_new_current_data = (OPJ_BYTE *) opj_realloc(l_current_data, l_current_tile_size);
if (! l_new_current_data) {
if (l_current_data) {
opj_free(l_current_data);
}
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to encode all tiles\n");
return OPJ_FALSE;
}
l_current_data = l_new_current_data;
l_max_tile_size = l_current_tile_size;
}
/* copy image data (32 bit) to l_current_data as contiguous, all-component, zero offset buffer */
/* 32 bit components @ 8 bit precision get converted to 8 bit */
/* 32 bit components @ 16 bit precision get converted to 16 bit */
opj_j2k_get_tile_data(p_j2k->m_tcd,l_current_data);
/* now copy this data into the tile component */
if (! opj_tcd_copy_tile_data(p_j2k->m_tcd,l_current_data,l_current_tile_size)) {
opj_event_msg(p_manager, EVT_ERROR, "Size mismatch between tile data and sent data." );
return OPJ_FALSE;
}
}
if (! opj_j2k_post_write_tile (p_j2k,p_stream,p_manager)) {
return OPJ_FALSE;
}
}
if (l_current_data) {
opj_free(l_current_data);
}
return OPJ_TRUE;
}
OPJ_BOOL opj_j2k_end_compress( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager)
{
/* customization of the encoding */
if (! opj_j2k_setup_end_compress(p_j2k, p_manager)) {
return OPJ_FALSE;
}
if (! opj_j2k_exec (p_j2k, p_j2k->m_procedure_list, p_stream, p_manager))
{
return OPJ_FALSE;
}
return OPJ_TRUE;
}
OPJ_BOOL opj_j2k_start_compress(opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_image_t * p_image,
opj_event_mgr_t * p_manager)
{
/* preconditions */
assert(p_j2k != 00);
assert(p_stream != 00);
assert(p_manager != 00);
p_j2k->m_private_image = opj_image_create0();
if (! p_j2k->m_private_image) {
opj_event_msg(p_manager, EVT_ERROR, "Failed to allocate image header." );
return OPJ_FALSE;
}
opj_copy_image_header(p_image, p_j2k->m_private_image);
/* TODO_MSD: Find a better way */
if (p_image->comps) {
OPJ_UINT32 it_comp;
for (it_comp = 0 ; it_comp < p_image->numcomps; it_comp++) {
if (p_image->comps[it_comp].data) {
p_j2k->m_private_image->comps[it_comp].data = p_image->comps[it_comp].data;
p_image->comps[it_comp].data = NULL;
}
}
}
/* customization of the validation */
if (! opj_j2k_setup_encoding_validation (p_j2k, p_manager)) {
return OPJ_FALSE;
}
/* validation of the parameters codec */
if (! opj_j2k_exec(p_j2k,p_j2k->m_validation_list,p_stream,p_manager)) {
return OPJ_FALSE;
}
/* customization of the encoding */
if (! opj_j2k_setup_header_writing(p_j2k, p_manager)) {
return OPJ_FALSE;
}
/* write header */
if (! opj_j2k_exec (p_j2k,p_j2k->m_procedure_list,p_stream,p_manager)) {
return OPJ_FALSE;
}
return OPJ_TRUE;
}
static OPJ_BOOL opj_j2k_pre_write_tile ( opj_j2k_t * p_j2k,
OPJ_UINT32 p_tile_index,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager )
{
(void)p_stream;
if (p_tile_index != p_j2k->m_current_tile_number) {
opj_event_msg(p_manager, EVT_ERROR, "The given tile index does not match." );
return OPJ_FALSE;
}
opj_event_msg(p_manager, EVT_INFO, "tile number %d / %d\n", p_j2k->m_current_tile_number + 1, p_j2k->m_cp.tw * p_j2k->m_cp.th);
p_j2k->m_specific_param.m_encoder.m_current_tile_part_number = 0;
p_j2k->m_tcd->cur_totnum_tp = p_j2k->m_cp.tcps[p_tile_index].m_nb_tile_parts;
p_j2k->m_specific_param.m_encoder.m_current_poc_tile_part_number = 0;
/* initialisation before tile encoding */
if (! opj_tcd_init_encode_tile(p_j2k->m_tcd, p_j2k->m_current_tile_number, p_manager)) {
return OPJ_FALSE;
}
return OPJ_TRUE;
}
static void opj_get_tile_dimensions(opj_image_t * l_image,
opj_tcd_tilecomp_t * l_tilec,
opj_image_comp_t * l_img_comp,
OPJ_UINT32* l_size_comp,
OPJ_UINT32* l_width,
OPJ_UINT32* l_height,
OPJ_UINT32* l_offset_x,
OPJ_UINT32* l_offset_y,
OPJ_UINT32* l_image_width,
OPJ_UINT32* l_stride,
OPJ_UINT32* l_tile_offset) {
OPJ_UINT32 l_remaining;
*l_size_comp = l_img_comp->prec >> 3; /* (/8) */
l_remaining = l_img_comp->prec & 7; /* (%8) */
if (l_remaining) {
*l_size_comp += 1;
}
if (*l_size_comp == 3) {
*l_size_comp = 4;
}
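/* Illustrative note: prec = 12 gives l_size_comp = 2 (two bytes per
 * sample); prec = 17..24 gives 3, promoted to 4 above since 3-byte samples
 * are not handled by the copy loops in the caller. */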
*l_width = (OPJ_UINT32)(l_tilec->x1 - l_tilec->x0);
*l_height = (OPJ_UINT32)(l_tilec->y1 - l_tilec->y0);
*l_offset_x = (OPJ_UINT32)opj_int_ceildiv((OPJ_INT32)l_image->x0, (OPJ_INT32)l_img_comp->dx);
*l_offset_y = (OPJ_UINT32)opj_int_ceildiv((OPJ_INT32)l_image->y0, (OPJ_INT32)l_img_comp->dy);
*l_image_width = (OPJ_UINT32)opj_int_ceildiv((OPJ_INT32)l_image->x1 - (OPJ_INT32)l_image->x0, (OPJ_INT32)l_img_comp->dx);
*l_stride = *l_image_width - *l_width;
*l_tile_offset = ((OPJ_UINT32)l_tilec->x0 - *l_offset_x) + ((OPJ_UINT32)l_tilec->y0 - *l_offset_y) * *l_image_width;
}
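/* Worked example (editor's illustrative note): for an image with x0 = 0,
 * x1 = 64, dx = dy = 1 and a tile component spanning x 32..64, y 0..32:
 * l_width = 32, l_image_width = 64, l_stride = 64 - 32 = 32 and
 * l_tile_offset = (32 - 0) + (0 - 0) * 64 = 32. The source pointer thus
 * starts 32 samples into the component buffer and skips l_stride samples
 * after each copied row. */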
static void opj_j2k_get_tile_data (opj_tcd_t * p_tcd, OPJ_BYTE * p_data)
{
OPJ_UINT32 i,j,k = 0;
for (i=0;i<p_tcd->image->numcomps;++i) {
opj_image_t * l_image = p_tcd->image;
OPJ_INT32 * l_src_ptr;
opj_tcd_tilecomp_t * l_tilec = p_tcd->tcd_image->tiles->comps + i;
opj_image_comp_t * l_img_comp = l_image->comps + i;
OPJ_UINT32 l_size_comp,l_width,l_height,l_offset_x,l_offset_y, l_image_width,l_stride,l_tile_offset;
opj_get_tile_dimensions(l_image,
l_tilec,
l_img_comp,
&l_size_comp,
&l_width,
&l_height,
&l_offset_x,
&l_offset_y,
&l_image_width,
&l_stride,
&l_tile_offset);
l_src_ptr = l_img_comp->data + l_tile_offset;
switch (l_size_comp) {
case 1:
{
OPJ_CHAR * l_dest_ptr = (OPJ_CHAR*) p_data;
if (l_img_comp->sgnd) {
for (j=0;j<l_height;++j) {
for (k=0;k<l_width;++k) {
*(l_dest_ptr) = (OPJ_CHAR) (*l_src_ptr);
++l_dest_ptr;
++l_src_ptr;
}
l_src_ptr += l_stride;
}
}
else {
for (j=0;j<l_height;++j) {
for (k=0;k<l_width;++k) {
*(l_dest_ptr) = (OPJ_CHAR)((*l_src_ptr)&0xff);
++l_dest_ptr;
++l_src_ptr;
}
l_src_ptr += l_stride;
}
}
p_data = (OPJ_BYTE*) l_dest_ptr;
}
break;
case 2:
{
OPJ_INT16 * l_dest_ptr = (OPJ_INT16 *) p_data;
if (l_img_comp->sgnd) {
for (j=0;j<l_height;++j) {
for (k=0;k<l_width;++k) {
*(l_dest_ptr++) = (OPJ_INT16) (*(l_src_ptr++));
}
l_src_ptr += l_stride;
}
}
else {
for (j=0;j<l_height;++j) {
for (k=0;k<l_width;++k) {
*(l_dest_ptr++) = (OPJ_INT16)((*(l_src_ptr++)) & 0xffff);
}
l_src_ptr += l_stride;
}
}
p_data = (OPJ_BYTE*) l_dest_ptr;
}
break;
case 4:
{
OPJ_INT32 * l_dest_ptr = (OPJ_INT32 *) p_data;
for (j=0;j<l_height;++j) {
for (k=0;k<l_width;++k) {
*(l_dest_ptr++) = *(l_src_ptr++);
}
l_src_ptr += l_stride;
}
p_data = (OPJ_BYTE*) l_dest_ptr;
}
break;
}
}
}
static OPJ_BOOL opj_j2k_post_write_tile ( opj_j2k_t * p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager )
{
OPJ_UINT32 l_nb_bytes_written;
OPJ_BYTE * l_current_data = 00;
OPJ_UINT32 l_tile_size = 0;
OPJ_UINT32 l_available_data;
/* preconditions */
assert(p_j2k->m_specific_param.m_encoder.m_encoded_tile_data);
l_tile_size = p_j2k->m_specific_param.m_encoder.m_encoded_tile_size;
l_available_data = l_tile_size;
l_current_data = p_j2k->m_specific_param.m_encoder.m_encoded_tile_data;
l_nb_bytes_written = 0;
if (! opj_j2k_write_first_tile_part(p_j2k,l_current_data,&l_nb_bytes_written,l_available_data,p_stream,p_manager)) {
return OPJ_FALSE;
}
l_current_data += l_nb_bytes_written;
l_available_data -= l_nb_bytes_written;
l_nb_bytes_written = 0;
if (! opj_j2k_write_all_tile_parts(p_j2k,l_current_data,&l_nb_bytes_written,l_available_data,p_stream,p_manager)) {
return OPJ_FALSE;
}
l_available_data -= l_nb_bytes_written;
l_nb_bytes_written = l_tile_size - l_available_data;
if ( opj_stream_write_data( p_stream,
p_j2k->m_specific_param.m_encoder.m_encoded_tile_data,
l_nb_bytes_written,p_manager) != l_nb_bytes_written) {
return OPJ_FALSE;
}
++p_j2k->m_current_tile_number;
return OPJ_TRUE;
}
static OPJ_BOOL opj_j2k_setup_end_compress (opj_j2k_t *p_j2k, opj_event_mgr_t * p_manager)
{
/* preconditions */
assert(p_j2k != 00);
assert(p_manager != 00);
/* DEVELOPER CORNER, insert your custom procedures */
if (! opj_procedure_list_add_procedure(p_j2k->m_procedure_list,(opj_procedure)opj_j2k_write_eoc, p_manager)) {
return OPJ_FALSE;
}
if (OPJ_IS_CINEMA(p_j2k->m_cp.rsiz)) {
if (! opj_procedure_list_add_procedure(p_j2k->m_procedure_list,(opj_procedure)opj_j2k_write_updated_tlm, p_manager)) {
return OPJ_FALSE;
}
}
if (! opj_procedure_list_add_procedure(p_j2k->m_procedure_list,(opj_procedure)opj_j2k_write_epc, p_manager)) {
return OPJ_FALSE;
}
if (! opj_procedure_list_add_procedure(p_j2k->m_procedure_list,(opj_procedure)opj_j2k_end_encoding, p_manager)) {
return OPJ_FALSE;
}
if (! opj_procedure_list_add_procedure(p_j2k->m_procedure_list,(opj_procedure)opj_j2k_destroy_header_memory, p_manager)) {
return OPJ_FALSE;
}
return OPJ_TRUE;
}
static OPJ_BOOL opj_j2k_setup_encoding_validation (opj_j2k_t *p_j2k, opj_event_mgr_t * p_manager)
{
/* preconditions */
assert(p_j2k != 00);
assert(p_manager != 00);
if (! opj_procedure_list_add_procedure(p_j2k->m_validation_list, (opj_procedure)opj_j2k_build_encoder, p_manager)) {
return OPJ_FALSE;
}
if (! opj_procedure_list_add_procedure(p_j2k->m_validation_list, (opj_procedure)opj_j2k_encoding_validation, p_manager)) {
return OPJ_FALSE;
}
/* DEVELOPER CORNER, add your custom validation procedure */
if (! opj_procedure_list_add_procedure(p_j2k->m_validation_list, (opj_procedure)opj_j2k_mct_validation, p_manager)) {
return OPJ_FALSE;
}
return OPJ_TRUE;
}
static OPJ_BOOL opj_j2k_setup_header_writing (opj_j2k_t *p_j2k, opj_event_mgr_t * p_manager)
{
/* preconditions */
assert(p_j2k != 00);
assert(p_manager != 00);
if (! opj_procedure_list_add_procedure(p_j2k->m_procedure_list,(opj_procedure)opj_j2k_init_info, p_manager)) {
return OPJ_FALSE;
}
if (! opj_procedure_list_add_procedure(p_j2k->m_procedure_list,(opj_procedure)opj_j2k_write_soc, p_manager)) {
return OPJ_FALSE;
}
if (! opj_procedure_list_add_procedure(p_j2k->m_procedure_list,(opj_procedure)opj_j2k_write_siz, p_manager)) {
return OPJ_FALSE;
}
if (! opj_procedure_list_add_procedure(p_j2k->m_procedure_list,(opj_procedure)opj_j2k_write_cod, p_manager)) {
return OPJ_FALSE;
}
if (! opj_procedure_list_add_procedure(p_j2k->m_procedure_list,(opj_procedure)opj_j2k_write_qcd, p_manager)) {
return OPJ_FALSE;
}
if (OPJ_IS_CINEMA(p_j2k->m_cp.rsiz)) {
/* No need for COC or QCC, QCD and COD are used
if (! opj_procedure_list_add_procedure(p_j2k->m_procedure_list,(opj_procedure)opj_j2k_write_all_coc, p_manager)) {
return OPJ_FALSE;
}
if (! opj_procedure_list_add_procedure(p_j2k->m_procedure_list,(opj_procedure)opj_j2k_write_all_qcc, p_manager)) {
return OPJ_FALSE;
}
*/
if (! opj_procedure_list_add_procedure(p_j2k->m_procedure_list,(opj_procedure)opj_j2k_write_tlm, p_manager)) {
return OPJ_FALSE;
}
if (p_j2k->m_cp.rsiz == OPJ_PROFILE_CINEMA_4K) {
if (! opj_procedure_list_add_procedure(p_j2k->m_procedure_list,(opj_procedure)opj_j2k_write_poc, p_manager)) {
return OPJ_FALSE;
}
}
}
if (! opj_procedure_list_add_procedure(p_j2k->m_procedure_list,(opj_procedure)opj_j2k_write_regions, p_manager)) {
return OPJ_FALSE;
}
if (p_j2k->m_cp.comment != 00) {
if (! opj_procedure_list_add_procedure(p_j2k->m_procedure_list,(opj_procedure)opj_j2k_write_com, p_manager)) {
return OPJ_FALSE;
}
}
/* DEVELOPER CORNER, insert your custom procedures */
if (p_j2k->m_cp.rsiz & OPJ_EXTENSION_MCT) {
if (! opj_procedure_list_add_procedure(p_j2k->m_procedure_list,(opj_procedure)opj_j2k_write_mct_data_group, p_manager)) {
return OPJ_FALSE;
}
}
/* End of Developer Corner */
if (p_j2k->cstr_index) {
if (! opj_procedure_list_add_procedure(p_j2k->m_procedure_list,(opj_procedure)opj_j2k_get_end_header, p_manager)) {
return OPJ_FALSE;
}
}
if (! opj_procedure_list_add_procedure(p_j2k->m_procedure_list,(opj_procedure)opj_j2k_create_tcd, p_manager)) {
return OPJ_FALSE;
}
if (! opj_procedure_list_add_procedure(p_j2k->m_procedure_list,(opj_procedure)opj_j2k_update_rates, p_manager)) {
return OPJ_FALSE;
}
return OPJ_TRUE;
}
static OPJ_BOOL opj_j2k_write_first_tile_part (opj_j2k_t *p_j2k,
OPJ_BYTE * p_data,
OPJ_UINT32 * p_data_written,
OPJ_UINT32 p_total_data_size,
opj_stream_private_t *p_stream,
struct opj_event_mgr * p_manager )
{
OPJ_UINT32 l_nb_bytes_written = 0;
OPJ_UINT32 l_current_nb_bytes_written;
OPJ_BYTE * l_begin_data = 00;
opj_tcd_t * l_tcd = 00;
opj_cp_t * l_cp = 00;
l_tcd = p_j2k->m_tcd;
l_cp = &(p_j2k->m_cp);
l_tcd->cur_pino = 0;
/*Get number of tile parts*/
p_j2k->m_specific_param.m_encoder.m_current_poc_tile_part_number = 0;
/* INDEX >> */
/* << INDEX */
l_current_nb_bytes_written = 0;
l_begin_data = p_data;
if (! opj_j2k_write_sot(p_j2k,p_data,&l_current_nb_bytes_written,p_stream,p_manager))
{
return OPJ_FALSE;
}
l_nb_bytes_written += l_current_nb_bytes_written;
p_data += l_current_nb_bytes_written;
p_total_data_size -= l_current_nb_bytes_written;
if (!OPJ_IS_CINEMA(l_cp->rsiz)) {
#if 0
for (compno = 1; compno < p_j2k->m_private_image->numcomps; compno++) {
l_current_nb_bytes_written = 0;
opj_j2k_write_coc_in_memory(p_j2k,compno,p_data,&l_current_nb_bytes_written,p_manager);
l_nb_bytes_written += l_current_nb_bytes_written;
p_data += l_current_nb_bytes_written;
p_total_data_size -= l_current_nb_bytes_written;
l_current_nb_bytes_written = 0;
opj_j2k_write_qcc_in_memory(p_j2k,compno,p_data,&l_current_nb_bytes_written,p_manager);
l_nb_bytes_written += l_current_nb_bytes_written;
p_data += l_current_nb_bytes_written;
p_total_data_size -= l_current_nb_bytes_written;
}
#endif
if (l_cp->tcps[p_j2k->m_current_tile_number].numpocs) {
l_current_nb_bytes_written = 0;
opj_j2k_write_poc_in_memory(p_j2k,p_data,&l_current_nb_bytes_written,p_manager);
l_nb_bytes_written += l_current_nb_bytes_written;
p_data += l_current_nb_bytes_written;
p_total_data_size -= l_current_nb_bytes_written;
}
}
l_current_nb_bytes_written = 0;
if (! opj_j2k_write_sod(p_j2k,l_tcd,p_data,&l_current_nb_bytes_written,p_total_data_size,p_stream,p_manager)) {
return OPJ_FALSE;
}
l_nb_bytes_written += l_current_nb_bytes_written;
* p_data_written = l_nb_bytes_written;
/* Writing Psot in SOT marker */
opj_write_bytes(l_begin_data + 6,l_nb_bytes_written,4); /* PSOT */
if (OPJ_IS_CINEMA(l_cp->rsiz)){
opj_j2k_update_tlm(p_j2k,l_nb_bytes_written);
}
return OPJ_TRUE;
}
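/* Illustrative note: per the codestream syntax the SOT marker segment is
 * laid out as SOT (2 bytes), Lsot (2), Isot (2), Psot (4), TPsot (1),
 * TNsot (1), so l_begin_data + 6 above points exactly at the Psot field,
 * which is patched once the total tile-part length (SOT + optional POC +
 * SOD + data) is known. */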
static OPJ_BOOL opj_j2k_write_all_tile_parts( opj_j2k_t *p_j2k,
OPJ_BYTE * p_data,
OPJ_UINT32 * p_data_written,
OPJ_UINT32 p_total_data_size,
opj_stream_private_t *p_stream,
struct opj_event_mgr * p_manager
)
{
OPJ_UINT32 tilepartno=0;
OPJ_UINT32 l_nb_bytes_written = 0;
OPJ_UINT32 l_current_nb_bytes_written;
OPJ_UINT32 l_part_tile_size;
OPJ_UINT32 tot_num_tp;
OPJ_UINT32 pino;
OPJ_BYTE * l_begin_data;
opj_tcp_t *l_tcp = 00;
opj_tcd_t * l_tcd = 00;
opj_cp_t * l_cp = 00;
l_tcd = p_j2k->m_tcd;
l_cp = &(p_j2k->m_cp);
l_tcp = l_cp->tcps + p_j2k->m_current_tile_number;
/*Get number of tile parts*/
tot_num_tp = opj_j2k_get_num_tp(l_cp,0,p_j2k->m_current_tile_number);
/* start writing remaining tile parts */
++p_j2k->m_specific_param.m_encoder.m_current_tile_part_number;
for (tilepartno = 1; tilepartno < tot_num_tp ; ++tilepartno) {
p_j2k->m_specific_param.m_encoder.m_current_poc_tile_part_number = tilepartno;
l_current_nb_bytes_written = 0;
l_part_tile_size = 0;
l_begin_data = p_data;
if (! opj_j2k_write_sot(p_j2k,p_data,&l_current_nb_bytes_written,p_stream,p_manager)) {
return OPJ_FALSE;
}
l_nb_bytes_written += l_current_nb_bytes_written;
p_data += l_current_nb_bytes_written;
p_total_data_size -= l_current_nb_bytes_written;
l_part_tile_size += l_current_nb_bytes_written;
l_current_nb_bytes_written = 0;
if (! opj_j2k_write_sod(p_j2k,l_tcd,p_data,&l_current_nb_bytes_written,p_total_data_size,p_stream,p_manager)) {
return OPJ_FALSE;
}
p_data += l_current_nb_bytes_written;
l_nb_bytes_written += l_current_nb_bytes_written;
p_total_data_size -= l_current_nb_bytes_written;
l_part_tile_size += l_current_nb_bytes_written;
/* Writing Psot in SOT marker */
opj_write_bytes(l_begin_data + 6,l_part_tile_size,4); /* PSOT */
if (OPJ_IS_CINEMA(l_cp->rsiz)) {
opj_j2k_update_tlm(p_j2k,l_part_tile_size);
}
++p_j2k->m_specific_param.m_encoder.m_current_tile_part_number;
}
for (pino = 1; pino <= l_tcp->numpocs; ++pino) {
l_tcd->cur_pino = pino;
/*Get number of tile parts*/
tot_num_tp = opj_j2k_get_num_tp(l_cp,pino,p_j2k->m_current_tile_number);
for (tilepartno = 0; tilepartno < tot_num_tp ; ++tilepartno) {
p_j2k->m_specific_param.m_encoder.m_current_poc_tile_part_number = tilepartno;
l_current_nb_bytes_written = 0;
l_part_tile_size = 0;
l_begin_data = p_data;
if (! opj_j2k_write_sot(p_j2k,p_data,&l_current_nb_bytes_written,p_stream,p_manager)) {
return OPJ_FALSE;
}
l_nb_bytes_written += l_current_nb_bytes_written;
p_data += l_current_nb_bytes_written;
p_total_data_size -= l_current_nb_bytes_written;
l_part_tile_size += l_current_nb_bytes_written;
l_current_nb_bytes_written = 0;
if (! opj_j2k_write_sod(p_j2k,l_tcd,p_data,&l_current_nb_bytes_written,p_total_data_size,p_stream,p_manager)) {
return OPJ_FALSE;
}
l_nb_bytes_written += l_current_nb_bytes_written;
p_data += l_current_nb_bytes_written;
p_total_data_size -= l_current_nb_bytes_written;
l_part_tile_size += l_current_nb_bytes_written;
/* Writing Psot in SOT marker */
opj_write_bytes(l_begin_data + 6,l_part_tile_size,4); /* PSOT */
if (OPJ_IS_CINEMA(l_cp->rsiz)) {
opj_j2k_update_tlm(p_j2k,l_part_tile_size);
}
++p_j2k->m_specific_param.m_encoder.m_current_tile_part_number;
}
}
*p_data_written = l_nb_bytes_written;
return OPJ_TRUE;
}
static OPJ_BOOL opj_j2k_write_updated_tlm( opj_j2k_t *p_j2k,
struct opj_stream_private *p_stream,
struct opj_event_mgr * p_manager )
{
OPJ_UINT32 l_tlm_size;
OPJ_OFF_T l_tlm_position, l_current_position;
/* preconditions */
assert(p_j2k != 00);
assert(p_manager != 00);
assert(p_stream != 00);
l_tlm_size = 5 * p_j2k->m_specific_param.m_encoder.m_total_tile_parts;
l_tlm_position = 6 + p_j2k->m_specific_param.m_encoder.m_tlm_start;
l_current_position = opj_stream_tell(p_stream);
if (! opj_stream_seek(p_stream,l_tlm_position,p_manager)) {
return OPJ_FALSE;
}
if (opj_stream_write_data(p_stream,p_j2k->m_specific_param.m_encoder.m_tlm_sot_offsets_buffer,l_tlm_size,p_manager) != l_tlm_size) {
return OPJ_FALSE;
}
if (! opj_stream_seek(p_stream,l_current_position,p_manager)) {
return OPJ_FALSE;
}
return OPJ_TRUE;
}
static OPJ_BOOL opj_j2k_end_encoding( opj_j2k_t *p_j2k,
struct opj_stream_private *p_stream,
struct opj_event_mgr * p_manager )
{
/* preconditions */
assert(p_j2k != 00);
assert(p_manager != 00);
assert(p_stream != 00);
opj_tcd_destroy(p_j2k->m_tcd);
p_j2k->m_tcd = 00;
if (p_j2k->m_specific_param.m_encoder.m_tlm_sot_offsets_buffer) {
opj_free(p_j2k->m_specific_param.m_encoder.m_tlm_sot_offsets_buffer);
p_j2k->m_specific_param.m_encoder.m_tlm_sot_offsets_buffer = 0;
p_j2k->m_specific_param.m_encoder.m_tlm_sot_offsets_current = 0;
}
if (p_j2k->m_specific_param.m_encoder.m_encoded_tile_data) {
opj_free(p_j2k->m_specific_param.m_encoder.m_encoded_tile_data);
p_j2k->m_specific_param.m_encoder.m_encoded_tile_data = 0;
}
p_j2k->m_specific_param.m_encoder.m_encoded_tile_size = 0;
return OPJ_TRUE;
}
/**
* Destroys the memory associated with the writing of headers.
*/
static OPJ_BOOL opj_j2k_destroy_header_memory ( opj_j2k_t * p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager
)
{
/* preconditions */
assert(p_j2k != 00);
assert(p_stream != 00);
assert(p_manager != 00);
if (p_j2k->m_specific_param.m_encoder.m_header_tile_data) {
opj_free(p_j2k->m_specific_param.m_encoder.m_header_tile_data);
p_j2k->m_specific_param.m_encoder.m_header_tile_data = 0;
}
p_j2k->m_specific_param.m_encoder.m_header_tile_data_size = 0;
return OPJ_TRUE;
}
static OPJ_BOOL opj_j2k_init_info( opj_j2k_t *p_j2k,
struct opj_stream_private *p_stream,
struct opj_event_mgr * p_manager )
{
opj_codestream_info_t * l_cstr_info = 00;
/* preconditions */
assert(p_j2k != 00);
assert(p_manager != 00);
assert(p_stream != 00);
(void)l_cstr_info;
/* TODO mergeV2: check this part which uses cstr_info */
/*l_cstr_info = p_j2k->cstr_info;
if (l_cstr_info) {
OPJ_UINT32 compno;
l_cstr_info->tile = (opj_tile_info_t *) opj_malloc(p_j2k->m_cp.tw * p_j2k->m_cp.th * sizeof(opj_tile_info_t));
l_cstr_info->image_w = p_j2k->m_image->x1 - p_j2k->m_image->x0;
l_cstr_info->image_h = p_j2k->m_image->y1 - p_j2k->m_image->y0;
l_cstr_info->prog = (&p_j2k->m_cp.tcps[0])->prg;
l_cstr_info->tw = p_j2k->m_cp.tw;
l_cstr_info->th = p_j2k->m_cp.th;
l_cstr_info->tile_x = p_j2k->m_cp.tdx;*/ /* new version parser */
/*l_cstr_info->tile_y = p_j2k->m_cp.tdy;*/ /* new version parser */
/*l_cstr_info->tile_Ox = p_j2k->m_cp.tx0;*/ /* new version parser */
/*l_cstr_info->tile_Oy = p_j2k->m_cp.ty0;*/ /* new version parser */
/*l_cstr_info->numcomps = p_j2k->m_image->numcomps;
l_cstr_info->numlayers = (&p_j2k->m_cp.tcps[0])->numlayers;
l_cstr_info->numdecompos = (OPJ_INT32*) opj_malloc(p_j2k->m_image->numcomps * sizeof(OPJ_INT32));
for (compno=0; compno < p_j2k->m_image->numcomps; compno++) {
l_cstr_info->numdecompos[compno] = (&p_j2k->m_cp.tcps[0])->tccps->numresolutions - 1;
}
l_cstr_info->D_max = 0.0; */ /* ADD Marcela */
/*l_cstr_info->main_head_start = opj_stream_tell(p_stream);*/ /* position of SOC */
/*l_cstr_info->maxmarknum = 100;
l_cstr_info->marker = (opj_marker_info_t *) opj_malloc(l_cstr_info->maxmarknum * sizeof(opj_marker_info_t));
l_cstr_info->marknum = 0;
}*/
return opj_j2k_calculate_tp(p_j2k,&(p_j2k->m_cp),&p_j2k->m_specific_param.m_encoder.m_total_tile_parts,p_j2k->m_private_image,p_manager);
}
/**
* Creates a tile-coder/decoder.
*
* @param p_j2k J2K codec.
* @param p_stream the stream to write data to.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_create_tcd( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager
)
{
/* preconditions */
assert(p_j2k != 00);
assert(p_manager != 00);
assert(p_stream != 00);
p_j2k->m_tcd = opj_tcd_create(OPJ_FALSE);
if (! p_j2k->m_tcd) {
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to create Tile Coder\n");
return OPJ_FALSE;
}
if (!opj_tcd_init(p_j2k->m_tcd,p_j2k->m_private_image,&p_j2k->m_cp)) {
opj_tcd_destroy(p_j2k->m_tcd);
p_j2k->m_tcd = 00;
return OPJ_FALSE;
}
return OPJ_TRUE;
}
OPJ_BOOL opj_j2k_write_tile (opj_j2k_t * p_j2k,
OPJ_UINT32 p_tile_index,
OPJ_BYTE * p_data,
OPJ_UINT32 p_data_size,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager )
{
if (! opj_j2k_pre_write_tile(p_j2k,p_tile_index,p_stream,p_manager)) {
opj_event_msg(p_manager, EVT_ERROR, "Error while opj_j2k_pre_write_tile with tile index = %d\n", p_tile_index);
return OPJ_FALSE;
}
else {
OPJ_UINT32 j;
/* Allocate data */
for (j=0;j<p_j2k->m_tcd->image->numcomps;++j) {
opj_tcd_tilecomp_t* l_tilec = p_j2k->m_tcd->tcd_image->tiles->comps + j;
if(! opj_alloc_tile_component_data(l_tilec)) {
opj_event_msg(p_manager, EVT_ERROR, "Error allocating tile component data." );
return OPJ_FALSE;
}
}
/* now copy data into the tile component */
if (! opj_tcd_copy_tile_data(p_j2k->m_tcd,p_data,p_data_size)) {
opj_event_msg(p_manager, EVT_ERROR, "Size mismatch between tile data and sent data." );
return OPJ_FALSE;
}
if (! opj_j2k_post_write_tile(p_j2k,p_stream,p_manager)) {
opj_event_msg(p_manager, EVT_ERROR, "Error while opj_j2k_post_write_tile with tile index = %d\n", p_tile_index);
return OPJ_FALSE;
}
}
return OPJ_TRUE;
}
| ./CrossVul/dataset_final_sorted/CWE-416/c/bad_1830_0 |
crossvul-cpp_data_bad_1390_1 | /* libcomps - C alternative to yum.comps library
* Copyright (C) 2013 Jindrich Luza
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
* USA
*/
#include "comps_objmradix.h"
#include "comps_set.h"
#include <stdio.h>
void comps_objmrtree_data_destroy(COMPS_ObjMRTreeData * rtd) {
free(rtd->key);
COMPS_OBJECT_DESTROY(rtd->data);
comps_hslist_destroy(&rtd->subnodes);
free(rtd);
}
inline void comps_objmrtree_data_destroy_v(void * rtd) {
comps_objmrtree_data_destroy((COMPS_ObjMRTreeData*)rtd);
}
static COMPS_ObjMRTreeData * __comps_objmrtree_data_create(char * key,
size_t keylen,
COMPS_Object *data) {
COMPS_ObjMRTreeData * rtd;
if ((rtd = malloc(sizeof(*rtd))) == NULL)
return NULL;
if ((rtd->key = malloc(sizeof(char) * (keylen+1))) == NULL) {
free(rtd);
return NULL;
}
memcpy(rtd->key, key, sizeof(char)*keylen);
rtd->key[keylen] = '\0';
rtd->is_leaf = 1;
rtd->data = COMPS_OBJECT_CREATE(COMPS_ObjList, NULL);
if (data)
comps_objlist_append_x(rtd->data, data);
rtd->subnodes = comps_hslist_create();
comps_hslist_init(rtd->subnodes, NULL,
NULL,
&comps_objmrtree_data_destroy_v);
return rtd;
}
COMPS_ObjMRTreeData * comps_objmrtree_data_create(char *key, COMPS_Object *data){
COMPS_ObjMRTreeData * rtd;
rtd = __comps_objmrtree_data_create(key, strlen(key), data);
return rtd;
}
COMPS_ObjMRTreeData * comps_objmrtree_data_create_n(char * key, unsigned keylen,
void * data) {
COMPS_ObjMRTreeData * rtd;
rtd = __comps_objmrtree_data_create(key, keylen, data);
return rtd;
}
static void comps_objmrtree_create(COMPS_ObjMRTree *rtree, COMPS_Object **args){
(void)args;
rtree->subnodes = comps_hslist_create();
if (rtree->subnodes == NULL) {
COMPS_OBJECT_DESTROY(rtree);
return;
}
comps_hslist_init(rtree->subnodes, NULL, NULL, &comps_objmrtree_data_destroy_v);
rtree->len = 0;
}
void comps_objmrtree_create_u(COMPS_Object * obj, COMPS_Object **args) {
(void)args;
comps_objmrtree_create((COMPS_ObjMRTree*)obj, NULL);
}
static void comps_objmrtree_destroy(COMPS_ObjMRTree * rt) {
comps_hslist_destroy(&(rt->subnodes));
}
void comps_objmrtree_destroy_u(COMPS_Object *obj) {
comps_objmrtree_destroy((COMPS_ObjMRTree*)obj);
}
void comps_objmrtree_values_walk(COMPS_ObjMRTree * rt, void* udata,
void (*walk_f)(void*, void*)) {
COMPS_HSList *tmplist, *tmp_subnodes;
COMPS_HSListItem *it, *it2;
tmplist = comps_hslist_create();
comps_hslist_init(tmplist, NULL, NULL, NULL);
comps_hslist_append(tmplist, rt->subnodes, 0);
while (tmplist->first != NULL) {
it = tmplist->first;
comps_hslist_remove(tmplist, tmplist->first);
tmp_subnodes = (COMPS_HSList*)it->data;
free(it);
for (it = tmp_subnodes->first; it != NULL; it=it->next) {
if (((COMPS_ObjMRTreeData*)it->data)->subnodes->first) {
comps_hslist_append(tmplist,
((COMPS_ObjMRTreeData*)it->data)->subnodes, 0);
}
for (it2 = (COMPS_HSListItem*)((COMPS_ObjMRTreeData*)it->data)->data->first;
it2 != NULL; it2 = it2->next) {
walk_f(udata, it2->data);
}
}
}
comps_hslist_destroy(&tmplist);
}
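/* Minimal usage sketch (hypothetical names, illustrative only): the walk
 * callback receives the user data pointer and each stored value in turn,
 * matching the walk_f(udata, it2->data) call above.
 *
 *   static void count_values(void *udata, void *value) {
 *       (void)value;
 *       (*(size_t*)udata)++;
 *   }
 *
 *   size_t n = 0;
 *   comps_objmrtree_values_walk(tree, &n, &count_values);
 */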
void comps_objmrtree_copy(COMPS_ObjMRTree *ret, COMPS_ObjMRTree *rt){
COMPS_HSList * to_clone, *tmplist, *new_subnodes;
COMPS_HSListItem *it, *it2;
COMPS_ObjMRTreeData *rtdata;
COMPS_ObjList *new_data_list;
to_clone = comps_hslist_create();
comps_hslist_init(to_clone, NULL, NULL, NULL);
for (it = rt->subnodes->first; it != NULL; it = it->next) {
rtdata = comps_objmrtree_data_create(
((COMPS_ObjMRTreeData*)it->data)->key,
NULL);
new_data_list = (COMPS_ObjList*)
COMPS_OBJECT_COPY(((COMPS_ObjMRTreeData*)it->data)->data);
        COMPS_OBJECT_DESTROY(rtdata->data);
comps_hslist_destroy(&rtdata->subnodes);
rtdata->subnodes = ((COMPS_ObjMRTreeData*)it->data)->subnodes;
rtdata->data = new_data_list;
comps_hslist_append(ret->subnodes, rtdata, 0);
comps_hslist_append(to_clone, rtdata, 0);
}
while (to_clone->first) {
it2 = to_clone->first;
tmplist = ((COMPS_ObjMRTreeData*)it2->data)->subnodes;
comps_hslist_remove(to_clone, to_clone->first);
new_subnodes = comps_hslist_create();
comps_hslist_init(new_subnodes, NULL, NULL, &comps_objmrtree_data_destroy_v);
for (it = tmplist->first; it != NULL; it = it->next) {
rtdata = comps_objmrtree_data_create(
((COMPS_ObjMRTreeData*)it->data)->key,
NULL);
new_data_list = (COMPS_ObjList*)
COMPS_OBJECT_COPY(((COMPS_ObjMRTreeData*)it->data)->data);
comps_hslist_destroy(&rtdata->subnodes);
COMPS_OBJECT_DESTROY(rtdata->data);
rtdata->subnodes = ((COMPS_ObjMRTreeData*)it->data)->subnodes;
rtdata->data = new_data_list;
comps_hslist_append(new_subnodes, rtdata, 0);
comps_hslist_append(to_clone, rtdata, 0);
}
((COMPS_ObjMRTreeData*)it2->data)->subnodes = new_subnodes;
free(it2);
}
ret->len = rt->len;
comps_hslist_destroy(&to_clone);
}
COMPS_COPY_u(objmrtree, COMPS_ObjMRTree) /*comps_utils.h macro*/
void comps_objmrtree_copy_shallow(COMPS_ObjMRTree *ret, COMPS_ObjMRTree *rt){
COMPS_HSList * to_clone, *tmplist, *new_subnodes;
COMPS_HSListItem *it, *it2;
COMPS_ObjMRTreeData *rtdata;
COMPS_ObjList *new_data_list;
to_clone = comps_hslist_create();
comps_hslist_init(to_clone, NULL, NULL, NULL);
for (it = rt->subnodes->first; it != NULL; it = it->next) {
rtdata = comps_objmrtree_data_create(
((COMPS_ObjMRTreeData*)it->data)->key,
NULL);
new_data_list = (COMPS_ObjList*)
COMPS_OBJECT_COPY(((COMPS_ObjMRTreeData*)it->data)->data);
        COMPS_OBJECT_DESTROY(rtdata->data);
comps_hslist_destroy(&rtdata->subnodes);
rtdata->subnodes = ((COMPS_ObjMRTreeData*)it->data)->subnodes;
rtdata->data = new_data_list;
comps_hslist_append(ret->subnodes, rtdata, 0);
comps_hslist_append(to_clone, rtdata, 0);
}
while (to_clone->first) {
it2 = to_clone->first;
tmplist = ((COMPS_ObjMRTreeData*)it2->data)->subnodes;
comps_hslist_remove(to_clone, to_clone->first);
new_subnodes = comps_hslist_create();
comps_hslist_init(new_subnodes, NULL, NULL, &comps_objmrtree_data_destroy_v);
for (it = tmplist->first; it != NULL; it = it->next) {
rtdata = comps_objmrtree_data_create(
((COMPS_ObjMRTreeData*)it->data)->key,
NULL);
            /* shallow copy shares the list; take a reference so both
               trees can be destroyed independently */
            new_data_list = (COMPS_ObjList*)comps_object_incref(
                        (COMPS_Object*)((COMPS_ObjMRTreeData*)it->data)->data);
comps_hslist_destroy(&rtdata->subnodes);
COMPS_OBJECT_DESTROY(rtdata->data);
rtdata->subnodes = ((COMPS_ObjMRTreeData*)it->data)->subnodes;
rtdata->data = new_data_list;
comps_hslist_append(new_subnodes, rtdata, 0);
comps_hslist_append(to_clone, rtdata, 0);
}
((COMPS_ObjMRTreeData*)it2->data)->subnodes = new_subnodes;
free(it2);
}
ret->len = rt->len;
comps_hslist_destroy(&to_clone);
}
COMPS_ObjMRTree * comps_objmrtree_clone(COMPS_ObjMRTree * rt) {
COMPS_HSList * to_clone, *tmplist, *new_subnodes;
COMPS_ObjMRTree * ret;
COMPS_HSListItem *it, *it2;
COMPS_ObjMRTreeData *rtdata;
COMPS_ObjList *new_data_list;
to_clone = comps_hslist_create();
comps_hslist_init(to_clone, NULL, NULL, NULL);
ret = COMPS_OBJECT_CREATE(COMPS_ObjMRTree, NULL);
for (it = rt->subnodes->first; it != NULL; it = it->next) {
rtdata = comps_objmrtree_data_create(
((COMPS_ObjMRTreeData*)it->data)->key,
NULL);
new_data_list = (COMPS_ObjList*)
COMPS_OBJECT_COPY(((COMPS_ObjMRTreeData*)it->data)->data);
        COMPS_OBJECT_DESTROY(rtdata->data);
comps_hslist_destroy(&rtdata->subnodes);
rtdata->subnodes = ((COMPS_ObjMRTreeData*)it->data)->subnodes;
rtdata->data = new_data_list;
comps_hslist_append(ret->subnodes, rtdata, 0);
comps_hslist_append(to_clone, rtdata, 0);
}
while (to_clone->first) {
it2 = to_clone->first;
tmplist = ((COMPS_ObjMRTreeData*)it2->data)->subnodes;
comps_hslist_remove(to_clone, to_clone->first);
new_subnodes = comps_hslist_create();
comps_hslist_init(new_subnodes, NULL, NULL, &comps_objmrtree_data_destroy_v);
for (it = tmplist->first; it != NULL; it = it->next) {
rtdata = comps_objmrtree_data_create(
((COMPS_ObjMRTreeData*)it->data)->key,
NULL);
new_data_list = (COMPS_ObjList*)
COMPS_OBJECT_COPY(((COMPS_ObjMRTreeData*)it->data)->data);
comps_hslist_destroy(&rtdata->subnodes);
COMPS_OBJECT_DESTROY(rtdata->data);
rtdata->subnodes = ((COMPS_ObjMRTreeData*)it->data)->subnodes;
rtdata->data = new_data_list;
comps_hslist_append(new_subnodes, rtdata, 0);
comps_hslist_append(to_clone, rtdata, 0);
}
((COMPS_ObjMRTreeData*)it2->data)->subnodes = new_subnodes;
free(it2);
}
ret->len = rt->len;
comps_hslist_destroy(&to_clone);
return ret;
}
void comps_objmrtree_unite(COMPS_ObjMRTree *rt1, COMPS_ObjMRTree *rt2) {
COMPS_HSList *tmplist, *tmp_subnodes;
COMPS_HSListItem *it;
COMPS_ObjListIt *it2;
struct Pair {
COMPS_HSList * subnodes;
char * key;
char added;
} *pair, *parent_pair;
pair = malloc(sizeof(struct Pair));
pair->subnodes = rt2->subnodes;
pair->key = NULL;
tmplist = comps_hslist_create();
comps_hslist_init(tmplist, NULL, NULL, &free);
comps_hslist_append(tmplist, pair, 0);
while (tmplist->first != NULL) {
it = tmplist->first;
comps_hslist_remove(tmplist, tmplist->first);
tmp_subnodes = ((struct Pair*)it->data)->subnodes;
parent_pair = (struct Pair*) it->data;
free(it);
        /* flag the pair being processed; 'pair' itself may point at
           memory freed in a previous iteration of the inner loop */
        parent_pair->added = 0;
for (it = tmp_subnodes->first; it != NULL; it=it->next) {
pair = malloc(sizeof(struct Pair));
pair->subnodes = ((COMPS_ObjMRTreeData*)it->data)->subnodes;
if (parent_pair->key != NULL) {
pair->key =
malloc(sizeof(char)
* (strlen(((COMPS_ObjMRTreeData*)it->data)->key)
+ strlen(parent_pair->key) + 1));
memcpy(pair->key, parent_pair->key,
sizeof(char) * strlen(parent_pair->key));
memcpy(pair->key+strlen(parent_pair->key),
((COMPS_ObjMRTreeData*)it->data)->key,
sizeof(char)*(strlen(((COMPS_ObjMRTreeData*)it->data)->key)+1));
} else {
pair->key = malloc(sizeof(char)*
(strlen(((COMPS_ObjMRTreeData*)it->data)->key) +
1));
memcpy(pair->key, ((COMPS_ObjMRTreeData*)it->data)->key,
sizeof(char)*(strlen(((COMPS_ObjMRTreeData*)it->data)->key)+1));
}
/* current node has data */
if (((COMPS_ObjMRTreeData*)it->data)->data->first != NULL) {
for (it2 = ((COMPS_ObjMRTreeData*)it->data)->data->first;
it2 != NULL; it2 = it2->next) {
comps_objmrtree_set(rt1, pair->key, it2->comps_obj);
}
if (((COMPS_ObjMRTreeData*)it->data)->subnodes->first) {
comps_hslist_append(tmplist, pair, 0);
} else {
free(pair->key);
free(pair);
}
            /* current node has no data */
} else {
if (((COMPS_ObjMRTreeData*)it->data)->subnodes->first) {
comps_hslist_append(tmplist, pair, 0);
} else {
free(pair->key);
free(pair);
}
}
}
free(parent_pair->key);
free(parent_pair);
}
comps_hslist_destroy(&tmplist);
}
void comps_objmrtree_set_x(COMPS_ObjMRTree *rt, char *key, COMPS_Object *data) {
__comps_objmrtree_set(rt, key, strlen(key), data);
}
void comps_objmrtree_set(COMPS_ObjMRTree *rt, char *key, COMPS_Object *data) {
__comps_objmrtree_set(rt, key, strlen(key), comps_object_incref(data));
}
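/* Note on ownership: comps_objmrtree_set takes a new reference on 'data'
   (the caller keeps its own reference), while comps_objmrtree_set_x
   stores 'data' as-is and therefore takes ownership of the caller's
   reference. */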
void __comps_objmrtree_set(COMPS_ObjMRTree *rt, char *key,
size_t len, COMPS_Object *ndata) {
    COMPS_HSListItem *it;        /* was 'static'; static locals here would
                                    make the tree non-reentrant */
    COMPS_HSList *subnodes;
    COMPS_ObjMRTreeData *rtd;
    COMPS_ObjMRTreeData *rtdata; /* was 'static' as well */
    size_t _len, offset = 0;
    unsigned x, found = 0;
    char ended;
if (rt->subnodes == NULL)
return;
subnodes = rt->subnodes;
while (offset != len)
{
found = 0;
for (it = subnodes->first; it != NULL; it = it->next) {
if (((COMPS_ObjMRTreeData*)it->data)->key[0] == key[offset]) {
found = 1;
break;
}
}
if (!found) { // not found in subnodes; create new subnode
rtd = comps_objmrtree_data_create(key+offset, ndata);
comps_hslist_append(subnodes, rtd, 0);
rt->len++;
return;
} else {
rtdata = (COMPS_ObjMRTreeData*)it->data;
ended = 0;
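            /* walk the common prefix; 'ended' encodes how the keys
               relate: 1 = node key exhausted first, 2 = searched key
               exhausted first, 3 = both exhausted (keys are equal),
               0 = the keys diverge before either ends */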
for (x=1; ;x++) {
if (rtdata->key[x] == 0) ended += 1;
if (x == len - offset) ended += 2;
if (ended != 0) break;
if (key[offset+x] != rtdata->key[x]) break;
}
            if (ended == 3) { //keys are equal; append new data
comps_objlist_append_x(rtdata->data, ndata);
rt->len++;
return;
            } else if (ended == 2) { //searched key ends first; insert it and reparent the old node
comps_hslist_remove(subnodes, it);
it->next = NULL;
rtd = comps_objmrtree_data_create(key+offset, ndata);
comps_hslist_append(subnodes, rtd, 0);
((COMPS_ObjMRTreeData*)subnodes->last->data)->subnodes->last = it;
((COMPS_ObjMRTreeData*)subnodes->last->data)->subnodes->first = it;
_len = strlen(key + offset);
memmove(rtdata->key,rtdata->key + _len,
strlen(rtdata->key) - _len);
rtdata->key[strlen(rtdata->key) - _len] = 0;
rtdata->key = realloc(rtdata->key,
sizeof(char)* (strlen(rtdata->key)+1));
rt->len++;
return;
            } else if (ended == 1) { //node key ends first; descend into its subnodes
subnodes = rtdata->subnodes;
offset += x;
} else { /* keys differ */
COMPS_ObjList *tmpdata = rtdata->data;
COMPS_HSList *tmphslist = rtdata->subnodes;
rtdata->subnodes = comps_hslist_create();
comps_hslist_init(rtdata->subnodes, NULL, NULL,
&comps_objmrtree_data_destroy_v);
int cmpret = strcmp(key+offset+x, rtdata->key+x);
rtdata->data = COMPS_OBJECT_CREATE(COMPS_ObjList, NULL);
if (cmpret > 0) {
rtd = comps_objmrtree_data_create(rtdata->key+x,
(COMPS_Object*)tmpdata);
comps_hslist_destroy(&rtd->subnodes);
rtd->subnodes = tmphslist;
comps_hslist_append(rtdata->subnodes, rtd, 0);
rtd = comps_objmrtree_data_create(key+offset+x,
(COMPS_Object*)ndata);
comps_hslist_append(rtdata->subnodes, rtd, 0);
} else {
rtd = comps_objmrtree_data_create(key+offset+x,
(COMPS_Object*)ndata);
comps_hslist_append(rtdata->subnodes, rtd, 0);
rtd = comps_objmrtree_data_create(rtdata->key+x,
(COMPS_Object*)tmpdata);
comps_hslist_destroy(&rtd->subnodes);
rtd->subnodes = tmphslist;
comps_hslist_append(rtdata->subnodes, rtd, 0);
}
rtdata->key = realloc(rtdata->key, sizeof(char)*(x+1));
rtdata->key[x] = 0;
rt->len++;
return;
}
}
}
}
void comps_objmrtree_set_n(COMPS_ObjMRTree *rt, char *key,
size_t len, void *ndata) {
__comps_objmrtree_set(rt, key, len, ndata);
}
COMPS_ObjList * comps_objmrtree_get(COMPS_ObjMRTree * rt, const char * key) {
COMPS_HSList * subnodes;
COMPS_HSListItem * it = NULL;
COMPS_ObjMRTreeData * rtdata;
unsigned int offset, len, x;
char found, ended;
len = strlen(key);
offset = 0;
subnodes = rt->subnodes;
while (offset != len) {
found = 0;
for (it = subnodes->first; it != NULL; it=it->next) {
if (((COMPS_ObjMRTreeData*)it->data)->key[0] == key[offset]) {
found = 1;
break;
}
}
if (!found)
return NULL;
rtdata = (COMPS_ObjMRTreeData*)it->data;
for (x=1; ;x++) {
ended=0;
if (rtdata->key[x] == 0) ended += 1;
if (x == len - offset) ended += 2;
if (ended != 0) break;
if (key[offset+x] != rtdata->key[x]) break;
}
if (ended == 3) return (COMPS_ObjList*)
comps_object_incref((COMPS_Object*)rtdata->data);
else if (ended == 1) offset+=x;
else return NULL;
subnodes = ((COMPS_ObjMRTreeData*)it->data)->subnodes;
}
if (it)
return ((COMPS_ObjMRTreeData*)it->data)->data;
else return NULL;
}
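/* A minimal usage sketch (illustrative only; it assumes the usual
   libcomps helpers COMPS_OBJECT_CREATE/COMPS_OBJECT_DESTROY and
   comps_str). The multi-radix tree keeps every object stored under a
   key, and _get returns a new reference that the caller must release. */
#if 0
COMPS_ObjMRTree *t = COMPS_OBJECT_CREATE(COMPS_ObjMRTree, NULL);
comps_objmrtree_set_x(t, (char*)"key", (COMPS_Object*)comps_str("first"));
comps_objmrtree_set_x(t, (char*)"key", (COMPS_Object*)comps_str("second"));
COMPS_ObjList *vals = comps_objmrtree_get(t, "key");  /* vals->len == 2 */
COMPS_OBJECT_DESTROY(vals);                /* drop the returned reference */
COMPS_OBJECT_DESTROY(t);
#endif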
void comps_objmrtree_unset(COMPS_ObjMRTree * rt, const char * key) {
COMPS_HSList * subnodes;
COMPS_HSListItem * it;
COMPS_ObjMRTreeData * rtdata;
unsigned int offset, len, x;
char found, ended;
COMPS_HSList * path;
struct Relation {
COMPS_HSList * parent_nodes;
COMPS_HSListItem * child_it;
} *relation;
path = comps_hslist_create();
comps_hslist_init(path, NULL, NULL, &free);
len = strlen(key);
offset = 0;
subnodes = rt->subnodes;
while (offset != len) {
found = 0;
for (it = subnodes->first; it != NULL; it=it->next) {
if (((COMPS_ObjMRTreeData*)it->data)->key[0] == key[offset]) {
found = 1;
break;
}
}
if (!found) {
comps_hslist_destroy(&path);
return;
}
rtdata = (COMPS_ObjMRTreeData*)it->data;
for (x=1; ;x++) {
ended=0;
if (rtdata->key[x] == 0) ended += 1;
if (x == len - offset) ended += 2;
if (ended != 0) break;
if (key[offset+x] != rtdata->key[x]) break;
}
if (ended == 3) {
            /* remove the node from the tree only if it has no descendants */
if (rtdata->subnodes->last == NULL) {
comps_hslist_remove(subnodes, it);
rt->len -= rtdata->data->len;
comps_objmrtree_data_destroy(rtdata);
free(it);
}
else {
rt->len -= rtdata->data->len;
comps_objlist_clear(rtdata->data);
rtdata->is_leaf = 0;
}
if (path->last == NULL) {
comps_hslist_destroy(&path);
return;
}
rtdata = (COMPS_ObjMRTreeData*)
((struct Relation*)path->last->data)->child_it->data;
            /* walk back up the path, removing every predecessor of the
               deleted node that has no children left */
            while (rtdata->subnodes->last == NULL) {
                comps_objmrtree_data_destroy(rtdata);
                comps_hslist_remove(
                            ((struct Relation*)path->last->data)->parent_nodes,
                            ((struct Relation*)path->last->data)->child_it);
                free(((struct Relation*)path->last->data)->child_it);
                it = path->last;
                comps_hslist_remove(path, path->last);
                free(it);
                if (path->last == NULL) /* reached the root; stop before
                                           dereferencing an empty path */
                    break;
                rtdata = (COMPS_ObjMRTreeData*)
                        ((struct Relation*)path->last->data)->child_it->data;
            }
comps_hslist_destroy(&path);
return;
}
else if (ended == 1) offset+=x;
else {
comps_hslist_destroy(&path);
return;
}
        if ((relation = malloc(sizeof(struct Relation))) == NULL) {
            comps_hslist_destroy(&path);
            return;
        }
        /* record the list that actually contains 'it' before descending,
           so the cleanup loop above removes the child from the right
           list */
        relation->parent_nodes = subnodes;
        relation->child_it = it;
        comps_hslist_append(path, (void*)relation, 0);
        subnodes = ((COMPS_ObjMRTreeData*)it->data)->subnodes;
}
comps_hslist_destroy(&path);
return;
}
inline void comps_objmrtree_pair_destroy_v(void * pair) {
free(((COMPS_ObjMRTreePair *)pair)->key);
free(pair);
}
inline COMPS_HSList* __comps_objmrtree_all(COMPS_ObjMRTree * rt, char keyvalpair) {
COMPS_HSList *to_process, *ret;
COMPS_HSListItem *hsit, *oldit;
size_t x;
struct Pair {
char *key;
void *data;
COMPS_HSList *subnodes;
    } *pair, *current_pair = NULL;
COMPS_ObjMRTreePair *rtpair;
to_process = comps_hslist_create();
comps_hslist_init(to_process, NULL, NULL, &free);
ret = comps_hslist_create();
if (keyvalpair == 0)
comps_hslist_init(ret, NULL, NULL, &free);
else if (keyvalpair == 1)
comps_hslist_init(ret, NULL, NULL, NULL);
else
comps_hslist_init(ret, NULL, NULL, &comps_objmrtree_pair_destroy_v);
for (hsit = rt->subnodes->first; hsit != NULL; hsit = hsit->next) {
pair = malloc(sizeof(struct Pair));
pair->key = __comps_strcpy(((COMPS_ObjMRTreeData*)hsit->data)->key);
pair->data = ((COMPS_ObjMRTreeData*)hsit->data)->data;
pair->subnodes = ((COMPS_ObjMRTreeData*)hsit->data)->subnodes;
comps_hslist_append(to_process, pair, 0);
}
while (to_process->first) {
current_pair = to_process->first->data;
oldit = to_process->first;
comps_hslist_remove(to_process, to_process->first);
if (current_pair->data) {
if (keyvalpair == 0) {
comps_hslist_append(ret, __comps_strcpy(current_pair->key), 0);
} else if (keyvalpair == 1) {
comps_hslist_append(ret, current_pair->data, 0);
} else {
rtpair = malloc(sizeof(COMPS_ObjMRTreePair));
rtpair->key = __comps_strcpy(current_pair->key);
rtpair->data = current_pair->data;
comps_hslist_append(ret, rtpair, 0);
}
}
for (hsit = current_pair->subnodes->first, x = 0;
hsit != NULL; hsit = hsit->next, x++) {
pair = malloc(sizeof(struct Pair));
pair->key = __comps_strcat(current_pair->key,
((COMPS_ObjMRTreeData*)hsit->data)->key);
pair->data = ((COMPS_ObjMRTreeData*)hsit->data)->data;
pair->subnodes = ((COMPS_ObjMRTreeData*)hsit->data)->subnodes;
comps_hslist_insert_at(to_process, x, pair, 0);
}
free(current_pair->key);
free(current_pair);
free(oldit);
}
comps_hslist_destroy(&to_process);
return ret;
}
COMPS_HSList* comps_objmrtree_keys(COMPS_ObjMRTree * rt) {
return __comps_objmrtree_all(rt, 0);
}
COMPS_HSList* comps_objmrtree_values(COMPS_ObjMRTree * rt) {
return __comps_objmrtree_all(rt, 1);
}
COMPS_HSList* comps_objmrtree_pairs(COMPS_ObjMRTree * rt) {
return __comps_objmrtree_all(rt, 2);
}
void comps_objmrtree_clear(COMPS_ObjMRTree * rt) {
COMPS_HSListItem *it, *oldit;
if (rt == NULL) return;
if (rt->subnodes == NULL) return;
oldit = rt->subnodes->first;
it = (oldit)?oldit->next:NULL;
for (;it != NULL; it=it->next) {
if (rt->subnodes->data_destructor != NULL)
rt->subnodes->data_destructor(oldit->data);
free(oldit);
oldit = it;
}
    if (oldit) {
        if (rt->subnodes->data_destructor != NULL)
            rt->subnodes->data_destructor(oldit->data);
        free(oldit);
    }
    rt->subnodes->first = NULL; /* the items are gone; don't leave */
    rt->subnodes->last = NULL;  /* dangling pointers behind */
    rt->len = 0;
}
char comps_objmrtree_paircmp(void *obj1, void *obj2) {
if (strcmp(((COMPS_ObjMRTreePair*)obj1)->key,
((COMPS_ObjMRTreePair*)obj2)->key) != 0)
return 0;
    /* compare the stored objects of the two pairs (not obj1 with itself) */
    return comps_object_cmp((COMPS_Object*)((COMPS_ObjMRTreePair*)obj1)->data,
                            (COMPS_Object*)((COMPS_ObjMRTreePair*)obj2)->data);
}
signed char comps_objmrtree_cmp(COMPS_ObjMRTree *ort1, COMPS_ObjMRTree *ort2) {
COMPS_HSList *values1, *values2;
COMPS_HSListItem *it;
COMPS_Set *set1, *set2;
signed char ret;
values1 = comps_objmrtree_pairs(ort1);
values2 = comps_objmrtree_pairs(ort2);
set1 = comps_set_create();
comps_set_init(set1, NULL, NULL, NULL, &comps_objmrtree_paircmp);
set2 = comps_set_create();
comps_set_init(set2, NULL, NULL, NULL, &comps_objmrtree_paircmp);
for (it = values1->first; it != NULL; it = it->next) {
comps_set_add(set1, it->data);
}
for (it = values2->first; it != NULL; it = it->next) {
comps_set_add(set2, it->data);
}
ret = comps_set_cmp(set1, set2);
comps_set_destroy(&set1);
comps_set_destroy(&set2);
comps_hslist_destroy(&values1);
comps_hslist_destroy(&values2);
return !ret;
}
COMPS_CMP_u(objmrtree, COMPS_ObjMRTree)
COMPS_ObjectInfo COMPS_ObjMRTree_ObjInfo = {
.obj_size = sizeof(COMPS_ObjMRTree),
.constructor = &comps_objmrtree_create_u,
.destructor = &comps_objmrtree_destroy_u,
.copy = &comps_objmrtree_copy_u,
.obj_cmp = &comps_objmrtree_cmp_u
};
| ./CrossVul/dataset_final_sorted/CWE-416/c/bad_1390_1 |
crossvul-cpp_data_good_4231_3 | /*
** $Id: lvm.c $
** Lua virtual machine
** See Copyright Notice in lua.h
*/
#define lvm_c
#define LUA_CORE
#include "lprefix.h"
#include <float.h>
#include <limits.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "lua.h"
#include "ldebug.h"
#include "ldo.h"
#include "lfunc.h"
#include "lgc.h"
#include "lobject.h"
#include "lopcodes.h"
#include "lstate.h"
#include "lstring.h"
#include "ltable.h"
#include "ltm.h"
#include "lvm.h"
/*
** By default, use jump tables in the main interpreter loop on gcc
** and compatible compilers.
*/
#if !defined(LUA_USE_JUMPTABLE)
#if defined(__GNUC__)
#define LUA_USE_JUMPTABLE 1
#else
#define LUA_USE_JUMPTABLE 0
#endif
#endif
/* limit for table tag-method chains (to avoid infinite loops) */
#define MAXTAGLOOP 2000
/*
** 'l_intfitsf' checks whether a given integer is in the range that
** can be converted to a float without rounding. Used in comparisons.
*/
/* number of bits in the mantissa of a float */
#define NBM (l_floatatt(MANT_DIG))
/*
** Check whether some integers may not fit in a float, testing whether
** (maxinteger >> NBM) > 0. (That implies (1 << NBM) <= maxinteger.)
** (The shifts are done in parts, to avoid shifting by more than the size
** of an integer. In a worst case, NBM == 113 for long double and
** 32-bit integers.)
*/
#if ((((LUA_MAXINTEGER >> (NBM / 4)) >> (NBM / 4)) >> (NBM / 4)) \
>> (NBM - (3 * (NBM / 4)))) > 0
/* limit for integers that fit in a float */
#define MAXINTFITSF ((lua_Unsigned)1 << NBM)
/* check whether 'i' is in the interval [-MAXINTFITSF, MAXINTFITSF] */
#define l_intfitsf(i) ((MAXINTFITSF + l_castS2U(i)) <= (2 * MAXINTFITSF))
#else /* all integers fit in a float precisely */
#define l_intfitsf(i) 1
#endif
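/*
** Illustrative sketch (standalone, not part of the VM): with 64-bit
** integers and IEEE doubles (53-bit mantissa), integers above 2^53 may
** not survive a round trip through float, which is why comparisons on
** such values take the exact integer path instead.
*/
#if 0
#include <stdio.h>
int main (void) {
  long long i = (1LL << 53) + 1;      /* not representable as a double */
  double f = (double)i;               /* rounds to 2^53 */
  printf("%d\n", (long long)f == i);  /* prints 0: round trip not exact */
  return 0;
}
#endif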
/*
** Try to convert a value from string to a number value.
** If the value is not a string or is a string not representing
** a valid numeral (or if coercions from strings to numbers
** are disabled via macro 'cvt2num'), do not modify 'result'
** and return 0.
*/
static int l_strton (const TValue *obj, TValue *result) {
lua_assert(obj != result);
if (!cvt2num(obj)) /* is object not a string? */
return 0;
else
return (luaO_str2num(svalue(obj), result) == vslen(obj) + 1);
}
/*
** Try to convert a value to a float. The float case is already handled
** by the macro 'tonumber'.
*/
int luaV_tonumber_ (const TValue *obj, lua_Number *n) {
TValue v;
if (ttisinteger(obj)) {
*n = cast_num(ivalue(obj));
return 1;
}
else if (l_strton(obj, &v)) { /* string coercible to number? */
*n = nvalue(&v); /* convert result of 'luaO_str2num' to a float */
return 1;
}
else
return 0; /* conversion failed */
}
/*
** try to convert a float to an integer, rounding according to 'mode'.
*/
int luaV_flttointeger (lua_Number n, lua_Integer *p, F2Imod mode) {
lua_Number f = l_floor(n);
if (n != f) { /* not an integral value? */
if (mode == F2Ieq) return 0; /* fails if mode demands integral value */
else if (mode == F2Iceil) /* needs ceil? */
f += 1; /* convert floor to ceil (remember: n != f) */
}
return lua_numbertointeger(f, p);
}
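/*
** Illustrative sketch (standalone, not part of the VM) of the three
** rounding modes: F2Ieq rejects non-integral values, F2Ifloor keeps
** floor(n), and F2Iceil bumps the floor by one exactly when 'n' was not
** integral. ('flt2int' is a simplified stand-in; the real code also
** checks that the result fits, via 'lua_numbertointeger'.)
*/
#if 0
#include <math.h>
#include <stdio.h>
static int flt2int (double n, long long *p, int ceilmode, int eqmode) {
  double f = floor(n);
  if (n != f) {                 /* not an integral value? */
    if (eqmode) return 0;       /* F2Ieq: fail */
    if (ceilmode) f += 1;       /* F2Iceil: convert floor to ceil */
  }
  *p = (long long)f;
  return 1;
}
int main (void) {
  long long r;
  printf("%d\n", flt2int(2.5, &r, 0, 1));           /* 0: F2Ieq fails */
  if (flt2int(2.5, &r, 0, 0)) printf("%lld\n", r);  /* 2: F2Ifloor */
  if (flt2int(2.5, &r, 1, 0)) printf("%lld\n", r);  /* 3: F2Iceil */
  return 0;
}
#endif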
/*
** try to convert a value to an integer, rounding according to 'mode',
** without string coercion.
** ("Fast track" handled by macro 'tointegerns'.)
*/
int luaV_tointegerns (const TValue *obj, lua_Integer *p, F2Imod mode) {
if (ttisfloat(obj))
return luaV_flttointeger(fltvalue(obj), p, mode);
else if (ttisinteger(obj)) {
*p = ivalue(obj);
return 1;
}
else
return 0;
}
/*
** try to convert a value to an integer.
*/
int luaV_tointeger (const TValue *obj, lua_Integer *p, F2Imod mode) {
TValue v;
if (l_strton(obj, &v)) /* does 'obj' point to a numerical string? */
obj = &v; /* change it to point to its corresponding number */
return luaV_tointegerns(obj, p, mode);
}
/*
** Try to convert a 'for' limit to an integer, preserving the semantics
** of the loop. Return true if the loop must not run; otherwise, '*p'
** gets the integer limit.
** (The following explanation assumes a positive step; it is valid for
** negative steps mutatis mutandis.)
** If the limit is an integer or can be converted to an integer,
** rounding down, that is the limit.
** Otherwise, check whether the limit can be converted to a float. If
** the float is too large, clip it to LUA_MAXINTEGER. If the float
** is too negative, the loop should not run, because any initial
** integer value is greater than such limit; so, the function returns
** true to signal that. (For this latter case, no integer limit would be
** correct; even a limit of LUA_MININTEGER would run the loop once for
** an initial value equal to LUA_MININTEGER.)
*/
static int forlimit (lua_State *L, lua_Integer init, const TValue *lim,
lua_Integer *p, lua_Integer step) {
if (!luaV_tointeger(lim, p, (step < 0 ? F2Iceil : F2Ifloor))) {
    /* not coercible to an integer */
lua_Number flim; /* try to convert to float */
if (!tonumber(lim, &flim)) /* cannot convert to float? */
luaG_forerror(L, lim, "limit");
/* else 'flim' is a float out of integer bounds */
if (luai_numlt(0, flim)) { /* if it is positive, it is too large */
if (step < 0) return 1; /* initial value must be less than it */
*p = LUA_MAXINTEGER; /* truncate */
}
else { /* it is less than min integer */
if (step > 0) return 1; /* initial value must be greater than it */
*p = LUA_MININTEGER; /* truncate */
}
}
return (step > 0 ? init > *p : init < *p); /* not to run? */
}
/*
** Prepare a numerical for loop (opcode OP_FORPREP).
** Return true to skip the loop. Otherwise,
** after preparation, stack will be as follows:
** ra : internal index (safe copy of the control variable)
** ra + 1 : loop counter (integer loops) or limit (float loops)
** ra + 2 : step
** ra + 3 : control variable
*/
static int forprep (lua_State *L, StkId ra) {
TValue *pinit = s2v(ra);
TValue *plimit = s2v(ra + 1);
TValue *pstep = s2v(ra + 2);
if (ttisinteger(pinit) && ttisinteger(pstep)) { /* integer loop? */
lua_Integer init = ivalue(pinit);
lua_Integer step = ivalue(pstep);
lua_Integer limit;
if (step == 0)
luaG_runerror(L, "'for' step is zero");
setivalue(s2v(ra + 3), init); /* control variable */
if (forlimit(L, init, plimit, &limit, step))
return 1; /* skip the loop */
else { /* prepare loop counter */
lua_Unsigned count;
if (step > 0) { /* ascending loop? */
count = l_castS2U(limit) - l_castS2U(init);
if (step != 1) /* avoid division in the too common case */
count /= l_castS2U(step);
}
else { /* step < 0; descending loop */
count = l_castS2U(init) - l_castS2U(limit);
/* 'step+1' avoids negating 'mininteger' */
count /= l_castS2U(-(step + 1)) + 1u;
}
/* store the counter in place of the limit (which won't be
         needed anymore) */
setivalue(plimit, l_castU2S(count));
}
}
else { /* try making all values floats */
lua_Number init; lua_Number limit; lua_Number step;
if (unlikely(!tonumber(plimit, &limit)))
luaG_forerror(L, plimit, "limit");
if (unlikely(!tonumber(pstep, &step)))
luaG_forerror(L, pstep, "step");
if (unlikely(!tonumber(pinit, &init)))
luaG_forerror(L, pinit, "initial value");
if (step == 0)
luaG_runerror(L, "'for' step is zero");
if (luai_numlt(0, step) ? luai_numlt(limit, init)
: luai_numlt(init, limit))
return 1; /* skip the loop */
else {
/* make sure internal values are all floats */
setfltvalue(plimit, limit);
setfltvalue(pstep, step);
setfltvalue(s2v(ra), init); /* internal index */
setfltvalue(s2v(ra + 3), init); /* control variable */
}
}
return 0;
}
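/*
** Illustrative sketch (standalone, not part of the VM) of why the loop
** counter above is computed with unsigned subtraction: for init = -2,
** limit = LUA_MAXINTEGER and step = 1, the signed difference would
** overflow, while the unsigned wrap-around gives the exact count.
*/
#if 0
#include <stdio.h>
int main (void) {
  long long init = -2, limit = 9223372036854775807LL;
  unsigned long long count =
      (unsigned long long)limit - (unsigned long long)init;
  printf("%llu\n", count);  /* 9223372036854775809: iterations after the
                               first one */
  return 0;
}
#endif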
/*
** Execute a step of a float numerical for loop, returning
** true iff the loop must continue. (The integer case is
** written inline with opcode OP_FORLOOP, for performance.)
*/
static int floatforloop (StkId ra) {
lua_Number step = fltvalue(s2v(ra + 2));
lua_Number limit = fltvalue(s2v(ra + 1));
lua_Number idx = fltvalue(s2v(ra)); /* internal index */
idx = luai_numadd(L, idx, step); /* increment index */
if (luai_numlt(0, step) ? luai_numle(idx, limit)
: luai_numle(limit, idx)) {
chgfltvalue(s2v(ra), idx); /* update internal index */
setfltvalue(s2v(ra + 3), idx); /* and control variable */
return 1; /* jump back */
}
else
return 0; /* finish the loop */
}
/*
** Finish the table access 'val = t[key]'.
** if 'slot' is NULL, 't' is not a table; otherwise, 'slot' points to
** t[k] entry (which must be empty).
*/
void luaV_finishget (lua_State *L, const TValue *t, TValue *key, StkId val,
const TValue *slot) {
int loop; /* counter to avoid infinite loops */
const TValue *tm; /* metamethod */
for (loop = 0; loop < MAXTAGLOOP; loop++) {
if (slot == NULL) { /* 't' is not a table? */
lua_assert(!ttistable(t));
tm = luaT_gettmbyobj(L, t, TM_INDEX);
if (unlikely(notm(tm)))
luaG_typeerror(L, t, "index"); /* no metamethod */
/* else will try the metamethod */
}
else { /* 't' is a table */
lua_assert(isempty(slot));
tm = fasttm(L, hvalue(t)->metatable, TM_INDEX); /* table's metamethod */
if (tm == NULL) { /* no metamethod? */
setnilvalue(s2v(val)); /* result is nil */
return;
}
/* else will try the metamethod */
}
if (ttisfunction(tm)) { /* is metamethod a function? */
luaT_callTMres(L, tm, t, key, val); /* call it */
return;
}
t = tm; /* else try to access 'tm[key]' */
if (luaV_fastget(L, t, key, slot, luaH_get)) { /* fast track? */
setobj2s(L, val, slot); /* done */
return;
}
/* else repeat (tail call 'luaV_finishget') */
}
luaG_runerror(L, "'__index' chain too long; possible loop");
}
/*
** Finish a table assignment 't[key] = val'.
** If 'slot' is NULL, 't' is not a table. Otherwise, 'slot' points
** to the entry 't[key]', or to a value with an absent key if there
** is no such entry. (The value at 'slot' must be empty, otherwise
** 'luaV_fastget' would have done the job.)
*/
void luaV_finishset (lua_State *L, const TValue *t, TValue *key,
TValue *val, const TValue *slot) {
int loop; /* counter to avoid infinite loops */
for (loop = 0; loop < MAXTAGLOOP; loop++) {
const TValue *tm; /* '__newindex' metamethod */
if (slot != NULL) { /* is 't' a table? */
Table *h = hvalue(t); /* save 't' table */
lua_assert(isempty(slot)); /* slot must be empty */
tm = fasttm(L, h->metatable, TM_NEWINDEX); /* get metamethod */
if (tm == NULL) { /* no metamethod? */
if (isabstkey(slot)) /* no previous entry? */
slot = luaH_newkey(L, h, key); /* create one */
/* no metamethod and (now) there is an entry with given key */
setobj2t(L, cast(TValue *, slot), val); /* set its new value */
invalidateTMcache(h);
luaC_barrierback(L, obj2gco(h), val);
return;
}
/* else will try the metamethod */
}
else { /* not a table; check metamethod */
tm = luaT_gettmbyobj(L, t, TM_NEWINDEX);
if (unlikely(notm(tm)))
luaG_typeerror(L, t, "index");
}
/* try the metamethod */
if (ttisfunction(tm)) {
luaT_callTM(L, tm, t, key, val);
return;
}
t = tm; /* else repeat assignment over 'tm' */
if (luaV_fastget(L, t, key, slot, luaH_get)) {
luaV_finishfastset(L, t, slot, val);
return; /* done */
}
/* else 'return luaV_finishset(L, t, key, val, slot)' (loop) */
}
luaG_runerror(L, "'__newindex' chain too long; possible loop");
}
/*
** Compare two strings 'ls' x 'rs', returning an integer less-equal-
** -greater than zero if 'ls' is less-equal-greater than 'rs'.
** The code is a little tricky because it allows '\0' in the strings
** and it uses 'strcoll' (to respect locales) for each segment
** of the strings.
*/
static int l_strcmp (const TString *ls, const TString *rs) {
const char *l = getstr(ls);
size_t ll = tsslen(ls);
const char *r = getstr(rs);
size_t lr = tsslen(rs);
for (;;) { /* for each segment */
int temp = strcoll(l, r);
if (temp != 0) /* not equal? */
return temp; /* done */
else { /* strings are equal up to a '\0' */
size_t len = strlen(l); /* index of first '\0' in both strings */
if (len == lr) /* 'rs' is finished? */
return (len == ll) ? 0 : 1; /* check 'ls' */
else if (len == ll) /* 'ls' is finished? */
return -1; /* 'ls' is less than 'rs' ('rs' is not finished) */
/* both strings longer than 'len'; go on comparing after the '\0' */
len++;
l += len; ll -= len; r += len; lr -= len;
}
}
}
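/*
** Illustrative sketch (standalone, not part of the VM): 'strcoll' stops
** at the first '\0', so strings with embedded zeros are compared one
** zero-terminated segment at a time, exactly as the loop above does.
** (Results shown assume the C locale.)
*/
#if 0
#include <stdio.h>
#include <string.h>
int main (void) {
  const char l[] = "a\0b";                    /* embedded '\0' */
  const char r[] = "a\0c";
  printf("%d\n", strcoll(l, r) == 0);         /* 1: first segments equal */
  printf("%d\n", strcoll(l + 2, r + 2) < 0);  /* 1: "b" < "c" after '\0' */
  return 0;
}
#endif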
/*
** Check whether integer 'i' is less than float 'f'. If 'i' has an
** exact representation as a float ('l_intfitsf'), compare numbers as
** floats. Otherwise, use the equivalence 'i < f <=> i < ceil(f)'.
** If 'ceil(f)' is out of integer range, either 'f' is greater than
** all integers or less than all integers.
** (The test with 'l_intfitsf' is only for performance; the else
** case is correct for all values, but it is slow due to the conversion
** from float to int.)
** When 'f' is NaN, comparisons must result in false.
*/
static int LTintfloat (lua_Integer i, lua_Number f) {
if (l_intfitsf(i))
return luai_numlt(cast_num(i), f); /* compare them as floats */
else { /* i < f <=> i < ceil(f) */
lua_Integer fi;
if (luaV_flttointeger(f, &fi, F2Iceil)) /* fi = ceil(f) */
return i < fi; /* compare them as integers */
else /* 'f' is either greater or less than all integers */
return f > 0; /* greater? */
}
}
/*
** Check whether integer 'i' is less than or equal to float 'f'.
** See comments on previous function.
*/
static int LEintfloat (lua_Integer i, lua_Number f) {
if (l_intfitsf(i))
return luai_numle(cast_num(i), f); /* compare them as floats */
else { /* i <= f <=> i <= floor(f) */
lua_Integer fi;
if (luaV_flttointeger(f, &fi, F2Ifloor)) /* fi = floor(f) */
return i <= fi; /* compare them as integers */
else /* 'f' is either greater or less than all integers */
return f > 0; /* greater? */
}
}
/*
** Check whether float 'f' is less than integer 'i'.
** See comments on previous function.
*/
static int LTfloatint (lua_Number f, lua_Integer i) {
if (l_intfitsf(i))
return luai_numlt(f, cast_num(i)); /* compare them as floats */
else { /* f < i <=> floor(f) < i */
lua_Integer fi;
if (luaV_flttointeger(f, &fi, F2Ifloor)) /* fi = floor(f) */
return fi < i; /* compare them as integers */
else /* 'f' is either greater or less than all integers */
return f < 0; /* less? */
}
}
/*
** Check whether float 'f' is less than or equal to integer 'i'.
** See comments on previous function.
*/
static int LEfloatint (lua_Number f, lua_Integer i) {
if (l_intfitsf(i))
return luai_numle(f, cast_num(i)); /* compare them as floats */
else { /* f <= i <=> ceil(f) <= i */
lua_Integer fi;
if (luaV_flttointeger(f, &fi, F2Iceil)) /* fi = ceil(f) */
return fi <= i; /* compare them as integers */
else /* 'f' is either greater or less than all integers */
return f < 0; /* less? */
}
}
/*
** Return 'l < r', for numbers.
*/
static int LTnum (const TValue *l, const TValue *r) {
lua_assert(ttisnumber(l) && ttisnumber(r));
if (ttisinteger(l)) {
lua_Integer li = ivalue(l);
if (ttisinteger(r))
return li < ivalue(r); /* both are integers */
else /* 'l' is int and 'r' is float */
return LTintfloat(li, fltvalue(r)); /* l < r ? */
}
else {
lua_Number lf = fltvalue(l); /* 'l' must be float */
if (ttisfloat(r))
return luai_numlt(lf, fltvalue(r)); /* both are float */
else /* 'l' is float and 'r' is int */
return LTfloatint(lf, ivalue(r));
}
}
/*
** Return 'l <= r', for numbers.
*/
static int LEnum (const TValue *l, const TValue *r) {
lua_assert(ttisnumber(l) && ttisnumber(r));
if (ttisinteger(l)) {
lua_Integer li = ivalue(l);
if (ttisinteger(r))
return li <= ivalue(r); /* both are integers */
else /* 'l' is int and 'r' is float */
return LEintfloat(li, fltvalue(r)); /* l <= r ? */
}
else {
lua_Number lf = fltvalue(l); /* 'l' must be float */
if (ttisfloat(r))
return luai_numle(lf, fltvalue(r)); /* both are float */
else /* 'l' is float and 'r' is int */
return LEfloatint(lf, ivalue(r));
}
}
/*
** return 'l < r' for non-numbers.
*/
static int lessthanothers (lua_State *L, const TValue *l, const TValue *r) {
lua_assert(!ttisnumber(l) || !ttisnumber(r));
if (ttisstring(l) && ttisstring(r)) /* both are strings? */
return l_strcmp(tsvalue(l), tsvalue(r)) < 0;
else
return luaT_callorderTM(L, l, r, TM_LT);
}
/*
** Main operation less than; return 'l < r'.
*/
int luaV_lessthan (lua_State *L, const TValue *l, const TValue *r) {
if (ttisnumber(l) && ttisnumber(r)) /* both operands are numbers? */
return LTnum(l, r);
else return lessthanothers(L, l, r);
}
/*
** return 'l <= r' for non-numbers.
*/
static int lessequalothers (lua_State *L, const TValue *l, const TValue *r) {
lua_assert(!ttisnumber(l) || !ttisnumber(r));
if (ttisstring(l) && ttisstring(r)) /* both are strings? */
return l_strcmp(tsvalue(l), tsvalue(r)) <= 0;
else
return luaT_callorderTM(L, l, r, TM_LE);
}
/*
** Main operation less than or equal to; return 'l <= r'.
*/
int luaV_lessequal (lua_State *L, const TValue *l, const TValue *r) {
if (ttisnumber(l) && ttisnumber(r)) /* both operands are numbers? */
return LEnum(l, r);
else return lessequalothers(L, l, r);
}
/*
** Main operation for equality of Lua values; return 't1 == t2'.
** L == NULL means raw equality (no metamethods)
*/
int luaV_equalobj (lua_State *L, const TValue *t1, const TValue *t2) {
const TValue *tm;
if (ttypetag(t1) != ttypetag(t2)) { /* not the same variant? */
if (ttype(t1) != ttype(t2) || ttype(t1) != LUA_TNUMBER)
return 0; /* only numbers can be equal with different variants */
else { /* two numbers with different variants */
lua_Integer i1, i2; /* compare them as integers */
return (tointegerns(t1, &i1) && tointegerns(t2, &i2) && i1 == i2);
}
}
/* values have same type and same variant */
switch (ttypetag(t1)) {
case LUA_VNIL: case LUA_VFALSE: case LUA_VTRUE: return 1;
case LUA_VNUMINT: return (ivalue(t1) == ivalue(t2));
case LUA_VNUMFLT: return luai_numeq(fltvalue(t1), fltvalue(t2));
case LUA_VLIGHTUSERDATA: return pvalue(t1) == pvalue(t2);
case LUA_VLCF: return fvalue(t1) == fvalue(t2);
case LUA_VSHRSTR: return eqshrstr(tsvalue(t1), tsvalue(t2));
case LUA_VLNGSTR: return luaS_eqlngstr(tsvalue(t1), tsvalue(t2));
case LUA_VUSERDATA: {
if (uvalue(t1) == uvalue(t2)) return 1;
else if (L == NULL) return 0;
tm = fasttm(L, uvalue(t1)->metatable, TM_EQ);
if (tm == NULL)
tm = fasttm(L, uvalue(t2)->metatable, TM_EQ);
break; /* will try TM */
}
case LUA_VTABLE: {
if (hvalue(t1) == hvalue(t2)) return 1;
else if (L == NULL) return 0;
tm = fasttm(L, hvalue(t1)->metatable, TM_EQ);
if (tm == NULL)
tm = fasttm(L, hvalue(t2)->metatable, TM_EQ);
break; /* will try TM */
}
default:
return gcvalue(t1) == gcvalue(t2);
}
if (tm == NULL) /* no TM? */
return 0; /* objects are different */
else {
luaT_callTMres(L, tm, t1, t2, L->top); /* call TM */
return !l_isfalse(s2v(L->top));
}
}
/* macro used by 'luaV_concat' to ensure that element at 'o' is a string */
#define tostring(L,o) \
(ttisstring(o) || (cvt2str(o) && (luaO_tostring(L, o), 1)))
#define isemptystr(o) (ttisshrstring(o) && tsvalue(o)->shrlen == 0)
/* copy strings in stack from top - n up to top - 1 to buffer */
static void copy2buff (StkId top, int n, char *buff) {
size_t tl = 0; /* size already copied */
do {
size_t l = vslen(s2v(top - n)); /* length of string being copied */
memcpy(buff + tl, svalue(s2v(top - n)), l * sizeof(char));
tl += l;
} while (--n > 0);
}
/*
** Main operation for concatenation: concat 'total' values in the stack,
** from 'L->top - total' up to 'L->top - 1'.
*/
void luaV_concat (lua_State *L, int total) {
if (total == 1)
return; /* "all" values already concatenated */
do {
StkId top = L->top;
int n = 2; /* number of elements handled in this pass (at least 2) */
if (!(ttisstring(s2v(top - 2)) || cvt2str(s2v(top - 2))) ||
!tostring(L, s2v(top - 1)))
luaT_tryconcatTM(L);
else if (isemptystr(s2v(top - 1))) /* second operand is empty? */
cast_void(tostring(L, s2v(top - 2))); /* result is first operand */
else if (isemptystr(s2v(top - 2))) { /* first operand is empty string? */
setobjs2s(L, top - 2, top - 1); /* result is second op. */
}
else {
/* at least two non-empty string values; get as many as possible */
size_t tl = vslen(s2v(top - 1));
TString *ts;
/* collect total length and number of strings */
for (n = 1; n < total && tostring(L, s2v(top - n - 1)); n++) {
size_t l = vslen(s2v(top - n - 1));
if (unlikely(l >= (MAX_SIZE/sizeof(char)) - tl))
luaG_runerror(L, "string length overflow");
tl += l;
}
if (tl <= LUAI_MAXSHORTLEN) { /* is result a short string? */
char buff[LUAI_MAXSHORTLEN];
copy2buff(top, n, buff); /* copy strings to buffer */
ts = luaS_newlstr(L, buff, tl);
}
else { /* long string; copy strings directly to final result */
ts = luaS_createlngstrobj(L, tl);
copy2buff(top, n, getstr(ts));
}
setsvalue2s(L, top - n, ts); /* create result */
}
total -= n-1; /* got 'n' strings to create 1 new */
L->top -= n-1; /* popped 'n' strings and pushed one */
} while (total > 1); /* repeat until only 1 result left */
}
/*
** Main operation 'ra = #rb'.
*/
void luaV_objlen (lua_State *L, StkId ra, const TValue *rb) {
const TValue *tm;
switch (ttypetag(rb)) {
case LUA_VTABLE: {
Table *h = hvalue(rb);
tm = fasttm(L, h->metatable, TM_LEN);
if (tm) break; /* metamethod? break switch to call it */
setivalue(s2v(ra), luaH_getn(h)); /* else primitive len */
return;
}
case LUA_VSHRSTR: {
setivalue(s2v(ra), tsvalue(rb)->shrlen);
return;
}
case LUA_VLNGSTR: {
setivalue(s2v(ra), tsvalue(rb)->u.lnglen);
return;
}
default: { /* try metamethod */
tm = luaT_gettmbyobj(L, rb, TM_LEN);
if (unlikely(notm(tm))) /* no metamethod? */
luaG_typeerror(L, rb, "get length of");
break;
}
}
luaT_callTMres(L, tm, rb, rb, ra);
}
/*
** Integer division; return 'm // n', that is, floor(m/n).
** C division truncates its result (rounds towards zero).
** 'floor(q) == trunc(q)' when 'q >= 0' or when 'q' is integer,
** otherwise 'floor(q) == trunc(q) - 1'.
*/
lua_Integer luaV_idiv (lua_State *L, lua_Integer m, lua_Integer n) {
if (unlikely(l_castS2U(n) + 1u <= 1u)) { /* special cases: -1 or 0 */
if (n == 0)
luaG_runerror(L, "attempt to divide by zero");
return intop(-, 0, m); /* n==-1; avoid overflow with 0x80000...//-1 */
}
else {
lua_Integer q = m / n; /* perform C division */
if ((m ^ n) < 0 && m % n != 0) /* 'm/n' would be negative non-integer? */
q -= 1; /* correct result for different rounding */
return q;
}
}
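/*
** Illustrative sketch (standalone, not part of the VM): C '/' truncates
** toward zero while Lua '//' floors, so the 'q -= 1' correction applies
** exactly when the operands have different signs and the division is
** not exact.
*/
#if 0
#include <stdio.h>
int main (void) {
  int m = -7, n = 2;
  int q = m / n;                  /* -3: truncated toward zero */
  if ((m ^ n) < 0 && m % n != 0)
    q -= 1;                       /* -4: floored, as Lua computes */
  printf("trunc=%d floor=%d\n", m / n, q);
  return 0;
}
#endif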
/*
** Integer modulus; return 'm % n'. (Assume that C '%' with
** negative operands follows C99 behavior. See previous comment
** about luaV_idiv.)
*/
lua_Integer luaV_mod (lua_State *L, lua_Integer m, lua_Integer n) {
if (unlikely(l_castS2U(n) + 1u <= 1u)) { /* special cases: -1 or 0 */
if (n == 0)
luaG_runerror(L, "attempt to perform 'n%%0'");
return 0; /* m % -1 == 0; avoid overflow with 0x80000...%-1 */
}
else {
lua_Integer r = m % n;
if (r != 0 && (r ^ n) < 0) /* 'm/n' would be non-integer negative? */
r += n; /* correct result for different rounding */
return r;
}
}
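/*
** Illustrative sketch (standalone, not part of the VM): C99 '%' keeps
** the sign of the dividend, Lua '%' keeps the sign of the divisor,
** hence the 'r += n' correction above when the signs differ.
*/
#if 0
#include <stdio.h>
int main (void) {
  int m = -7, n = 3;
  int r = m % n;              /* -1 in C99 */
  if (r != 0 && (r ^ n) < 0)
    r += n;                   /* 2, as Lua computes -7 % 3 */
  printf("C=%d Lua=%d\n", m % n, r);
  return 0;
}
#endif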
/*
** Float modulus
*/
lua_Number luaV_modf (lua_State *L, lua_Number m, lua_Number n) {
lua_Number r;
luai_nummod(L, m, n, r);
return r;
}
/* number of bits in an integer */
#define NBITS cast_int(sizeof(lua_Integer) * CHAR_BIT)
/*
** Shift left operation. (Shift right just negates 'y'.)
*/
#define luaV_shiftr(x,y) luaV_shiftl(x,-(y))
lua_Integer luaV_shiftl (lua_Integer x, lua_Integer y) {
if (y < 0) { /* shift right? */
if (y <= -NBITS) return 0;
else return intop(>>, x, -y);
}
else { /* shift left */
if (y >= NBITS) return 0;
else return intop(<<, x, y);
}
}
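/*
** Illustrative sketch (standalone, not part of the VM): C shifts by a
** count >= the integer width are undefined behavior; 'luaV_shiftl'
** defines them as 0, and a negative count simply shifts the other way.
*/
#if 0
#include <stdio.h>
static long long shiftl (long long x, long long y) {
  const int nbits = 64;  /* assumes a 64-bit integer type */
  if (y < 0)             /* shift right? */
    return (y <= -nbits) ? 0 : (long long)((unsigned long long)x >> -y);
  else                   /* shift left */
    return (y >= nbits) ? 0 : (long long)((unsigned long long)x << y);
}
int main (void) {
  printf("%lld\n", shiftl(1, 70));    /* 0, not undefined behavior */
  printf("%lld\n", shiftl(256, -4));  /* 16: negative count shifts right */
  return 0;
}
#endif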
/*
** create a new Lua closure, push it in the stack, and initialize
** its upvalues.
*/
static void pushclosure (lua_State *L, Proto *p, UpVal **encup, StkId base,
StkId ra) {
int nup = p->sizeupvalues;
Upvaldesc *uv = p->upvalues;
int i;
LClosure *ncl = luaF_newLclosure(L, nup);
ncl->p = p;
setclLvalue2s(L, ra, ncl); /* anchor new closure in stack */
for (i = 0; i < nup; i++) { /* fill in its upvalues */
if (uv[i].instack) /* upvalue refers to local variable? */
ncl->upvals[i] = luaF_findupval(L, base + uv[i].idx);
else /* get upvalue from enclosing function */
ncl->upvals[i] = encup[uv[i].idx];
luaC_objbarrier(L, ncl, ncl->upvals[i]);
}
}
/*
** finish execution of an opcode interrupted by a yield
*/
void luaV_finishOp (lua_State *L) {
CallInfo *ci = L->ci;
StkId base = ci->func + 1;
Instruction inst = *(ci->u.l.savedpc - 1); /* interrupted instruction */
OpCode op = GET_OPCODE(inst);
switch (op) { /* finish its execution */
case OP_MMBIN: case OP_MMBINI: case OP_MMBINK: {
setobjs2s(L, base + GETARG_A(*(ci->u.l.savedpc - 2)), --L->top);
break;
}
case OP_UNM: case OP_BNOT: case OP_LEN:
case OP_GETTABUP: case OP_GETTABLE: case OP_GETI:
case OP_GETFIELD: case OP_SELF: {
setobjs2s(L, base + GETARG_A(inst), --L->top);
break;
}
case OP_LT: case OP_LE:
case OP_LTI: case OP_LEI:
case OP_GTI: case OP_GEI:
case OP_EQ: { /* note that 'OP_EQI'/'OP_EQK' cannot yield */
int res = !l_isfalse(s2v(L->top - 1));
L->top--;
#if defined(LUA_COMPAT_LT_LE)
if (ci->callstatus & CIST_LEQ) { /* "<=" using "<" instead? */
ci->callstatus ^= CIST_LEQ; /* clear mark */
res = !res; /* negate result */
}
#endif
lua_assert(GET_OPCODE(*ci->u.l.savedpc) == OP_JMP);
if (res != GETARG_k(inst)) /* condition failed? */
ci->u.l.savedpc++; /* skip jump instruction */
break;
}
case OP_CONCAT: {
StkId top = L->top - 1; /* top when 'luaT_tryconcatTM' was called */
int a = GETARG_A(inst); /* first element to concatenate */
int total = cast_int(top - 1 - (base + a)); /* yet to concatenate */
setobjs2s(L, top - 2, top); /* put TM result in proper position */
L->top = top - 1; /* top is one after last element (at top-2) */
luaV_concat(L, total); /* concat them (may yield again) */
break;
}
default: {
/* only these other opcodes can yield */
lua_assert(op == OP_TFORCALL || op == OP_CALL ||
op == OP_TAILCALL || op == OP_SETTABUP || op == OP_SETTABLE ||
op == OP_SETI || op == OP_SETFIELD);
break;
}
}
}
/*
** {==================================================================
** Macros for arithmetic/bitwise/comparison opcodes in 'luaV_execute'
** ===================================================================
*/
#define l_addi(L,a,b) intop(+, a, b)
#define l_subi(L,a,b) intop(-, a, b)
#define l_muli(L,a,b) intop(*, a, b)
#define l_band(a,b) intop(&, a, b)
#define l_bor(a,b) intop(|, a, b)
#define l_bxor(a,b) intop(^, a, b)
#define l_lti(a,b) (a < b)
#define l_lei(a,b) (a <= b)
#define l_gti(a,b) (a > b)
#define l_gei(a,b) (a >= b)
/*
** Arithmetic operations with immediate operands. 'iop' is the integer
** operation, 'fop' is the float operation.
*/
#define op_arithI(L,iop,fop) { \
TValue *v1 = vRB(i); \
int imm = GETARG_sC(i); \
if (ttisinteger(v1)) { \
lua_Integer iv1 = ivalue(v1); \
pc++; setivalue(s2v(ra), iop(L, iv1, imm)); \
} \
else if (ttisfloat(v1)) { \
lua_Number nb = fltvalue(v1); \
lua_Number fimm = cast_num(imm); \
pc++; setfltvalue(s2v(ra), fop(L, nb, fimm)); \
}}
/*
** Auxiliary function for arithmetic operations over floats and others
** with two register operands.
*/
#define op_arithf_aux(L,v1,v2,fop) { \
lua_Number n1; lua_Number n2; \
if (tonumberns(v1, n1) && tonumberns(v2, n2)) { \
pc++; setfltvalue(s2v(ra), fop(L, n1, n2)); \
}}
/*
** Arithmetic operations over floats and others with register operands.
*/
#define op_arithf(L,fop) { \
TValue *v1 = vRB(i); \
TValue *v2 = vRC(i); \
op_arithf_aux(L, v1, v2, fop); }
/*
** Arithmetic operations with K operands for floats.
*/
#define op_arithfK(L,fop) { \
TValue *v1 = vRB(i); \
TValue *v2 = KC(i); \
op_arithf_aux(L, v1, v2, fop); }
/*
** Arithmetic operations over integers and floats.
*/
#define op_arith_aux(L,v1,v2,iop,fop) { \
if (ttisinteger(v1) && ttisinteger(v2)) { \
lua_Integer i1 = ivalue(v1); lua_Integer i2 = ivalue(v2); \
pc++; setivalue(s2v(ra), iop(L, i1, i2)); \
} \
else op_arithf_aux(L, v1, v2, fop); }
/*
** Arithmetic operations with register operands.
*/
#define op_arith(L,iop,fop) { \
TValue *v1 = vRB(i); \
TValue *v2 = vRC(i); \
op_arith_aux(L, v1, v2, iop, fop); }
/*
** Arithmetic operations with K operands.
*/
#define op_arithK(L,iop,fop) { \
TValue *v1 = vRB(i); \
TValue *v2 = KC(i); \
op_arith_aux(L, v1, v2, iop, fop); }
/*
** Bitwise operations with constant operand.
*/
#define op_bitwiseK(L,op) { \
TValue *v1 = vRB(i); \
TValue *v2 = KC(i); \
lua_Integer i1; \
lua_Integer i2 = ivalue(v2); \
if (tointegerns(v1, &i1)) { \
pc++; setivalue(s2v(ra), op(i1, i2)); \
}}
/*
** Bitwise operations with register operands.
*/
#define op_bitwise(L,op) { \
TValue *v1 = vRB(i); \
TValue *v2 = vRC(i); \
lua_Integer i1; lua_Integer i2; \
if (tointegerns(v1, &i1) && tointegerns(v2, &i2)) { \
pc++; setivalue(s2v(ra), op(i1, i2)); \
}}
/*
** Order operations with register operands. 'opn' actually works
** for all numbers, but the fast track improves performance for
** integers.
*/
#define op_order(L,opi,opn,other) { \
int cond; \
TValue *rb = vRB(i); \
if (ttisinteger(s2v(ra)) && ttisinteger(rb)) { \
lua_Integer ia = ivalue(s2v(ra)); \
lua_Integer ib = ivalue(rb); \
cond = opi(ia, ib); \
} \
else if (ttisnumber(s2v(ra)) && ttisnumber(rb)) \
cond = opn(s2v(ra), rb); \
else \
Protect(cond = other(L, s2v(ra), rb)); \
docondjump(); }
/*
** Order operations with immediate operand. (Immediate operand is
** always small enough to have an exact representation as a float.)
*/
#define op_orderI(L,opi,opf,inv,tm) { \
int cond; \
int im = GETARG_sB(i); \
if (ttisinteger(s2v(ra))) \
cond = opi(ivalue(s2v(ra)), im); \
else if (ttisfloat(s2v(ra))) { \
lua_Number fa = fltvalue(s2v(ra)); \
lua_Number fim = cast_num(im); \
cond = opf(fa, fim); \
} \
else { \
int isf = GETARG_C(i); \
Protect(cond = luaT_callorderiTM(L, s2v(ra), im, inv, isf, tm)); \
} \
docondjump(); }
/* }================================================================== */
/*
** {==================================================================
** Function 'luaV_execute': main interpreter loop
** ===================================================================
*/
/*
** some macros for common tasks in 'luaV_execute'
*/
#define RA(i) (base+GETARG_A(i))
#define RB(i) (base+GETARG_B(i))
#define vRB(i) s2v(RB(i))
#define KB(i) (k+GETARG_B(i))
#define RC(i) (base+GETARG_C(i))
#define vRC(i) s2v(RC(i))
#define KC(i) (k+GETARG_C(i))
#define RKC(i) ((TESTARG_k(i)) ? k + GETARG_C(i) : s2v(base + GETARG_C(i)))
#define updatetrap(ci) (trap = ci->u.l.trap)
#define updatebase(ci) (base = ci->func + 1)
#define updatestack(ci) { if (trap) { updatebase(ci); ra = RA(i); } }
/*
** Execute a jump instruction. The 'updatetrap' allows signals to stop
** tight loops. (Without it, the local copy of 'trap' could never change.)
*/
#define dojump(ci,i,e) { pc += GETARG_sJ(i) + e; updatetrap(ci); }
/* for test instructions, execute the jump instruction that follows it */
#define donextjump(ci) { Instruction ni = *pc; dojump(ci, ni, 1); }
/*
** do a conditional jump: skip next instruction if 'cond' is not what
** was expected (parameter 'k'), else do next instruction, which must
** be a jump.
*/
#define docondjump() if (cond != GETARG_k(i)) pc++; else donextjump(ci);
/*
** Correct global 'pc'.
*/
#define savepc(L) (ci->u.l.savedpc = pc)
/*
** Whenever code can raise errors, the global 'pc' and the global
** 'top' must be correct to report occasional errors.
*/
#define savestate(L,ci) (savepc(L), L->top = ci->top)
/*
** Protect code that, in general, can raise errors, reallocate the
** stack, and change the hooks.
*/
#define Protect(exp) (savestate(L,ci), (exp), updatetrap(ci))
/* special version that does not change the top */
#define ProtectNT(exp) (savepc(L), (exp), updatetrap(ci))
/*
** Protect code that will finish the loop (returns) or can only raise
** errors. (That is, it will not return to the interpreter main loop
** after changing the stack or hooks.)
*/
#define halfProtect(exp) (savestate(L,ci), (exp))
/* idem, but without changing the stack */
#define halfProtectNT(exp) (savepc(L), (exp))
#define checkGC(L,c) \
{ luaC_condGC(L, L->top = (c), /* limit of live values */ \
updatetrap(ci)); \
luai_threadyield(L); }
/* fetch an instruction and prepare its execution */
#define vmfetch() { \
if (trap) { /* stack reallocation or hooks? */ \
trap = luaG_traceexec(L, pc); /* handle hooks */ \
updatebase(ci); /* correct stack */ \
} \
i = *(pc++); \
ra = RA(i); /* WARNING: any stack reallocation invalidates 'ra' */ \
}
#define vmdispatch(o) switch(o)
#define vmcase(l) case l:
#define vmbreak break
void luaV_execute (lua_State *L, CallInfo *ci) {
LClosure *cl;
TValue *k;
StkId base;
const Instruction *pc;
int trap;
#if LUA_USE_JUMPTABLE
#include "ljumptab.h"
#endif
tailcall:
trap = L->hookmask;
cl = clLvalue(s2v(ci->func));
k = cl->p->k;
pc = ci->u.l.savedpc;
if (trap) {
if (cl->p->is_vararg)
trap = 0; /* hooks will start after VARARGPREP instruction */
else if (pc == cl->p->code) /* first instruction (not resuming)? */
luaD_hookcall(L, ci);
ci->u.l.trap = 1; /* there may be other hooks */
}
base = ci->func + 1;
/* main loop of interpreter */
for (;;) {
Instruction i; /* instruction being executed */
StkId ra; /* instruction's A register */
vmfetch();
lua_assert(base == ci->func + 1);
lua_assert(base <= L->top && L->top < L->stack + L->stacksize);
/* invalidate top for instructions not expecting it */
lua_assert(isIT(i) || (cast_void(L->top = base), 1));
vmdispatch (GET_OPCODE(i)) {
vmcase(OP_MOVE) {
setobjs2s(L, ra, RB(i));
vmbreak;
}
vmcase(OP_LOADI) {
lua_Integer b = GETARG_sBx(i);
setivalue(s2v(ra), b);
vmbreak;
}
vmcase(OP_LOADF) {
int b = GETARG_sBx(i);
setfltvalue(s2v(ra), cast_num(b));
vmbreak;
}
vmcase(OP_LOADK) {
TValue *rb = k + GETARG_Bx(i);
setobj2s(L, ra, rb);
vmbreak;
}
vmcase(OP_LOADKX) {
TValue *rb;
rb = k + GETARG_Ax(*pc); pc++;
setobj2s(L, ra, rb);
vmbreak;
}
vmcase(OP_LOADFALSE) {
setbfvalue(s2v(ra));
vmbreak;
}
vmcase(OP_LFALSESKIP) {
setbfvalue(s2v(ra));
pc++; /* skip next instruction */
vmbreak;
}
vmcase(OP_LOADTRUE) {
setbtvalue(s2v(ra));
vmbreak;
}
vmcase(OP_LOADNIL) {
int b = GETARG_B(i);
do {
setnilvalue(s2v(ra++));
} while (b--);
vmbreak;
}
vmcase(OP_GETUPVAL) {
int b = GETARG_B(i);
setobj2s(L, ra, cl->upvals[b]->v);
vmbreak;
}
vmcase(OP_SETUPVAL) {
UpVal *uv = cl->upvals[GETARG_B(i)];
setobj(L, uv->v, s2v(ra));
luaC_barrier(L, uv, s2v(ra));
vmbreak;
}
vmcase(OP_GETTABUP) {
const TValue *slot;
TValue *upval = cl->upvals[GETARG_B(i)]->v;
TValue *rc = KC(i);
TString *key = tsvalue(rc); /* key must be a string */
if (luaV_fastget(L, upval, key, slot, luaH_getshortstr)) {
setobj2s(L, ra, slot);
}
else
Protect(luaV_finishget(L, upval, rc, ra, slot));
vmbreak;
}
vmcase(OP_GETTABLE) {
const TValue *slot;
TValue *rb = vRB(i);
TValue *rc = vRC(i);
lua_Unsigned n;
if (ttisinteger(rc) /* fast track for integers? */
? (cast_void(n = ivalue(rc)), luaV_fastgeti(L, rb, n, slot))
: luaV_fastget(L, rb, rc, slot, luaH_get)) {
setobj2s(L, ra, slot);
}
else
Protect(luaV_finishget(L, rb, rc, ra, slot));
vmbreak;
}
vmcase(OP_GETI) {
const TValue *slot;
TValue *rb = vRB(i);
int c = GETARG_C(i);
if (luaV_fastgeti(L, rb, c, slot)) {
setobj2s(L, ra, slot);
}
else {
TValue key;
setivalue(&key, c);
Protect(luaV_finishget(L, rb, &key, ra, slot));
}
vmbreak;
}
vmcase(OP_GETFIELD) {
const TValue *slot;
TValue *rb = vRB(i);
TValue *rc = KC(i);
TString *key = tsvalue(rc); /* key must be a string */
if (luaV_fastget(L, rb, key, slot, luaH_getshortstr)) {
setobj2s(L, ra, slot);
}
else
Protect(luaV_finishget(L, rb, rc, ra, slot));
vmbreak;
}
vmcase(OP_SETTABUP) {
const TValue *slot;
TValue *upval = cl->upvals[GETARG_A(i)]->v;
TValue *rb = KB(i);
TValue *rc = RKC(i);
TString *key = tsvalue(rb); /* key must be a string */
if (luaV_fastget(L, upval, key, slot, luaH_getshortstr)) {
luaV_finishfastset(L, upval, slot, rc);
}
else
Protect(luaV_finishset(L, upval, rb, rc, slot));
vmbreak;
}
vmcase(OP_SETTABLE) {
const TValue *slot;
TValue *rb = vRB(i); /* key (table is in 'ra') */
TValue *rc = RKC(i); /* value */
lua_Unsigned n;
if (ttisinteger(rb) /* fast track for integers? */
? (cast_void(n = ivalue(rb)), luaV_fastgeti(L, s2v(ra), n, slot))
: luaV_fastget(L, s2v(ra), rb, slot, luaH_get)) {
luaV_finishfastset(L, s2v(ra), slot, rc);
}
else
Protect(luaV_finishset(L, s2v(ra), rb, rc, slot));
vmbreak;
}
vmcase(OP_SETI) {
const TValue *slot;
int c = GETARG_B(i);
TValue *rc = RKC(i);
if (luaV_fastgeti(L, s2v(ra), c, slot)) {
luaV_finishfastset(L, s2v(ra), slot, rc);
}
else {
TValue key;
setivalue(&key, c);
Protect(luaV_finishset(L, s2v(ra), &key, rc, slot));
}
vmbreak;
}
vmcase(OP_SETFIELD) {
const TValue *slot;
TValue *rb = KB(i);
TValue *rc = RKC(i);
TString *key = tsvalue(rb); /* key must be a string */
if (luaV_fastget(L, s2v(ra), key, slot, luaH_getshortstr)) {
luaV_finishfastset(L, s2v(ra), slot, rc);
}
else
Protect(luaV_finishset(L, s2v(ra), rb, rc, slot));
vmbreak;
}
vmcase(OP_NEWTABLE) {
int b = GETARG_B(i); /* log2(hash size) + 1 */
int c = GETARG_C(i); /* array size */
Table *t;
if (b > 0)
b = 1 << (b - 1); /* size is 2^(b - 1) */
lua_assert((!TESTARG_k(i)) == (GETARG_Ax(*pc) == 0));
if (TESTARG_k(i)) /* non-zero extra argument? */
c += GETARG_Ax(*pc) * (MAXARG_C + 1); /* add it to size */
pc++; /* skip extra argument */
L->top = ra + 1; /* correct top in case of emergency GC */
t = luaH_new(L); /* memory allocation */
sethvalue2s(L, ra, t);
if (b != 0 || c != 0)
luaH_resize(L, t, c, b); /* idem */
checkGC(L, ra + 1);
vmbreak;
}
vmcase(OP_SELF) {
const TValue *slot;
TValue *rb = vRB(i);
TValue *rc = RKC(i);
TString *key = tsvalue(rc); /* key must be a string */
setobj2s(L, ra + 1, rb);
if (luaV_fastget(L, rb, key, slot, luaH_getstr)) {
setobj2s(L, ra, slot);
}
else
Protect(luaV_finishget(L, rb, rc, ra, slot));
vmbreak;
}
vmcase(OP_ADDI) {
op_arithI(L, l_addi, luai_numadd);
vmbreak;
}
vmcase(OP_ADDK) {
op_arithK(L, l_addi, luai_numadd);
vmbreak;
}
vmcase(OP_SUBK) {
op_arithK(L, l_subi, luai_numsub);
vmbreak;
}
vmcase(OP_MULK) {
op_arithK(L, l_muli, luai_nummul);
vmbreak;
}
vmcase(OP_MODK) {
op_arithK(L, luaV_mod, luaV_modf);
vmbreak;
}
vmcase(OP_POWK) {
op_arithfK(L, luai_numpow);
vmbreak;
}
vmcase(OP_DIVK) {
op_arithfK(L, luai_numdiv);
vmbreak;
}
vmcase(OP_IDIVK) {
op_arithK(L, luaV_idiv, luai_numidiv);
vmbreak;
}
vmcase(OP_BANDK) {
op_bitwiseK(L, l_band);
vmbreak;
}
vmcase(OP_BORK) {
op_bitwiseK(L, l_bor);
vmbreak;
}
vmcase(OP_BXORK) {
op_bitwiseK(L, l_bxor);
vmbreak;
}
vmcase(OP_SHRI) {
TValue *rb = vRB(i);
int ic = GETARG_sC(i);
lua_Integer ib;
if (tointegerns(rb, &ib)) {
pc++; setivalue(s2v(ra), luaV_shiftl(ib, -ic));
}
vmbreak;
}
vmcase(OP_SHLI) {
TValue *rb = vRB(i);
int ic = GETARG_sC(i);
lua_Integer ib;
if (tointegerns(rb, &ib)) {
pc++; setivalue(s2v(ra), luaV_shiftl(ic, ib));
}
vmbreak;
}
vmcase(OP_ADD) {
op_arith(L, l_addi, luai_numadd);
vmbreak;
}
vmcase(OP_SUB) {
op_arith(L, l_subi, luai_numsub);
vmbreak;
}
vmcase(OP_MUL) {
op_arith(L, l_muli, luai_nummul);
vmbreak;
}
vmcase(OP_MOD) {
op_arith(L, luaV_mod, luaV_modf);
vmbreak;
}
vmcase(OP_POW) {
op_arithf(L, luai_numpow);
vmbreak;
}
vmcase(OP_DIV) { /* float division (always with floats) */
op_arithf(L, luai_numdiv);
vmbreak;
}
vmcase(OP_IDIV) { /* floor division */
op_arith(L, luaV_idiv, luai_numidiv);
vmbreak;
}
vmcase(OP_BAND) {
op_bitwise(L, l_band);
vmbreak;
}
vmcase(OP_BOR) {
op_bitwise(L, l_bor);
vmbreak;
}
vmcase(OP_BXOR) {
op_bitwise(L, l_bxor);
vmbreak;
}
vmcase(OP_SHR) {
op_bitwise(L, luaV_shiftr);
vmbreak;
}
vmcase(OP_SHL) {
op_bitwise(L, luaV_shiftl);
vmbreak;
}
vmcase(OP_MMBIN) {
Instruction pi = *(pc - 2); /* original arith. expression */
TValue *rb = vRB(i);
TMS tm = (TMS)GETARG_C(i);
StkId result = RA(pi);
lua_assert(OP_ADD <= GET_OPCODE(pi) && GET_OPCODE(pi) <= OP_SHR);
Protect(luaT_trybinTM(L, s2v(ra), rb, result, tm));
vmbreak;
}
vmcase(OP_MMBINI) {
Instruction pi = *(pc - 2); /* original arith. expression */
int imm = GETARG_sB(i);
TMS tm = (TMS)GETARG_C(i);
int flip = GETARG_k(i);
StkId result = RA(pi);
Protect(luaT_trybiniTM(L, s2v(ra), imm, flip, result, tm));
vmbreak;
}
vmcase(OP_MMBINK) {
Instruction pi = *(pc - 2); /* original arith. expression */
TValue *imm = KB(i);
TMS tm = (TMS)GETARG_C(i);
int flip = GETARG_k(i);
StkId result = RA(pi);
Protect(luaT_trybinassocTM(L, s2v(ra), imm, flip, result, tm));
vmbreak;
}
vmcase(OP_UNM) {
TValue *rb = vRB(i);
lua_Number nb;
if (ttisinteger(rb)) {
lua_Integer ib = ivalue(rb);
setivalue(s2v(ra), intop(-, 0, ib));
}
else if (tonumberns(rb, nb)) {
setfltvalue(s2v(ra), luai_numunm(L, nb));
}
else
Protect(luaT_trybinTM(L, rb, rb, ra, TM_UNM));
vmbreak;
}
vmcase(OP_BNOT) {
TValue *rb = vRB(i);
lua_Integer ib;
if (tointegerns(rb, &ib)) {
setivalue(s2v(ra), intop(^, ~l_castS2U(0), ib));
}
else
Protect(luaT_trybinTM(L, rb, rb, ra, TM_BNOT));
vmbreak;
}
vmcase(OP_NOT) {
TValue *rb = vRB(i);
if (l_isfalse(rb))
setbtvalue(s2v(ra));
else
setbfvalue(s2v(ra));
vmbreak;
}
vmcase(OP_LEN) {
Protect(luaV_objlen(L, ra, vRB(i)));
vmbreak;
}
vmcase(OP_CONCAT) {
int n = GETARG_B(i); /* number of elements to concatenate */
L->top = ra + n; /* mark the end of concat operands */
ProtectNT(luaV_concat(L, n));
checkGC(L, L->top); /* 'luaV_concat' ensures correct top */
vmbreak;
}
vmcase(OP_CLOSE) {
Protect(luaF_close(L, ra, LUA_OK));
vmbreak;
}
vmcase(OP_TBC) {
/* create new to-be-closed upvalue */
halfProtect(luaF_newtbcupval(L, ra));
vmbreak;
}
vmcase(OP_JMP) {
dojump(ci, i, 0);
vmbreak;
}
vmcase(OP_EQ) {
int cond;
TValue *rb = vRB(i);
Protect(cond = luaV_equalobj(L, s2v(ra), rb));
docondjump();
vmbreak;
}
vmcase(OP_LT) {
op_order(L, l_lti, LTnum, lessthanothers);
vmbreak;
}
vmcase(OP_LE) {
op_order(L, l_lei, LEnum, lessequalothers);
vmbreak;
}
vmcase(OP_EQK) {
TValue *rb = KB(i);
/* basic types do not use '__eq'; we can use raw equality */
int cond = luaV_rawequalobj(s2v(ra), rb);
docondjump();
vmbreak;
}
vmcase(OP_EQI) {
int cond;
int im = GETARG_sB(i);
if (ttisinteger(s2v(ra)))
cond = (ivalue(s2v(ra)) == im);
else if (ttisfloat(s2v(ra)))
cond = luai_numeq(fltvalue(s2v(ra)), cast_num(im));
else
cond = 0; /* other types cannot be equal to a number */
docondjump();
vmbreak;
}
vmcase(OP_LTI) {
op_orderI(L, l_lti, luai_numlt, 0, TM_LT);
vmbreak;
}
vmcase(OP_LEI) {
op_orderI(L, l_lei, luai_numle, 0, TM_LE);
vmbreak;
}
vmcase(OP_GTI) {
op_orderI(L, l_gti, luai_numgt, 1, TM_LT);
vmbreak;
}
vmcase(OP_GEI) {
op_orderI(L, l_gei, luai_numge, 1, TM_LE);
vmbreak;
}
vmcase(OP_TEST) {
int cond = !l_isfalse(s2v(ra));
docondjump();
vmbreak;
}
vmcase(OP_TESTSET) {
TValue *rb = vRB(i);
if (l_isfalse(rb) == GETARG_k(i))
pc++;
else {
setobj2s(L, ra, rb);
donextjump(ci);
}
vmbreak;
}
vmcase(OP_CALL) {
int b = GETARG_B(i);
int nresults = GETARG_C(i) - 1;
if (b != 0) /* fixed number of arguments? */
L->top = ra + b; /* top signals number of arguments */
/* else previous instruction set top */
ProtectNT(luaD_call(L, ra, nresults));
vmbreak;
}
vmcase(OP_TAILCALL) {
int b = GETARG_B(i); /* number of arguments + 1 (function) */
int nparams1 = GETARG_C(i);
        /* delta is virtual 'func' - real 'func' (vararg functions) */
int delta = (nparams1) ? ci->u.l.nextraargs + nparams1 : 0;
if (b != 0)
L->top = ra + b;
else /* previous instruction set top */
b = cast_int(L->top - ra);
savepc(ci); /* some calls here can raise errors */
if (TESTARG_k(i)) {
/* close upvalues from current call; the compiler ensures
that there are no to-be-closed variables here, so this
call cannot change the stack */
luaF_close(L, base, NOCLOSINGMETH);
lua_assert(base == ci->func + 1);
}
while (!ttisfunction(s2v(ra))) { /* not a function? */
luaD_tryfuncTM(L, ra); /* try '__call' metamethod */
b++; /* there is now one extra argument */
checkstackGCp(L, 1, ra);
}
if (!ttisLclosure(s2v(ra))) { /* C function? */
luaD_call(L, ra, LUA_MULTRET); /* call it */
updatetrap(ci);
updatestack(ci); /* stack may have been relocated */
ci->func -= delta;
luaD_poscall(L, ci, cast_int(L->top - ra));
return;
}
ci->func -= delta;
luaD_pretailcall(L, ci, ra, b); /* prepare call frame */
goto tailcall;
}
vmcase(OP_RETURN) {
int n = GETARG_B(i) - 1; /* number of results */
int nparams1 = GETARG_C(i);
if (n < 0) /* not fixed? */
n = cast_int(L->top - ra); /* get what is available */
savepc(ci);
if (TESTARG_k(i)) { /* may there be open upvalues? */
if (L->top < ci->top)
L->top = ci->top;
luaF_close(L, base, LUA_OK);
updatetrap(ci);
updatestack(ci);
}
if (nparams1) /* vararg function? */
ci->func -= ci->u.l.nextraargs + nparams1;
L->top = ra + n; /* set call for 'luaD_poscall' */
luaD_poscall(L, ci, n);
return;
}
vmcase(OP_RETURN0) {
if (L->hookmask) {
L->top = ra;
halfProtectNT(luaD_poscall(L, ci, 0)); /* no hurry... */
}
else { /* do the 'poscall' here */
int nres = ci->nresults;
L->ci = ci->previous; /* back to caller */
L->top = base - 1;
while (nres-- > 0)
setnilvalue(s2v(L->top++)); /* all results are nil */
}
return;
}
vmcase(OP_RETURN1) {
if (L->hookmask) {
L->top = ra + 1;
halfProtectNT(luaD_poscall(L, ci, 1)); /* no hurry... */
}
else { /* do the 'poscall' here */
int nres = ci->nresults;
L->ci = ci->previous; /* back to caller */
if (nres == 0)
L->top = base - 1; /* asked for no results */
else {
setobjs2s(L, base - 1, ra); /* at least this result */
L->top = base;
while (--nres > 0) /* complete missing results */
setnilvalue(s2v(L->top++));
}
}
return;
}
vmcase(OP_FORLOOP) {
if (ttisinteger(s2v(ra + 2))) { /* integer loop? */
lua_Unsigned count = l_castS2U(ivalue(s2v(ra + 1)));
if (count > 0) { /* still more iterations? */
lua_Integer step = ivalue(s2v(ra + 2));
lua_Integer idx = ivalue(s2v(ra)); /* internal index */
chgivalue(s2v(ra + 1), count - 1); /* update counter */
idx = intop(+, idx, step); /* add step to index */
chgivalue(s2v(ra), idx); /* update internal index */
setivalue(s2v(ra + 3), idx); /* and control variable */
pc -= GETARG_Bx(i); /* jump back */
}
}
else if (floatforloop(ra)) /* float loop */
pc -= GETARG_Bx(i); /* jump back */
updatetrap(ci); /* allows a signal to break the loop */
vmbreak;
}
vmcase(OP_FORPREP) {
savestate(L, ci); /* in case of errors */
if (forprep(L, ra))
pc += GETARG_Bx(i) + 1; /* skip the loop */
vmbreak;
}
vmcase(OP_TFORPREP) {
/* create to-be-closed upvalue (if needed) */
halfProtect(luaF_newtbcupval(L, ra + 3));
pc += GETARG_Bx(i);
i = *(pc++); /* go to next instruction */
lua_assert(GET_OPCODE(i) == OP_TFORCALL && ra == RA(i));
goto l_tforcall;
}
vmcase(OP_TFORCALL) {
l_tforcall:
/* 'ra' has the iterator function, 'ra + 1' has the state,
'ra + 2' has the control variable, and 'ra + 3' has the
to-be-closed variable. The call will use the stack after
these values (starting at 'ra + 4')
*/
/* push function, state, and control variable */
memcpy(ra + 4, ra, 3 * sizeof(*ra));
L->top = ra + 4 + 3;
ProtectNT(luaD_call(L, ra + 4, GETARG_C(i))); /* do the call */
updatestack(ci); /* stack may have changed */
i = *(pc++); /* go to next instruction */
lua_assert(GET_OPCODE(i) == OP_TFORLOOP && ra == RA(i));
goto l_tforloop;
}
vmcase(OP_TFORLOOP) {
l_tforloop:
if (!ttisnil(s2v(ra + 4))) { /* continue loop? */
setobjs2s(L, ra + 2, ra + 4); /* save control variable */
pc -= GETARG_Bx(i); /* jump back */
}
vmbreak;
}
vmcase(OP_SETLIST) {
int n = GETARG_B(i);
unsigned int last = GETARG_C(i);
Table *h = hvalue(s2v(ra));
if (n == 0)
n = cast_int(L->top - ra) - 1; /* get up to the top */
else
L->top = ci->top; /* correct top in case of emergency GC */
last += n;
if (TESTARG_k(i)) {
last += GETARG_Ax(*pc) * (MAXARG_C + 1);
pc++;
}
if (last > luaH_realasize(h)) /* needs more space? */
luaH_resizearray(L, h, last); /* preallocate it at once */
for (; n > 0; n--) {
TValue *val = s2v(ra + n);
setobj2t(L, &h->array[last - 1], val);
last--;
luaC_barrierback(L, obj2gco(h), val);
}
vmbreak;
}
vmcase(OP_CLOSURE) {
Proto *p = cl->p->p[GETARG_Bx(i)];
halfProtect(pushclosure(L, p, cl->upvals, base, ra));
checkGC(L, ra + 1);
vmbreak;
}
vmcase(OP_VARARG) {
int n = GETARG_C(i) - 1; /* required results */
Protect(luaT_getvarargs(L, ci, ra, n));
vmbreak;
}
vmcase(OP_VARARGPREP) {
luaT_adjustvarargs(L, GETARG_A(i), ci, cl->p);
updatetrap(ci);
if (trap) {
luaD_hookcall(L, ci);
L->oldpc = pc + 1; /* next opcode will be seen as a "new" line */
}
updatebase(ci); /* function has new base after adjustment */
vmbreak;
}
vmcase(OP_EXTRAARG) {
lua_assert(0);
vmbreak;
}
}
}
}
/* }================================================================== */
| ./CrossVul/dataset_final_sorted/CWE-416/c/good_4231_3 |
crossvul-cpp_data_good_1181_1 | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% JJJJJ PPPP EEEEE GGGG %
% J P P E G %
% J PPPP EEE G GG %
% J J P E G G %
% JJJ P EEEEE GGG %
% %
% %
% Read/Write JPEG Image Format %
% %
% Software Design %
% John Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2013 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% This software is based in part on the work of the Independent JPEG Group.
% See ftp://ftp.uu.net/graphics/jpeg/jpegsrc.v6b.tar.gz for copyright and
% licensing restrictions. Blob support contributed by Glenn Randers-Pehrson.
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/color.h"
#include "MagickCore/colormap-private.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/module.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/option-private.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/profile.h"
#include "MagickCore/property.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/static.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/token.h"
#include "MagickCore/utility.h"
#include "MagickCore/xml-tree.h"
#include "MagickCore/xml-tree-private.h"
#include <setjmp.h>
#if defined(MAGICKCORE_JPEG_DELEGATE)
#define JPEG_INTERNAL_OPTIONS
#if defined(__MINGW32__)
# define XMD_H 1 /* Avoid conflicting typedef for INT32 */
#endif
#undef HAVE_STDLIB_H
#include "jpeglib.h"
#include "jerror.h"
#endif
/*
Define declarations.
*/
#define ICC_MARKER (JPEG_APP0+2)
#define ICC_PROFILE "ICC_PROFILE"
#define IPTC_MARKER (JPEG_APP0+13)
#define XML_MARKER (JPEG_APP0+1)
#define MaxJPEGScans 1024
/*
Typedef declarations.
*/
#if defined(MAGICKCORE_JPEG_DELEGATE)
typedef struct _DestinationManager
{
struct jpeg_destination_mgr
manager;
Image
*image;
JOCTET
*buffer;
} DestinationManager;
typedef struct _ErrorManager
{
ExceptionInfo
*exception;
Image
*image;
MagickBooleanType
finished;
StringInfo
*profile;
jmp_buf
error_recovery;
} ErrorManager;
typedef struct _SourceManager
{
struct jpeg_source_mgr
manager;
Image
*image;
JOCTET
*buffer;
boolean
start_of_blob;
} SourceManager;
#endif
typedef struct _QuantizationTable
{
char
*slot,
*description;
size_t
width,
height;
double
divisor;
unsigned int
*levels;
} QuantizationTable;
/*
Const declarations.
*/
static const char
xmp_namespace[] = "http://ns.adobe.com/xap/1.0/ ";
#define XmpNamespaceExtent 28
/*
Forward declarations.
*/
#if defined(MAGICKCORE_JPEG_DELEGATE)
static MagickBooleanType
WriteJPEGImage(const ImageInfo *,Image *,ExceptionInfo *);
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s J P E G %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsJPEG() returns MagickTrue if the image format type, identified by the
% magick string, is JPEG.
%
% The format of the IsJPEG method is:
%
% MagickBooleanType IsJPEG(const unsigned char *magick,const size_t length)
%
% A description of each parameter follows:
%
% o magick: compare image format pattern against these bytes.
%
% o length: Specifies the length of the magick string.
%
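%  A minimal usage sketch (the buffer contents here are hypothetical):
%
%    unsigned char magick[3] = { 0xff, 0xd8, 0xff };
%
%    if (IsJPEG(magick,sizeof(magick)) != MagickFalse)
%      { /* the bytes carry the JPEG SOI signature */ }
%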
*/
static MagickBooleanType IsJPEG(const unsigned char *magick,const size_t length)
{
if (length < 3)
return(MagickFalse);
if (memcmp(magick,"\377\330\377",3) == 0)
return(MagickTrue);
return(MagickFalse);
}
#if defined(MAGICKCORE_JPEG_DELEGATE)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d J P E G I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadJPEGImage() reads a JPEG image file and returns it. It allocates
% the memory necessary for the new Image structure and returns a pointer to
% the new image.
%
% The format of the ReadJPEGImage method is:
%
% Image *ReadJPEGImage(const ImageInfo *image_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o exception: return any errors or warnings in this structure.
%
*/
static boolean FillInputBuffer(j_decompress_ptr cinfo)
{
SourceManager
*source;
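  /*
    Refill the libjpeg source buffer from the image blob.  An empty blob is
    a hard error; at a premature end of data, fake an EOI marker so libjpeg
    can salvage the truncated image.
  */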
source=(SourceManager *) cinfo->src;
source->manager.bytes_in_buffer=(size_t) ReadBlob(source->image,
MagickMinBufferExtent,source->buffer);
if (source->manager.bytes_in_buffer == 0)
{
if (source->start_of_blob != FALSE)
ERREXIT(cinfo,JERR_INPUT_EMPTY);
WARNMS(cinfo,JWRN_JPEG_EOF);
source->buffer[0]=(JOCTET) 0xff;
source->buffer[1]=(JOCTET) JPEG_EOI;
source->manager.bytes_in_buffer=2;
}
source->manager.next_input_byte=source->buffer;
source->start_of_blob=FALSE;
return(TRUE);
}
static int GetCharacter(j_decompress_ptr jpeg_info)
{
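  /*
    Read one byte from the JPEG data stream, refilling the source buffer on
    demand; return EOF if the fill handler flagged a premature end of data
    (JWRN_JPEG_EOF).
  */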
if (jpeg_info->src->bytes_in_buffer == 0)
{
(void) (*jpeg_info->src->fill_input_buffer)(jpeg_info);
if (jpeg_info->err->msg_code == JWRN_JPEG_EOF)
return EOF;
}
jpeg_info->src->bytes_in_buffer--;
return((int) GETJOCTET(*jpeg_info->src->next_input_byte++));
}
static void InitializeSource(j_decompress_ptr cinfo)
{
SourceManager
*source;
source=(SourceManager *) cinfo->src;
source->start_of_blob=TRUE;
}
static MagickBooleanType IsITUFaxImage(const Image *image)
{
const StringInfo
*profile;
const unsigned char
*datum;
profile=GetImageProfile(image,"8bim");
if (profile == (const StringInfo *) NULL)
return(MagickFalse);
if (GetStringInfoLength(profile) < 5)
return(MagickFalse);
datum=GetStringInfoDatum(profile);
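  /*
    ITU fax images are tagged with the "G3FAX" signature in the first five
    bytes of the 8BIM profile.
  */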
if ((datum[0] == 0x47) && (datum[1] == 0x33) && (datum[2] == 0x46) &&
(datum[3] == 0x41) && (datum[4] == 0x58))
return(MagickTrue);
return(MagickFalse);
}
static void JPEGErrorHandler(j_common_ptr jpeg_info)
{
char
message[JMSG_LENGTH_MAX];
ErrorManager
*error_manager;
ExceptionInfo
*exception;
Image
*image;
*message='\0';
error_manager=(ErrorManager *) jpeg_info->client_data;
image=error_manager->image;
exception=error_manager->exception;
(jpeg_info->err->format_message)(jpeg_info,message);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"[%s] JPEG Trace: \"%s\"",image->filename,message);
if (error_manager->finished != MagickFalse)
(void) ThrowMagickException(exception,GetMagickModule(),CorruptImageWarning,
(char *) message,"`%s'",image->filename);
else
(void) ThrowMagickException(exception,GetMagickModule(),CorruptImageError,
(char *) message,"`%s'",image->filename);
longjmp(error_manager->error_recovery,1);
}
static void JPEGProgressHandler(j_common_ptr jpeg_info)
{
ErrorManager
*error_manager;
ExceptionInfo
*exception;
Image
*image;
error_manager=(ErrorManager *) jpeg_info->client_data;
image=error_manager->image;
exception=error_manager->exception;
if (jpeg_info->is_decompressor == 0)
return;
if (((j_decompress_ptr) jpeg_info)->input_scan_number < MaxJPEGScans)
return;
(void) ThrowMagickException(exception,GetMagickModule(),CorruptImageError,
"too many scans","`%s'",image->filename);
longjmp(error_manager->error_recovery,1);
}
static MagickBooleanType JPEGWarningHandler(j_common_ptr jpeg_info,int level)
{
#define JPEGExcessiveWarnings 1000
char
message[JMSG_LENGTH_MAX];
ErrorManager
*error_manager;
ExceptionInfo
*exception;
Image
*image;
*message='\0';
error_manager=(ErrorManager *) jpeg_info->client_data;
exception=error_manager->exception;
image=error_manager->image;
if (level < 0)
{
/*
Process warning message.
*/
(jpeg_info->err->format_message)(jpeg_info,message);
if (jpeg_info->err->num_warnings++ < JPEGExcessiveWarnings)
ThrowBinaryException(CorruptImageWarning,(char *) message,
image->filename);
}
else
if ((image->debug != MagickFalse) &&
(level >= jpeg_info->err->trace_level))
{
/*
Process trace message.
*/
(jpeg_info->err->format_message)(jpeg_info,message);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"[%s] JPEG Trace: \"%s\"",image->filename,message);
}
return(MagickTrue);
}
static boolean ReadComment(j_decompress_ptr jpeg_info)
{
ErrorManager
*error_manager;
ExceptionInfo
*exception;
Image
*image;
register unsigned char
*p;
register ssize_t
i;
size_t
length;
StringInfo
*comment;
/*
Determine length of comment.
*/
error_manager=(ErrorManager *) jpeg_info->client_data;
exception=error_manager->exception;
image=error_manager->image;
length=(size_t) ((size_t) GetCharacter(jpeg_info) << 8);
length+=GetCharacter(jpeg_info);
if (length <= 2)
return(TRUE);
length-=2;
comment=BlobToStringInfo((const void *) NULL,length);
if (comment == (StringInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(FALSE);
}
/*
Read comment.
*/
error_manager->profile=comment;
p=GetStringInfoDatum(comment);
for (i=0; i < (ssize_t) length; i++)
{
int
c;
c=GetCharacter(jpeg_info);
if (c == EOF)
break;
*p++=(unsigned char) c;
}
*p='\0';
error_manager->profile=NULL;
if (i != (ssize_t) length)
{
comment=DestroyStringInfo(comment);
(void) ThrowMagickException(exception,GetMagickModule(),
CorruptImageError,"InsufficientImageDataInFile","`%s'",
image->filename);
return(FALSE);
}
p=GetStringInfoDatum(comment);
(void) SetImageProperty(image,"comment",(const char *) p,exception);
comment=DestroyStringInfo(comment);
return(TRUE);
}
static boolean ReadICCProfile(j_decompress_ptr jpeg_info)
{
char
magick[12];
ErrorManager
*error_manager;
ExceptionInfo
*exception;
Image
*image;
MagickBooleanType
status;
register ssize_t
i;
register unsigned char
*p;
size_t
length;
StringInfo
*icc_profile,
*profile;
/*
Read color profile.
*/
length=(size_t) ((size_t) GetCharacter(jpeg_info) << 8);
length+=(size_t) GetCharacter(jpeg_info);
length-=2;
if (length <= 14)
{
while (length-- > 0)
if (GetCharacter(jpeg_info) == EOF)
break;
return(TRUE);
}
for (i=0; i < 12; i++)
magick[i]=(char) GetCharacter(jpeg_info);
if (LocaleCompare(magick,ICC_PROFILE) != 0)
{
/*
        Not an ICC profile; return.
*/
for (i=0; i < (ssize_t) (length-12); i++)
if (GetCharacter(jpeg_info) == EOF)
break;
return(TRUE);
}
(void) GetCharacter(jpeg_info); /* id */
(void) GetCharacter(jpeg_info); /* markers */
length-=14;
error_manager=(ErrorManager *) jpeg_info->client_data;
exception=error_manager->exception;
image=error_manager->image;
profile=BlobToStringInfo((const void *) NULL,length);
if (profile == (StringInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(FALSE);
}
error_manager->profile=profile;
p=GetStringInfoDatum(profile);
for (i=0; i < (ssize_t) length; i++)
{
int
c;
c=GetCharacter(jpeg_info);
if (c == EOF)
break;
*p++=(unsigned char) c;
}
error_manager->profile=NULL;
if (i != (ssize_t) length)
{
profile=DestroyStringInfo(profile);
(void) ThrowMagickException(exception,GetMagickModule(),
CorruptImageError,"InsufficientImageDataInFile","`%s'",
image->filename);
return(FALSE);
}
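  /*
    ICC profiles too large for a single APP2 marker are split into chunks;
    append this chunk to any profile data already read.
  */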
icc_profile=(StringInfo *) GetImageProfile(image,"icc");
if (icc_profile != (StringInfo *) NULL)
{
ConcatenateStringInfo(icc_profile,profile);
profile=DestroyStringInfo(profile);
}
else
{
status=SetImageProfile(image,"icc",profile,exception);
profile=DestroyStringInfo(profile);
if (status == MagickFalse)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(FALSE);
}
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Profile: ICC, %.20g bytes",(double) length);
return(TRUE);
}
static boolean ReadIPTCProfile(j_decompress_ptr jpeg_info)
{
char
magick[MagickPathExtent];
ErrorManager
*error_manager;
ExceptionInfo
*exception;
Image
*image;
MagickBooleanType
status;
register ssize_t
i;
register unsigned char
*p;
size_t
length;
StringInfo
*iptc_profile,
*profile;
/*
Determine length of binary data stored here.
*/
length=(size_t) ((size_t) GetCharacter(jpeg_info) << 8);
length+=(size_t) GetCharacter(jpeg_info);
length-=2;
if (length <= 14)
{
while (length-- > 0)
if (GetCharacter(jpeg_info) == EOF)
break;
return(TRUE);
}
/*
Validate that this was written as a Photoshop resource format slug.
*/
for (i=0; i < 10; i++)
magick[i]=(char) GetCharacter(jpeg_info);
magick[10]='\0';
length-=10;
if (length <= 10)
return(TRUE);
if (LocaleCompare(magick,"Photoshop ") != 0)
{
/*
        Not an IPTC profile; return.
*/
for (i=0; i < (ssize_t) length; i++)
if (GetCharacter(jpeg_info) == EOF)
break;
return(TRUE);
}
/*
Remove the version number.
*/
for (i=0; i < 4; i++)
if (GetCharacter(jpeg_info) == EOF)
break;
if (length <= 11)
return(TRUE);
length-=4;
error_manager=(ErrorManager *) jpeg_info->client_data;
exception=error_manager->exception;
image=error_manager->image;
profile=BlobToStringInfo((const void *) NULL,length);
if (profile == (StringInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(FALSE);
}
error_manager->profile=profile;
p=GetStringInfoDatum(profile);
for (i=0; i < (ssize_t) length; i++)
{
int
c;
c=GetCharacter(jpeg_info);
if (c == EOF)
break;
*p++=(unsigned char) c;
}
error_manager->profile=NULL;
if (i != (ssize_t) length)
{
profile=DestroyStringInfo(profile);
(void) ThrowMagickException(exception,GetMagickModule(),
CorruptImageError,"InsufficientImageDataInFile","`%s'",
image->filename);
return(FALSE);
}
/*
The IPTC profile is actually an 8bim.
*/
iptc_profile=(StringInfo *) GetImageProfile(image,"8bim");
if (iptc_profile != (StringInfo *) NULL)
{
ConcatenateStringInfo(iptc_profile,profile);
profile=DestroyStringInfo(profile);
}
else
{
status=SetImageProfile(image,"8bim",profile,exception);
profile=DestroyStringInfo(profile);
if (status == MagickFalse)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(FALSE);
}
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Profile: iptc, %.20g bytes",(double) length);
return(TRUE);
}
static boolean ReadProfile(j_decompress_ptr jpeg_info)
{
char
name[MagickPathExtent];
const StringInfo
*previous_profile;
ErrorManager
*error_manager;
ExceptionInfo
*exception;
Image
*image;
int
marker;
MagickBooleanType
status;
register ssize_t
i;
register unsigned char
*p;
size_t
length;
StringInfo
*profile;
/*
Read generic profile.
*/
length=(size_t) ((size_t) GetCharacter(jpeg_info) << 8);
length+=(size_t) GetCharacter(jpeg_info);
if (length <= 2)
return(TRUE);
length-=2;
marker=jpeg_info->unread_marker-JPEG_APP0;
(void) FormatLocaleString(name,MagickPathExtent,"APP%d",marker);
error_manager=(ErrorManager *) jpeg_info->client_data;
exception=error_manager->exception;
image=error_manager->image;
profile=BlobToStringInfo((const void *) NULL,length);
if (profile == (StringInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(FALSE);
}
error_manager->profile=profile;
p=GetStringInfoDatum(profile);
for (i=0; i < (ssize_t) length; i++)
{
int
c;
c=GetCharacter(jpeg_info);
if (c == EOF)
break;
*p++=(unsigned char) c;
}
error_manager->profile=NULL;
if (i != (ssize_t) length)
{
profile=DestroyStringInfo(profile);
(void) ThrowMagickException(exception,GetMagickModule(),
CorruptImageError,"InsufficientImageDataInFile","`%s'",
image->filename);
return(FALSE);
}
if (marker == 1)
{
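      /*
        APP1 markers carry either an Exif or an XMP payload; identify which
        by its leading signature.
      */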
p=GetStringInfoDatum(profile);
if ((length > 4) && (LocaleNCompare((char *) p,"exif",4) == 0))
(void) CopyMagickString(name,"exif",MagickPathExtent);
else if ((length > XmpNamespaceExtent) &&
(LocaleNCompare((char *) p,xmp_namespace,XmpNamespaceExtent-1) == 0))
{
ssize_t
j;
/*
Extract namespace from XMP profile.
*/
p=GetStringInfoDatum(profile)+XmpNamespaceExtent;
for (j=XmpNamespaceExtent; j < (ssize_t) GetStringInfoLength(profile); j++)
{
if (*p == '\0')
break;
p++;
}
if (j < (ssize_t) GetStringInfoLength(profile))
(void) DestroyStringInfo(SplitStringInfo(profile,(size_t) (j+1)));
(void) CopyMagickString(name,"xmp",MagickPathExtent);
}
}
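  /*
    If a different profile with this name was already read, prepend the
    earlier data to this chunk so the pieces stay in marker order.
  */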
previous_profile=GetImageProfile(image,name);
if ((previous_profile != (const StringInfo *) NULL) &&
(CompareStringInfo(previous_profile,profile) != 0))
{
size_t
profile_length;
profile_length=GetStringInfoLength(profile);
SetStringInfoLength(profile,GetStringInfoLength(profile)+
GetStringInfoLength(previous_profile));
(void) memmove(GetStringInfoDatum(profile)+
GetStringInfoLength(previous_profile),GetStringInfoDatum(profile),
profile_length);
(void) memcpy(GetStringInfoDatum(profile),
GetStringInfoDatum(previous_profile),
GetStringInfoLength(previous_profile));
}
status=SetImageProfile(image,name,profile,exception);
profile=DestroyStringInfo(profile);
if (status == MagickFalse)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(FALSE);
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Profile: %s, %.20g bytes",name,(double) length);
return(TRUE);
}
static void SkipInputData(j_decompress_ptr cinfo,long number_bytes)
{
SourceManager
*source;
if (number_bytes <= 0)
return;
source=(SourceManager *) cinfo->src;
while (number_bytes > (long) source->manager.bytes_in_buffer)
{
number_bytes-=(long) source->manager.bytes_in_buffer;
(void) FillInputBuffer(cinfo);
}
source->manager.next_input_byte+=number_bytes;
source->manager.bytes_in_buffer-=number_bytes;
}
static void TerminateSource(j_decompress_ptr cinfo)
{
(void) cinfo;
}
static void JPEGSourceManager(j_decompress_ptr cinfo,Image *image)
{
SourceManager
*source;
cinfo->src=(struct jpeg_source_mgr *) (*cinfo->mem->alloc_small)
((j_common_ptr) cinfo,JPOOL_IMAGE,sizeof(SourceManager));
source=(SourceManager *) cinfo->src;
source->buffer=(JOCTET *) (*cinfo->mem->alloc_small)
((j_common_ptr) cinfo,JPOOL_IMAGE,MagickMinBufferExtent*sizeof(JOCTET));
source=(SourceManager *) cinfo->src;
source->manager.init_source=InitializeSource;
source->manager.fill_input_buffer=FillInputBuffer;
source->manager.skip_input_data=SkipInputData;
source->manager.resync_to_restart=jpeg_resync_to_restart;
source->manager.term_source=TerminateSource;
source->manager.bytes_in_buffer=0;
source->manager.next_input_byte=NULL;
source->image=image;
}
static void JPEGSetImageQuality(struct jpeg_decompress_struct *jpeg_info,
Image *image)
{
image->quality=UndefinedCompressionQuality;
#if defined(D_PROGRESSIVE_SUPPORTED)
if (image->compression == LosslessJPEGCompression)
{
image->quality=100;
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Quality: 100 (lossless)");
}
else
#endif
{
ssize_t
j,
qvalue,
sum;
register ssize_t
i;
/*
Determine the JPEG compression quality from the quantization tables.
*/
sum=0;
for (i=0; i < NUM_QUANT_TBLS; i++)
{
if (jpeg_info->quant_tbl_ptrs[i] != NULL)
for (j=0; j < DCTSIZE2; j++)
sum+=jpeg_info->quant_tbl_ptrs[i]->quantval[j];
}
if ((jpeg_info->quant_tbl_ptrs[0] != NULL) &&
(jpeg_info->quant_tbl_ptrs[1] != NULL))
{
ssize_t
hash[101] =
{
1020, 1015, 932, 848, 780, 735, 702, 679, 660, 645,
632, 623, 613, 607, 600, 594, 589, 585, 581, 571,
555, 542, 529, 514, 494, 474, 457, 439, 424, 410,
397, 386, 373, 364, 351, 341, 334, 324, 317, 309,
299, 294, 287, 279, 274, 267, 262, 257, 251, 247,
243, 237, 232, 227, 222, 217, 213, 207, 202, 198,
192, 188, 183, 177, 173, 168, 163, 157, 153, 148,
143, 139, 132, 128, 125, 119, 115, 108, 104, 99,
94, 90, 84, 79, 74, 70, 64, 59, 55, 49,
45, 40, 34, 30, 25, 20, 15, 11, 6, 4,
0
},
sums[101] =
{
32640, 32635, 32266, 31495, 30665, 29804, 29146, 28599, 28104,
27670, 27225, 26725, 26210, 25716, 25240, 24789, 24373, 23946,
23572, 22846, 21801, 20842, 19949, 19121, 18386, 17651, 16998,
16349, 15800, 15247, 14783, 14321, 13859, 13535, 13081, 12702,
12423, 12056, 11779, 11513, 11135, 10955, 10676, 10392, 10208,
9928, 9747, 9564, 9369, 9193, 9017, 8822, 8639, 8458,
8270, 8084, 7896, 7710, 7527, 7347, 7156, 6977, 6788,
6607, 6422, 6236, 6054, 5867, 5684, 5495, 5305, 5128,
4945, 4751, 4638, 4442, 4248, 4065, 3888, 3698, 3509,
3326, 3139, 2957, 2775, 2586, 2405, 2216, 2037, 1846,
1666, 1483, 1297, 1109, 927, 735, 554, 375, 201,
128, 0
};
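      /*
        Sum representative luminance and chrominance quantizer entries and
        look the result up in the tables above to recover the original
        libjpeg quality setting (1-100).
      */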
qvalue=(ssize_t) (jpeg_info->quant_tbl_ptrs[0]->quantval[2]+
jpeg_info->quant_tbl_ptrs[0]->quantval[53]+
jpeg_info->quant_tbl_ptrs[1]->quantval[0]+
jpeg_info->quant_tbl_ptrs[1]->quantval[DCTSIZE2-1]);
for (i=0; i < 100; i++)
{
if ((qvalue < hash[i]) && (sum < sums[i]))
continue;
if (((qvalue <= hash[i]) && (sum <= sums[i])) || (i >= 50))
image->quality=(size_t) i+1;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Quality: %.20g (%s)",(double) i+1,(qvalue <= hash[i]) &&
(sum <= sums[i]) ? "exact" : "approximate");
break;
}
}
else
if (jpeg_info->quant_tbl_ptrs[0] != NULL)
{
ssize_t
hash[101] =
{
510, 505, 422, 380, 355, 338, 326, 318, 311, 305,
300, 297, 293, 291, 288, 286, 284, 283, 281, 280,
279, 278, 277, 273, 262, 251, 243, 233, 225, 218,
211, 205, 198, 193, 186, 181, 177, 172, 168, 164,
158, 156, 152, 148, 145, 142, 139, 136, 133, 131,
129, 126, 123, 120, 118, 115, 113, 110, 107, 105,
102, 100, 97, 94, 92, 89, 87, 83, 81, 79,
76, 74, 70, 68, 66, 63, 61, 57, 55, 52,
50, 48, 44, 42, 39, 37, 34, 31, 29, 26,
24, 21, 18, 16, 13, 11, 8, 6, 3, 2,
0
},
sums[101] =
{
16320, 16315, 15946, 15277, 14655, 14073, 13623, 13230, 12859,
12560, 12240, 11861, 11456, 11081, 10714, 10360, 10027, 9679,
9368, 9056, 8680, 8331, 7995, 7668, 7376, 7084, 6823,
6562, 6345, 6125, 5939, 5756, 5571, 5421, 5240, 5086,
4976, 4829, 4719, 4616, 4463, 4393, 4280, 4166, 4092,
3980, 3909, 3835, 3755, 3688, 3621, 3541, 3467, 3396,
3323, 3247, 3170, 3096, 3021, 2952, 2874, 2804, 2727,
2657, 2583, 2509, 2437, 2362, 2290, 2211, 2136, 2068,
1996, 1915, 1858, 1773, 1692, 1620, 1552, 1477, 1398,
1326, 1251, 1179, 1109, 1031, 961, 884, 814, 736,
667, 592, 518, 441, 369, 292, 221, 151, 86,
64, 0
};
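        /*
          Only a luminance table is present; repeat the lookup with the
          single-table thresholds above.
        */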
qvalue=(ssize_t) (jpeg_info->quant_tbl_ptrs[0]->quantval[2]+
jpeg_info->quant_tbl_ptrs[0]->quantval[53]);
for (i=0; i < 100; i++)
{
if ((qvalue < hash[i]) && (sum < sums[i]))
continue;
if (((qvalue <= hash[i]) && (sum <= sums[i])) || (i >= 50))
              image->quality=(size_t) i+1;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Quality: %.20g (%s)",(double) i+1,(qvalue <= hash[i]) &&
(sum <= sums[i]) ? "exact" : "approximate");
break;
}
}
}
}
static void JPEGSetImageSamplingFactor(struct jpeg_decompress_struct *jpeg_info,
  Image *image,ExceptionInfo *exception)
{
char
sampling_factor[MagickPathExtent];
switch (jpeg_info->out_color_space)
{
case JCS_CMYK:
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),"Colorspace: CMYK");
(void) FormatLocaleString(sampling_factor,MagickPathExtent,
"%dx%d,%dx%d,%dx%d,%dx%d",jpeg_info->comp_info[0].h_samp_factor,
jpeg_info->comp_info[0].v_samp_factor,
jpeg_info->comp_info[1].h_samp_factor,
jpeg_info->comp_info[1].v_samp_factor,
jpeg_info->comp_info[2].h_samp_factor,
jpeg_info->comp_info[2].v_samp_factor,
jpeg_info->comp_info[3].h_samp_factor,
jpeg_info->comp_info[3].v_samp_factor);
break;
}
case JCS_GRAYSCALE:
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Colorspace: GRAYSCALE");
(void) FormatLocaleString(sampling_factor,MagickPathExtent,"%dx%d",
jpeg_info->comp_info[0].h_samp_factor,
jpeg_info->comp_info[0].v_samp_factor);
break;
}
case JCS_RGB:
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),"Colorspace: RGB");
(void) FormatLocaleString(sampling_factor,MagickPathExtent,
"%dx%d,%dx%d,%dx%d",jpeg_info->comp_info[0].h_samp_factor,
jpeg_info->comp_info[0].v_samp_factor,
jpeg_info->comp_info[1].h_samp_factor,
jpeg_info->comp_info[1].v_samp_factor,
jpeg_info->comp_info[2].h_samp_factor,
jpeg_info->comp_info[2].v_samp_factor);
break;
}
default:
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),"Colorspace: %d",
jpeg_info->out_color_space);
(void) FormatLocaleString(sampling_factor,MagickPathExtent,
"%dx%d,%dx%d,%dx%d,%dx%d",jpeg_info->comp_info[0].h_samp_factor,
jpeg_info->comp_info[0].v_samp_factor,
jpeg_info->comp_info[1].h_samp_factor,
jpeg_info->comp_info[1].v_samp_factor,
jpeg_info->comp_info[2].h_samp_factor,
jpeg_info->comp_info[2].v_samp_factor,
jpeg_info->comp_info[3].h_samp_factor,
jpeg_info->comp_info[3].v_samp_factor);
break;
}
}
(void) SetImageProperty(image,"jpeg:sampling-factor",sampling_factor,
exception);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),"Sampling Factors: %s",
sampling_factor);
}
static Image *ReadJPEGImage(const ImageInfo *image_info,
ExceptionInfo *exception)
{
char
value[MagickPathExtent];
const char
*dct_method,
*option;
ErrorManager
error_manager;
Image
*image;
JSAMPLE
*volatile jpeg_pixels;
JSAMPROW
scanline[1];
MagickBooleanType
debug,
status;
MagickSizeType
number_pixels;
MemoryInfo
*memory_info;
Quantum
index;
register ssize_t
i;
struct jpeg_decompress_struct
jpeg_info;
struct jpeg_error_mgr
jpeg_error;
struct jpeg_progress_mgr
jpeg_progress;
register JSAMPLE
*p;
size_t
units;
ssize_t
y;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
debug=IsEventLogging();
(void) debug;
image=AcquireImage(image_info,exception);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
    Verify that the file size is large enough to contain a JPEG datastream.
*/
if (GetBlobSize(image) < 107)
ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
/*
Initialize JPEG parameters.
*/
(void) memset(&error_manager,0,sizeof(error_manager));
(void) memset(&jpeg_info,0,sizeof(jpeg_info));
(void) memset(&jpeg_error,0,sizeof(jpeg_error));
(void) memset(&jpeg_progress,0,sizeof(jpeg_progress));
jpeg_info.err=jpeg_std_error(&jpeg_error);
jpeg_info.err->emit_message=(void (*)(j_common_ptr,int)) JPEGWarningHandler;
jpeg_info.err->error_exit=(void (*)(j_common_ptr)) JPEGErrorHandler;
memory_info=(MemoryInfo *) NULL;
error_manager.exception=exception;
error_manager.image=image;
if (setjmp(error_manager.error_recovery) != 0)
{
jpeg_destroy_decompress(&jpeg_info);
if (error_manager.profile != (StringInfo *) NULL)
error_manager.profile=DestroyStringInfo(error_manager.profile);
(void) CloseBlob(image);
number_pixels=(MagickSizeType) image->columns*image->rows;
if (number_pixels != 0)
return(GetFirstImageInList(image));
return(DestroyImage(image));
}
jpeg_info.client_data=(void *) &error_manager;
jpeg_create_decompress(&jpeg_info);
if (GetMaxMemoryRequest() != ~0UL)
jpeg_info.mem->max_memory_to_use=(long) GetMaxMemoryRequest();
jpeg_progress.progress_monitor=(void (*)(j_common_ptr)) JPEGProgressHandler;
jpeg_info.progress=(&jpeg_progress);
JPEGSourceManager(&jpeg_info,image);
jpeg_set_marker_processor(&jpeg_info,JPEG_COM,ReadComment);
option=GetImageOption(image_info,"profile:skip");
if (IsOptionMember("ICC",option) == MagickFalse)
jpeg_set_marker_processor(&jpeg_info,ICC_MARKER,ReadICCProfile);
if (IsOptionMember("IPTC",option) == MagickFalse)
jpeg_set_marker_processor(&jpeg_info,IPTC_MARKER,ReadIPTCProfile);
for (i=1; i < 16; i++)
if ((i != 2) && (i != 13) && (i != 14))
if (IsOptionMember("APP",option) == MagickFalse)
jpeg_set_marker_processor(&jpeg_info,(int) (JPEG_APP0+i),ReadProfile);
i=(ssize_t) jpeg_read_header(&jpeg_info,TRUE);
if ((image_info->colorspace == YCbCrColorspace) ||
(image_info->colorspace == Rec601YCbCrColorspace) ||
(image_info->colorspace == Rec709YCbCrColorspace))
jpeg_info.out_color_space=JCS_YCbCr;
/*
Set image resolution.
*/
units=0;
if ((jpeg_info.saw_JFIF_marker != 0) && (jpeg_info.X_density != 1) &&
(jpeg_info.Y_density != 1))
{
image->resolution.x=(double) jpeg_info.X_density;
image->resolution.y=(double) jpeg_info.Y_density;
units=(size_t) jpeg_info.density_unit;
}
if (units == 1)
image->units=PixelsPerInchResolution;
if (units == 2)
image->units=PixelsPerCentimeterResolution;
number_pixels=(MagickSizeType) image->columns*image->rows;
option=GetImageOption(image_info,"jpeg:size");
if ((option != (const char *) NULL) &&
(jpeg_info.out_color_space != JCS_YCbCr))
{
double
scale_factor;
GeometryInfo
geometry_info;
MagickStatusType
flags;
/*
Scale the image.
*/
flags=ParseGeometry(option,&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=geometry_info.rho;
jpeg_calc_output_dimensions(&jpeg_info);
image->magick_columns=jpeg_info.output_width;
image->magick_rows=jpeg_info.output_height;
scale_factor=1.0;
if (geometry_info.rho != 0.0)
scale_factor=jpeg_info.output_width/geometry_info.rho;
if ((geometry_info.sigma != 0.0) &&
(scale_factor > (jpeg_info.output_height/geometry_info.sigma)))
scale_factor=jpeg_info.output_height/geometry_info.sigma;
jpeg_info.scale_num=1U;
jpeg_info.scale_denom=(unsigned int) scale_factor;
jpeg_calc_output_dimensions(&jpeg_info);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Scale factor: %.20g",(double) scale_factor);
}
#if (JPEG_LIB_VERSION >= 61) && defined(D_PROGRESSIVE_SUPPORTED)
#if defined(D_LOSSLESS_SUPPORTED)
image->interlace=jpeg_info.process == JPROC_PROGRESSIVE ?
JPEGInterlace : NoInterlace;
image->compression=jpeg_info.process == JPROC_LOSSLESS ?
LosslessJPEGCompression : JPEGCompression;
if (jpeg_info.data_precision > 8)
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"12-bit JPEG not supported. Reducing pixel data to 8 bits","`%s'",
image->filename);
if (jpeg_info.data_precision == 16)
jpeg_info.data_precision=12;
#else
image->interlace=jpeg_info.progressive_mode != 0 ? JPEGInterlace :
NoInterlace;
image->compression=JPEGCompression;
#endif
#else
image->compression=JPEGCompression;
image->interlace=JPEGInterlace;
#endif
option=GetImageOption(image_info,"jpeg:colors");
if (option != (const char *) NULL)
{
/*
Let the JPEG library quantize the image.
*/
jpeg_info.quantize_colors=TRUE;
jpeg_info.desired_number_of_colors=(int) StringToUnsignedLong(option);
}
option=GetImageOption(image_info,"jpeg:block-smoothing");
if (option != (const char *) NULL)
jpeg_info.do_block_smoothing=IsStringTrue(option) != MagickFalse ? TRUE :
FALSE;
dct_method=GetImageOption(image_info,"jpeg:dct-method");
if (dct_method != (const char *) NULL)
switch (*dct_method)
{
case 'D':
case 'd':
{
if (LocaleCompare(dct_method,"default") == 0)
jpeg_info.dct_method=JDCT_DEFAULT;
break;
}
case 'F':
case 'f':
{
if (LocaleCompare(dct_method,"fastest") == 0)
jpeg_info.dct_method=JDCT_FASTEST;
if (LocaleCompare(dct_method,"float") == 0)
jpeg_info.dct_method=JDCT_FLOAT;
break;
}
case 'I':
case 'i':
{
if (LocaleCompare(dct_method,"ifast") == 0)
jpeg_info.dct_method=JDCT_IFAST;
if (LocaleCompare(dct_method,"islow") == 0)
jpeg_info.dct_method=JDCT_ISLOW;
break;
}
}
option=GetImageOption(image_info,"jpeg:fancy-upsampling");
if (option != (const char *) NULL)
jpeg_info.do_fancy_upsampling=IsStringTrue(option) != MagickFalse ? TRUE :
FALSE;
jpeg_calc_output_dimensions(&jpeg_info);
image->columns=jpeg_info.output_width;
image->rows=jpeg_info.output_height;
image->depth=(size_t) jpeg_info.data_precision;
switch (jpeg_info.out_color_space)
{
case JCS_RGB:
default:
{
(void) SetImageColorspace(image,sRGBColorspace,exception);
break;
}
case JCS_GRAYSCALE:
{
(void) SetImageColorspace(image,GRAYColorspace,exception);
break;
}
case JCS_YCbCr:
{
(void) SetImageColorspace(image,YCbCrColorspace,exception);
break;
}
case JCS_CMYK:
{
(void) SetImageColorspace(image,CMYKColorspace,exception);
break;
}
}
if (IsITUFaxImage(image) != MagickFalse)
{
(void) SetImageColorspace(image,LabColorspace,exception);
jpeg_info.out_color_space=JCS_YCbCr;
}
option=GetImageOption(image_info,"jpeg:colors");
if (option != (const char *) NULL)
if (AcquireImageColormap(image,StringToUnsignedLong(option),exception) == MagickFalse)
{
jpeg_destroy_decompress(&jpeg_info);
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
}
if ((jpeg_info.output_components == 1) && (jpeg_info.quantize_colors == 0))
{
size_t
colors;
colors=(size_t) GetQuantumRange(image->depth)+1;
if (AcquireImageColormap(image,colors,exception) == MagickFalse)
{
jpeg_destroy_decompress(&jpeg_info);
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
}
}
if (image->debug != MagickFalse)
{
if (image->interlace != NoInterlace)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Interlace: progressive");
else
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Interlace: nonprogressive");
(void) LogMagickEvent(CoderEvent,GetMagickModule(),"Data precision: %d",
(int) jpeg_info.data_precision);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),"Geometry: %dx%d",
(int) jpeg_info.output_width,(int) jpeg_info.output_height);
}
JPEGSetImageQuality(&jpeg_info,image);
JPEGSetImageSamplingFactor(&jpeg_info,image,exception);
(void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
jpeg_info.out_color_space);
(void) SetImageProperty(image,"jpeg:colorspace",value,exception);
#if defined(D_ARITH_CODING_SUPPORTED)
if (jpeg_info.arith_code == TRUE)
(void) SetImageProperty(image,"jpeg:coding","arithmetic",exception);
#endif
if (image_info->ping != MagickFalse)
{
jpeg_destroy_decompress(&jpeg_info);
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
status=SetImageExtent(image,image->columns,image->rows,exception);
if (status == MagickFalse)
{
jpeg_destroy_decompress(&jpeg_info);
return(DestroyImageList(image));
}
(void) jpeg_start_decompress(&jpeg_info);
if ((jpeg_info.output_components != 1) &&
(jpeg_info.output_components != 3) && (jpeg_info.output_components != 4))
{
jpeg_destroy_decompress(&jpeg_info);
ThrowReaderException(CorruptImageError,"ImageTypeNotSupported");
}
memory_info=AcquireVirtualMemory((size_t) image->columns,
jpeg_info.output_components*sizeof(*jpeg_pixels));
if (memory_info == (MemoryInfo *) NULL)
{
jpeg_destroy_decompress(&jpeg_info);
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
}
jpeg_pixels=(JSAMPLE *) GetVirtualMemoryBlob(memory_info);
(void) memset(jpeg_pixels,0,image->columns*
jpeg_info.output_components*sizeof(*jpeg_pixels));
/*
Convert JPEG pixels to pixel packets.
*/
if (setjmp(error_manager.error_recovery) != 0)
{
if (memory_info != (MemoryInfo *) NULL)
memory_info=RelinquishVirtualMemory(memory_info);
jpeg_destroy_decompress(&jpeg_info);
(void) CloseBlob(image);
number_pixels=(MagickSizeType) image->columns*image->rows;
if (number_pixels != 0)
return(GetFirstImageInList(image));
return(DestroyImage(image));
}
if (jpeg_info.quantize_colors != 0)
{
image->colors=(size_t) jpeg_info.actual_number_of_colors;
if (jpeg_info.out_color_space == JCS_GRAYSCALE)
for (i=0; i < (ssize_t) image->colors; i++)
{
image->colormap[i].red=(double) ScaleCharToQuantum(
jpeg_info.colormap[0][i]);
image->colormap[i].green=image->colormap[i].red;
image->colormap[i].blue=image->colormap[i].red;
image->colormap[i].alpha=(MagickRealType) OpaqueAlpha;
}
else
for (i=0; i < (ssize_t) image->colors; i++)
{
image->colormap[i].red=(double) ScaleCharToQuantum(
jpeg_info.colormap[0][i]);
image->colormap[i].green=(double) ScaleCharToQuantum(
jpeg_info.colormap[1][i]);
image->colormap[i].blue=(double) ScaleCharToQuantum(
jpeg_info.colormap[2][i]);
image->colormap[i].alpha=(MagickRealType) OpaqueAlpha;
}
}
scanline[0]=(JSAMPROW) jpeg_pixels;
for (y=0; y < (ssize_t) image->rows; y++)
{
register ssize_t
x;
register Quantum
*magick_restrict q;
if (jpeg_read_scanlines(&jpeg_info,scanline,1) != 1)
{
(void) ThrowMagickException(exception,GetMagickModule(),
CorruptImageWarning,"SkipToSyncByte","`%s'",image->filename);
continue;
}
p=jpeg_pixels;
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
if (jpeg_info.data_precision > 8)
{
unsigned short
scale;
scale=65535/(unsigned short) GetQuantumRange((size_t)
jpeg_info.data_precision);
if (jpeg_info.output_components == 1)
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t
pixel;
pixel=(ssize_t) (scale*GETJSAMPLE(*p));
index=(Quantum) ConstrainColormapIndex(image,pixel,exception);
SetPixelIndex(image,index,q);
SetPixelViaPixelInfo(image,image->colormap+(ssize_t) index,q);
p++;
q+=GetPixelChannels(image);
}
else
if (image->colorspace != CMYKColorspace)
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelRed(image,ScaleShortToQuantum(
(unsigned short) (scale*GETJSAMPLE(*p++))),q);
SetPixelGreen(image,ScaleShortToQuantum(
(unsigned short) (scale*GETJSAMPLE(*p++))),q);
SetPixelBlue(image,ScaleShortToQuantum(
(unsigned short) (scale*GETJSAMPLE(*p++))),q);
SetPixelAlpha(image,OpaqueAlpha,q);
q+=GetPixelChannels(image);
}
else
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelCyan(image,QuantumRange-ScaleShortToQuantum(
(unsigned short) (scale*GETJSAMPLE(*p++))),q);
SetPixelMagenta(image,QuantumRange-ScaleShortToQuantum(
(unsigned short) (scale*GETJSAMPLE(*p++))),q);
SetPixelYellow(image,QuantumRange-ScaleShortToQuantum(
(unsigned short) (scale*GETJSAMPLE(*p++))),q);
SetPixelBlack(image,QuantumRange-ScaleShortToQuantum(
(unsigned short) (scale*GETJSAMPLE(*p++))),q);
SetPixelAlpha(image,OpaqueAlpha,q);
q+=GetPixelChannels(image);
}
}
else
if (jpeg_info.output_components == 1)
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t
pixel;
pixel=(ssize_t) GETJSAMPLE(*p);
index=(Quantum) ConstrainColormapIndex(image,pixel,exception);
SetPixelIndex(image,index,q);
SetPixelViaPixelInfo(image,image->colormap+(ssize_t) index,q);
p++;
q+=GetPixelChannels(image);
}
else
if (image->colorspace != CMYKColorspace)
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelRed(image,ScaleCharToQuantum((unsigned char)
GETJSAMPLE(*p++)),q);
SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
GETJSAMPLE(*p++)),q);
SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
GETJSAMPLE(*p++)),q);
SetPixelAlpha(image,OpaqueAlpha,q);
q+=GetPixelChannels(image);
}
else
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelCyan(image,QuantumRange-ScaleCharToQuantum(
(unsigned char) GETJSAMPLE(*p++)),q);
SetPixelMagenta(image,QuantumRange-ScaleCharToQuantum(
(unsigned char) GETJSAMPLE(*p++)),q);
SetPixelYellow(image,QuantumRange-ScaleCharToQuantum(
(unsigned char) GETJSAMPLE(*p++)),q);
SetPixelBlack(image,QuantumRange-ScaleCharToQuantum(
(unsigned char) GETJSAMPLE(*p++)),q);
SetPixelAlpha(image,OpaqueAlpha,q);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
{
jpeg_abort_decompress(&jpeg_info);
break;
}
}
if (status != MagickFalse)
{
error_manager.finished=MagickTrue;
if (setjmp(error_manager.error_recovery) == 0)
(void) jpeg_finish_decompress(&jpeg_info);
}
/*
Free jpeg resources.
*/
jpeg_destroy_decompress(&jpeg_info);
memory_info=RelinquishVirtualMemory(memory_info);
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e g i s t e r J P E G I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RegisterJPEGImage() adds properties for the JPEG image format to
% the list of supported formats. The properties include the image format
% tag, a method to read and/or write the format, whether the format
% supports the saving of more than one frame to the same file or blob,
% whether the format supports native in-memory I/O, and a brief
% description of the format.
%
% The format of the RegisterJPEGImage method is:
%
% size_t RegisterJPEGImage(void)
%
*/
ModuleExport size_t RegisterJPEGImage(void)
{
#define JPEGDescription "Joint Photographic Experts Group JFIF format"
#define JPEGStringify(macro_or_string) JPEGStringifyArg(macro_or_string)
#define JPEGStringifyArg(contents) #contents
char
version[MagickPathExtent];
MagickInfo
*entry;
*version='\0';
#if defined(LIBJPEG_TURBO_VERSION)
(void) CopyMagickString(version,"libjpeg-turbo " JPEGStringify(
LIBJPEG_TURBO_VERSION),MagickPathExtent);
#elif defined(JPEG_LIB_VERSION)
(void) FormatLocaleString(version,MagickPathExtent,"libjpeg %d",
JPEG_LIB_VERSION);
#endif
entry=AcquireMagickInfo("JPEG","JPE",JPEGDescription);
#if (JPEG_LIB_VERSION < 80) && !defined(LIBJPEG_TURBO_VERSION)
entry->flags^=CoderDecoderThreadSupportFlag;
#endif
#if defined(MAGICKCORE_JPEG_DELEGATE)
entry->decoder=(DecodeImageHandler *) ReadJPEGImage;
entry->encoder=(EncodeImageHandler *) WriteJPEGImage;
#endif
entry->magick=(IsImageFormatHandler *) IsJPEG;
entry->flags|=CoderDecoderSeekableStreamFlag;
entry->flags^=CoderAdjoinFlag;
entry->flags^=CoderUseExtensionFlag;
if (*version != '\0')
entry->version=ConstantString(version);
entry->mime_type=ConstantString("image/jpeg");
(void) RegisterMagickInfo(entry);
entry=AcquireMagickInfo("JPEG","JPEG",JPEGDescription);
#if (JPEG_LIB_VERSION < 80) && !defined(LIBJPEG_TURBO_VERSION)
entry->flags^=CoderDecoderThreadSupportFlag;
#endif
#if defined(MAGICKCORE_JPEG_DELEGATE)
entry->decoder=(DecodeImageHandler *) ReadJPEGImage;
entry->encoder=(EncodeImageHandler *) WriteJPEGImage;
#endif
entry->magick=(IsImageFormatHandler *) IsJPEG;
entry->flags|=CoderDecoderSeekableStreamFlag;
entry->flags^=CoderAdjoinFlag;
if (*version != '\0')
entry->version=ConstantString(version);
entry->mime_type=ConstantString("image/jpeg");
(void) RegisterMagickInfo(entry);
entry=AcquireMagickInfo("JPEG","JPG",JPEGDescription);
#if (JPEG_LIB_VERSION < 80) && !defined(LIBJPEG_TURBO_VERSION)
entry->flags^=CoderDecoderThreadSupportFlag;
#endif
#if defined(MAGICKCORE_JPEG_DELEGATE)
entry->decoder=(DecodeImageHandler *) ReadJPEGImage;
entry->encoder=(EncodeImageHandler *) WriteJPEGImage;
#endif
entry->flags|=CoderDecoderSeekableStreamFlag;
entry->flags^=CoderAdjoinFlag;
entry->flags^=CoderUseExtensionFlag;
if (*version != '\0')
entry->version=ConstantString(version);
entry->mime_type=ConstantString("image/jpeg");
(void) RegisterMagickInfo(entry);
entry=AcquireMagickInfo("JPEG","JPS",JPEGDescription);
#if (JPEG_LIB_VERSION < 80) && !defined(LIBJPEG_TURBO_VERSION)
entry->flags^=CoderDecoderThreadSupportFlag;
#endif
#if defined(MAGICKCORE_JPEG_DELEGATE)
entry->decoder=(DecodeImageHandler *) ReadJPEGImage;
entry->encoder=(EncodeImageHandler *) WriteJPEGImage;
#endif
entry->flags|=CoderDecoderSeekableStreamFlag;
entry->flags^=CoderAdjoinFlag;
entry->flags^=CoderUseExtensionFlag;
if (*version != '\0')
entry->version=ConstantString(version);
entry->mime_type=ConstantString("image/jpeg");
(void) RegisterMagickInfo(entry);
entry=AcquireMagickInfo("JPEG","PJPEG",JPEGDescription);
#if (JPEG_LIB_VERSION < 80) && !defined(LIBJPEG_TURBO_VERSION)
entry->flags^=CoderDecoderThreadSupportFlag;
#endif
#if defined(MAGICKCORE_JPEG_DELEGATE)
entry->decoder=(DecodeImageHandler *) ReadJPEGImage;
entry->encoder=(EncodeImageHandler *) WriteJPEGImage;
#endif
entry->flags|=CoderDecoderSeekableStreamFlag;
entry->flags^=CoderAdjoinFlag;
entry->flags^=CoderUseExtensionFlag;
if (*version != '\0')
entry->version=ConstantString(version);
entry->mime_type=ConstantString("image/jpeg");
(void) RegisterMagickInfo(entry);
return(MagickImageCoderSignature);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n r e g i s t e r J P E G I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnregisterJPEGImage() removes format registrations made by the
% JPEG module from the list of supported formats.
%
% The format of the UnregisterJPEGImage method is:
%
% UnregisterJPEGImage(void)
%
*/
ModuleExport void UnregisterJPEGImage(void)
{
(void) UnregisterMagickInfo("PJPG");
(void) UnregisterMagickInfo("JPS");
(void) UnregisterMagickInfo("JPG");
(void) UnregisterMagickInfo("JPEG");
(void) UnregisterMagickInfo("JPE");
}
#if defined(MAGICKCORE_JPEG_DELEGATE)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e J P E G I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WriteJPEGImage() writes a JPEG image file and returns MagickTrue on
% success, MagickFalse otherwise.
%
% The format of the WriteJPEGImage method is:
%
% MagickBooleanType WriteJPEGImage(const ImageInfo *image_info,
% Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static QuantizationTable *DestroyQuantizationTable(QuantizationTable *table)
{
assert(table != (QuantizationTable *) NULL);
if (table->slot != (char *) NULL)
table->slot=DestroyString(table->slot);
if (table->description != (char *) NULL)
table->description=DestroyString(table->description);
if (table->levels != (unsigned int *) NULL)
table->levels=(unsigned int *) RelinquishMagickMemory(table->levels);
table=(QuantizationTable *) RelinquishMagickMemory(table);
return(table);
}
static boolean EmptyOutputBuffer(j_compress_ptr cinfo)
{
DestinationManager
*destination;
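  /*
    Flush the full output buffer to the image blob; a short write is
    reported to libjpeg as JERR_FILE_WRITE.
  */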
destination=(DestinationManager *) cinfo->dest;
destination->manager.free_in_buffer=(size_t) WriteBlob(destination->image,
MagickMinBufferExtent,destination->buffer);
if (destination->manager.free_in_buffer != MagickMinBufferExtent)
ERREXIT(cinfo,JERR_FILE_WRITE);
destination->manager.next_output_byte=destination->buffer;
return(TRUE);
}
static QuantizationTable *GetQuantizationTable(const char *filename,
const char *slot,ExceptionInfo *exception)
{
char
*p,
*xml;
const char
*attribute,
*content;
double
value;
register ssize_t
i;
ssize_t
j;
QuantizationTable
*table;
size_t
length;
XMLTreeInfo
*description,
*levels,
*quantization_tables,
*table_iterator;
(void) LogMagickEvent(ConfigureEvent,GetMagickModule(),
"Loading quantization tables \"%s\" ...",filename);
table=(QuantizationTable *) NULL;
xml=FileToString(filename,~0UL,exception);
if (xml == (char *) NULL)
return(table);
quantization_tables=NewXMLTree(xml,exception);
if (quantization_tables == (XMLTreeInfo *) NULL)
{
xml=DestroyString(xml);
return(table);
}
for (table_iterator=GetXMLTreeChild(quantization_tables,"table");
table_iterator != (XMLTreeInfo *) NULL;
table_iterator=GetNextXMLTreeTag(table_iterator))
{
attribute=GetXMLTreeAttribute(table_iterator,"slot");
if ((attribute != (char *) NULL) && (LocaleCompare(slot,attribute) == 0))
break;
attribute=GetXMLTreeAttribute(table_iterator,"alias");
if ((attribute != (char *) NULL) && (LocaleCompare(slot,attribute) == 0))
break;
}
if (table_iterator == (XMLTreeInfo *) NULL)
{
xml=DestroyString(xml);
return(table);
}
description=GetXMLTreeChild(table_iterator,"description");
if (description == (XMLTreeInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlMissingElement","<description>, slot \"%s\"",slot);
quantization_tables=DestroyXMLTree(quantization_tables);
xml=DestroyString(xml);
return(table);
}
levels=GetXMLTreeChild(table_iterator,"levels");
if (levels == (XMLTreeInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlMissingElement","<levels>, slot \"%s\"",slot);
quantization_tables=DestroyXMLTree(quantization_tables);
xml=DestroyString(xml);
return(table);
}
table=(QuantizationTable *) AcquireCriticalMemory(sizeof(*table));
table->slot=(char *) NULL;
table->description=(char *) NULL;
table->levels=(unsigned int *) NULL;
attribute=GetXMLTreeAttribute(table_iterator,"slot");
if (attribute != (char *) NULL)
table->slot=ConstantString(attribute);
content=GetXMLTreeContent(description);
if (content != (char *) NULL)
table->description=ConstantString(content);
attribute=GetXMLTreeAttribute(levels,"width");
if (attribute == (char *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlMissingAttribute","<levels width>, slot \"%s\"",slot);
quantization_tables=DestroyXMLTree(quantization_tables);
table=DestroyQuantizationTable(table);
xml=DestroyString(xml);
return(table);
}
table->width=StringToUnsignedLong(attribute);
if (table->width == 0)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlInvalidAttribute","<levels width>, table \"%s\"",slot);
quantization_tables=DestroyXMLTree(quantization_tables);
table=DestroyQuantizationTable(table);
xml=DestroyString(xml);
return(table);
}
attribute=GetXMLTreeAttribute(levels,"height");
if (attribute == (char *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlMissingAttribute","<levels height>, table \"%s\"",slot);
quantization_tables=DestroyXMLTree(quantization_tables);
table=DestroyQuantizationTable(table);
xml=DestroyString(xml);
return(table);
}
table->height=StringToUnsignedLong(attribute);
if (table->height == 0)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlInvalidAttribute","<levels height>, table \"%s\"",slot);
quantization_tables=DestroyXMLTree(quantization_tables);
table=DestroyQuantizationTable(table);
xml=DestroyString(xml);
return(table);
}
attribute=GetXMLTreeAttribute(levels,"divisor");
if (attribute == (char *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlMissingAttribute","<levels divisor>, table \"%s\"",slot);
quantization_tables=DestroyXMLTree(quantization_tables);
table=DestroyQuantizationTable(table);
xml=DestroyString(xml);
return(table);
}
table->divisor=InterpretLocaleValue(attribute,(char **) NULL);
if (table->divisor == 0.0)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlInvalidAttribute","<levels divisor>, table \"%s\"",slot);
quantization_tables=DestroyXMLTree(quantization_tables);
table=DestroyQuantizationTable(table);
xml=DestroyString(xml);
return(table);
}
content=GetXMLTreeContent(levels);
if (content == (char *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlMissingContent","<levels>, table \"%s\"",slot);
quantization_tables=DestroyXMLTree(quantization_tables);
table=DestroyQuantizationTable(table);
xml=DestroyString(xml);
return(table);
}
length=(size_t) table->width*table->height;
if (length < 64)
length=64;
table->levels=(unsigned int *) AcquireQuantumMemory(length,
sizeof(*table->levels));
if (table->levels == (unsigned int *) NULL)
ThrowFatalException(ResourceLimitFatalError,
"UnableToAcquireQuantizationTable");
for (i=0; i < (ssize_t) (table->width*table->height); i++)
{
table->levels[i]=(unsigned int) (InterpretLocaleValue(content,&p)/
table->divisor+0.5);
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
content=p;
}
value=InterpretLocaleValue(content,&p);
(void) value;
if (p != content)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlInvalidContent","<level> too many values, table \"%s\"",slot);
quantization_tables=DestroyXMLTree(quantization_tables);
table=DestroyQuantizationTable(table);
xml=DestroyString(xml);
return(table);
}
for (j=i; j < 64; j++)
table->levels[j]=table->levels[j-1];
quantization_tables=DestroyXMLTree(quantization_tables);
xml=DestroyString(xml);
return(table);
}
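/*
  Illustrative example of the XML that GetQuantizationTable() accepts; the
  element and attribute names match the lookups above, while the slot alias
  and level values are hypothetical:

    <?xml version="1.0"?>
    <quantization-tables>
      <table slot="0" alias="luma">
        <description>Sample 2x2 luminance table</description>
        <levels width="2" height="2" divisor="1">
          16, 11, 12, 14
        </levels>
      </table>
    </quantization-tables>

  Tables smaller than 64 entries are padded by repeating the last level, as
  in the loop above.
*/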
static void InitializeDestination(j_compress_ptr cinfo)
{
DestinationManager
*destination;
destination=(DestinationManager *) cinfo->dest;
destination->buffer=(JOCTET *) (*cinfo->mem->alloc_small)
((j_common_ptr) cinfo,JPOOL_IMAGE,MagickMinBufferExtent*sizeof(JOCTET));
destination->manager.next_output_byte=destination->buffer;
destination->manager.free_in_buffer=MagickMinBufferExtent;
}
static void TerminateDestination(j_compress_ptr cinfo)
{
DestinationManager
*destination;
destination=(DestinationManager *) cinfo->dest;
if ((MagickMinBufferExtent-(int) destination->manager.free_in_buffer) > 0)
{
ssize_t
count;
count=WriteBlob(destination->image,MagickMinBufferExtent-
destination->manager.free_in_buffer,destination->buffer);
if (count != (ssize_t)
(MagickMinBufferExtent-destination->manager.free_in_buffer))
ERREXIT(cinfo,JERR_FILE_WRITE);
}
}
static void WriteProfile(j_compress_ptr jpeg_info,Image *image,
ExceptionInfo *exception)
{
const char
*name;
const StringInfo
*profile;
MagickBooleanType
iptc;
register ssize_t
i;
size_t
length,
tag_length;
StringInfo
*custom_profile;
/*
    Save image profiles as APP markers.
*/
iptc=MagickFalse;
custom_profile=AcquireStringInfo(65535L);
ResetImageProfileIterator(image);
for (name=GetNextImageProfile(image); name != (const char *) NULL; )
{
profile=GetImageProfile(image,name);
length=GetStringInfoLength(profile);
if (LocaleNCompare(name,"APP",3) == 0)
{
int
id;
id=JPEG_APP0+StringToInteger(name+3);
for (i=0; i < (ssize_t) length; i+=65533L)
jpeg_write_marker(jpeg_info,id,GetStringInfoDatum(profile)+i,
(unsigned int) MagickMin(length-i,65533));
}
if (LocaleCompare(name,"EXIF") == 0)
{
length=GetStringInfoLength(profile);
if (length > 65533L)
{
(void) ThrowMagickException(exception,GetMagickModule(),
CoderWarning,"ExifProfileSizeExceedsLimit","`%s'",
image->filename);
length=65533L;
}
jpeg_write_marker(jpeg_info,XML_MARKER,GetStringInfoDatum(profile),
(unsigned int) length);
}
if (LocaleCompare(name,"ICC") == 0)
{
register unsigned char
*p;
tag_length=strlen(ICC_PROFILE);
p=GetStringInfoDatum(custom_profile);
(void) memcpy(p,ICC_PROFILE,tag_length);
p[tag_length]='\0';
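        /*
          Each APP2 chunk is the 12-byte "ICC_PROFILE\0" tag followed by a
          1-based chunk index byte, a chunk-count byte, and at most 65519
          profile bytes.
        */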
for (i=0; i < (ssize_t) GetStringInfoLength(profile); i+=65519L)
{
length=MagickMin(GetStringInfoLength(profile)-i,65519L);
p[12]=(unsigned char) ((i/65519L)+1);
p[13]=(unsigned char) (GetStringInfoLength(profile)/65519L+1);
(void) memcpy(p+tag_length+3,GetStringInfoDatum(profile)+i,
length);
jpeg_write_marker(jpeg_info,ICC_MARKER,GetStringInfoDatum(
custom_profile),(unsigned int) (length+tag_length+3));
}
}
if (((LocaleCompare(name,"IPTC") == 0) ||
(LocaleCompare(name,"8BIM") == 0)) && (iptc == MagickFalse))
{
register unsigned char
*p;
size_t
roundup;
iptc=MagickTrue;
p=GetStringInfoDatum(custom_profile);
for (i=0; i < (ssize_t) GetStringInfoLength(profile); i+=65500L)
{
length=MagickMin(GetStringInfoLength(profile)-i,65500L);
roundup=(size_t) (length & 0x01);
if (LocaleNCompare((char *) GetStringInfoDatum(profile),"8BIM",4) == 0)
{
(void) memcpy(p,"Photoshop 3.0 ",14);
tag_length=14;
}
else
{
(void) memcpy(p,"Photoshop 3.0 8BIM\04\04\0\0\0\0",24);
tag_length=26;
p[24]=(unsigned char) (length >> 8);
p[25]=(unsigned char) (length & 0xff);
}
p[13]=0x00;
(void) memcpy(p+tag_length,GetStringInfoDatum(profile)+i,length);
if (roundup != 0)
p[length+tag_length]='\0';
jpeg_write_marker(jpeg_info,IPTC_MARKER,GetStringInfoDatum(
custom_profile),(unsigned int) (length+tag_length+roundup));
}
}
if ((LocaleCompare(name,"XMP") == 0) &&
(GetStringInfoLength(profile) <= 65502))
{
StringInfo
*xmp_profile;
/*
Add namespace to XMP profile.
*/
xmp_profile=StringToStringInfo(xmp_namespace);
if (xmp_profile != (StringInfo *) NULL)
{
if (profile != (StringInfo *) NULL)
ConcatenateStringInfo(xmp_profile,profile);
GetStringInfoDatum(xmp_profile)[XmpNamespaceExtent]='\0';
for (i=0; i < (ssize_t) GetStringInfoLength(xmp_profile); i+=65533L)
{
length=MagickMin(GetStringInfoLength(xmp_profile)-i,65533L);
jpeg_write_marker(jpeg_info,XML_MARKER,
GetStringInfoDatum(xmp_profile)+i,(unsigned int) length);
}
xmp_profile=DestroyStringInfo(xmp_profile);
}
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"%s profile: %.20g bytes",name,(double) GetStringInfoLength(profile));
name=GetNextImageProfile(image);
}
custom_profile=DestroyStringInfo(custom_profile);
}
static void JPEGDestinationManager(j_compress_ptr cinfo,Image * image)
{
DestinationManager
*destination;
cinfo->dest=(struct jpeg_destination_mgr *) (*cinfo->mem->alloc_small)
((j_common_ptr) cinfo,JPOOL_IMAGE,sizeof(DestinationManager));
destination=(DestinationManager *) cinfo->dest;
destination->manager.init_destination=InitializeDestination;
destination->manager.empty_output_buffer=EmptyOutputBuffer;
destination->manager.term_destination=TerminateDestination;
destination->image=image;
}
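/*
  libjpeg drives these callbacks itself: init_destination once from
  jpeg_start_compress(), empty_output_buffer whenever the JOCTET buffer
  fills, and term_destination from jpeg_finish_compress() to flush any
  remaining bytes.
*/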
static char **SamplingFactorToList(const char *text)
{
char
**textlist;
register char
*q;
register const char
*p;
register ssize_t
i;
if (text == (char *) NULL)
return((char **) NULL);
/*
Convert string to an ASCII list.
*/
textlist=(char **) AcquireQuantumMemory((size_t) MAX_COMPONENTS,
sizeof(*textlist));
if (textlist == (char **) NULL)
ThrowFatalException(ResourceLimitFatalError,"UnableToConvertText");
p=text;
for (i=0; i < (ssize_t) MAX_COMPONENTS; i++)
{
for (q=(char *) p; *q != '\0'; q++)
if (*q == ',')
break;
textlist[i]=(char *) AcquireQuantumMemory((size_t) (q-p)+MagickPathExtent,
sizeof(*textlist[i]));
if (textlist[i] == (char *) NULL)
ThrowFatalException(ResourceLimitFatalError,"UnableToConvertText");
(void) CopyMagickString(textlist[i],p,(size_t) (q-p+1));
if (*q == '\r')
q++;
if (*q == '\0')
break;
p=q+1;
}
for (i++; i < (ssize_t) MAX_COMPONENTS; i++)
textlist[i]=ConstantString("1x1");
return(textlist);
}
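/*
  For example, SamplingFactorToList("2x2,1x1,1x1") yields the list
  { "2x2", "1x1", "1x1", "1x1", ... }; entries beyond the supplied factors
  are padded with "1x1" by the loop above.
*/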
static MagickBooleanType WriteJPEGImage(const ImageInfo *image_info,
Image *image,ExceptionInfo *exception)
{
const char
*dct_method,
*option,
*sampling_factor,
*value;
ErrorManager
error_manager;
Image
*volatile volatile_image;
int
colorspace,
quality;
JSAMPLE
*volatile jpeg_pixels;
JSAMPROW
scanline[1];
MagickBooleanType
status;
MemoryInfo
*memory_info;
register JSAMPLE
*q;
register ssize_t
i;
ssize_t
y;
struct jpeg_compress_struct
jpeg_info;
struct jpeg_error_mgr
jpeg_error;
unsigned short
scale;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
if ((LocaleCompare(image_info->magick,"JPS") == 0) &&
(image->next != (Image *) NULL))
image=AppendImages(image,MagickFalse,exception);
status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
if (status == MagickFalse)
return(status);
/*
Initialize JPEG parameters.
*/
(void) memset(&error_manager,0,sizeof(error_manager));
(void) memset(&jpeg_info,0,sizeof(jpeg_info));
(void) memset(&jpeg_error,0,sizeof(jpeg_error));
volatile_image=image;
jpeg_info.client_data=(void *) volatile_image;
jpeg_info.err=jpeg_std_error(&jpeg_error);
jpeg_info.err->emit_message=(void (*)(j_common_ptr,int)) JPEGWarningHandler;
jpeg_info.err->error_exit=(void (*)(j_common_ptr)) JPEGErrorHandler;
error_manager.exception=exception;
error_manager.image=volatile_image;
memory_info=(MemoryInfo *) NULL;
if (setjmp(error_manager.error_recovery) != 0)
{
jpeg_destroy_compress(&jpeg_info);
(void) CloseBlob(volatile_image);
return(MagickFalse);
}
jpeg_info.client_data=(void *) &error_manager;
jpeg_create_compress(&jpeg_info);
JPEGDestinationManager(&jpeg_info,image);
if ((image->columns != (unsigned int) image->columns) ||
(image->rows != (unsigned int) image->rows))
ThrowWriterException(ImageError,"WidthOrHeightExceedsLimit");
jpeg_info.image_width=(unsigned int) image->columns;
jpeg_info.image_height=(unsigned int) image->rows;
jpeg_info.input_components=3;
jpeg_info.data_precision=8;
jpeg_info.in_color_space=JCS_RGB;
switch (image->colorspace)
{
case CMYKColorspace:
{
jpeg_info.input_components=4;
jpeg_info.in_color_space=JCS_CMYK;
break;
}
case YCbCrColorspace:
case Rec601YCbCrColorspace:
case Rec709YCbCrColorspace:
{
jpeg_info.in_color_space=JCS_YCbCr;
break;
}
case LinearGRAYColorspace:
case GRAYColorspace:
{
if (image_info->type == TrueColorType)
break;
jpeg_info.input_components=1;
jpeg_info.in_color_space=JCS_GRAYSCALE;
break;
}
default:
{
(void) TransformImageColorspace(image,sRGBColorspace,exception);
if (image_info->type == TrueColorType)
break;
if (SetImageGray(image,exception) != MagickFalse)
{
jpeg_info.input_components=1;
jpeg_info.in_color_space=JCS_GRAYSCALE;
}
break;
}
}
jpeg_set_defaults(&jpeg_info);
if (jpeg_info.in_color_space == JCS_CMYK)
jpeg_set_colorspace(&jpeg_info,JCS_YCCK);
if ((jpeg_info.data_precision != 12) && (image->depth <= 8))
jpeg_info.data_precision=8;
else
jpeg_info.data_precision=BITS_IN_JSAMPLE;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Image resolution: %.20g,%.20g",image->resolution.x,image->resolution.y);
if ((image->resolution.x != 0.0) && (image->resolution.y != 0.0))
{
/*
Set image resolution.
*/
jpeg_info.write_JFIF_header=TRUE;
jpeg_info.X_density=(UINT16) image->resolution.x;
jpeg_info.Y_density=(UINT16) image->resolution.y;
/*
Set image resolution units.
*/
if (image->units == PixelsPerInchResolution)
jpeg_info.density_unit=(UINT8) 1;
if (image->units == PixelsPerCentimeterResolution)
jpeg_info.density_unit=(UINT8) 2;
}
dct_method=GetImageOption(image_info,"jpeg:dct-method");
if (dct_method != (const char *) NULL)
switch (*dct_method)
{
case 'D':
case 'd':
{
if (LocaleCompare(dct_method,"default") == 0)
jpeg_info.dct_method=JDCT_DEFAULT;
break;
}
case 'F':
case 'f':
{
if (LocaleCompare(dct_method,"fastest") == 0)
jpeg_info.dct_method=JDCT_FASTEST;
if (LocaleCompare(dct_method,"float") == 0)
jpeg_info.dct_method=JDCT_FLOAT;
break;
}
case 'I':
case 'i':
{
if (LocaleCompare(dct_method,"ifast") == 0)
jpeg_info.dct_method=JDCT_IFAST;
if (LocaleCompare(dct_method,"islow") == 0)
jpeg_info.dct_method=JDCT_ISLOW;
break;
}
}
option=GetImageOption(image_info,"jpeg:optimize-coding");
if (option != (const char *) NULL)
jpeg_info.optimize_coding=IsStringTrue(option) != MagickFalse ? TRUE :
FALSE;
else
{
MagickSizeType
length;
length=(MagickSizeType) jpeg_info.input_components*image->columns*
image->rows*sizeof(JSAMPLE);
if (length == (MagickSizeType) ((size_t) length))
{
/*
Perform optimization only if available memory resources permit it.
*/
status=AcquireMagickResource(MemoryResource,length);
if (status != MagickFalse)
RelinquishMagickResource(MemoryResource,length);
jpeg_info.optimize_coding=status == MagickFalse ? FALSE : TRUE;
}
}
#if defined(C_ARITH_CODING_SUPPORTED)
option=GetImageOption(image_info,"jpeg:arithmetic-coding");
if (IsStringTrue(option) != MagickFalse)
{
jpeg_info.arith_code=TRUE;
      jpeg_info.optimize_coding=FALSE;  /* not supported with arithmetic coding */
}
#endif
#if (JPEG_LIB_VERSION >= 61) && defined(C_PROGRESSIVE_SUPPORTED)
if ((LocaleCompare(image_info->magick,"PJPEG") == 0) ||
(image_info->interlace != NoInterlace))
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Interlace: progressive");
jpeg_simple_progression(&jpeg_info);
}
else
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Interlace: non-progressive");
#else
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Interlace: nonprogressive");
#endif
quality=92;
if ((image_info->compression != LosslessJPEGCompression) &&
(image->quality <= 100))
{
if (image->quality != UndefinedCompressionQuality)
quality=(int) image->quality;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),"Quality: %.20g",
(double) image->quality);
}
else
{
#if !defined(C_LOSSLESS_SUPPORTED)
quality=100;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),"Quality: 100");
#else
if (image->quality < 100)
(void) ThrowMagickException(exception,GetMagickModule(),CoderWarning,
"LosslessToLossyJPEGConversion","`%s'",image->filename);
else
{
int
point_transform,
predictor;
predictor=image->quality/100; /* range 1-7 */
      point_transform=image->quality % 20; /* 0-19 (JPEG defines 0-15) */
jpeg_simple_lossless(&jpeg_info,predictor,point_transform);
if (image->debug != MagickFalse)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Compression: lossless");
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Predictor: %d",predictor);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Point Transform: %d",point_transform);
}
}
#endif
}
option=GetImageOption(image_info,"jpeg:extent");
if (option != (const char *) NULL)
{
Image
*jpeg_image;
ImageInfo
*extent_info;
extent_info=CloneImageInfo(image_info);
extent_info->blob=NULL;
jpeg_image=CloneImage(image,0,0,MagickTrue,exception);
if (jpeg_image != (Image *) NULL)
{
MagickSizeType
extent;
size_t
maximum,
minimum;
/*
Search for compression quality that does not exceed image extent.
*/
extent_info->quality=0;
extent=(MagickSizeType) SiPrefixToDoubleInterval(option,100.0);
(void) DeleteImageOption(extent_info,"jpeg:extent");
(void) DeleteImageArtifact(jpeg_image,"jpeg:extent");
maximum=image_info->quality;
if (maximum < 2)
maximum=101;
for (minimum=2; minimum < maximum; )
{
(void) AcquireUniqueFilename(jpeg_image->filename);
jpeg_image->quality=minimum+(maximum-minimum+1)/2;
status=WriteJPEGImage(extent_info,jpeg_image,exception);
if (GetBlobSize(jpeg_image) <= extent)
minimum=jpeg_image->quality+1;
else
maximum=jpeg_image->quality-1;
(void) RelinquishUniqueFileResource(jpeg_image->filename);
}
quality=(int) minimum-1;
jpeg_image=DestroyImage(jpeg_image);
}
extent_info=DestroyImageInfo(extent_info);
}
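  /*
    Illustrative usage (option name as read above): `-define jpeg:extent=400kb`
    triggers this bisection, which probes trial qualities until the encoded
    size fits within the requested extent.
  */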
jpeg_set_quality(&jpeg_info,quality,TRUE);
if ((dct_method == (const char *) NULL) && (quality <= 90))
jpeg_info.dct_method=JDCT_IFAST;
#if (JPEG_LIB_VERSION >= 70)
option=GetImageOption(image_info,"quality");
if (option != (const char *) NULL)
{
GeometryInfo
geometry_info;
int
flags;
/*
Set quality scaling for luminance and chrominance separately.
*/
flags=ParseGeometry(option,&geometry_info);
if (((flags & RhoValue) != 0) && ((flags & SigmaValue) != 0))
{
jpeg_info.q_scale_factor[0]=jpeg_quality_scaling((int)
(geometry_info.rho+0.5));
jpeg_info.q_scale_factor[1]=jpeg_quality_scaling((int)
(geometry_info.sigma+0.5));
jpeg_default_qtables(&jpeg_info,TRUE);
}
}
#endif
colorspace=jpeg_info.in_color_space;
value=GetImageOption(image_info,"jpeg:colorspace");
if (value == (char *) NULL)
value=GetImageProperty(image,"jpeg:colorspace",exception);
if (value != (char *) NULL)
colorspace=StringToInteger(value);
sampling_factor=(const char *) NULL;
if ((J_COLOR_SPACE) colorspace == jpeg_info.in_color_space)
{
value=GetImageOption(image_info,"jpeg:sampling-factor");
if (value == (char *) NULL)
value=GetImageProperty(image,"jpeg:sampling-factor",exception);
if (value != (char *) NULL)
{
sampling_factor=value;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Input sampling-factors=%s",sampling_factor);
}
}
value=GetImageOption(image_info,"jpeg:sampling-factor");
if (image_info->sampling_factor != (char *) NULL)
sampling_factor=image_info->sampling_factor;
if (sampling_factor == (const char *) NULL)
{
if (quality >= 90)
for (i=0; i < MAX_COMPONENTS; i++)
{
jpeg_info.comp_info[i].h_samp_factor=1;
jpeg_info.comp_info[i].v_samp_factor=1;
}
}
else
{
char
**factors;
GeometryInfo
geometry_info;
MagickStatusType
flags;
/*
Set sampling factor.
*/
i=0;
factors=SamplingFactorToList(sampling_factor);
if (factors != (char **) NULL)
{
for (i=0; i < MAX_COMPONENTS; i++)
{
if (factors[i] == (char *) NULL)
break;
flags=ParseGeometry(factors[i],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=geometry_info.rho;
jpeg_info.comp_info[i].h_samp_factor=(int) geometry_info.rho;
jpeg_info.comp_info[i].v_samp_factor=(int) geometry_info.sigma;
factors[i]=(char *) RelinquishMagickMemory(factors[i]);
}
factors=(char **) RelinquishMagickMemory(factors);
}
for ( ; i < MAX_COMPONENTS; i++)
{
jpeg_info.comp_info[i].h_samp_factor=1;
jpeg_info.comp_info[i].v_samp_factor=1;
}
}
option=GetImageOption(image_info,"jpeg:q-table");
if (option != (const char *) NULL)
{
QuantizationTable
*table;
/*
Custom quantization tables.
*/
table=GetQuantizationTable(option,"0",exception);
if (table != (QuantizationTable *) NULL)
{
for (i=0; i < MAX_COMPONENTS; i++)
jpeg_info.comp_info[i].quant_tbl_no=0;
jpeg_add_quant_table(&jpeg_info,0,table->levels,
jpeg_quality_scaling(quality),0);
table=DestroyQuantizationTable(table);
}
table=GetQuantizationTable(option,"1",exception);
if (table != (QuantizationTable *) NULL)
{
for (i=1; i < MAX_COMPONENTS; i++)
jpeg_info.comp_info[i].quant_tbl_no=1;
jpeg_add_quant_table(&jpeg_info,1,table->levels,
jpeg_quality_scaling(quality),0);
table=DestroyQuantizationTable(table);
}
table=GetQuantizationTable(option,"2",exception);
if (table != (QuantizationTable *) NULL)
{
for (i=2; i < MAX_COMPONENTS; i++)
jpeg_info.comp_info[i].quant_tbl_no=2;
jpeg_add_quant_table(&jpeg_info,2,table->levels,
jpeg_quality_scaling(quality),0);
table=DestroyQuantizationTable(table);
}
table=GetQuantizationTable(option,"3",exception);
if (table != (QuantizationTable *) NULL)
{
for (i=3; i < MAX_COMPONENTS; i++)
jpeg_info.comp_info[i].quant_tbl_no=3;
jpeg_add_quant_table(&jpeg_info,3,table->levels,
jpeg_quality_scaling(quality),0);
table=DestroyQuantizationTable(table);
}
}
jpeg_start_compress(&jpeg_info,TRUE);
if (image->debug != MagickFalse)
{
if (image->storage_class == PseudoClass)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Storage class: PseudoClass");
else
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Storage class: DirectClass");
(void) LogMagickEvent(CoderEvent,GetMagickModule(),"Depth: %.20g",
(double) image->depth);
if (image->colors != 0)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Number of colors: %.20g",(double) image->colors);
else
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Number of colors: unspecified");
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"JPEG data precision: %d",(int) jpeg_info.data_precision);
switch (image->colorspace)
{
case CMYKColorspace:
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Storage class: DirectClass");
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Colorspace: CMYK");
break;
}
case YCbCrColorspace:
case Rec601YCbCrColorspace:
case Rec709YCbCrColorspace:
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Colorspace: YCbCr");
break;
}
default:
break;
}
switch (image->colorspace)
{
case CMYKColorspace:
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Colorspace: CMYK");
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Sampling factors: %dx%d,%dx%d,%dx%d,%dx%d",
jpeg_info.comp_info[0].h_samp_factor,
jpeg_info.comp_info[0].v_samp_factor,
jpeg_info.comp_info[1].h_samp_factor,
jpeg_info.comp_info[1].v_samp_factor,
jpeg_info.comp_info[2].h_samp_factor,
jpeg_info.comp_info[2].v_samp_factor,
jpeg_info.comp_info[3].h_samp_factor,
jpeg_info.comp_info[3].v_samp_factor);
break;
}
case GRAYColorspace:
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Colorspace: GRAY");
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Sampling factors: %dx%d",jpeg_info.comp_info[0].h_samp_factor,
jpeg_info.comp_info[0].v_samp_factor);
break;
}
case sRGBColorspace:
case RGBColorspace:
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Image colorspace is RGB");
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Sampling factors: %dx%d,%dx%d,%dx%d",
jpeg_info.comp_info[0].h_samp_factor,
jpeg_info.comp_info[0].v_samp_factor,
jpeg_info.comp_info[1].h_samp_factor,
jpeg_info.comp_info[1].v_samp_factor,
jpeg_info.comp_info[2].h_samp_factor,
jpeg_info.comp_info[2].v_samp_factor);
break;
}
case YCbCrColorspace:
case Rec601YCbCrColorspace:
case Rec709YCbCrColorspace:
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Colorspace: YCbCr");
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Sampling factors: %dx%d,%dx%d,%dx%d",
jpeg_info.comp_info[0].h_samp_factor,
jpeg_info.comp_info[0].v_samp_factor,
jpeg_info.comp_info[1].h_samp_factor,
jpeg_info.comp_info[1].v_samp_factor,
jpeg_info.comp_info[2].h_samp_factor,
jpeg_info.comp_info[2].v_samp_factor);
break;
}
default:
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),"Colorspace: %d",
image->colorspace);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Sampling factors: %dx%d,%dx%d,%dx%d,%dx%d",
jpeg_info.comp_info[0].h_samp_factor,
jpeg_info.comp_info[0].v_samp_factor,
jpeg_info.comp_info[1].h_samp_factor,
jpeg_info.comp_info[1].v_samp_factor,
jpeg_info.comp_info[2].h_samp_factor,
jpeg_info.comp_info[2].v_samp_factor,
jpeg_info.comp_info[3].h_samp_factor,
jpeg_info.comp_info[3].v_samp_factor);
break;
}
}
}
/*
Write JPEG profiles.
*/
value=GetImageProperty(image,"comment",exception);
if (value != (char *) NULL)
for (i=0; i < (ssize_t) strlen(value); i+=65533L)
jpeg_write_marker(&jpeg_info,JPEG_COM,(unsigned char *) value+i,
(unsigned int) MagickMin((size_t) strlen(value+i),65533L));
if (image->profiles != (void *) NULL)
WriteProfile(&jpeg_info,image,exception);
/*
Convert MIFF to JPEG raster pixels.
*/
memory_info=AcquireVirtualMemory((size_t) image->columns,
jpeg_info.input_components*sizeof(*jpeg_pixels));
if (memory_info == (MemoryInfo *) NULL)
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
jpeg_pixels=(JSAMPLE *) GetVirtualMemoryBlob(memory_info);
if (setjmp(error_manager.error_recovery) != 0)
{
jpeg_destroy_compress(&jpeg_info);
if (memory_info != (MemoryInfo *) NULL)
memory_info=RelinquishVirtualMemory(memory_info);
(void) CloseBlob(image);
return(MagickFalse);
}
scanline[0]=(JSAMPROW) jpeg_pixels;
scale=65535/(unsigned short) GetQuantumRange((size_t)
jpeg_info.data_precision);
if (scale == 0)
scale=1;
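  /*
    Scale quantum samples down to the encoder's data precision: 8-bit data
    uses ScaleQuantumToChar() directly below, while deeper data is divided
    by this factor after ScaleQuantumToShort().
  */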
if (jpeg_info.data_precision <= 8)
{
if ((jpeg_info.in_color_space == JCS_RGB) ||
(jpeg_info.in_color_space == JCS_YCbCr))
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*p;
register ssize_t
x;
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
q=jpeg_pixels;
for (x=0; x < (ssize_t) image->columns; x++)
{
*q++=(JSAMPLE) ScaleQuantumToChar(GetPixelRed(image,p));
*q++=(JSAMPLE) ScaleQuantumToChar(GetPixelGreen(image,p));
*q++=(JSAMPLE) ScaleQuantumToChar(GetPixelBlue(image,p));
p+=GetPixelChannels(image);
}
(void) jpeg_write_scanlines(&jpeg_info,scanline,1);
status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
else
if (jpeg_info.in_color_space == JCS_GRAYSCALE)
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*p;
register ssize_t
x;
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
q=jpeg_pixels;
for (x=0; x < (ssize_t) image->columns; x++)
{
*q++=(JSAMPLE) ScaleQuantumToChar(ClampToQuantum(GetPixelLuma(
image,p)));
p+=GetPixelChannels(image);
}
(void) jpeg_write_scanlines(&jpeg_info,scanline,1);
status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
else
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*p;
register ssize_t
x;
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
q=jpeg_pixels;
for (x=0; x < (ssize_t) image->columns; x++)
{
/*
Convert DirectClass packets to contiguous CMYK scanlines.
*/
*q++=(JSAMPLE) (ScaleQuantumToChar((Quantum) (QuantumRange-
GetPixelCyan(image,p))));
*q++=(JSAMPLE) (ScaleQuantumToChar((Quantum) (QuantumRange-
GetPixelMagenta(image,p))));
*q++=(JSAMPLE) (ScaleQuantumToChar((Quantum) (QuantumRange-
GetPixelYellow(image,p))));
*q++=(JSAMPLE) (ScaleQuantumToChar((Quantum) (QuantumRange-
GetPixelBlack(image,p))));
p+=GetPixelChannels(image);
}
(void) jpeg_write_scanlines(&jpeg_info,scanline,1);
status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
else
if (jpeg_info.in_color_space == JCS_GRAYSCALE)
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*p;
register ssize_t
x;
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
q=jpeg_pixels;
for (x=0; x < (ssize_t) image->columns; x++)
{
*q++=(JSAMPLE) (ScaleQuantumToShort(ClampToQuantum(GetPixelLuma(image,
p)))/scale);
p+=GetPixelChannels(image);
}
(void) jpeg_write_scanlines(&jpeg_info,scanline,1);
status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
else
if ((jpeg_info.in_color_space == JCS_RGB) ||
(jpeg_info.in_color_space == JCS_YCbCr))
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*p;
register ssize_t
x;
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
q=jpeg_pixels;
for (x=0; x < (ssize_t) image->columns; x++)
{
*q++=(JSAMPLE) (ScaleQuantumToShort(GetPixelRed(image,p))/scale);
*q++=(JSAMPLE) (ScaleQuantumToShort(GetPixelGreen(image,p))/scale);
*q++=(JSAMPLE) (ScaleQuantumToShort(GetPixelBlue(image,p))/scale);
p+=GetPixelChannels(image);
}
(void) jpeg_write_scanlines(&jpeg_info,scanline,1);
status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
else
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*p;
register ssize_t
x;
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
q=jpeg_pixels;
for (x=0; x < (ssize_t) image->columns; x++)
{
/*
Convert DirectClass packets to contiguous CMYK scanlines.
*/
*q++=(JSAMPLE) (ScaleQuantumToShort(QuantumRange-GetPixelRed(
image,p))/scale);
*q++=(JSAMPLE) (ScaleQuantumToShort(QuantumRange-GetPixelGreen(
image,p))/scale);
*q++=(JSAMPLE) (ScaleQuantumToShort(QuantumRange-GetPixelBlue(
image,p))/scale);
*q++=(JSAMPLE) (ScaleQuantumToShort(QuantumRange-GetPixelBlack(
image,p))/scale);
p+=GetPixelChannels(image);
}
(void) jpeg_write_scanlines(&jpeg_info,scanline,1);
status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
if (y == (ssize_t) image->rows)
jpeg_finish_compress(&jpeg_info);
/*
Relinquish resources.
*/
jpeg_destroy_compress(&jpeg_info);
memory_info=RelinquishVirtualMemory(memory_info);
(void) CloseBlob(image);
return(MagickTrue);
}
#endif
| ./CrossVul/dataset_final_sorted/CWE-416/c/good_1181_1 |
crossvul-cpp_data_bad_1181_1 | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% JJJJJ PPPP EEEEE GGGG %
% J P P E G %
% J PPPP EEE G GG %
% J J P E G G %
% JJJ P EEEEE GGG %
% %
% %
% Read/Write JPEG Image Format %
% %
% Software Design %
% John Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2013 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% This software is based in part on the work of the Independent JPEG Group.
% See ftp://ftp.uu.net/graphics/jpeg/jpegsrc.v6b.tar.gz for copyright and
% licensing restrictions. Blob support contributed by Glenn Randers-Pehrson.
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/color.h"
#include "MagickCore/colormap-private.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/module.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/option-private.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/profile.h"
#include "MagickCore/property.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/static.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/token.h"
#include "MagickCore/utility.h"
#include "MagickCore/xml-tree.h"
#include "MagickCore/xml-tree-private.h"
#include <setjmp.h>
#if defined(MAGICKCORE_JPEG_DELEGATE)
#define JPEG_INTERNAL_OPTIONS
#if defined(__MINGW32__)
# define XMD_H 1 /* Avoid conflicting typedef for INT32 */
#endif
#undef HAVE_STDLIB_H
#include "jpeglib.h"
#include "jerror.h"
#endif
/*
Define declarations.
*/
#define ICC_MARKER (JPEG_APP0+2)
#define ICC_PROFILE "ICC_PROFILE"
#define IPTC_MARKER (JPEG_APP0+13)
#define XML_MARKER (JPEG_APP0+1)
#define MaxJPEGScans 1024
/*
Typedef declarations.
*/
#if defined(MAGICKCORE_JPEG_DELEGATE)
typedef struct _DestinationManager
{
struct jpeg_destination_mgr
manager;
Image
*image;
JOCTET
*buffer;
} DestinationManager;
typedef struct _ErrorManager
{
ExceptionInfo
*exception;
Image
*image;
MagickBooleanType
finished;
StringInfo
*profile;
jmp_buf
error_recovery;
} ErrorManager;
typedef struct _SourceManager
{
struct jpeg_source_mgr
manager;
Image
*image;
JOCTET
*buffer;
boolean
start_of_blob;
} SourceManager;
#endif
typedef struct _QuantizationTable
{
char
*slot,
*description;
size_t
width,
height;
double
divisor;
unsigned int
*levels;
} QuantizationTable;
/*
Const declarations.
*/
static const char
xmp_namespace[] = "http://ns.adobe.com/xap/1.0/ ";
#define XmpNamespaceExtent 28
/*
Forward declarations.
*/
#if defined(MAGICKCORE_JPEG_DELEGATE)
static MagickBooleanType
WriteJPEGImage(const ImageInfo *,Image *,ExceptionInfo *);
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s J P E G %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsJPEG() returns MagickTrue if the image format type, identified by the
% magick string, is JPEG.
%
% The format of the IsJPEG method is:
%
% MagickBooleanType IsJPEG(const unsigned char *magick,const size_t length)
%
% A description of each parameter follows:
%
% o magick: compare image format pattern against these bytes.
%
% o length: Specifies the length of the magick string.
%
*/
static MagickBooleanType IsJPEG(const unsigned char *magick,
  const size_t length)
{
if (length < 3)
return(MagickFalse);
if (memcmp(magick,"\377\330\377",3) == 0)
return(MagickTrue);
return(MagickFalse);
}
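/*
  For reference: 0xFF 0xD8 is the JPEG start-of-image (SOI) marker and the
  third 0xFF opens the first marker segment, so every JPEG datastream begins
  with these three bytes.
*/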
#if defined(MAGICKCORE_JPEG_DELEGATE)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d J P E G I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadJPEGImage() reads a JPEG image file and returns it. It allocates
% the memory necessary for the new Image structure and returns a pointer to
% the new image.
%
% The format of the ReadJPEGImage method is:
%
% Image *ReadJPEGImage(const ImageInfo *image_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o exception: return any errors or warnings in this structure.
%
*/
static boolean FillInputBuffer(j_decompress_ptr cinfo)
{
SourceManager
*source;
source=(SourceManager *) cinfo->src;
source->manager.bytes_in_buffer=(size_t) ReadBlob(source->image,
MagickMinBufferExtent,source->buffer);
if (source->manager.bytes_in_buffer == 0)
{
if (source->start_of_blob != FALSE)
ERREXIT(cinfo,JERR_INPUT_EMPTY);
WARNMS(cinfo,JWRN_JPEG_EOF);
source->buffer[0]=(JOCTET) 0xff;
source->buffer[1]=(JOCTET) JPEG_EOI;
source->manager.bytes_in_buffer=2;
}
source->manager.next_input_byte=source->buffer;
source->start_of_blob=FALSE;
return(TRUE);
}
static int GetCharacter(j_decompress_ptr jpeg_info)
{
if (jpeg_info->src->bytes_in_buffer == 0)
{
(void) (*jpeg_info->src->fill_input_buffer)(jpeg_info);
if (jpeg_info->err->msg_code == JWRN_JPEG_EOF)
return EOF;
}
jpeg_info->src->bytes_in_buffer--;
return((int) GETJOCTET(*jpeg_info->src->next_input_byte++));
}
static void InitializeSource(j_decompress_ptr cinfo)
{
SourceManager
*source;
source=(SourceManager *) cinfo->src;
source->start_of_blob=TRUE;
}
static MagickBooleanType IsITUFaxImage(const Image *image)
{
const StringInfo
*profile;
const unsigned char
*datum;
profile=GetImageProfile(image,"8bim");
if (profile == (const StringInfo *) NULL)
return(MagickFalse);
if (GetStringInfoLength(profile) < 5)
return(MagickFalse);
datum=GetStringInfoDatum(profile);
if ((datum[0] == 0x47) && (datum[1] == 0x33) && (datum[2] == 0x46) &&
(datum[3] == 0x41) && (datum[4] == 0x58))
return(MagickTrue);
return(MagickFalse);
}
static void JPEGErrorHandler(j_common_ptr jpeg_info)
{
char
message[JMSG_LENGTH_MAX];
ErrorManager
*error_manager;
ExceptionInfo
*exception;
Image
*image;
*message='\0';
error_manager=(ErrorManager *) jpeg_info->client_data;
image=error_manager->image;
exception=error_manager->exception;
(jpeg_info->err->format_message)(jpeg_info,message);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"[%s] JPEG Trace: \"%s\"",image->filename,message);
if (error_manager->finished != MagickFalse)
(void) ThrowMagickException(exception,GetMagickModule(),CorruptImageWarning,
(char *) message,"`%s'",image->filename);
else
(void) ThrowMagickException(exception,GetMagickModule(),CorruptImageError,
(char *) message,"`%s'",image->filename);
longjmp(error_manager->error_recovery,1);
}
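/*
  JPEGErrorHandler() never returns: the longjmp() above lands at the
  setjmp() in the reader or writer, which then destroys the codec state and
  closes the blob cleanly.
*/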
static void JPEGProgressHandler(j_common_ptr jpeg_info)
{
ErrorManager
*error_manager;
ExceptionInfo
*exception;
Image
*image;
error_manager=(ErrorManager *) jpeg_info->client_data;
image=error_manager->image;
exception=error_manager->exception;
if (jpeg_info->is_decompressor == 0)
return;
if (((j_decompress_ptr) jpeg_info)->input_scan_number < MaxJPEGScans)
return;
(void) ThrowMagickException(exception,GetMagickModule(),CorruptImageError,
"too many scans","`%s'",image->filename);
longjmp(error_manager->error_recovery,1);
}
static MagickBooleanType JPEGWarningHandler(j_common_ptr jpeg_info,int level)
{
#define JPEGExcessiveWarnings 1000
char
message[JMSG_LENGTH_MAX];
ErrorManager
*error_manager;
ExceptionInfo
*exception;
Image
*image;
*message='\0';
error_manager=(ErrorManager *) jpeg_info->client_data;
exception=error_manager->exception;
image=error_manager->image;
if (level < 0)
{
/*
Process warning message.
*/
(jpeg_info->err->format_message)(jpeg_info,message);
if (jpeg_info->err->num_warnings++ < JPEGExcessiveWarnings)
ThrowBinaryException(CorruptImageWarning,(char *) message,
image->filename);
}
else
if ((image->debug != MagickFalse) &&
(level >= jpeg_info->err->trace_level))
{
/*
Process trace message.
*/
(jpeg_info->err->format_message)(jpeg_info,message);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"[%s] JPEG Trace: \"%s\"",image->filename,message);
}
return(MagickTrue);
}
static boolean ReadComment(j_decompress_ptr jpeg_info)
{
ErrorManager
*error_manager;
ExceptionInfo
*exception;
Image
*image;
register unsigned char
*p;
register ssize_t
i;
size_t
length;
StringInfo
*comment;
/*
Determine length of comment.
*/
error_manager=(ErrorManager *) jpeg_info->client_data;
exception=error_manager->exception;
image=error_manager->image;
length=(size_t) ((size_t) GetCharacter(jpeg_info) << 8);
length+=GetCharacter(jpeg_info);
if (length <= 2)
return(TRUE);
length-=2;
comment=BlobToStringInfo((const void *) NULL,length);
if (comment == (StringInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(FALSE);
}
/*
Read comment.
*/
error_manager->profile=comment;
p=GetStringInfoDatum(comment);
for (i=0; i < (ssize_t) length; i++)
{
int
c;
c=GetCharacter(jpeg_info);
if (c == EOF)
break;
*p++=(unsigned char) c;
}
*p='\0';
error_manager->profile=NULL;
if (i != (ssize_t) length)
{
comment=DestroyStringInfo(comment);
(void) ThrowMagickException(exception,GetMagickModule(),
CorruptImageError,"InsufficientImageDataInFile","`%s'",
image->filename);
return(FALSE);
}
p=GetStringInfoDatum(comment);
(void) SetImageProperty(image,"comment",(const char *) p,exception);
comment=DestroyStringInfo(comment);
return(TRUE);
}
static boolean ReadICCProfile(j_decompress_ptr jpeg_info)
{
char
magick[12];
ErrorManager
*error_manager;
ExceptionInfo
*exception;
Image
*image;
MagickBooleanType
status;
register ssize_t
i;
register unsigned char
*p;
size_t
length;
StringInfo
*icc_profile,
*profile;
/*
Read color profile.
*/
length=(size_t) ((size_t) GetCharacter(jpeg_info) << 8);
length+=(size_t) GetCharacter(jpeg_info);
length-=2;
if (length <= 14)
{
while (length-- > 0)
if (GetCharacter(jpeg_info) == EOF)
break;
return(TRUE);
}
for (i=0; i < 12; i++)
magick[i]=(char) GetCharacter(jpeg_info);
if (LocaleCompare(magick,ICC_PROFILE) != 0)
{
/*
        Not an ICC profile; return.
*/
for (i=0; i < (ssize_t) (length-12); i++)
if (GetCharacter(jpeg_info) == EOF)
break;
return(TRUE);
}
(void) GetCharacter(jpeg_info); /* id */
(void) GetCharacter(jpeg_info); /* markers */
length-=14;
error_manager=(ErrorManager *) jpeg_info->client_data;
exception=error_manager->exception;
image=error_manager->image;
profile=BlobToStringInfo((const void *) NULL,length);
if (profile == (StringInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(FALSE);
}
error_manager->profile=profile;
p=GetStringInfoDatum(profile);
for (i=0; i < (ssize_t) length; i++)
{
int
c;
c=GetCharacter(jpeg_info);
if (c == EOF)
break;
*p++=(unsigned char) c;
}
if (i != (ssize_t) length)
{
profile=DestroyStringInfo(profile);
(void) ThrowMagickException(exception,GetMagickModule(),
CorruptImageError,"InsufficientImageDataInFile","`%s'",
image->filename);
return(FALSE);
}
error_manager->profile=NULL;
icc_profile=(StringInfo *) GetImageProfile(image,"icc");
if (icc_profile != (StringInfo *) NULL)
{
ConcatenateStringInfo(icc_profile,profile);
profile=DestroyStringInfo(profile);
}
else
{
status=SetImageProfile(image,"icc",profile,exception);
profile=DestroyStringInfo(profile);
if (status == MagickFalse)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(FALSE);
}
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Profile: ICC, %.20g bytes",(double) length);
return(TRUE);
}
static boolean ReadIPTCProfile(j_decompress_ptr jpeg_info)
{
char
magick[MagickPathExtent];
ErrorManager
*error_manager;
ExceptionInfo
*exception;
Image
*image;
MagickBooleanType
status;
register ssize_t
i;
register unsigned char
*p;
size_t
length;
StringInfo
*iptc_profile,
*profile;
/*
Determine length of binary data stored here.
*/
length=(size_t) ((size_t) GetCharacter(jpeg_info) << 8);
length+=(size_t) GetCharacter(jpeg_info);
length-=2;
if (length <= 14)
{
while (length-- > 0)
if (GetCharacter(jpeg_info) == EOF)
break;
return(TRUE);
}
/*
Validate that this was written as a Photoshop resource format slug.
*/
for (i=0; i < 10; i++)
magick[i]=(char) GetCharacter(jpeg_info);
magick[10]='\0';
length-=10;
if (length <= 10)
return(TRUE);
if (LocaleCompare(magick,"Photoshop ") != 0)
{
/*
        Not an IPTC profile; return.
*/
for (i=0; i < (ssize_t) length; i++)
if (GetCharacter(jpeg_info) == EOF)
break;
return(TRUE);
}
/*
Remove the version number.
*/
for (i=0; i < 4; i++)
if (GetCharacter(jpeg_info) == EOF)
break;
if (length <= 11)
return(TRUE);
length-=4;
error_manager=(ErrorManager *) jpeg_info->client_data;
exception=error_manager->exception;
image=error_manager->image;
profile=BlobToStringInfo((const void *) NULL,length);
if (profile == (StringInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(FALSE);
}
error_manager->profile=profile;
p=GetStringInfoDatum(profile);
for (i=0; i < (ssize_t) length; i++)
{
int
c;
c=GetCharacter(jpeg_info);
if (c == EOF)
break;
*p++=(unsigned char) c;
}
error_manager->profile=NULL;
if (i != (ssize_t) length)
{
profile=DestroyStringInfo(profile);
(void) ThrowMagickException(exception,GetMagickModule(),
CorruptImageError,"InsufficientImageDataInFile","`%s'",
image->filename);
return(FALSE);
}
  /* The IPTC payload is stored as an 8BIM (Photoshop) resource. */
iptc_profile=(StringInfo *) GetImageProfile(image,"8bim");
if (iptc_profile != (StringInfo *) NULL)
{
ConcatenateStringInfo(iptc_profile,profile);
profile=DestroyStringInfo(profile);
}
else
{
status=SetImageProfile(image,"8bim",profile,exception);
profile=DestroyStringInfo(profile);
if (status == MagickFalse)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(FALSE);
}
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Profile: iptc, %.20g bytes",(double) length);
return(TRUE);
}
static boolean ReadProfile(j_decompress_ptr jpeg_info)
{
char
name[MagickPathExtent];
const StringInfo
*previous_profile;
ErrorManager
*error_manager;
ExceptionInfo
*exception;
Image
*image;
int
marker;
MagickBooleanType
status;
register ssize_t
i;
register unsigned char
*p;
size_t
length;
StringInfo
*profile;
/*
Read generic profile.
*/
length=(size_t) ((size_t) GetCharacter(jpeg_info) << 8);
length+=(size_t) GetCharacter(jpeg_info);
if (length <= 2)
return(TRUE);
length-=2;
marker=jpeg_info->unread_marker-JPEG_APP0;
(void) FormatLocaleString(name,MagickPathExtent,"APP%d",marker);
error_manager=(ErrorManager *) jpeg_info->client_data;
exception=error_manager->exception;
image=error_manager->image;
profile=BlobToStringInfo((const void *) NULL,length);
if (profile == (StringInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(FALSE);
}
error_manager->profile=profile;
p=GetStringInfoDatum(profile);
for (i=0; i < (ssize_t) length; i++)
{
int
c;
c=GetCharacter(jpeg_info);
if (c == EOF)
break;
*p++=(unsigned char) c;
}
error_manager->profile=NULL;
if (i != (ssize_t) length)
{
profile=DestroyStringInfo(profile);
(void) ThrowMagickException(exception,GetMagickModule(),
CorruptImageError,"InsufficientImageDataInFile","`%s'",
image->filename);
return(FALSE);
}
if (marker == 1)
{
p=GetStringInfoDatum(profile);
if ((length > 4) && (LocaleNCompare((char *) p,"exif",4) == 0))
(void) CopyMagickString(name,"exif",MagickPathExtent);
else if ((length > XmpNamespaceExtent) &&
(LocaleNCompare((char *) p,xmp_namespace,XmpNamespaceExtent-1) == 0))
{
ssize_t
j;
/*
Extract namespace from XMP profile.
*/
p=GetStringInfoDatum(profile)+XmpNamespaceExtent;
for (j=XmpNamespaceExtent; j < (ssize_t) GetStringInfoLength(profile); j++)
{
if (*p == '\0')
break;
p++;
}
if (j < (ssize_t) GetStringInfoLength(profile))
(void) DestroyStringInfo(SplitStringInfo(profile,(size_t) (j+1)));
(void) CopyMagickString(name,"xmp",MagickPathExtent);
}
}
previous_profile=GetImageProfile(image,name);
if ((previous_profile != (const StringInfo *) NULL) &&
(CompareStringInfo(previous_profile,profile) != 0))
{
size_t
profile_length;
profile_length=GetStringInfoLength(profile);
SetStringInfoLength(profile,GetStringInfoLength(profile)+
GetStringInfoLength(previous_profile));
(void) memmove(GetStringInfoDatum(profile)+
GetStringInfoLength(previous_profile),GetStringInfoDatum(profile),
profile_length);
(void) memcpy(GetStringInfoDatum(profile),
GetStringInfoDatum(previous_profile),
GetStringInfoLength(previous_profile));
}
status=SetImageProfile(image,name,profile,exception);
profile=DestroyStringInfo(profile);
if (status == MagickFalse)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(FALSE);
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Profile: %s, %.20g bytes",name,(double) length);
return(TRUE);
}
static void SkipInputData(j_decompress_ptr cinfo,long number_bytes)
{
SourceManager
*source;
if (number_bytes <= 0)
return;
source=(SourceManager *) cinfo->src;
while (number_bytes > (long) source->manager.bytes_in_buffer)
{
number_bytes-=(long) source->manager.bytes_in_buffer;
(void) FillInputBuffer(cinfo);
}
source->manager.next_input_byte+=number_bytes;
source->manager.bytes_in_buffer-=number_bytes;
}
static void TerminateSource(j_decompress_ptr cinfo)
{
(void) cinfo;
}
static void JPEGSourceManager(j_decompress_ptr cinfo,Image *image)
{
SourceManager
*source;
cinfo->src=(struct jpeg_source_mgr *) (*cinfo->mem->alloc_small)
((j_common_ptr) cinfo,JPOOL_IMAGE,sizeof(SourceManager));
source=(SourceManager *) cinfo->src;
source->buffer=(JOCTET *) (*cinfo->mem->alloc_small)
((j_common_ptr) cinfo,JPOOL_IMAGE,MagickMinBufferExtent*sizeof(JOCTET));
source=(SourceManager *) cinfo->src;
source->manager.init_source=InitializeSource;
source->manager.fill_input_buffer=FillInputBuffer;
source->manager.skip_input_data=SkipInputData;
source->manager.resync_to_restart=jpeg_resync_to_restart;
source->manager.term_source=TerminateSource;
source->manager.bytes_in_buffer=0;
source->manager.next_input_byte=NULL;
source->image=image;
}
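/*
  The source side mirrors the destination manager: libjpeg calls init_source
  when decoding starts, fill_input_buffer whenever the buffer drains, and
  skip_input_data to step over unwanted marker payloads.
*/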
static void JPEGSetImageQuality(struct jpeg_decompress_struct *jpeg_info,
Image *image)
{
image->quality=UndefinedCompressionQuality;
#if defined(D_PROGRESSIVE_SUPPORTED)
if (image->compression == LosslessJPEGCompression)
{
image->quality=100;
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Quality: 100 (lossless)");
}
else
#endif
{
ssize_t
j,
qvalue,
sum;
register ssize_t
i;
/*
Determine the JPEG compression quality from the quantization tables.
*/
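      /*
        The hash[] and sums[] tables below are signatures precomputed from
        the IJG sample quantization tables at qualities 1-100; comparing the
        observed coefficient sums against them recovers the encoder quality,
        exactly when the sample tables were used and approximately otherwise.
      */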
sum=0;
for (i=0; i < NUM_QUANT_TBLS; i++)
{
if (jpeg_info->quant_tbl_ptrs[i] != NULL)
for (j=0; j < DCTSIZE2; j++)
sum+=jpeg_info->quant_tbl_ptrs[i]->quantval[j];
}
if ((jpeg_info->quant_tbl_ptrs[0] != NULL) &&
(jpeg_info->quant_tbl_ptrs[1] != NULL))
{
ssize_t
hash[101] =
{
1020, 1015, 932, 848, 780, 735, 702, 679, 660, 645,
632, 623, 613, 607, 600, 594, 589, 585, 581, 571,
555, 542, 529, 514, 494, 474, 457, 439, 424, 410,
397, 386, 373, 364, 351, 341, 334, 324, 317, 309,
299, 294, 287, 279, 274, 267, 262, 257, 251, 247,
243, 237, 232, 227, 222, 217, 213, 207, 202, 198,
192, 188, 183, 177, 173, 168, 163, 157, 153, 148,
143, 139, 132, 128, 125, 119, 115, 108, 104, 99,
94, 90, 84, 79, 74, 70, 64, 59, 55, 49,
45, 40, 34, 30, 25, 20, 15, 11, 6, 4,
0
},
sums[101] =
{
32640, 32635, 32266, 31495, 30665, 29804, 29146, 28599, 28104,
27670, 27225, 26725, 26210, 25716, 25240, 24789, 24373, 23946,
23572, 22846, 21801, 20842, 19949, 19121, 18386, 17651, 16998,
16349, 15800, 15247, 14783, 14321, 13859, 13535, 13081, 12702,
12423, 12056, 11779, 11513, 11135, 10955, 10676, 10392, 10208,
9928, 9747, 9564, 9369, 9193, 9017, 8822, 8639, 8458,
8270, 8084, 7896, 7710, 7527, 7347, 7156, 6977, 6788,
6607, 6422, 6236, 6054, 5867, 5684, 5495, 5305, 5128,
4945, 4751, 4638, 4442, 4248, 4065, 3888, 3698, 3509,
3326, 3139, 2957, 2775, 2586, 2405, 2216, 2037, 1846,
1666, 1483, 1297, 1109, 927, 735, 554, 375, 201,
128, 0
};
qvalue=(ssize_t) (jpeg_info->quant_tbl_ptrs[0]->quantval[2]+
jpeg_info->quant_tbl_ptrs[0]->quantval[53]+
jpeg_info->quant_tbl_ptrs[1]->quantval[0]+
jpeg_info->quant_tbl_ptrs[1]->quantval[DCTSIZE2-1]);
for (i=0; i < 100; i++)
{
if ((qvalue < hash[i]) && (sum < sums[i]))
continue;
if (((qvalue <= hash[i]) && (sum <= sums[i])) || (i >= 50))
image->quality=(size_t) i+1;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Quality: %.20g (%s)",(double) i+1,(qvalue <= hash[i]) &&
(sum <= sums[i]) ? "exact" : "approximate");
break;
}
}
else
if (jpeg_info->quant_tbl_ptrs[0] != NULL)
{
ssize_t
hash[101] =
{
510, 505, 422, 380, 355, 338, 326, 318, 311, 305,
300, 297, 293, 291, 288, 286, 284, 283, 281, 280,
279, 278, 277, 273, 262, 251, 243, 233, 225, 218,
211, 205, 198, 193, 186, 181, 177, 172, 168, 164,
158, 156, 152, 148, 145, 142, 139, 136, 133, 131,
129, 126, 123, 120, 118, 115, 113, 110, 107, 105,
102, 100, 97, 94, 92, 89, 87, 83, 81, 79,
76, 74, 70, 68, 66, 63, 61, 57, 55, 52,
50, 48, 44, 42, 39, 37, 34, 31, 29, 26,
24, 21, 18, 16, 13, 11, 8, 6, 3, 2,
0
},
sums[101] =
{
16320, 16315, 15946, 15277, 14655, 14073, 13623, 13230, 12859,
12560, 12240, 11861, 11456, 11081, 10714, 10360, 10027, 9679,
9368, 9056, 8680, 8331, 7995, 7668, 7376, 7084, 6823,
6562, 6345, 6125, 5939, 5756, 5571, 5421, 5240, 5086,
4976, 4829, 4719, 4616, 4463, 4393, 4280, 4166, 4092,
3980, 3909, 3835, 3755, 3688, 3621, 3541, 3467, 3396,
3323, 3247, 3170, 3096, 3021, 2952, 2874, 2804, 2727,
2657, 2583, 2509, 2437, 2362, 2290, 2211, 2136, 2068,
1996, 1915, 1858, 1773, 1692, 1620, 1552, 1477, 1398,
1326, 1251, 1179, 1109, 1031, 961, 884, 814, 736,
667, 592, 518, 441, 369, 292, 221, 151, 86,
64, 0
};
qvalue=(ssize_t) (jpeg_info->quant_tbl_ptrs[0]->quantval[2]+
jpeg_info->quant_tbl_ptrs[0]->quantval[53]);
for (i=0; i < 100; i++)
{
if ((qvalue < hash[i]) && (sum < sums[i]))
continue;
if (((qvalue <= hash[i]) && (sum <= sums[i])) || (i >= 50))
            image->quality=(size_t) i+1;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Quality: %.20g (%s)",(double) i+1,(qvalue <= hash[i]) &&
(sum <= sums[i]) ? "exact" : "approximate");
break;
}
}
}
}
static void JPEGSetImageSamplingFactor(struct jpeg_decompress_struct *jpeg_info,
  Image *image,ExceptionInfo *exception)
{
char
sampling_factor[MagickPathExtent];
switch (jpeg_info->out_color_space)
{
case JCS_CMYK:
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),"Colorspace: CMYK");
(void) FormatLocaleString(sampling_factor,MagickPathExtent,
"%dx%d,%dx%d,%dx%d,%dx%d",jpeg_info->comp_info[0].h_samp_factor,
jpeg_info->comp_info[0].v_samp_factor,
jpeg_info->comp_info[1].h_samp_factor,
jpeg_info->comp_info[1].v_samp_factor,
jpeg_info->comp_info[2].h_samp_factor,
jpeg_info->comp_info[2].v_samp_factor,
jpeg_info->comp_info[3].h_samp_factor,
jpeg_info->comp_info[3].v_samp_factor);
break;
}
case JCS_GRAYSCALE:
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Colorspace: GRAYSCALE");
(void) FormatLocaleString(sampling_factor,MagickPathExtent,"%dx%d",
jpeg_info->comp_info[0].h_samp_factor,
jpeg_info->comp_info[0].v_samp_factor);
break;
}
case JCS_RGB:
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),"Colorspace: RGB");
(void) FormatLocaleString(sampling_factor,MagickPathExtent,
"%dx%d,%dx%d,%dx%d",jpeg_info->comp_info[0].h_samp_factor,
jpeg_info->comp_info[0].v_samp_factor,
jpeg_info->comp_info[1].h_samp_factor,
jpeg_info->comp_info[1].v_samp_factor,
jpeg_info->comp_info[2].h_samp_factor,
jpeg_info->comp_info[2].v_samp_factor);
break;
}
default:
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),"Colorspace: %d",
jpeg_info->out_color_space);
(void) FormatLocaleString(sampling_factor,MagickPathExtent,
"%dx%d,%dx%d,%dx%d,%dx%d",jpeg_info->comp_info[0].h_samp_factor,
jpeg_info->comp_info[0].v_samp_factor,
jpeg_info->comp_info[1].h_samp_factor,
jpeg_info->comp_info[1].v_samp_factor,
jpeg_info->comp_info[2].h_samp_factor,
jpeg_info->comp_info[2].v_samp_factor,
jpeg_info->comp_info[3].h_samp_factor,
jpeg_info->comp_info[3].v_samp_factor);
break;
}
}
(void) SetImageProperty(image,"jpeg:sampling-factor",sampling_factor,
exception);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),"Sampling Factors: %s",
sampling_factor);
}
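/*
  Illustrative: once recorded, the factors can be queried from the property,
  e.g. `magick identify -format "%[jpeg:sampling-factor]" input.jpg`.
*/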
static Image *ReadJPEGImage(const ImageInfo *image_info,
ExceptionInfo *exception)
{
char
value[MagickPathExtent];
const char
*dct_method,
*option;
ErrorManager
error_manager;
Image
*image;
JSAMPLE
*volatile jpeg_pixels;
JSAMPROW
scanline[1];
MagickBooleanType
debug,
status;
MagickSizeType
number_pixels;
MemoryInfo
*memory_info;
Quantum
index;
register ssize_t
i;
struct jpeg_decompress_struct
jpeg_info;
struct jpeg_error_mgr
jpeg_error;
struct jpeg_progress_mgr
jpeg_progress;
register JSAMPLE
*p;
size_t
units;
ssize_t
y;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
debug=IsEventLogging();
(void) debug;
image=AcquireImage(image_info,exception);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
    Verify that the file size is large enough to contain a JPEG datastream.
*/
if (GetBlobSize(image) < 107)
ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
/*
Initialize JPEG parameters.
*/
(void) memset(&error_manager,0,sizeof(error_manager));
(void) memset(&jpeg_info,0,sizeof(jpeg_info));
(void) memset(&jpeg_error,0,sizeof(jpeg_error));
(void) memset(&jpeg_progress,0,sizeof(jpeg_progress));
jpeg_info.err=jpeg_std_error(&jpeg_error);
jpeg_info.err->emit_message=(void (*)(j_common_ptr,int)) JPEGWarningHandler;
jpeg_info.err->error_exit=(void (*)(j_common_ptr)) JPEGErrorHandler;
memory_info=(MemoryInfo *) NULL;
error_manager.exception=exception;
error_manager.image=image;
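  /*
    libjpeg reports fatal errors with longjmp(); recover here and return
    whatever was decoded before the error, if anything.
  */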
if (setjmp(error_manager.error_recovery) != 0)
{
jpeg_destroy_decompress(&jpeg_info);
if (error_manager.profile != (StringInfo *) NULL)
error_manager.profile=DestroyStringInfo(error_manager.profile);
(void) CloseBlob(image);
number_pixels=(MagickSizeType) image->columns*image->rows;
if (number_pixels != 0)
return(GetFirstImageInList(image));
return(DestroyImage(image));
}
jpeg_info.client_data=(void *) &error_manager;
jpeg_create_decompress(&jpeg_info);
if (GetMaxMemoryRequest() != ~0UL)
jpeg_info.mem->max_memory_to_use=(long) GetMaxMemoryRequest();
jpeg_progress.progress_monitor=(void (*)(j_common_ptr)) JPEGProgressHandler;
jpeg_info.progress=(&jpeg_progress);
JPEGSourceManager(&jpeg_info,image);
jpeg_set_marker_processor(&jpeg_info,JPEG_COM,ReadComment);
option=GetImageOption(image_info,"profile:skip");
if (IsOptionMember("ICC",option) == MagickFalse)
jpeg_set_marker_processor(&jpeg_info,ICC_MARKER,ReadICCProfile);
if (IsOptionMember("IPTC",option) == MagickFalse)
jpeg_set_marker_processor(&jpeg_info,IPTC_MARKER,ReadIPTCProfile);
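  /*
    Register a generic profile reader for the remaining APPn markers;
    APP0 (JFIF) and APP14 (Adobe) are interpreted by libjpeg itself,
    while APP2 (ICC) and APP13 (IPTC) have dedicated handlers above.
  */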
for (i=1; i < 16; i++)
if ((i != 2) && (i != 13) && (i != 14))
if (IsOptionMember("APP",option) == MagickFalse)
jpeg_set_marker_processor(&jpeg_info,(int) (JPEG_APP0+i),ReadProfile);
i=(ssize_t) jpeg_read_header(&jpeg_info,TRUE);
if ((image_info->colorspace == YCbCrColorspace) ||
(image_info->colorspace == Rec601YCbCrColorspace) ||
(image_info->colorspace == Rec709YCbCrColorspace))
jpeg_info.out_color_space=JCS_YCbCr;
/*
Set image resolution.
*/
units=0;
if ((jpeg_info.saw_JFIF_marker != 0) && (jpeg_info.X_density != 1) &&
(jpeg_info.Y_density != 1))
{
image->resolution.x=(double) jpeg_info.X_density;
image->resolution.y=(double) jpeg_info.Y_density;
units=(size_t) jpeg_info.density_unit;
}
if (units == 1)
image->units=PixelsPerInchResolution;
if (units == 2)
image->units=PixelsPerCentimeterResolution;
number_pixels=(MagickSizeType) image->columns*image->rows;
option=GetImageOption(image_info,"jpeg:size");
if ((option != (const char *) NULL) &&
(jpeg_info.out_color_space != JCS_YCbCr))
{
double
scale_factor;
GeometryInfo
geometry_info;
MagickStatusType
flags;
/*
Scale the image.
*/
flags=ParseGeometry(option,&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=geometry_info.rho;
jpeg_calc_output_dimensions(&jpeg_info);
image->magick_columns=jpeg_info.output_width;
image->magick_rows=jpeg_info.output_height;
scale_factor=1.0;
if (geometry_info.rho != 0.0)
scale_factor=jpeg_info.output_width/geometry_info.rho;
if ((geometry_info.sigma != 0.0) &&
(scale_factor > (jpeg_info.output_height/geometry_info.sigma)))
scale_factor=jpeg_info.output_height/geometry_info.sigma;
jpeg_info.scale_num=1U;
jpeg_info.scale_denom=(unsigned int) scale_factor;
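      /*
        libjpeg only honors a limited set of scalings (1/1 .. 1/8 in classic
        libjpeg, M/8 in libjpeg v7+), so recompute the output dimensions
        from the clamped ratio.
      */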
jpeg_calc_output_dimensions(&jpeg_info);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Scale factor: %.20g",(double) scale_factor);
}
#if (JPEG_LIB_VERSION >= 61) && defined(D_PROGRESSIVE_SUPPORTED)
#if defined(D_LOSSLESS_SUPPORTED)
image->interlace=jpeg_info.process == JPROC_PROGRESSIVE ?
JPEGInterlace : NoInterlace;
image->compression=jpeg_info.process == JPROC_LOSSLESS ?
LosslessJPEGCompression : JPEGCompression;
if (jpeg_info.data_precision > 8)
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"12-bit JPEG not supported. Reducing pixel data to 8 bits","`%s'",
image->filename);
if (jpeg_info.data_precision == 16)
jpeg_info.data_precision=12;
#else
image->interlace=jpeg_info.progressive_mode != 0 ? JPEGInterlace :
NoInterlace;
image->compression=JPEGCompression;
#endif
#else
image->compression=JPEGCompression;
image->interlace=JPEGInterlace;
#endif
option=GetImageOption(image_info,"jpeg:colors");
if (option != (const char *) NULL)
{
/*
Let the JPEG library quantize the image.
*/
jpeg_info.quantize_colors=TRUE;
jpeg_info.desired_number_of_colors=(int) StringToUnsignedLong(option);
}
option=GetImageOption(image_info,"jpeg:block-smoothing");
if (option != (const char *) NULL)
jpeg_info.do_block_smoothing=IsStringTrue(option) != MagickFalse ? TRUE :
FALSE;
dct_method=GetImageOption(image_info,"jpeg:dct-method");
if (dct_method != (const char *) NULL)
switch (*dct_method)
{
case 'D':
case 'd':
{
if (LocaleCompare(dct_method,"default") == 0)
jpeg_info.dct_method=JDCT_DEFAULT;
break;
}
case 'F':
case 'f':
{
if (LocaleCompare(dct_method,"fastest") == 0)
jpeg_info.dct_method=JDCT_FASTEST;
if (LocaleCompare(dct_method,"float") == 0)
jpeg_info.dct_method=JDCT_FLOAT;
break;
}
case 'I':
case 'i':
{
if (LocaleCompare(dct_method,"ifast") == 0)
jpeg_info.dct_method=JDCT_IFAST;
if (LocaleCompare(dct_method,"islow") == 0)
jpeg_info.dct_method=JDCT_ISLOW;
break;
}
}
option=GetImageOption(image_info,"jpeg:fancy-upsampling");
if (option != (const char *) NULL)
jpeg_info.do_fancy_upsampling=IsStringTrue(option) != MagickFalse ? TRUE :
FALSE;
jpeg_calc_output_dimensions(&jpeg_info);
image->columns=jpeg_info.output_width;
image->rows=jpeg_info.output_height;
image->depth=(size_t) jpeg_info.data_precision;
switch (jpeg_info.out_color_space)
{
case JCS_RGB:
default:
{
(void) SetImageColorspace(image,sRGBColorspace,exception);
break;
}
case JCS_GRAYSCALE:
{
(void) SetImageColorspace(image,GRAYColorspace,exception);
break;
}
case JCS_YCbCr:
{
(void) SetImageColorspace(image,YCbCrColorspace,exception);
break;
}
case JCS_CMYK:
{
(void) SetImageColorspace(image,CMYKColorspace,exception);
break;
}
}
if (IsITUFaxImage(image) != MagickFalse)
{
(void) SetImageColorspace(image,LabColorspace,exception);
jpeg_info.out_color_space=JCS_YCbCr;
}
option=GetImageOption(image_info,"jpeg:colors");
if (option != (const char *) NULL)
if (AcquireImageColormap(image,StringToUnsignedLong(option),exception) == MagickFalse)
{
jpeg_destroy_decompress(&jpeg_info);
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
}
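  /*
    Single-component output without library quantization becomes a
    PseudoClass grayscale image with a full-range colormap.
  */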
if ((jpeg_info.output_components == 1) && (jpeg_info.quantize_colors == 0))
{
size_t
colors;
colors=(size_t) GetQuantumRange(image->depth)+1;
if (AcquireImageColormap(image,colors,exception) == MagickFalse)
{
jpeg_destroy_decompress(&jpeg_info);
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
}
}
if (image->debug != MagickFalse)
{
if (image->interlace != NoInterlace)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Interlace: progressive");
else
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Interlace: nonprogressive");
(void) LogMagickEvent(CoderEvent,GetMagickModule(),"Data precision: %d",
(int) jpeg_info.data_precision);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),"Geometry: %dx%d",
(int) jpeg_info.output_width,(int) jpeg_info.output_height);
}
JPEGSetImageQuality(&jpeg_info,image);
JPEGSetImageSamplingFactor(&jpeg_info,image,exception);
(void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
jpeg_info.out_color_space);
(void) SetImageProperty(image,"jpeg:colorspace",value,exception);
#if defined(D_ARITH_CODING_SUPPORTED)
if (jpeg_info.arith_code == TRUE)
(void) SetImageProperty(image,"jpeg:coding","arithmetic",exception);
#endif
if (image_info->ping != MagickFalse)
{
jpeg_destroy_decompress(&jpeg_info);
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
status=SetImageExtent(image,image->columns,image->rows,exception);
if (status == MagickFalse)
{
jpeg_destroy_decompress(&jpeg_info);
return(DestroyImageList(image));
}
(void) jpeg_start_decompress(&jpeg_info);
if ((jpeg_info.output_components != 1) &&
(jpeg_info.output_components != 3) && (jpeg_info.output_components != 4))
{
jpeg_destroy_decompress(&jpeg_info);
ThrowReaderException(CorruptImageError,"ImageTypeNotSupported");
}
memory_info=AcquireVirtualMemory((size_t) image->columns,
jpeg_info.output_components*sizeof(*jpeg_pixels));
if (memory_info == (MemoryInfo *) NULL)
{
jpeg_destroy_decompress(&jpeg_info);
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
}
jpeg_pixels=(JSAMPLE *) GetVirtualMemoryBlob(memory_info);
(void) memset(jpeg_pixels,0,image->columns*
jpeg_info.output_components*sizeof(*jpeg_pixels));
/*
Convert JPEG pixels to pixel packets.
*/
if (setjmp(error_manager.error_recovery) != 0)
{
if (memory_info != (MemoryInfo *) NULL)
memory_info=RelinquishVirtualMemory(memory_info);
jpeg_destroy_decompress(&jpeg_info);
(void) CloseBlob(image);
number_pixels=(MagickSizeType) image->columns*image->rows;
if (number_pixels != 0)
return(GetFirstImageInList(image));
return(DestroyImage(image));
}
if (jpeg_info.quantize_colors != 0)
{
image->colors=(size_t) jpeg_info.actual_number_of_colors;
if (jpeg_info.out_color_space == JCS_GRAYSCALE)
for (i=0; i < (ssize_t) image->colors; i++)
{
image->colormap[i].red=(double) ScaleCharToQuantum(
jpeg_info.colormap[0][i]);
image->colormap[i].green=image->colormap[i].red;
image->colormap[i].blue=image->colormap[i].red;
image->colormap[i].alpha=(MagickRealType) OpaqueAlpha;
}
else
for (i=0; i < (ssize_t) image->colors; i++)
{
image->colormap[i].red=(double) ScaleCharToQuantum(
jpeg_info.colormap[0][i]);
image->colormap[i].green=(double) ScaleCharToQuantum(
jpeg_info.colormap[1][i]);
image->colormap[i].blue=(double) ScaleCharToQuantum(
jpeg_info.colormap[2][i]);
image->colormap[i].alpha=(MagickRealType) OpaqueAlpha;
}
}
scanline[0]=(JSAMPROW) jpeg_pixels;
for (y=0; y < (ssize_t) image->rows; y++)
{
register ssize_t
x;
register Quantum
*magick_restrict q;
if (jpeg_read_scanlines(&jpeg_info,scanline,1) != 1)
{
(void) ThrowMagickException(exception,GetMagickModule(),
CorruptImageWarning,"SkipToSyncByte","`%s'",image->filename);
continue;
}
p=jpeg_pixels;
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
if (jpeg_info.data_precision > 8)
{
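        /*
          Samples wider than 8 bits (typically 12) are expanded to the full
          16-bit range before conversion to Quantum.
        */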
unsigned short
scale;
scale=65535/(unsigned short) GetQuantumRange((size_t)
jpeg_info.data_precision);
if (jpeg_info.output_components == 1)
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t
pixel;
pixel=(ssize_t) (scale*GETJSAMPLE(*p));
index=(Quantum) ConstrainColormapIndex(image,pixel,exception);
SetPixelIndex(image,index,q);
SetPixelViaPixelInfo(image,image->colormap+(ssize_t) index,q);
p++;
q+=GetPixelChannels(image);
}
else
if (image->colorspace != CMYKColorspace)
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelRed(image,ScaleShortToQuantum(
(unsigned short) (scale*GETJSAMPLE(*p++))),q);
SetPixelGreen(image,ScaleShortToQuantum(
(unsigned short) (scale*GETJSAMPLE(*p++))),q);
SetPixelBlue(image,ScaleShortToQuantum(
(unsigned short) (scale*GETJSAMPLE(*p++))),q);
SetPixelAlpha(image,OpaqueAlpha,q);
q+=GetPixelChannels(image);
}
else
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelCyan(image,QuantumRange-ScaleShortToQuantum(
(unsigned short) (scale*GETJSAMPLE(*p++))),q);
SetPixelMagenta(image,QuantumRange-ScaleShortToQuantum(
(unsigned short) (scale*GETJSAMPLE(*p++))),q);
SetPixelYellow(image,QuantumRange-ScaleShortToQuantum(
(unsigned short) (scale*GETJSAMPLE(*p++))),q);
SetPixelBlack(image,QuantumRange-ScaleShortToQuantum(
(unsigned short) (scale*GETJSAMPLE(*p++))),q);
SetPixelAlpha(image,OpaqueAlpha,q);
q+=GetPixelChannels(image);
}
}
else
if (jpeg_info.output_components == 1)
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t
pixel;
pixel=(ssize_t) GETJSAMPLE(*p);
index=(Quantum) ConstrainColormapIndex(image,pixel,exception);
SetPixelIndex(image,index,q);
SetPixelViaPixelInfo(image,image->colormap+(ssize_t) index,q);
p++;
q+=GetPixelChannels(image);
}
else
if (image->colorspace != CMYKColorspace)
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelRed(image,ScaleCharToQuantum((unsigned char)
GETJSAMPLE(*p++)),q);
SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
GETJSAMPLE(*p++)),q);
SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
GETJSAMPLE(*p++)),q);
SetPixelAlpha(image,OpaqueAlpha,q);
q+=GetPixelChannels(image);
}
else
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelCyan(image,QuantumRange-ScaleCharToQuantum(
(unsigned char) GETJSAMPLE(*p++)),q);
SetPixelMagenta(image,QuantumRange-ScaleCharToQuantum(
(unsigned char) GETJSAMPLE(*p++)),q);
SetPixelYellow(image,QuantumRange-ScaleCharToQuantum(
(unsigned char) GETJSAMPLE(*p++)),q);
SetPixelBlack(image,QuantumRange-ScaleCharToQuantum(
(unsigned char) GETJSAMPLE(*p++)),q);
SetPixelAlpha(image,OpaqueAlpha,q);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
{
jpeg_abort_decompress(&jpeg_info);
break;
}
}
if (status != MagickFalse)
{
error_manager.finished=MagickTrue;
if (setjmp(error_manager.error_recovery) == 0)
(void) jpeg_finish_decompress(&jpeg_info);
}
/*
Free jpeg resources.
*/
jpeg_destroy_decompress(&jpeg_info);
memory_info=RelinquishVirtualMemory(memory_info);
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e g i s t e r J P E G I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RegisterJPEGImage() adds properties for the JPEG image format to
% the list of supported formats. The properties include the image format
% tag, a method to read and/or write the format, whether the format
% supports the saving of more than one frame to the same file or blob,
% whether the format supports native in-memory I/O, and a brief
% description of the format.
%
% The format of the RegisterJPEGImage method is:
%
% size_t RegisterJPEGImage(void)
%
*/
ModuleExport size_t RegisterJPEGImage(void)
{
#define JPEGDescription "Joint Photographic Experts Group JFIF format"
#define JPEGStringify(macro_or_string) JPEGStringifyArg(macro_or_string)
#define JPEGStringifyArg(contents) #contents
char
version[MagickPathExtent];
MagickInfo
*entry;
*version='\0';
#if defined(LIBJPEG_TURBO_VERSION)
(void) CopyMagickString(version,"libjpeg-turbo " JPEGStringify(
LIBJPEG_TURBO_VERSION),MagickPathExtent);
#elif defined(JPEG_LIB_VERSION)
(void) FormatLocaleString(version,MagickPathExtent,"libjpeg %d",
JPEG_LIB_VERSION);
#endif
entry=AcquireMagickInfo("JPEG","JPE",JPEGDescription);
#if (JPEG_LIB_VERSION < 80) && !defined(LIBJPEG_TURBO_VERSION)
entry->flags^=CoderDecoderThreadSupportFlag;
#endif
#if defined(MAGICKCORE_JPEG_DELEGATE)
entry->decoder=(DecodeImageHandler *) ReadJPEGImage;
entry->encoder=(EncodeImageHandler *) WriteJPEGImage;
#endif
entry->magick=(IsImageFormatHandler *) IsJPEG;
entry->flags|=CoderDecoderSeekableStreamFlag;
entry->flags^=CoderAdjoinFlag;
entry->flags^=CoderUseExtensionFlag;
if (*version != '\0')
entry->version=ConstantString(version);
entry->mime_type=ConstantString("image/jpeg");
(void) RegisterMagickInfo(entry);
entry=AcquireMagickInfo("JPEG","JPEG",JPEGDescription);
#if (JPEG_LIB_VERSION < 80) && !defined(LIBJPEG_TURBO_VERSION)
entry->flags^=CoderDecoderThreadSupportFlag;
#endif
#if defined(MAGICKCORE_JPEG_DELEGATE)
entry->decoder=(DecodeImageHandler *) ReadJPEGImage;
entry->encoder=(EncodeImageHandler *) WriteJPEGImage;
#endif
entry->magick=(IsImageFormatHandler *) IsJPEG;
entry->flags|=CoderDecoderSeekableStreamFlag;
entry->flags^=CoderAdjoinFlag;
if (*version != '\0')
entry->version=ConstantString(version);
entry->mime_type=ConstantString("image/jpeg");
(void) RegisterMagickInfo(entry);
entry=AcquireMagickInfo("JPEG","JPG",JPEGDescription);
#if (JPEG_LIB_VERSION < 80) && !defined(LIBJPEG_TURBO_VERSION)
entry->flags^=CoderDecoderThreadSupportFlag;
#endif
#if defined(MAGICKCORE_JPEG_DELEGATE)
entry->decoder=(DecodeImageHandler *) ReadJPEGImage;
entry->encoder=(EncodeImageHandler *) WriteJPEGImage;
#endif
entry->flags|=CoderDecoderSeekableStreamFlag;
entry->flags^=CoderAdjoinFlag;
entry->flags^=CoderUseExtensionFlag;
if (*version != '\0')
entry->version=ConstantString(version);
entry->mime_type=ConstantString("image/jpeg");
(void) RegisterMagickInfo(entry);
entry=AcquireMagickInfo("JPEG","JPS",JPEGDescription);
#if (JPEG_LIB_VERSION < 80) && !defined(LIBJPEG_TURBO_VERSION)
entry->flags^=CoderDecoderThreadSupportFlag;
#endif
#if defined(MAGICKCORE_JPEG_DELEGATE)
entry->decoder=(DecodeImageHandler *) ReadJPEGImage;
entry->encoder=(EncodeImageHandler *) WriteJPEGImage;
#endif
entry->flags|=CoderDecoderSeekableStreamFlag;
entry->flags^=CoderAdjoinFlag;
entry->flags^=CoderUseExtensionFlag;
if (*version != '\0')
entry->version=ConstantString(version);
entry->mime_type=ConstantString("image/jpeg");
(void) RegisterMagickInfo(entry);
entry=AcquireMagickInfo("JPEG","PJPEG",JPEGDescription);
#if (JPEG_LIB_VERSION < 80) && !defined(LIBJPEG_TURBO_VERSION)
entry->flags^=CoderDecoderThreadSupportFlag;
#endif
#if defined(MAGICKCORE_JPEG_DELEGATE)
entry->decoder=(DecodeImageHandler *) ReadJPEGImage;
entry->encoder=(EncodeImageHandler *) WriteJPEGImage;
#endif
entry->flags|=CoderDecoderSeekableStreamFlag;
entry->flags^=CoderAdjoinFlag;
entry->flags^=CoderUseExtensionFlag;
if (*version != '\0')
entry->version=ConstantString(version);
entry->mime_type=ConstantString("image/jpeg");
(void) RegisterMagickInfo(entry);
return(MagickImageCoderSignature);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n r e g i s t e r J P E G I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnregisterJPEGImage() removes format registrations made by the
% JPEG module from the list of supported formats.
%
% The format of the UnregisterJPEGImage method is:
%
% UnregisterJPEGImage(void)
%
*/
ModuleExport void UnregisterJPEGImage(void)
{
(void) UnregisterMagickInfo("PJPG");
(void) UnregisterMagickInfo("JPS");
(void) UnregisterMagickInfo("JPG");
(void) UnregisterMagickInfo("JPEG");
(void) UnregisterMagickInfo("JPE");
}
#if defined(MAGICKCORE_JPEG_DELEGATE)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e J P E G I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WriteJPEGImage() writes a JPEG image file and returns MagickTrue on
% success, MagickFalse otherwise.
%
% The format of the WriteJPEGImage method is:
%
% MagickBooleanType WriteJPEGImage(const ImageInfo *image_info,
% Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static QuantizationTable *DestroyQuantizationTable(QuantizationTable *table)
{
assert(table != (QuantizationTable *) NULL);
if (table->slot != (char *) NULL)
table->slot=DestroyString(table->slot);
if (table->description != (char *) NULL)
table->description=DestroyString(table->description);
if (table->levels != (unsigned int *) NULL)
table->levels=(unsigned int *) RelinquishMagickMemory(table->levels);
table=(QuantizationTable *) RelinquishMagickMemory(table);
return(table);
}
static boolean EmptyOutputBuffer(j_compress_ptr cinfo)
{
DestinationManager
*destination;
destination=(DestinationManager *) cinfo->dest;
destination->manager.free_in_buffer=(size_t) WriteBlob(destination->image,
MagickMinBufferExtent,destination->buffer);
if (destination->manager.free_in_buffer != MagickMinBufferExtent)
ERREXIT(cinfo,JERR_FILE_WRITE);
destination->manager.next_output_byte=destination->buffer;
return(TRUE);
}
static QuantizationTable *GetQuantizationTable(const char *filename,
const char *slot,ExceptionInfo *exception)
{
char
*p,
*xml;
const char
*attribute,
*content;
double
value;
register ssize_t
i;
ssize_t
j;
QuantizationTable
*table;
size_t
length;
XMLTreeInfo
*description,
*levels,
*quantization_tables,
*table_iterator;
(void) LogMagickEvent(ConfigureEvent,GetMagickModule(),
"Loading quantization tables \"%s\" ...",filename);
table=(QuantizationTable *) NULL;
xml=FileToString(filename,~0UL,exception);
if (xml == (char *) NULL)
return(table);
quantization_tables=NewXMLTree(xml,exception);
if (quantization_tables == (XMLTreeInfo *) NULL)
{
xml=DestroyString(xml);
return(table);
}
for (table_iterator=GetXMLTreeChild(quantization_tables,"table");
table_iterator != (XMLTreeInfo *) NULL;
table_iterator=GetNextXMLTreeTag(table_iterator))
{
attribute=GetXMLTreeAttribute(table_iterator,"slot");
if ((attribute != (char *) NULL) && (LocaleCompare(slot,attribute) == 0))
break;
attribute=GetXMLTreeAttribute(table_iterator,"alias");
if ((attribute != (char *) NULL) && (LocaleCompare(slot,attribute) == 0))
break;
}
if (table_iterator == (XMLTreeInfo *) NULL)
{
xml=DestroyString(xml);
return(table);
}
description=GetXMLTreeChild(table_iterator,"description");
if (description == (XMLTreeInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlMissingElement","<description>, slot \"%s\"",slot);
quantization_tables=DestroyXMLTree(quantization_tables);
xml=DestroyString(xml);
return(table);
}
levels=GetXMLTreeChild(table_iterator,"levels");
if (levels == (XMLTreeInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlMissingElement","<levels>, slot \"%s\"",slot);
quantization_tables=DestroyXMLTree(quantization_tables);
xml=DestroyString(xml);
return(table);
}
table=(QuantizationTable *) AcquireCriticalMemory(sizeof(*table));
table->slot=(char *) NULL;
table->description=(char *) NULL;
table->levels=(unsigned int *) NULL;
attribute=GetXMLTreeAttribute(table_iterator,"slot");
if (attribute != (char *) NULL)
table->slot=ConstantString(attribute);
content=GetXMLTreeContent(description);
if (content != (char *) NULL)
table->description=ConstantString(content);
attribute=GetXMLTreeAttribute(levels,"width");
if (attribute == (char *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlMissingAttribute","<levels width>, slot \"%s\"",slot);
quantization_tables=DestroyXMLTree(quantization_tables);
table=DestroyQuantizationTable(table);
xml=DestroyString(xml);
return(table);
}
table->width=StringToUnsignedLong(attribute);
if (table->width == 0)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlInvalidAttribute","<levels width>, table \"%s\"",slot);
quantization_tables=DestroyXMLTree(quantization_tables);
table=DestroyQuantizationTable(table);
xml=DestroyString(xml);
return(table);
}
attribute=GetXMLTreeAttribute(levels,"height");
if (attribute == (char *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlMissingAttribute","<levels height>, table \"%s\"",slot);
quantization_tables=DestroyXMLTree(quantization_tables);
table=DestroyQuantizationTable(table);
xml=DestroyString(xml);
return(table);
}
table->height=StringToUnsignedLong(attribute);
if (table->height == 0)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlInvalidAttribute","<levels height>, table \"%s\"",slot);
quantization_tables=DestroyXMLTree(quantization_tables);
table=DestroyQuantizationTable(table);
xml=DestroyString(xml);
return(table);
}
attribute=GetXMLTreeAttribute(levels,"divisor");
if (attribute == (char *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlMissingAttribute","<levels divisor>, table \"%s\"",slot);
quantization_tables=DestroyXMLTree(quantization_tables);
table=DestroyQuantizationTable(table);
xml=DestroyString(xml);
return(table);
}
table->divisor=InterpretLocaleValue(attribute,(char **) NULL);
if (table->divisor == 0.0)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlInvalidAttribute","<levels divisor>, table \"%s\"",slot);
quantization_tables=DestroyXMLTree(quantization_tables);
table=DestroyQuantizationTable(table);
xml=DestroyString(xml);
return(table);
}
content=GetXMLTreeContent(levels);
if (content == (char *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlMissingContent","<levels>, table \"%s\"",slot);
quantization_tables=DestroyXMLTree(quantization_tables);
table=DestroyQuantizationTable(table);
xml=DestroyString(xml);
return(table);
}
length=(size_t) table->width*table->height;
if (length < 64)
length=64;
table->levels=(unsigned int *) AcquireQuantumMemory(length,
sizeof(*table->levels));
if (table->levels == (unsigned int *) NULL)
ThrowFatalException(ResourceLimitFatalError,
"UnableToAcquireQuantizationTable");
for (i=0; i < (ssize_t) (table->width*table->height); i++)
{
table->levels[i]=(unsigned int) (InterpretLocaleValue(content,&p)/
table->divisor+0.5);
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
content=p;
}
value=InterpretLocaleValue(content,&p);
(void) value;
if (p != content)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlInvalidContent","<level> too many values, table \"%s\"",slot);
quantization_tables=DestroyXMLTree(quantization_tables);
table=DestroyQuantizationTable(table);
xml=DestroyString(xml);
return(table);
}
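  /*
    Pad any remaining entries with the last parsed level; libjpeg expects a
    full 64-entry table.
  */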
for (j=i; j < 64; j++)
table->levels[j]=table->levels[j-1];
quantization_tables=DestroyXMLTree(quantization_tables);
xml=DestroyString(xml);
return(table);
}
static void InitializeDestination(j_compress_ptr cinfo)
{
DestinationManager
*destination;
destination=(DestinationManager *) cinfo->dest;
destination->buffer=(JOCTET *) (*cinfo->mem->alloc_small)
((j_common_ptr) cinfo,JPOOL_IMAGE,MagickMinBufferExtent*sizeof(JOCTET));
destination->manager.next_output_byte=destination->buffer;
destination->manager.free_in_buffer=MagickMinBufferExtent;
}
static void TerminateDestination(j_compress_ptr cinfo)
{
DestinationManager
*destination;
destination=(DestinationManager *) cinfo->dest;
if ((MagickMinBufferExtent-(int) destination->manager.free_in_buffer) > 0)
{
ssize_t
count;
count=WriteBlob(destination->image,MagickMinBufferExtent-
destination->manager.free_in_buffer,destination->buffer);
if (count != (ssize_t)
(MagickMinBufferExtent-destination->manager.free_in_buffer))
ERREXIT(cinfo,JERR_FILE_WRITE);
}
}
static void WriteProfile(j_compress_ptr jpeg_info,Image *image,
ExceptionInfo *exception)
{
const char
*name;
const StringInfo
*profile;
MagickBooleanType
iptc;
register ssize_t
i;
size_t
length,
tag_length;
StringInfo
*custom_profile;
/*
    Save the image profiles as APP markers.
*/
iptc=MagickFalse;
custom_profile=AcquireStringInfo(65535L);
ResetImageProfileIterator(image);
for (name=GetNextImageProfile(image); name != (const char *) NULL; )
{
profile=GetImageProfile(image,name);
length=GetStringInfoLength(profile);
if (LocaleNCompare(name,"APP",3) == 0)
{
int
id;
id=JPEG_APP0+StringToInteger(name+3);
for (i=0; i < (ssize_t) length; i+=65533L)
jpeg_write_marker(jpeg_info,id,GetStringInfoDatum(profile)+i,
(unsigned int) MagickMin(length-i,65533));
}
if (LocaleCompare(name,"EXIF") == 0)
{
length=GetStringInfoLength(profile);
if (length > 65533L)
{
(void) ThrowMagickException(exception,GetMagickModule(),
CoderWarning,"ExifProfileSizeExceedsLimit","`%s'",
image->filename);
length=65533L;
}
jpeg_write_marker(jpeg_info,XML_MARKER,GetStringInfoDatum(profile),
(unsigned int) length);
}
if (LocaleCompare(name,"ICC") == 0)
{
register unsigned char
*p;
tag_length=strlen(ICC_PROFILE);
p=GetStringInfoDatum(custom_profile);
(void) memcpy(p,ICC_PROFILE,tag_length);
p[tag_length]='\0';
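          /*
            Each APP2 chunk carries "ICC_PROFILE\0", a 1-based chunk index,
            the total chunk count, and at most 65519 profile bytes.
          */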
for (i=0; i < (ssize_t) GetStringInfoLength(profile); i+=65519L)
{
length=MagickMin(GetStringInfoLength(profile)-i,65519L);
p[12]=(unsigned char) ((i/65519L)+1);
p[13]=(unsigned char) (GetStringInfoLength(profile)/65519L+1);
(void) memcpy(p+tag_length+3,GetStringInfoDatum(profile)+i,
length);
jpeg_write_marker(jpeg_info,ICC_MARKER,GetStringInfoDatum(
custom_profile),(unsigned int) (length+tag_length+3));
}
}
if (((LocaleCompare(name,"IPTC") == 0) ||
(LocaleCompare(name,"8BIM") == 0)) && (iptc == MagickFalse))
{
register unsigned char
*p;
size_t
roundup;
iptc=MagickTrue;
p=GetStringInfoDatum(custom_profile);
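          /*
            Wrap the profile in a "Photoshop 3.0" APP13 envelope; data that
            is not already 8BIM-framed gets an 8BIM header, and odd-length
            chunks are padded to an even byte count.
          */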
for (i=0; i < (ssize_t) GetStringInfoLength(profile); i+=65500L)
{
length=MagickMin(GetStringInfoLength(profile)-i,65500L);
roundup=(size_t) (length & 0x01);
if (LocaleNCompare((char *) GetStringInfoDatum(profile),"8BIM",4) == 0)
{
(void) memcpy(p,"Photoshop 3.0 ",14);
tag_length=14;
}
else
{
(void) memcpy(p,"Photoshop 3.0 8BIM\04\04\0\0\0\0",24);
tag_length=26;
p[24]=(unsigned char) (length >> 8);
p[25]=(unsigned char) (length & 0xff);
}
p[13]=0x00;
(void) memcpy(p+tag_length,GetStringInfoDatum(profile)+i,length);
if (roundup != 0)
p[length+tag_length]='\0';
jpeg_write_marker(jpeg_info,IPTC_MARKER,GetStringInfoDatum(
custom_profile),(unsigned int) (length+tag_length+roundup));
}
}
if ((LocaleCompare(name,"XMP") == 0) &&
(GetStringInfoLength(profile) <= 65502))
{
StringInfo
*xmp_profile;
/*
Add namespace to XMP profile.
*/
xmp_profile=StringToStringInfo(xmp_namespace);
if (xmp_profile != (StringInfo *) NULL)
{
if (profile != (StringInfo *) NULL)
ConcatenateStringInfo(xmp_profile,profile);
GetStringInfoDatum(xmp_profile)[XmpNamespaceExtent]='\0';
for (i=0; i < (ssize_t) GetStringInfoLength(xmp_profile); i+=65533L)
{
length=MagickMin(GetStringInfoLength(xmp_profile)-i,65533L);
jpeg_write_marker(jpeg_info,XML_MARKER,
GetStringInfoDatum(xmp_profile)+i,(unsigned int) length);
}
xmp_profile=DestroyStringInfo(xmp_profile);
}
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"%s profile: %.20g bytes",name,(double) GetStringInfoLength(profile));
name=GetNextImageProfile(image);
}
custom_profile=DestroyStringInfo(custom_profile);
}
static void JPEGDestinationManager(j_compress_ptr cinfo,Image * image)
{
DestinationManager
*destination;
cinfo->dest=(struct jpeg_destination_mgr *) (*cinfo->mem->alloc_small)
((j_common_ptr) cinfo,JPOOL_IMAGE,sizeof(DestinationManager));
destination=(DestinationManager *) cinfo->dest;
destination->manager.init_destination=InitializeDestination;
destination->manager.empty_output_buffer=EmptyOutputBuffer;
destination->manager.term_destination=TerminateDestination;
destination->image=image;
}
static char **SamplingFactorToList(const char *text)
{
char
**textlist;
register char
*q;
register const char
*p;
register ssize_t
i;
if (text == (char *) NULL)
return((char **) NULL);
/*
Convert string to an ASCII list.
*/
textlist=(char **) AcquireQuantumMemory((size_t) MAX_COMPONENTS,
sizeof(*textlist));
if (textlist == (char **) NULL)
ThrowFatalException(ResourceLimitFatalError,"UnableToConvertText");
p=text;
for (i=0; i < (ssize_t) MAX_COMPONENTS; i++)
{
for (q=(char *) p; *q != '\0'; q++)
if (*q == ',')
break;
textlist[i]=(char *) AcquireQuantumMemory((size_t) (q-p)+MagickPathExtent,
sizeof(*textlist[i]));
if (textlist[i] == (char *) NULL)
ThrowFatalException(ResourceLimitFatalError,"UnableToConvertText");
(void) CopyMagickString(textlist[i],p,(size_t) (q-p+1));
if (*q == '\r')
q++;
if (*q == '\0')
break;
p=q+1;
}
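  /*
    Default any unspecified components to no subsampling (1x1).
  */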
for (i++; i < (ssize_t) MAX_COMPONENTS; i++)
textlist[i]=ConstantString("1x1");
return(textlist);
}
static MagickBooleanType WriteJPEGImage(const ImageInfo *image_info,
Image *image,ExceptionInfo *exception)
{
const char
*dct_method,
*option,
*sampling_factor,
*value;
ErrorManager
error_manager;
Image
*volatile volatile_image;
int
colorspace,
quality;
JSAMPLE
*volatile jpeg_pixels;
JSAMPROW
scanline[1];
MagickBooleanType
status;
MemoryInfo
*memory_info;
register JSAMPLE
*q;
register ssize_t
i;
ssize_t
y;
struct jpeg_compress_struct
jpeg_info;
struct jpeg_error_mgr
jpeg_error;
unsigned short
scale;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
if ((LocaleCompare(image_info->magick,"JPS") == 0) &&
(image->next != (Image *) NULL))
image=AppendImages(image,MagickFalse,exception);
status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
if (status == MagickFalse)
return(status);
/*
Initialize JPEG parameters.
*/
(void) memset(&error_manager,0,sizeof(error_manager));
(void) memset(&jpeg_info,0,sizeof(jpeg_info));
(void) memset(&jpeg_error,0,sizeof(jpeg_error));
volatile_image=image;
jpeg_info.client_data=(void *) volatile_image;
jpeg_info.err=jpeg_std_error(&jpeg_error);
jpeg_info.err->emit_message=(void (*)(j_common_ptr,int)) JPEGWarningHandler;
jpeg_info.err->error_exit=(void (*)(j_common_ptr)) JPEGErrorHandler;
error_manager.exception=exception;
error_manager.image=volatile_image;
memory_info=(MemoryInfo *) NULL;
if (setjmp(error_manager.error_recovery) != 0)
{
jpeg_destroy_compress(&jpeg_info);
(void) CloseBlob(volatile_image);
return(MagickFalse);
}
jpeg_info.client_data=(void *) &error_manager;
jpeg_create_compress(&jpeg_info);
JPEGDestinationManager(&jpeg_info,image);
if ((image->columns != (unsigned int) image->columns) ||
(image->rows != (unsigned int) image->rows))
ThrowWriterException(ImageError,"WidthOrHeightExceedsLimit");
jpeg_info.image_width=(unsigned int) image->columns;
jpeg_info.image_height=(unsigned int) image->rows;
jpeg_info.input_components=3;
jpeg_info.data_precision=8;
jpeg_info.in_color_space=JCS_RGB;
switch (image->colorspace)
{
case CMYKColorspace:
{
jpeg_info.input_components=4;
jpeg_info.in_color_space=JCS_CMYK;
break;
}
case YCbCrColorspace:
case Rec601YCbCrColorspace:
case Rec709YCbCrColorspace:
{
jpeg_info.in_color_space=JCS_YCbCr;
break;
}
case LinearGRAYColorspace:
case GRAYColorspace:
{
if (image_info->type == TrueColorType)
break;
jpeg_info.input_components=1;
jpeg_info.in_color_space=JCS_GRAYSCALE;
break;
}
default:
{
(void) TransformImageColorspace(image,sRGBColorspace,exception);
if (image_info->type == TrueColorType)
break;
if (SetImageGray(image,exception) != MagickFalse)
{
jpeg_info.input_components=1;
jpeg_info.in_color_space=JCS_GRAYSCALE;
}
break;
}
}
jpeg_set_defaults(&jpeg_info);
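  /*
    Follow the Adobe convention: CMYK JPEG data is written as inverted YCCK.
  */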
if (jpeg_info.in_color_space == JCS_CMYK)
jpeg_set_colorspace(&jpeg_info,JCS_YCCK);
if ((jpeg_info.data_precision != 12) && (image->depth <= 8))
jpeg_info.data_precision=8;
else
jpeg_info.data_precision=BITS_IN_JSAMPLE;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Image resolution: %.20g,%.20g",image->resolution.x,image->resolution.y);
if ((image->resolution.x != 0.0) && (image->resolution.y != 0.0))
{
/*
Set image resolution.
*/
jpeg_info.write_JFIF_header=TRUE;
jpeg_info.X_density=(UINT16) image->resolution.x;
jpeg_info.Y_density=(UINT16) image->resolution.y;
/*
Set image resolution units.
*/
if (image->units == PixelsPerInchResolution)
jpeg_info.density_unit=(UINT8) 1;
if (image->units == PixelsPerCentimeterResolution)
jpeg_info.density_unit=(UINT8) 2;
}
dct_method=GetImageOption(image_info,"jpeg:dct-method");
if (dct_method != (const char *) NULL)
switch (*dct_method)
{
case 'D':
case 'd':
{
if (LocaleCompare(dct_method,"default") == 0)
jpeg_info.dct_method=JDCT_DEFAULT;
break;
}
case 'F':
case 'f':
{
if (LocaleCompare(dct_method,"fastest") == 0)
jpeg_info.dct_method=JDCT_FASTEST;
if (LocaleCompare(dct_method,"float") == 0)
jpeg_info.dct_method=JDCT_FLOAT;
break;
}
case 'I':
case 'i':
{
if (LocaleCompare(dct_method,"ifast") == 0)
jpeg_info.dct_method=JDCT_IFAST;
if (LocaleCompare(dct_method,"islow") == 0)
jpeg_info.dct_method=JDCT_ISLOW;
break;
}
}
option=GetImageOption(image_info,"jpeg:optimize-coding");
if (option != (const char *) NULL)
jpeg_info.optimize_coding=IsStringTrue(option) != MagickFalse ? TRUE :
FALSE;
else
{
MagickSizeType
length;
length=(MagickSizeType) jpeg_info.input_components*image->columns*
image->rows*sizeof(JSAMPLE);
if (length == (MagickSizeType) ((size_t) length))
{
/*
Perform optimization only if available memory resources permit it.
*/
status=AcquireMagickResource(MemoryResource,length);
if (status != MagickFalse)
RelinquishMagickResource(MemoryResource,length);
jpeg_info.optimize_coding=status == MagickFalse ? FALSE : TRUE;
}
}
#if defined(C_ARITH_CODING_SUPPORTED)
option=GetImageOption(image_info,"jpeg:arithmetic-coding");
if (IsStringTrue(option) != MagickFalse)
{
jpeg_info.arith_code=TRUE;
      jpeg_info.optimize_coding=FALSE;  /* not supported with arithmetic coding */
}
#endif
#if (JPEG_LIB_VERSION >= 61) && defined(C_PROGRESSIVE_SUPPORTED)
if ((LocaleCompare(image_info->magick,"PJPEG") == 0) ||
(image_info->interlace != NoInterlace))
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Interlace: progressive");
jpeg_simple_progression(&jpeg_info);
}
else
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Interlace: non-progressive");
#else
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Interlace: nonprogressive");
#endif
quality=92;
if ((image_info->compression != LosslessJPEGCompression) &&
(image->quality <= 100))
{
if (image->quality != UndefinedCompressionQuality)
quality=(int) image->quality;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),"Quality: %.20g",
(double) image->quality);
}
else
{
#if !defined(C_LOSSLESS_SUPPORTED)
quality=100;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),"Quality: 100");
#else
if (image->quality < 100)
(void) ThrowMagickException(exception,GetMagickModule(),CoderWarning,
"LosslessToLossyJPEGConversion","`%s'",image->filename);
else
{
int
point_transform,
predictor;
predictor=image->quality/100; /* range 1-7 */
point_transform=image->quality % 20; /* range 0-15 */
jpeg_simple_lossless(&jpeg_info,predictor,point_transform);
if (image->debug != MagickFalse)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Compression: lossless");
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Predictor: %d",predictor);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Point Transform: %d",point_transform);
}
}
#endif
}
option=GetImageOption(image_info,"jpeg:extent");
if (option != (const char *) NULL)
{
Image
*jpeg_image;
ImageInfo
*extent_info;
extent_info=CloneImageInfo(image_info);
extent_info->blob=NULL;
jpeg_image=CloneImage(image,0,0,MagickTrue,exception);
if (jpeg_image != (Image *) NULL)
{
MagickSizeType
extent;
size_t
maximum,
minimum;
/*
Search for compression quality that does not exceed image extent.
*/
extent_info->quality=0;
extent=(MagickSizeType) SiPrefixToDoubleInterval(option,100.0);
(void) DeleteImageOption(extent_info,"jpeg:extent");
(void) DeleteImageArtifact(jpeg_image,"jpeg:extent");
maximum=image_info->quality;
if (maximum < 2)
maximum=101;
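          /*
            Binary-search the quality: write trial images to a unique
            temporary file until the largest quality whose output fits the
            requested extent is found.
          */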
for (minimum=2; minimum < maximum; )
{
(void) AcquireUniqueFilename(jpeg_image->filename);
jpeg_image->quality=minimum+(maximum-minimum+1)/2;
status=WriteJPEGImage(extent_info,jpeg_image,exception);
if (GetBlobSize(jpeg_image) <= extent)
minimum=jpeg_image->quality+1;
else
maximum=jpeg_image->quality-1;
(void) RelinquishUniqueFileResource(jpeg_image->filename);
}
quality=(int) minimum-1;
jpeg_image=DestroyImage(jpeg_image);
}
extent_info=DestroyImageInfo(extent_info);
}
jpeg_set_quality(&jpeg_info,quality,TRUE);
if ((dct_method == (const char *) NULL) && (quality <= 90))
jpeg_info.dct_method=JDCT_IFAST;
#if (JPEG_LIB_VERSION >= 70)
option=GetImageOption(image_info,"quality");
if (option != (const char *) NULL)
{
GeometryInfo
geometry_info;
int
flags;
/*
Set quality scaling for luminance and chrominance separately.
*/
flags=ParseGeometry(option,&geometry_info);
if (((flags & RhoValue) != 0) && ((flags & SigmaValue) != 0))
{
jpeg_info.q_scale_factor[0]=jpeg_quality_scaling((int)
(geometry_info.rho+0.5));
jpeg_info.q_scale_factor[1]=jpeg_quality_scaling((int)
(geometry_info.sigma+0.5));
jpeg_default_qtables(&jpeg_info,TRUE);
}
}
#endif
colorspace=jpeg_info.in_color_space;
value=GetImageOption(image_info,"jpeg:colorspace");
if (value == (char *) NULL)
value=GetImageProperty(image,"jpeg:colorspace",exception);
if (value != (char *) NULL)
colorspace=StringToInteger(value);
sampling_factor=(const char *) NULL;
if ((J_COLOR_SPACE) colorspace == jpeg_info.in_color_space)
{
value=GetImageOption(image_info,"jpeg:sampling-factor");
if (value == (char *) NULL)
value=GetImageProperty(image,"jpeg:sampling-factor",exception);
if (value != (char *) NULL)
{
sampling_factor=value;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Input sampling-factors=%s",sampling_factor);
}
}
value=GetImageOption(image_info,"jpeg:sampling-factor");
if (image_info->sampling_factor != (char *) NULL)
sampling_factor=image_info->sampling_factor;
if (sampling_factor == (const char *) NULL)
{
if (quality >= 90)
for (i=0; i < MAX_COMPONENTS; i++)
{
jpeg_info.comp_info[i].h_samp_factor=1;
jpeg_info.comp_info[i].v_samp_factor=1;
}
}
else
{
char
**factors;
GeometryInfo
geometry_info;
MagickStatusType
flags;
/*
Set sampling factor.
*/
i=0;
factors=SamplingFactorToList(sampling_factor);
if (factors != (char **) NULL)
{
for (i=0; i < MAX_COMPONENTS; i++)
{
if (factors[i] == (char *) NULL)
break;
flags=ParseGeometry(factors[i],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=geometry_info.rho;
jpeg_info.comp_info[i].h_samp_factor=(int) geometry_info.rho;
jpeg_info.comp_info[i].v_samp_factor=(int) geometry_info.sigma;
factors[i]=(char *) RelinquishMagickMemory(factors[i]);
}
factors=(char **) RelinquishMagickMemory(factors);
}
for ( ; i < MAX_COMPONENTS; i++)
{
jpeg_info.comp_info[i].h_samp_factor=1;
jpeg_info.comp_info[i].v_samp_factor=1;
}
}
option=GetImageOption(image_info,"jpeg:q-table");
if (option != (const char *) NULL)
{
QuantizationTable
*table;
/*
Custom quantization tables.
*/
table=GetQuantizationTable(option,"0",exception);
if (table != (QuantizationTable *) NULL)
{
for (i=0; i < MAX_COMPONENTS; i++)
jpeg_info.comp_info[i].quant_tbl_no=0;
jpeg_add_quant_table(&jpeg_info,0,table->levels,
jpeg_quality_scaling(quality),0);
table=DestroyQuantizationTable(table);
}
table=GetQuantizationTable(option,"1",exception);
if (table != (QuantizationTable *) NULL)
{
for (i=1; i < MAX_COMPONENTS; i++)
jpeg_info.comp_info[i].quant_tbl_no=1;
jpeg_add_quant_table(&jpeg_info,1,table->levels,
jpeg_quality_scaling(quality),0);
table=DestroyQuantizationTable(table);
}
table=GetQuantizationTable(option,"2",exception);
if (table != (QuantizationTable *) NULL)
{
for (i=2; i < MAX_COMPONENTS; i++)
jpeg_info.comp_info[i].quant_tbl_no=2;
jpeg_add_quant_table(&jpeg_info,2,table->levels,
jpeg_quality_scaling(quality),0);
table=DestroyQuantizationTable(table);
}
table=GetQuantizationTable(option,"3",exception);
if (table != (QuantizationTable *) NULL)
{
for (i=3; i < MAX_COMPONENTS; i++)
jpeg_info.comp_info[i].quant_tbl_no=3;
jpeg_add_quant_table(&jpeg_info,3,table->levels,
jpeg_quality_scaling(quality),0);
table=DestroyQuantizationTable(table);
}
}
jpeg_start_compress(&jpeg_info,TRUE);
if (image->debug != MagickFalse)
{
if (image->storage_class == PseudoClass)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Storage class: PseudoClass");
else
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Storage class: DirectClass");
(void) LogMagickEvent(CoderEvent,GetMagickModule(),"Depth: %.20g",
(double) image->depth);
if (image->colors != 0)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Number of colors: %.20g",(double) image->colors);
else
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Number of colors: unspecified");
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"JPEG data precision: %d",(int) jpeg_info.data_precision);
switch (image->colorspace)
{
case CMYKColorspace:
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Storage class: DirectClass");
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Colorspace: CMYK");
break;
}
case YCbCrColorspace:
case Rec601YCbCrColorspace:
case Rec709YCbCrColorspace:
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Colorspace: YCbCr");
break;
}
default:
break;
}
switch (image->colorspace)
{
case CMYKColorspace:
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Colorspace: CMYK");
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Sampling factors: %dx%d,%dx%d,%dx%d,%dx%d",
jpeg_info.comp_info[0].h_samp_factor,
jpeg_info.comp_info[0].v_samp_factor,
jpeg_info.comp_info[1].h_samp_factor,
jpeg_info.comp_info[1].v_samp_factor,
jpeg_info.comp_info[2].h_samp_factor,
jpeg_info.comp_info[2].v_samp_factor,
jpeg_info.comp_info[3].h_samp_factor,
jpeg_info.comp_info[3].v_samp_factor);
break;
}
case GRAYColorspace:
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Colorspace: GRAY");
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Sampling factors: %dx%d",jpeg_info.comp_info[0].h_samp_factor,
jpeg_info.comp_info[0].v_samp_factor);
break;
}
case sRGBColorspace:
case RGBColorspace:
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Image colorspace is RGB");
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Sampling factors: %dx%d,%dx%d,%dx%d",
jpeg_info.comp_info[0].h_samp_factor,
jpeg_info.comp_info[0].v_samp_factor,
jpeg_info.comp_info[1].h_samp_factor,
jpeg_info.comp_info[1].v_samp_factor,
jpeg_info.comp_info[2].h_samp_factor,
jpeg_info.comp_info[2].v_samp_factor);
break;
}
case YCbCrColorspace:
case Rec601YCbCrColorspace:
case Rec709YCbCrColorspace:
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Colorspace: YCbCr");
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Sampling factors: %dx%d,%dx%d,%dx%d",
jpeg_info.comp_info[0].h_samp_factor,
jpeg_info.comp_info[0].v_samp_factor,
jpeg_info.comp_info[1].h_samp_factor,
jpeg_info.comp_info[1].v_samp_factor,
jpeg_info.comp_info[2].h_samp_factor,
jpeg_info.comp_info[2].v_samp_factor);
break;
}
default:
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),"Colorspace: %d",
image->colorspace);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Sampling factors: %dx%d,%dx%d,%dx%d,%dx%d",
jpeg_info.comp_info[0].h_samp_factor,
jpeg_info.comp_info[0].v_samp_factor,
jpeg_info.comp_info[1].h_samp_factor,
jpeg_info.comp_info[1].v_samp_factor,
jpeg_info.comp_info[2].h_samp_factor,
jpeg_info.comp_info[2].v_samp_factor,
jpeg_info.comp_info[3].h_samp_factor,
jpeg_info.comp_info[3].v_samp_factor);
break;
}
}
}
/*
Write JPEG profiles.
*/
value=GetImageProperty(image,"comment",exception);
if (value != (char *) NULL)
for (i=0; i < (ssize_t) strlen(value); i+=65533L)
jpeg_write_marker(&jpeg_info,JPEG_COM,(unsigned char *) value+i,
(unsigned int) MagickMin((size_t) strlen(value+i),65533L));
if (image->profiles != (void *) NULL)
WriteProfile(&jpeg_info,image,exception);
/*
Convert MIFF to JPEG raster pixels.
*/
memory_info=AcquireVirtualMemory((size_t) image->columns,
jpeg_info.input_components*sizeof(*jpeg_pixels));
if (memory_info == (MemoryInfo *) NULL)
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
jpeg_pixels=(JSAMPLE *) GetVirtualMemoryBlob(memory_info);
if (setjmp(error_manager.error_recovery) != 0)
{
jpeg_destroy_compress(&jpeg_info);
if (memory_info != (MemoryInfo *) NULL)
memory_info=RelinquishVirtualMemory(memory_info);
(void) CloseBlob(image);
return(MagickFalse);
}
scanline[0]=(JSAMPROW) jpeg_pixels;
scale=65535/(unsigned short) GetQuantumRange((size_t)
jpeg_info.data_precision);
if (scale == 0)
scale=1;
if (jpeg_info.data_precision <= 8)
{
if ((jpeg_info.in_color_space == JCS_RGB) ||
(jpeg_info.in_color_space == JCS_YCbCr))
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*p;
register ssize_t
x;
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
q=jpeg_pixels;
for (x=0; x < (ssize_t) image->columns; x++)
{
*q++=(JSAMPLE) ScaleQuantumToChar(GetPixelRed(image,p));
*q++=(JSAMPLE) ScaleQuantumToChar(GetPixelGreen(image,p));
*q++=(JSAMPLE) ScaleQuantumToChar(GetPixelBlue(image,p));
p+=GetPixelChannels(image);
}
(void) jpeg_write_scanlines(&jpeg_info,scanline,1);
status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
else
if (jpeg_info.in_color_space == JCS_GRAYSCALE)
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*p;
register ssize_t
x;
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
q=jpeg_pixels;
for (x=0; x < (ssize_t) image->columns; x++)
{
*q++=(JSAMPLE) ScaleQuantumToChar(ClampToQuantum(GetPixelLuma(
image,p)));
p+=GetPixelChannels(image);
}
(void) jpeg_write_scanlines(&jpeg_info,scanline,1);
status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
else
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*p;
register ssize_t
x;
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
q=jpeg_pixels;
for (x=0; x < (ssize_t) image->columns; x++)
{
/*
Convert DirectClass packets to contiguous CMYK scanlines.
*/
*q++=(JSAMPLE) (ScaleQuantumToChar((Quantum) (QuantumRange-
GetPixelCyan(image,p))));
*q++=(JSAMPLE) (ScaleQuantumToChar((Quantum) (QuantumRange-
GetPixelMagenta(image,p))));
*q++=(JSAMPLE) (ScaleQuantumToChar((Quantum) (QuantumRange-
GetPixelYellow(image,p))));
*q++=(JSAMPLE) (ScaleQuantumToChar((Quantum) (QuantumRange-
GetPixelBlack(image,p))));
p+=GetPixelChannels(image);
}
(void) jpeg_write_scanlines(&jpeg_info,scanline,1);
status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
else
if (jpeg_info.in_color_space == JCS_GRAYSCALE)
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*p;
register ssize_t
x;
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
q=jpeg_pixels;
for (x=0; x < (ssize_t) image->columns; x++)
{
*q++=(JSAMPLE) (ScaleQuantumToShort(ClampToQuantum(GetPixelLuma(image,
p)))/scale);
p+=GetPixelChannels(image);
}
(void) jpeg_write_scanlines(&jpeg_info,scanline,1);
status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
else
if ((jpeg_info.in_color_space == JCS_RGB) ||
(jpeg_info.in_color_space == JCS_YCbCr))
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*p;
register ssize_t
x;
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
q=jpeg_pixels;
for (x=0; x < (ssize_t) image->columns; x++)
{
*q++=(JSAMPLE) (ScaleQuantumToShort(GetPixelRed(image,p))/scale);
*q++=(JSAMPLE) (ScaleQuantumToShort(GetPixelGreen(image,p))/scale);
*q++=(JSAMPLE) (ScaleQuantumToShort(GetPixelBlue(image,p))/scale);
p+=GetPixelChannels(image);
}
(void) jpeg_write_scanlines(&jpeg_info,scanline,1);
status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
else
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*p;
register ssize_t
x;
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
q=jpeg_pixels;
for (x=0; x < (ssize_t) image->columns; x++)
{
/*
Convert DirectClass packets to contiguous CMYK scanlines.
*/
*q++=(JSAMPLE) (ScaleQuantumToShort(QuantumRange-GetPixelRed(
image,p))/scale);
*q++=(JSAMPLE) (ScaleQuantumToShort(QuantumRange-GetPixelGreen(
image,p))/scale);
*q++=(JSAMPLE) (ScaleQuantumToShort(QuantumRange-GetPixelBlue(
image,p))/scale);
*q++=(JSAMPLE) (ScaleQuantumToShort(QuantumRange-GetPixelBlack(
image,p))/scale);
p+=GetPixelChannels(image);
}
(void) jpeg_write_scanlines(&jpeg_info,scanline,1);
status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
if (y == (ssize_t) image->rows)
jpeg_finish_compress(&jpeg_info);
/*
Relinquish resources.
*/
jpeg_destroy_compress(&jpeg_info);
memory_info=RelinquishVirtualMemory(memory_info);
(void) CloseBlob(image);
return(MagickTrue);
}
#endif
| ./CrossVul/dataset_final_sorted/CWE-416/c/bad_1181_1 |
crossvul-cpp_data_good_1427_0 | // SPDX-License-Identifier: GPL-2.0+
/*
* ipmi_msghandler.c
*
* Incoming and outgoing message routing for an IPMI interface.
*
* Author: MontaVista Software, Inc.
* Corey Minyard <minyard@mvista.com>
* source@mvista.com
*
* Copyright 2002 MontaVista Software Inc.
*/
#define pr_fmt(fmt) "%s" fmt, "IPMI message handler: "
#define dev_fmt pr_fmt
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/ipmi.h>
#include <linux/ipmi_smi.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/workqueue.h>
#include <linux/uuid.h>
#include <linux/nospec.h>
#define IPMI_DRIVER_VERSION "39.2"
static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
static int ipmi_init_msghandler(void);
static void smi_recv_tasklet(unsigned long);
static void handle_new_recv_msgs(struct ipmi_smi *intf);
static void need_waiter(struct ipmi_smi *intf);
static int handle_one_recv_msg(struct ipmi_smi *intf,
struct ipmi_smi_msg *msg);
#ifdef DEBUG
static void ipmi_debug_msg(const char *title, unsigned char *data,
unsigned int len)
{
int i, pos;
char buf[100];
pos = snprintf(buf, sizeof(buf), "%s: ", title);
	for (i = 0; i < len && pos < (int) sizeof(buf); i++)
		pos += snprintf(buf + pos, sizeof(buf) - pos,
				" %2.2x", data[i]);
pr_debug("%s\n", buf);
}
#else
static void ipmi_debug_msg(const char *title, unsigned char *data,
unsigned int len)
{ }
#endif
static int initialized;
enum ipmi_panic_event_op {
IPMI_SEND_PANIC_EVENT_NONE,
IPMI_SEND_PANIC_EVENT,
IPMI_SEND_PANIC_EVENT_STRING
};
#ifdef CONFIG_IPMI_PANIC_STRING
#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_STRING
#elif defined(CONFIG_IPMI_PANIC_EVENT)
#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT
#else
#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_NONE
#endif
static enum ipmi_panic_event_op ipmi_send_panic_event = IPMI_PANIC_DEFAULT;
static int panic_op_write_handler(const char *val,
const struct kernel_param *kp)
{
char valcp[16];
char *s;
strncpy(valcp, val, 15);
valcp[15] = '\0';
s = strstrip(valcp);
if (strcmp(s, "none") == 0)
ipmi_send_panic_event = IPMI_SEND_PANIC_EVENT_NONE;
else if (strcmp(s, "event") == 0)
ipmi_send_panic_event = IPMI_SEND_PANIC_EVENT;
else if (strcmp(s, "string") == 0)
ipmi_send_panic_event = IPMI_SEND_PANIC_EVENT_STRING;
else
return -EINVAL;
return 0;
}
static int panic_op_read_handler(char *buffer, const struct kernel_param *kp)
{
switch (ipmi_send_panic_event) {
case IPMI_SEND_PANIC_EVENT_NONE:
strcpy(buffer, "none");
break;
case IPMI_SEND_PANIC_EVENT:
strcpy(buffer, "event");
break;
case IPMI_SEND_PANIC_EVENT_STRING:
strcpy(buffer, "string");
break;
default:
strcpy(buffer, "???");
break;
}
return strlen(buffer);
}
static const struct kernel_param_ops panic_op_ops = {
.set = panic_op_write_handler,
.get = panic_op_read_handler
};
module_param_cb(panic_op, &panic_op_ops, NULL, 0600);
MODULE_PARM_DESC(panic_op, "Sets if the IPMI driver will attempt to store panic information in the event log in the event of a panic. Set to 'none' for no, 'event' for a single event, or 'string' for a generic event and the panic string in IPMI OEM events.");
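/* Bound on the number of events kept waiting for a consumer per interface. */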
#define MAX_EVENTS_IN_QUEUE 25
/* Remain in auto-maintenance mode for this amount of time (in ms). */
static unsigned long maintenance_mode_timeout_ms = 30000;
module_param(maintenance_mode_timeout_ms, ulong, 0644);
MODULE_PARM_DESC(maintenance_mode_timeout_ms,
"The time (milliseconds) after the last maintenance message that the connection stays in maintenance mode.");
/*
 * Don't let a message sit in a queue forever, always time it with at least
* the max message timer. This is in milliseconds.
*/
#define MAX_MSG_TIMEOUT 60000
/*
* Timeout times below are in milliseconds, and are done off a 1
* second timer. So setting the value to 1000 would mean anything
* between 0 and 1000ms. So really the only reasonable minimum
 * setting is 2000ms, which is between 1 and 2 seconds.
*/
/* The default timeout for message retries. */
static unsigned long default_retry_ms = 2000;
module_param(default_retry_ms, ulong, 0644);
MODULE_PARM_DESC(default_retry_ms,
"The time (milliseconds) between retry sends");
/* The default timeout for maintenance mode message retries. */
static unsigned long default_maintenance_retry_ms = 3000;
module_param(default_maintenance_retry_ms, ulong, 0644);
MODULE_PARM_DESC(default_maintenance_retry_ms,
"The time (milliseconds) between retry sends in maintenance mode");
/* The default maximum number of retries */
static unsigned int default_max_retries = 4;
module_param(default_max_retries, uint, 0644);
MODULE_PARM_DESC(default_max_retries,
"The time (milliseconds) between retry sends in maintenance mode");
/* Call every ~1000 ms. */
#define IPMI_TIMEOUT_TIME 1000
/* How many jiffies does it take to get to the timeout time. */
#define IPMI_TIMEOUT_JIFFIES ((IPMI_TIMEOUT_TIME * HZ) / 1000)
/*
* Request events from the queue every second (this is the number of
* IPMI_TIMEOUT_TIMES between event requests). Hopefully, in the
* future, IPMI will add a way to know immediately if an event is in
* the queue and this silliness can go away.
*/
#define IPMI_REQUEST_EV_TIME (1000 / (IPMI_TIMEOUT_TIME))
/* How long should we cache dynamic device IDs? */
#define IPMI_DYN_DEV_ID_EXPIRY (10 * HZ)
/*
* The main "user" data structure.
*/
struct ipmi_user {
struct list_head link;
/*
* Set to NULL when the user is destroyed, a pointer to myself
* so srcu_dereference can be used on it.
*/
struct ipmi_user *self;
struct srcu_struct release_barrier;
struct kref refcount;
/* The upper layer that handles receive messages. */
const struct ipmi_user_hndl *handler;
void *handler_data;
/* The interface this user is bound to. */
struct ipmi_smi *intf;
/* Does this interface receive IPMI events? */
bool gets_events;
};
static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user, int *index)
__acquires(user->release_barrier)
{
struct ipmi_user *ruser;
*index = srcu_read_lock(&user->release_barrier);
ruser = srcu_dereference(user->self, &user->release_barrier);
if (!ruser)
srcu_read_unlock(&user->release_barrier, *index);
return ruser;
}
static void release_ipmi_user(struct ipmi_user *user, int index)
{
srcu_read_unlock(&user->release_barrier, index);
}
struct cmd_rcvr {
struct list_head link;
struct ipmi_user *user;
unsigned char netfn;
unsigned char cmd;
unsigned int chans;
/*
	 * This is used to form a linked list during mass deletion.
* Since this is in an RCU list, we cannot use the link above
* or change any data until the RCU period completes. So we
* use this next variable during mass deletion so we can have
* a list and don't have to wait and restart the search on
* every individual deletion of a command.
*/
struct cmd_rcvr *next;
};
struct seq_table {
unsigned int inuse : 1;
unsigned int broadcast : 1;
unsigned long timeout;
unsigned long orig_timeout;
unsigned int retries_left;
/*
* To verify on an incoming send message response that this is
* the message that the response is for, we keep a sequence id
* and increment it every time we send a message.
*/
long seqid;
/*
* This is held so we can properly respond to the message on a
* timeout, and it is used to hold the temporary data for
* retransmission, too.
*/
struct ipmi_recv_msg *recv_msg;
};
/*
* Store the information in a msgid (long) to allow us to find a
* sequence table entry from the msgid.
*/
#define STORE_SEQ_IN_MSGID(seq, seqid) \
((((seq) & 0x3f) << 26) | ((seqid) & 0x3ffffff))
#define GET_SEQ_FROM_MSGID(msgid, seq, seqid) \
do { \
seq = (((msgid) >> 26) & 0x3f); \
seqid = ((msgid) & 0x3ffffff); \
} while (0)
#define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3ffffff)
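/*
 * Worked example of the packing above: STORE_SEQ_IN_MSGID(5, 0x123)
 * yields (5 << 26) | 0x123 = 0x14000123, and GET_SEQ_FROM_MSGID() on
 * that value recovers seq = 5 and seqid = 0x123. The top 6 bits index
 * the 64-entry sequence table; the low 26 bits are the rolling id that
 * NEXT_SEQID() advances.
 */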
#define IPMI_MAX_CHANNELS 16
struct ipmi_channel {
unsigned char medium;
unsigned char protocol;
};
struct ipmi_channel_set {
struct ipmi_channel c[IPMI_MAX_CHANNELS];
};
struct ipmi_my_addrinfo {
/*
* My slave address. This is initialized to IPMI_BMC_SLAVE_ADDR,
* but may be changed by the user.
*/
unsigned char address;
/*
* My LUN. This should generally stay the SMS LUN, but just in
* case...
*/
unsigned char lun;
};
/*
* Note that the product id, manufacturer id, guid, and device id are
* immutable in this structure, so dyn_mutex is not required for
* accessing those. If those change on a BMC, a new BMC is allocated.
*/
struct bmc_device {
struct platform_device pdev;
struct list_head intfs; /* Interfaces on this BMC. */
struct ipmi_device_id id;
struct ipmi_device_id fetch_id;
int dyn_id_set;
unsigned long dyn_id_expiry;
struct mutex dyn_mutex; /* Protects id, intfs, & dyn* */
guid_t guid;
guid_t fetch_guid;
int dyn_guid_set;
struct kref usecount;
struct work_struct remove_work;
};
#define to_bmc_device(x) container_of((x), struct bmc_device, pdev.dev)
static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
struct ipmi_device_id *id,
bool *guid_set, guid_t *guid);
/*
* Various statistics for IPMI, these index stats[] in the ipmi_smi
* structure.
*/
enum ipmi_stat_indexes {
/* Commands we got from the user that were invalid. */
IPMI_STAT_sent_invalid_commands = 0,
/* Commands we sent to the MC. */
IPMI_STAT_sent_local_commands,
/* Responses from the MC that were delivered to a user. */
IPMI_STAT_handled_local_responses,
/* Responses from the MC that were not delivered to a user. */
IPMI_STAT_unhandled_local_responses,
/* Commands we sent out to the IPMB bus. */
IPMI_STAT_sent_ipmb_commands,
/* Commands sent on the IPMB that had errors on the SEND CMD */
IPMI_STAT_sent_ipmb_command_errs,
/* Each retransmit increments this count. */
IPMI_STAT_retransmitted_ipmb_commands,
/*
* When a message times out (runs out of retransmits) this is
* incremented.
*/
IPMI_STAT_timed_out_ipmb_commands,
/*
* This is like above, but for broadcasts. Broadcasts are
* *not* included in the above count (they are expected to
* time out).
*/
IPMI_STAT_timed_out_ipmb_broadcasts,
/* Responses I have sent to the IPMB bus. */
IPMI_STAT_sent_ipmb_responses,
/* The response was delivered to the user. */
IPMI_STAT_handled_ipmb_responses,
/* The response had invalid data in it. */
IPMI_STAT_invalid_ipmb_responses,
/* The response didn't have anyone waiting for it. */
IPMI_STAT_unhandled_ipmb_responses,
/* Commands we sent out to the IPMB bus. */
IPMI_STAT_sent_lan_commands,
/* Commands sent on the IPMB that had errors on the SEND CMD */
IPMI_STAT_sent_lan_command_errs,
/* Each retransmit increments this count. */
IPMI_STAT_retransmitted_lan_commands,
/*
* When a message times out (runs out of retransmits) this is
* incremented.
*/
IPMI_STAT_timed_out_lan_commands,
/* Responses I have sent to the IPMB bus. */
IPMI_STAT_sent_lan_responses,
/* The response was delivered to the user. */
IPMI_STAT_handled_lan_responses,
/* The response had invalid data in it. */
IPMI_STAT_invalid_lan_responses,
/* The response didn't have anyone waiting for it. */
IPMI_STAT_unhandled_lan_responses,
/* The command was delivered to the user. */
IPMI_STAT_handled_commands,
/* The command had invalid data in it. */
IPMI_STAT_invalid_commands,
/* The command didn't have anyone waiting for it. */
IPMI_STAT_unhandled_commands,
/* Invalid data in an event. */
IPMI_STAT_invalid_events,
/* Events that were received with the proper format. */
IPMI_STAT_events,
/* Retransmissions on IPMB that failed. */
IPMI_STAT_dropped_rexmit_ipmb_commands,
/* Retransmissions on LAN that failed. */
IPMI_STAT_dropped_rexmit_lan_commands,
/* This *must* remain last, add new values above this. */
IPMI_NUM_STATS
};
#define IPMI_IPMB_NUM_SEQ 64
struct ipmi_smi {
/* What interface number are we? */
int intf_num;
struct kref refcount;
/* Set when the interface is being unregistered. */
bool in_shutdown;
/* Used for a list of interfaces. */
struct list_head link;
/*
* The list of upper layers that are using me. seq_lock write
* protects this. Read protection is with srcu.
*/
struct list_head users;
struct srcu_struct users_srcu;
/* Used for wake ups at startup. */
wait_queue_head_t waitq;
/*
* Prevents the interface from being unregistered when the
* interface is used by being looked up through the BMC
* structure.
*/
struct mutex bmc_reg_mutex;
struct bmc_device tmp_bmc;
struct bmc_device *bmc;
bool bmc_registered;
struct list_head bmc_link;
char *my_dev_name;
bool in_bmc_register; /* Handle recursive situations. Yuck. */
struct work_struct bmc_reg_work;
const struct ipmi_smi_handlers *handlers;
void *send_info;
/* Driver-model device for the system interface. */
struct device *si_dev;
/*
* A table of sequence numbers for this interface. We use the
* sequence numbers for IPMB messages that go out of the
* interface to match them up with their responses. A routine
* is called periodically to time the items in this list.
*/
spinlock_t seq_lock;
struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
int curr_seq;
/*
* Messages queued for delivery. If delivery fails (out of memory
	 * for instance), they will stay in here to be processed later in a
* periodic timer interrupt. The tasklet is for handling received
* messages directly from the handler.
*/
spinlock_t waiting_rcv_msgs_lock;
struct list_head waiting_rcv_msgs;
atomic_t watchdog_pretimeouts_to_deliver;
struct tasklet_struct recv_tasklet;
spinlock_t xmit_msgs_lock;
struct list_head xmit_msgs;
struct ipmi_smi_msg *curr_msg;
struct list_head hp_xmit_msgs;
/*
* The list of command receivers that are registered for commands
* on this interface.
*/
struct mutex cmd_rcvrs_mutex;
struct list_head cmd_rcvrs;
/*
	 * Events that were queued because no one was there to receive
* them.
*/
spinlock_t events_lock; /* For dealing with event stuff. */
struct list_head waiting_events;
unsigned int waiting_events_count; /* How many events in queue? */
char delivering_events;
char event_msg_printed;
atomic_t event_waiters;
unsigned int ticks_to_req_ev;
int last_needs_timer;
/*
* The event receiver for my BMC, only really used at panic
* shutdown as a place to store this.
*/
unsigned char event_receiver;
unsigned char event_receiver_lun;
unsigned char local_sel_device;
unsigned char local_event_generator;
/* For handling of maintenance mode. */
int maintenance_mode;
bool maintenance_mode_enable;
int auto_maintenance_timeout;
spinlock_t maintenance_mode_lock; /* Used in a timer... */
/*
* If we are doing maintenance on something on IPMB, extend
* the timeout time to avoid timeouts writing firmware and
* such.
*/
int ipmb_maintenance_mode_timeout;
/*
* A cheap hack, if this is non-null and a message to an
* interface comes in with a NULL user, call this routine with
* it. Note that the message will still be freed by the
* caller. This only works on the system interface.
*
* Protected by bmc_reg_mutex.
*/
void (*null_user_handler)(struct ipmi_smi *intf,
struct ipmi_recv_msg *msg);
/*
* When we are scanning the channels for an SMI, this will
* tell which channel we are scanning.
*/
int curr_channel;
/* Channel information */
struct ipmi_channel_set *channel_list;
unsigned int curr_working_cset; /* First index into the following. */
struct ipmi_channel_set wchannels[2];
struct ipmi_my_addrinfo addrinfo[IPMI_MAX_CHANNELS];
bool channels_ready;
atomic_t stats[IPMI_NUM_STATS];
/*
* run_to_completion duplicate of smb_info, smi_info
	 * and ipmi_serial_info structures. Used to decrease the number
	 * of parameters passed by "low" level IPMI code.
*/
int run_to_completion;
};
#define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev)
static void __get_guid(struct ipmi_smi *intf);
static void __ipmi_bmc_unregister(struct ipmi_smi *intf);
static int __ipmi_bmc_register(struct ipmi_smi *intf,
struct ipmi_device_id *id,
bool guid_set, guid_t *guid, int intf_num);
static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id);
/*
* The driver model view of the IPMI messaging driver.
*/
static struct platform_driver ipmidriver = {
.driver = {
.name = "ipmi",
.bus = &platform_bus_type
}
};
/*
* This mutex keeps us from adding the same BMC twice.
*/
static DEFINE_MUTEX(ipmidriver_mutex);
static LIST_HEAD(ipmi_interfaces);
static DEFINE_MUTEX(ipmi_interfaces_mutex);
DEFINE_STATIC_SRCU(ipmi_interfaces_srcu);
/*
* List of watchers that want to know when smi's are added and deleted.
*/
static LIST_HEAD(smi_watchers);
static DEFINE_MUTEX(smi_watchers_mutex);
#define ipmi_inc_stat(intf, stat) \
atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
#define ipmi_get_stat(intf, stat) \
((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
static const char * const addr_src_to_str[] = {
"invalid", "hotmod", "hardcoded", "SPMI", "ACPI", "SMBIOS", "PCI",
"device-tree", "platform"
};
const char *ipmi_addr_src_to_str(enum ipmi_addr_src src)
{
if (src >= SI_LAST)
src = 0; /* Invalid */
return addr_src_to_str[src];
}
EXPORT_SYMBOL(ipmi_addr_src_to_str);
static int is_lan_addr(struct ipmi_addr *addr)
{
return addr->addr_type == IPMI_LAN_ADDR_TYPE;
}
static int is_ipmb_addr(struct ipmi_addr *addr)
{
return addr->addr_type == IPMI_IPMB_ADDR_TYPE;
}
static int is_ipmb_bcast_addr(struct ipmi_addr *addr)
{
return addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE;
}
static void free_recv_msg_list(struct list_head *q)
{
struct ipmi_recv_msg *msg, *msg2;
list_for_each_entry_safe(msg, msg2, q, link) {
list_del(&msg->link);
ipmi_free_recv_msg(msg);
}
}
static void free_smi_msg_list(struct list_head *q)
{
struct ipmi_smi_msg *msg, *msg2;
list_for_each_entry_safe(msg, msg2, q, link) {
list_del(&msg->link);
ipmi_free_smi_msg(msg);
}
}
static void clean_up_interface_data(struct ipmi_smi *intf)
{
int i;
struct cmd_rcvr *rcvr, *rcvr2;
struct list_head list;
tasklet_kill(&intf->recv_tasklet);
free_smi_msg_list(&intf->waiting_rcv_msgs);
free_recv_msg_list(&intf->waiting_events);
/*
* Wholesale remove all the entries from the list in the
* interface and wait for RCU to know that none are in use.
*/
mutex_lock(&intf->cmd_rcvrs_mutex);
INIT_LIST_HEAD(&list);
list_splice_init_rcu(&intf->cmd_rcvrs, &list, synchronize_rcu);
mutex_unlock(&intf->cmd_rcvrs_mutex);
list_for_each_entry_safe(rcvr, rcvr2, &list, link)
kfree(rcvr);
for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
if ((intf->seq_table[i].inuse)
&& (intf->seq_table[i].recv_msg))
ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
}
}
static void intf_free(struct kref *ref)
{
struct ipmi_smi *intf = container_of(ref, struct ipmi_smi, refcount);
clean_up_interface_data(intf);
kfree(intf);
}
struct watcher_entry {
int intf_num;
struct ipmi_smi *intf;
struct list_head link;
};
int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
{
struct ipmi_smi *intf;
int index;
mutex_lock(&smi_watchers_mutex);
list_add(&watcher->link, &smi_watchers);
index = srcu_read_lock(&ipmi_interfaces_srcu);
list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
int intf_num = READ_ONCE(intf->intf_num);
if (intf_num == -1)
continue;
watcher->new_smi(intf_num, intf->si_dev);
}
srcu_read_unlock(&ipmi_interfaces_srcu, index);
mutex_unlock(&smi_watchers_mutex);
return 0;
}
EXPORT_SYMBOL(ipmi_smi_watcher_register);
int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
{
mutex_lock(&smi_watchers_mutex);
list_del(&watcher->link);
mutex_unlock(&smi_watchers_mutex);
return 0;
}
EXPORT_SYMBOL(ipmi_smi_watcher_unregister);
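/*
 * Illustrative sketch (disabled; the names are hypothetical, not part
 * of this driver) of how a client module uses the watcher API above.
 * new_smi() is called for every interface that already exists at
 * registration time and for every one that appears later:
 */
#if 0
static void example_new_smi(int if_num, struct device *dev)
{
	pr_info("IPMI interface %d appeared\n", if_num);
}

static void example_smi_gone(int if_num)
{
	pr_info("IPMI interface %d went away\n", if_num);
}

static struct ipmi_smi_watcher example_watcher = {
	.owner	  = THIS_MODULE,
	.new_smi  = example_new_smi,
	.smi_gone = example_smi_gone,
};

/* In the client's init code: ipmi_smi_watcher_register(&example_watcher); */
#endif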
/*
* Must be called with smi_watchers_mutex held.
*/
static void
call_smi_watchers(int i, struct device *dev)
{
struct ipmi_smi_watcher *w;
mutex_lock(&smi_watchers_mutex);
list_for_each_entry(w, &smi_watchers, link) {
if (try_module_get(w->owner)) {
w->new_smi(i, dev);
module_put(w->owner);
}
}
mutex_unlock(&smi_watchers_mutex);
}
static int
ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2)
{
if (addr1->addr_type != addr2->addr_type)
return 0;
if (addr1->channel != addr2->channel)
return 0;
if (addr1->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
struct ipmi_system_interface_addr *smi_addr1
= (struct ipmi_system_interface_addr *) addr1;
struct ipmi_system_interface_addr *smi_addr2
= (struct ipmi_system_interface_addr *) addr2;
return (smi_addr1->lun == smi_addr2->lun);
}
if (is_ipmb_addr(addr1) || is_ipmb_bcast_addr(addr1)) {
struct ipmi_ipmb_addr *ipmb_addr1
= (struct ipmi_ipmb_addr *) addr1;
struct ipmi_ipmb_addr *ipmb_addr2
= (struct ipmi_ipmb_addr *) addr2;
return ((ipmb_addr1->slave_addr == ipmb_addr2->slave_addr)
&& (ipmb_addr1->lun == ipmb_addr2->lun));
}
if (is_lan_addr(addr1)) {
struct ipmi_lan_addr *lan_addr1
= (struct ipmi_lan_addr *) addr1;
struct ipmi_lan_addr *lan_addr2
= (struct ipmi_lan_addr *) addr2;
return ((lan_addr1->remote_SWID == lan_addr2->remote_SWID)
&& (lan_addr1->local_SWID == lan_addr2->local_SWID)
&& (lan_addr1->session_handle
== lan_addr2->session_handle)
&& (lan_addr1->lun == lan_addr2->lun));
}
return 1;
}
int ipmi_validate_addr(struct ipmi_addr *addr, int len)
{
if (len < sizeof(struct ipmi_system_interface_addr))
return -EINVAL;
if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
if (addr->channel != IPMI_BMC_CHANNEL)
return -EINVAL;
return 0;
}
if ((addr->channel == IPMI_BMC_CHANNEL)
|| (addr->channel >= IPMI_MAX_CHANNELS)
|| (addr->channel < 0))
return -EINVAL;
if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) {
if (len < sizeof(struct ipmi_ipmb_addr))
return -EINVAL;
return 0;
}
if (is_lan_addr(addr)) {
if (len < sizeof(struct ipmi_lan_addr))
return -EINVAL;
return 0;
}
return -EINVAL;
}
EXPORT_SYMBOL(ipmi_validate_addr);
unsigned int ipmi_addr_length(int addr_type)
{
if (addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
return sizeof(struct ipmi_system_interface_addr);
if ((addr_type == IPMI_IPMB_ADDR_TYPE)
|| (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
return sizeof(struct ipmi_ipmb_addr);
if (addr_type == IPMI_LAN_ADDR_TYPE)
return sizeof(struct ipmi_lan_addr);
return 0;
}
EXPORT_SYMBOL(ipmi_addr_length);
static int deliver_response(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
{
int rv = 0;
if (!msg->user) {
/* Special handling for NULL users. */
if (intf->null_user_handler) {
intf->null_user_handler(intf, msg);
} else {
/* No handler, so give up. */
rv = -EINVAL;
}
ipmi_free_recv_msg(msg);
} else if (!oops_in_progress) {
/*
* If we are running in the panic context, calling the
		 * receive handler doesn't have much meaning and has a deadlock
* risk. At this moment, simply skip it in that case.
*/
int index;
struct ipmi_user *user = acquire_ipmi_user(msg->user, &index);
if (user) {
user->handler->ipmi_recv_hndl(msg, user->handler_data);
release_ipmi_user(user, index);
} else {
/* User went away, give up. */
ipmi_free_recv_msg(msg);
rv = -EINVAL;
}
}
return rv;
}
static void deliver_local_response(struct ipmi_smi *intf,
struct ipmi_recv_msg *msg)
{
if (deliver_response(intf, msg))
ipmi_inc_stat(intf, unhandled_local_responses);
else
ipmi_inc_stat(intf, handled_local_responses);
}
static void deliver_err_response(struct ipmi_smi *intf,
struct ipmi_recv_msg *msg, int err)
{
msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
msg->msg_data[0] = err;
msg->msg.netfn |= 1; /* Convert to a response. */
msg->msg.data_len = 1;
msg->msg.data = msg->msg_data;
deliver_local_response(intf, msg);
}
/*
* Find the next sequence number not being used and add the given
* message with the given timeout to the sequence table. This must be
* called with the interface's seq_lock held.
*/
static int intf_next_seq(struct ipmi_smi *intf,
struct ipmi_recv_msg *recv_msg,
unsigned long timeout,
int retries,
int broadcast,
unsigned char *seq,
long *seqid)
{
int rv = 0;
unsigned int i;
if (timeout == 0)
timeout = default_retry_ms;
if (retries < 0)
retries = default_max_retries;
for (i = intf->curr_seq; (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
i = (i+1)%IPMI_IPMB_NUM_SEQ) {
if (!intf->seq_table[i].inuse)
break;
}
if (!intf->seq_table[i].inuse) {
intf->seq_table[i].recv_msg = recv_msg;
/*
* Start with the maximum timeout, when the send response
* comes in we will start the real timer.
*/
intf->seq_table[i].timeout = MAX_MSG_TIMEOUT;
intf->seq_table[i].orig_timeout = timeout;
intf->seq_table[i].retries_left = retries;
intf->seq_table[i].broadcast = broadcast;
intf->seq_table[i].inuse = 1;
intf->seq_table[i].seqid = NEXT_SEQID(intf->seq_table[i].seqid);
*seq = i;
*seqid = intf->seq_table[i].seqid;
intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
need_waiter(intf);
} else {
rv = -EAGAIN;
}
return rv;
}
/*
* Return the receive message for the given sequence number and
* release the sequence number so it can be reused. Some other data
* is passed in to be sure the message matches up correctly (to help
 * guard against messages coming in after their timeout and the
* sequence number being reused).
*/
static int intf_find_seq(struct ipmi_smi *intf,
unsigned char seq,
short channel,
unsigned char cmd,
unsigned char netfn,
struct ipmi_addr *addr,
struct ipmi_recv_msg **recv_msg)
{
int rv = -ENODEV;
unsigned long flags;
if (seq >= IPMI_IPMB_NUM_SEQ)
return -EINVAL;
spin_lock_irqsave(&intf->seq_lock, flags);
if (intf->seq_table[seq].inuse) {
struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;
if ((msg->addr.channel == channel) && (msg->msg.cmd == cmd)
&& (msg->msg.netfn == netfn)
&& (ipmi_addr_equal(addr, &msg->addr))) {
*recv_msg = msg;
intf->seq_table[seq].inuse = 0;
rv = 0;
}
}
spin_unlock_irqrestore(&intf->seq_lock, flags);
return rv;
}
/* Start the timer for a specific sequence table entry. */
static int intf_start_seq_timer(struct ipmi_smi *intf,
long msgid)
{
int rv = -ENODEV;
unsigned long flags;
unsigned char seq;
unsigned long seqid;
GET_SEQ_FROM_MSGID(msgid, seq, seqid);
spin_lock_irqsave(&intf->seq_lock, flags);
/*
* We do this verification because the user can be deleted
* while a message is outstanding.
*/
if ((intf->seq_table[seq].inuse)
&& (intf->seq_table[seq].seqid == seqid)) {
struct seq_table *ent = &intf->seq_table[seq];
ent->timeout = ent->orig_timeout;
rv = 0;
}
spin_unlock_irqrestore(&intf->seq_lock, flags);
return rv;
}
/* Got an error for the send message for a specific sequence number. */
static int intf_err_seq(struct ipmi_smi *intf,
long msgid,
unsigned int err)
{
int rv = -ENODEV;
unsigned long flags;
unsigned char seq;
unsigned long seqid;
struct ipmi_recv_msg *msg = NULL;
GET_SEQ_FROM_MSGID(msgid, seq, seqid);
spin_lock_irqsave(&intf->seq_lock, flags);
/*
* We do this verification because the user can be deleted
* while a message is outstanding.
*/
if ((intf->seq_table[seq].inuse)
&& (intf->seq_table[seq].seqid == seqid)) {
struct seq_table *ent = &intf->seq_table[seq];
ent->inuse = 0;
msg = ent->recv_msg;
rv = 0;
}
spin_unlock_irqrestore(&intf->seq_lock, flags);
if (msg)
deliver_err_response(intf, msg, err);
return rv;
}
int ipmi_create_user(unsigned int if_num,
const struct ipmi_user_hndl *handler,
void *handler_data,
struct ipmi_user **user)
{
unsigned long flags;
struct ipmi_user *new_user;
int rv = 0, index;
struct ipmi_smi *intf;
/*
* There is no module usecount here, because it's not
* required. Since this can only be used by and called from
* other modules, they will implicitly use this module, and
* thus this can't be removed unless the other modules are
* removed.
*/
if (handler == NULL)
return -EINVAL;
/*
* Make sure the driver is actually initialized, this handles
* problems with initialization order.
*/
if (!initialized) {
rv = ipmi_init_msghandler();
if (rv)
return rv;
/*
* The init code doesn't return an error if it was turned
* off, but it won't initialize. Check that.
*/
if (!initialized)
return -ENODEV;
}
new_user = kmalloc(sizeof(*new_user), GFP_KERNEL);
if (!new_user)
return -ENOMEM;
index = srcu_read_lock(&ipmi_interfaces_srcu);
list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
if (intf->intf_num == if_num)
goto found;
}
/* Not found, return an error */
rv = -EINVAL;
goto out_kfree;
found:
rv = init_srcu_struct(&new_user->release_barrier);
if (rv)
goto out_kfree;
/* Note that each existing user holds a refcount to the interface. */
kref_get(&intf->refcount);
kref_init(&new_user->refcount);
new_user->handler = handler;
new_user->handler_data = handler_data;
new_user->intf = intf;
new_user->gets_events = false;
rcu_assign_pointer(new_user->self, new_user);
spin_lock_irqsave(&intf->seq_lock, flags);
list_add_rcu(&new_user->link, &intf->users);
spin_unlock_irqrestore(&intf->seq_lock, flags);
if (handler->ipmi_watchdog_pretimeout) {
/* User wants pretimeouts, so make sure to watch for them. */
if (atomic_inc_return(&intf->event_waiters) == 1)
need_waiter(intf);
}
srcu_read_unlock(&ipmi_interfaces_srcu, index);
*user = new_user;
return 0;
out_kfree:
srcu_read_unlock(&ipmi_interfaces_srcu, index);
kfree(new_user);
return rv;
}
EXPORT_SYMBOL(ipmi_create_user);
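/*
 * Minimal sketch (disabled; names are hypothetical) of a client of
 * ipmi_create_user() above. The receive handler runs for every message
 * delivered to this user and must free the message when done:
 */
#if 0
static void example_recv(struct ipmi_recv_msg *msg, void *user_msg_data)
{
	/* Inspect msg->msg / msg->addr here, then release the message. */
	ipmi_free_recv_msg(msg);
}

static const struct ipmi_user_hndl example_hndl = {
	.ipmi_recv_hndl = example_recv,
};

static int example_attach(unsigned int if_num, struct ipmi_user **user)
{
	return ipmi_create_user(if_num, &example_hndl, NULL, user);
}
#endif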
int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data)
{
int rv, index;
struct ipmi_smi *intf;
index = srcu_read_lock(&ipmi_interfaces_srcu);
list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
if (intf->intf_num == if_num)
goto found;
}
srcu_read_unlock(&ipmi_interfaces_srcu, index);
/* Not found, return an error */
return -EINVAL;
found:
if (!intf->handlers->get_smi_info)
rv = -ENOTTY;
else
rv = intf->handlers->get_smi_info(intf->send_info, data);
srcu_read_unlock(&ipmi_interfaces_srcu, index);
return rv;
}
EXPORT_SYMBOL(ipmi_get_smi_info);
static void free_user(struct kref *ref)
{
struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
cleanup_srcu_struct(&user->release_barrier);
kfree(user);
}
static void _ipmi_destroy_user(struct ipmi_user *user)
{
struct ipmi_smi *intf = user->intf;
int i;
unsigned long flags;
struct cmd_rcvr *rcvr;
struct cmd_rcvr *rcvrs = NULL;
if (!acquire_ipmi_user(user, &i)) {
/*
* The user has already been cleaned up, just make sure
* nothing is using it and return.
*/
synchronize_srcu(&user->release_barrier);
return;
}
rcu_assign_pointer(user->self, NULL);
release_ipmi_user(user, i);
synchronize_srcu(&user->release_barrier);
if (user->handler->shutdown)
user->handler->shutdown(user->handler_data);
if (user->handler->ipmi_watchdog_pretimeout)
atomic_dec(&intf->event_waiters);
if (user->gets_events)
atomic_dec(&intf->event_waiters);
/* Remove the user from the interface's sequence table. */
spin_lock_irqsave(&intf->seq_lock, flags);
list_del_rcu(&user->link);
for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
if (intf->seq_table[i].inuse
&& (intf->seq_table[i].recv_msg->user == user)) {
intf->seq_table[i].inuse = 0;
ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
}
}
spin_unlock_irqrestore(&intf->seq_lock, flags);
/*
* Remove the user from the command receiver's table. First
* we build a list of everything (not using the standard link,
* since other things may be using it till we do
* synchronize_srcu()) then free everything in that list.
*/
mutex_lock(&intf->cmd_rcvrs_mutex);
list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
if (rcvr->user == user) {
list_del_rcu(&rcvr->link);
rcvr->next = rcvrs;
rcvrs = rcvr;
}
}
mutex_unlock(&intf->cmd_rcvrs_mutex);
synchronize_rcu();
while (rcvrs) {
rcvr = rcvrs;
rcvrs = rcvr->next;
kfree(rcvr);
}
kref_put(&intf->refcount, intf_free);
}
int ipmi_destroy_user(struct ipmi_user *user)
{
_ipmi_destroy_user(user);
kref_put(&user->refcount, free_user);
return 0;
}
EXPORT_SYMBOL(ipmi_destroy_user);
int ipmi_get_version(struct ipmi_user *user,
unsigned char *major,
unsigned char *minor)
{
struct ipmi_device_id id;
int rv, index;
user = acquire_ipmi_user(user, &index);
if (!user)
return -ENODEV;
rv = bmc_get_device_id(user->intf, NULL, &id, NULL, NULL);
if (!rv) {
*major = ipmi_version_major(&id);
*minor = ipmi_version_minor(&id);
}
release_ipmi_user(user, index);
return rv;
}
EXPORT_SYMBOL(ipmi_get_version);
int ipmi_set_my_address(struct ipmi_user *user,
unsigned int channel,
unsigned char address)
{
int index, rv = 0;
user = acquire_ipmi_user(user, &index);
if (!user)
return -ENODEV;
if (channel >= IPMI_MAX_CHANNELS) {
rv = -EINVAL;
} else {
channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
user->intf->addrinfo[channel].address = address;
}
release_ipmi_user(user, index);
return rv;
}
EXPORT_SYMBOL(ipmi_set_my_address);
int ipmi_get_my_address(struct ipmi_user *user,
unsigned int channel,
unsigned char *address)
{
int index, rv = 0;
user = acquire_ipmi_user(user, &index);
if (!user)
return -ENODEV;
if (channel >= IPMI_MAX_CHANNELS) {
rv = -EINVAL;
} else {
channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
*address = user->intf->addrinfo[channel].address;
}
release_ipmi_user(user, index);
return rv;
}
EXPORT_SYMBOL(ipmi_get_my_address);
int ipmi_set_my_LUN(struct ipmi_user *user,
unsigned int channel,
unsigned char LUN)
{
int index, rv = 0;
user = acquire_ipmi_user(user, &index);
if (!user)
return -ENODEV;
if (channel >= IPMI_MAX_CHANNELS) {
rv = -EINVAL;
} else {
channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
user->intf->addrinfo[channel].lun = LUN & 0x3;
}
release_ipmi_user(user, index);
return rv;
}
EXPORT_SYMBOL(ipmi_set_my_LUN);
int ipmi_get_my_LUN(struct ipmi_user *user,
unsigned int channel,
unsigned char *address)
{
int index, rv = 0;
user = acquire_ipmi_user(user, &index);
if (!user)
return -ENODEV;
if (channel >= IPMI_MAX_CHANNELS) {
rv = -EINVAL;
} else {
channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
*address = user->intf->addrinfo[channel].lun;
}
release_ipmi_user(user, index);
return rv;
}
EXPORT_SYMBOL(ipmi_get_my_LUN);
int ipmi_get_maintenance_mode(struct ipmi_user *user)
{
int mode, index;
unsigned long flags;
user = acquire_ipmi_user(user, &index);
if (!user)
return -ENODEV;
spin_lock_irqsave(&user->intf->maintenance_mode_lock, flags);
mode = user->intf->maintenance_mode;
spin_unlock_irqrestore(&user->intf->maintenance_mode_lock, flags);
release_ipmi_user(user, index);
return mode;
}
EXPORT_SYMBOL(ipmi_get_maintenance_mode);
static void maintenance_mode_update(struct ipmi_smi *intf)
{
if (intf->handlers->set_maintenance_mode)
intf->handlers->set_maintenance_mode(
intf->send_info, intf->maintenance_mode_enable);
}
int ipmi_set_maintenance_mode(struct ipmi_user *user, int mode)
{
int rv = 0, index;
unsigned long flags;
struct ipmi_smi *intf = user->intf;
user = acquire_ipmi_user(user, &index);
if (!user)
return -ENODEV;
spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
if (intf->maintenance_mode != mode) {
switch (mode) {
case IPMI_MAINTENANCE_MODE_AUTO:
intf->maintenance_mode_enable
= (intf->auto_maintenance_timeout > 0);
break;
case IPMI_MAINTENANCE_MODE_OFF:
intf->maintenance_mode_enable = false;
break;
case IPMI_MAINTENANCE_MODE_ON:
intf->maintenance_mode_enable = true;
break;
default:
rv = -EINVAL;
goto out_unlock;
}
intf->maintenance_mode = mode;
maintenance_mode_update(intf);
}
out_unlock:
spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags);
release_ipmi_user(user, index);
return rv;
}
EXPORT_SYMBOL(ipmi_set_maintenance_mode);
int ipmi_set_gets_events(struct ipmi_user *user, bool val)
{
unsigned long flags;
struct ipmi_smi *intf = user->intf;
struct ipmi_recv_msg *msg, *msg2;
struct list_head msgs;
int index;
user = acquire_ipmi_user(user, &index);
if (!user)
return -ENODEV;
INIT_LIST_HEAD(&msgs);
spin_lock_irqsave(&intf->events_lock, flags);
if (user->gets_events == val)
goto out;
user->gets_events = val;
if (val) {
if (atomic_inc_return(&intf->event_waiters) == 1)
need_waiter(intf);
} else {
atomic_dec(&intf->event_waiters);
}
if (intf->delivering_events)
/*
* Another thread is delivering events for this, so
* let it handle any new events.
*/
goto out;
/* Deliver any queued events. */
while (user->gets_events && !list_empty(&intf->waiting_events)) {
list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link)
list_move_tail(&msg->link, &msgs);
intf->waiting_events_count = 0;
if (intf->event_msg_printed) {
dev_warn(intf->si_dev, "Event queue no longer full\n");
intf->event_msg_printed = 0;
}
intf->delivering_events = 1;
spin_unlock_irqrestore(&intf->events_lock, flags);
list_for_each_entry_safe(msg, msg2, &msgs, link) {
msg->user = user;
kref_get(&user->refcount);
deliver_local_response(intf, msg);
}
spin_lock_irqsave(&intf->events_lock, flags);
intf->delivering_events = 0;
}
out:
spin_unlock_irqrestore(&intf->events_lock, flags);
release_ipmi_user(user, index);
return 0;
}
EXPORT_SYMBOL(ipmi_set_gets_events);
static struct cmd_rcvr *find_cmd_rcvr(struct ipmi_smi *intf,
unsigned char netfn,
unsigned char cmd,
unsigned char chan)
{
struct cmd_rcvr *rcvr;
list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
&& (rcvr->chans & (1 << chan)))
return rcvr;
}
return NULL;
}
static int is_cmd_rcvr_exclusive(struct ipmi_smi *intf,
unsigned char netfn,
unsigned char cmd,
unsigned int chans)
{
struct cmd_rcvr *rcvr;
list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
&& (rcvr->chans & chans))
return 0;
}
return 1;
}
int ipmi_register_for_cmd(struct ipmi_user *user,
unsigned char netfn,
unsigned char cmd,
unsigned int chans)
{
struct ipmi_smi *intf = user->intf;
struct cmd_rcvr *rcvr;
int rv = 0, index;
user = acquire_ipmi_user(user, &index);
if (!user)
return -ENODEV;
rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL);
if (!rcvr) {
rv = -ENOMEM;
goto out_release;
}
rcvr->cmd = cmd;
rcvr->netfn = netfn;
rcvr->chans = chans;
rcvr->user = user;
mutex_lock(&intf->cmd_rcvrs_mutex);
/* Make sure the command/netfn is not already registered. */
if (!is_cmd_rcvr_exclusive(intf, netfn, cmd, chans)) {
rv = -EBUSY;
goto out_unlock;
}
if (atomic_inc_return(&intf->event_waiters) == 1)
need_waiter(intf);
list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);
out_unlock:
mutex_unlock(&intf->cmd_rcvrs_mutex);
if (rv)
kfree(rcvr);
out_release:
release_ipmi_user(user, index);
return rv;
}
EXPORT_SYMBOL(ipmi_register_for_cmd);
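/*
 * Sketch (disabled; the cmd value is illustrative) of registering to
 * receive a command with the function above. The chans argument is a
 * bitmask of channel numbers, so (1 << 0) selects channel 0 only:
 */
#if 0
static int example_register(struct ipmi_user *user)
{
	return ipmi_register_for_cmd(user, IPMI_NETFN_APP_REQUEST,
				     0x01, 1 << 0);
}
#endif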
int ipmi_unregister_for_cmd(struct ipmi_user *user,
unsigned char netfn,
unsigned char cmd,
unsigned int chans)
{
struct ipmi_smi *intf = user->intf;
struct cmd_rcvr *rcvr;
struct cmd_rcvr *rcvrs = NULL;
int i, rv = -ENOENT, index;
user = acquire_ipmi_user(user, &index);
if (!user)
return -ENODEV;
mutex_lock(&intf->cmd_rcvrs_mutex);
for (i = 0; i < IPMI_NUM_CHANNELS; i++) {
if (((1 << i) & chans) == 0)
continue;
rcvr = find_cmd_rcvr(intf, netfn, cmd, i);
if (rcvr == NULL)
continue;
if (rcvr->user == user) {
rv = 0;
rcvr->chans &= ~chans;
if (rcvr->chans == 0) {
list_del_rcu(&rcvr->link);
rcvr->next = rcvrs;
rcvrs = rcvr;
}
}
}
mutex_unlock(&intf->cmd_rcvrs_mutex);
synchronize_rcu();
release_ipmi_user(user, index);
while (rcvrs) {
atomic_dec(&intf->event_waiters);
rcvr = rcvrs;
rcvrs = rcvr->next;
kfree(rcvr);
}
return rv;
}
EXPORT_SYMBOL(ipmi_unregister_for_cmd);
static unsigned char
ipmb_checksum(unsigned char *data, int size)
{
unsigned char csum = 0;
for (; size > 0; size--, data++)
csum += *data;
return -csum;
}
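/*
 * The checksum above is the two's complement of the 8-bit sum, so for
 * any buffer (sum of bytes + ipmb_checksum(buf, len)) & 0xff == 0.
 * Example: for { 0x20, 0x18 } the sum is 0x38 and the function returns
 * 0xc8, since (0x38 + 0xc8) & 0xff == 0.
 */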
static inline void format_ipmb_msg(struct ipmi_smi_msg *smi_msg,
struct kernel_ipmi_msg *msg,
struct ipmi_ipmb_addr *ipmb_addr,
long msgid,
unsigned char ipmb_seq,
int broadcast,
unsigned char source_address,
unsigned char source_lun)
{
int i = broadcast;
/* Format the IPMB header data. */
smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
smi_msg->data[1] = IPMI_SEND_MSG_CMD;
smi_msg->data[2] = ipmb_addr->channel;
if (broadcast)
smi_msg->data[3] = 0;
smi_msg->data[i+3] = ipmb_addr->slave_addr;
smi_msg->data[i+4] = (msg->netfn << 2) | (ipmb_addr->lun & 0x3);
smi_msg->data[i+5] = ipmb_checksum(&smi_msg->data[i + 3], 2);
smi_msg->data[i+6] = source_address;
smi_msg->data[i+7] = (ipmb_seq << 2) | source_lun;
smi_msg->data[i+8] = msg->cmd;
/* Now tack on the data to the message. */
if (msg->data_len > 0)
memcpy(&smi_msg->data[i + 9], msg->data, msg->data_len);
smi_msg->data_size = msg->data_len + 9;
/* Now calculate the checksum and tack it on. */
smi_msg->data[i+smi_msg->data_size]
= ipmb_checksum(&smi_msg->data[i + 6], smi_msg->data_size - 6);
/*
* Add on the checksum size and the offset from the
* broadcast.
*/
smi_msg->data_size += 1 + i;
smi_msg->msgid = msgid;
}
static inline void format_lan_msg(struct ipmi_smi_msg *smi_msg,
struct kernel_ipmi_msg *msg,
struct ipmi_lan_addr *lan_addr,
long msgid,
unsigned char ipmb_seq,
unsigned char source_lun)
{
/* Format the IPMB header data. */
smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
smi_msg->data[1] = IPMI_SEND_MSG_CMD;
smi_msg->data[2] = lan_addr->channel;
smi_msg->data[3] = lan_addr->session_handle;
smi_msg->data[4] = lan_addr->remote_SWID;
smi_msg->data[5] = (msg->netfn << 2) | (lan_addr->lun & 0x3);
smi_msg->data[6] = ipmb_checksum(&smi_msg->data[4], 2);
smi_msg->data[7] = lan_addr->local_SWID;
smi_msg->data[8] = (ipmb_seq << 2) | source_lun;
smi_msg->data[9] = msg->cmd;
/* Now tack on the data to the message. */
if (msg->data_len > 0)
memcpy(&smi_msg->data[10], msg->data, msg->data_len);
smi_msg->data_size = msg->data_len + 10;
/* Now calculate the checksum and tack it on. */
smi_msg->data[smi_msg->data_size]
= ipmb_checksum(&smi_msg->data[7], smi_msg->data_size - 7);
/*
* Add on the checksum size and the offset from the
* broadcast.
*/
smi_msg->data_size += 1;
smi_msg->msgid = msgid;
}
static struct ipmi_smi_msg *smi_add_send_msg(struct ipmi_smi *intf,
struct ipmi_smi_msg *smi_msg,
int priority)
{
if (intf->curr_msg) {
if (priority > 0)
list_add_tail(&smi_msg->link, &intf->hp_xmit_msgs);
else
list_add_tail(&smi_msg->link, &intf->xmit_msgs);
smi_msg = NULL;
} else {
intf->curr_msg = smi_msg;
}
return smi_msg;
}
static void smi_send(struct ipmi_smi *intf,
const struct ipmi_smi_handlers *handlers,
struct ipmi_smi_msg *smi_msg, int priority)
{
int run_to_completion = intf->run_to_completion;
if (run_to_completion) {
smi_msg = smi_add_send_msg(intf, smi_msg, priority);
} else {
unsigned long flags;
spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
smi_msg = smi_add_send_msg(intf, smi_msg, priority);
spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
}
if (smi_msg)
handlers->sender(intf->send_info, smi_msg);
}
static bool is_maintenance_mode_cmd(struct kernel_ipmi_msg *msg)
{
return (((msg->netfn == IPMI_NETFN_APP_REQUEST)
&& ((msg->cmd == IPMI_COLD_RESET_CMD)
|| (msg->cmd == IPMI_WARM_RESET_CMD)))
|| (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST));
}
static int i_ipmi_req_sysintf(struct ipmi_smi *intf,
struct ipmi_addr *addr,
long msgid,
struct kernel_ipmi_msg *msg,
struct ipmi_smi_msg *smi_msg,
struct ipmi_recv_msg *recv_msg,
int retries,
unsigned int retry_time_ms)
{
struct ipmi_system_interface_addr *smi_addr;
if (msg->netfn & 1)
/* Responses are not allowed to the SMI. */
return -EINVAL;
smi_addr = (struct ipmi_system_interface_addr *) addr;
if (smi_addr->lun > 3) {
ipmi_inc_stat(intf, sent_invalid_commands);
return -EINVAL;
}
memcpy(&recv_msg->addr, smi_addr, sizeof(*smi_addr));
if ((msg->netfn == IPMI_NETFN_APP_REQUEST)
&& ((msg->cmd == IPMI_SEND_MSG_CMD)
|| (msg->cmd == IPMI_GET_MSG_CMD)
|| (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD))) {
/*
* We don't let the user do these, since we manage
* the sequence numbers.
*/
ipmi_inc_stat(intf, sent_invalid_commands);
return -EINVAL;
}
if (is_maintenance_mode_cmd(msg)) {
unsigned long flags;
spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
intf->auto_maintenance_timeout
= maintenance_mode_timeout_ms;
if (!intf->maintenance_mode
&& !intf->maintenance_mode_enable) {
intf->maintenance_mode_enable = true;
maintenance_mode_update(intf);
}
spin_unlock_irqrestore(&intf->maintenance_mode_lock,
flags);
}
if (msg->data_len + 2 > IPMI_MAX_MSG_LENGTH) {
ipmi_inc_stat(intf, sent_invalid_commands);
return -EMSGSIZE;
}
smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3);
smi_msg->data[1] = msg->cmd;
smi_msg->msgid = msgid;
smi_msg->user_data = recv_msg;
if (msg->data_len > 0)
memcpy(&smi_msg->data[2], msg->data, msg->data_len);
smi_msg->data_size = msg->data_len + 2;
ipmi_inc_stat(intf, sent_local_commands);
return 0;
}
static int i_ipmi_req_ipmb(struct ipmi_smi *intf,
struct ipmi_addr *addr,
long msgid,
struct kernel_ipmi_msg *msg,
struct ipmi_smi_msg *smi_msg,
struct ipmi_recv_msg *recv_msg,
unsigned char source_address,
unsigned char source_lun,
int retries,
unsigned int retry_time_ms)
{
struct ipmi_ipmb_addr *ipmb_addr;
unsigned char ipmb_seq;
long seqid;
int broadcast = 0;
struct ipmi_channel *chans;
int rv = 0;
if (addr->channel >= IPMI_MAX_CHANNELS) {
ipmi_inc_stat(intf, sent_invalid_commands);
return -EINVAL;
}
chans = READ_ONCE(intf->channel_list)->c;
if (chans[addr->channel].medium != IPMI_CHANNEL_MEDIUM_IPMB) {
ipmi_inc_stat(intf, sent_invalid_commands);
return -EINVAL;
}
if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) {
/*
* Broadcasts add a zero at the beginning of the
		 * message, but otherwise are the same as an IPMB
* address.
*/
addr->addr_type = IPMI_IPMB_ADDR_TYPE;
broadcast = 1;
retries = 0; /* Don't retry broadcasts. */
}
/*
* 9 for the header and 1 for the checksum, plus
* possibly one for the broadcast.
*/
if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) {
ipmi_inc_stat(intf, sent_invalid_commands);
return -EMSGSIZE;
}
ipmb_addr = (struct ipmi_ipmb_addr *) addr;
if (ipmb_addr->lun > 3) {
ipmi_inc_stat(intf, sent_invalid_commands);
return -EINVAL;
}
memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr));
if (recv_msg->msg.netfn & 0x1) {
/*
* It's a response, so use the user's sequence
* from msgid.
*/
ipmi_inc_stat(intf, sent_ipmb_responses);
format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid,
msgid, broadcast,
source_address, source_lun);
/*
* Save the receive message so we can use it
* to deliver the response.
*/
smi_msg->user_data = recv_msg;
} else {
/* It's a command, so get a sequence for it. */
unsigned long flags;
spin_lock_irqsave(&intf->seq_lock, flags);
if (is_maintenance_mode_cmd(msg))
intf->ipmb_maintenance_mode_timeout =
maintenance_mode_timeout_ms;
if (intf->ipmb_maintenance_mode_timeout && retry_time_ms == 0)
/* Different default in maintenance mode */
retry_time_ms = default_maintenance_retry_ms;
/*
* Create a sequence number with a 1 second
* timeout and 4 retries.
*/
rv = intf_next_seq(intf,
recv_msg,
retry_time_ms,
retries,
broadcast,
&ipmb_seq,
&seqid);
if (rv)
/*
* We have used up all the sequence numbers,
* probably, so abort.
*/
goto out_err;
ipmi_inc_stat(intf, sent_ipmb_commands);
/*
* Store the sequence number in the message,
* so that when the send message response
* comes back we can start the timer.
*/
format_ipmb_msg(smi_msg, msg, ipmb_addr,
STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
ipmb_seq, broadcast,
source_address, source_lun);
/*
* Copy the message into the recv message data, so we
* can retransmit it later if necessary.
*/
memcpy(recv_msg->msg_data, smi_msg->data,
smi_msg->data_size);
recv_msg->msg.data = recv_msg->msg_data;
recv_msg->msg.data_len = smi_msg->data_size;
/*
* We don't unlock until here, because we need
* to copy the completed message into the
* recv_msg before we release the lock.
* Otherwise, race conditions may bite us. I
* know that's pretty paranoid, but I prefer
* to be correct.
*/
out_err:
spin_unlock_irqrestore(&intf->seq_lock, flags);
}
return rv;
}
static int i_ipmi_req_lan(struct ipmi_smi *intf,
struct ipmi_addr *addr,
long msgid,
struct kernel_ipmi_msg *msg,
struct ipmi_smi_msg *smi_msg,
struct ipmi_recv_msg *recv_msg,
unsigned char source_lun,
int retries,
unsigned int retry_time_ms)
{
struct ipmi_lan_addr *lan_addr;
unsigned char ipmb_seq;
long seqid;
struct ipmi_channel *chans;
int rv = 0;
if (addr->channel >= IPMI_MAX_CHANNELS) {
ipmi_inc_stat(intf, sent_invalid_commands);
return -EINVAL;
}
chans = READ_ONCE(intf->channel_list)->c;
if ((chans[addr->channel].medium
!= IPMI_CHANNEL_MEDIUM_8023LAN)
&& (chans[addr->channel].medium
!= IPMI_CHANNEL_MEDIUM_ASYNC)) {
ipmi_inc_stat(intf, sent_invalid_commands);
return -EINVAL;
}
/* 11 for the header and 1 for the checksum. */
if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) {
ipmi_inc_stat(intf, sent_invalid_commands);
return -EMSGSIZE;
}
lan_addr = (struct ipmi_lan_addr *) addr;
if (lan_addr->lun > 3) {
ipmi_inc_stat(intf, sent_invalid_commands);
return -EINVAL;
}
memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr));
if (recv_msg->msg.netfn & 0x1) {
/*
* It's a response, so use the user's sequence
* from msgid.
*/
ipmi_inc_stat(intf, sent_lan_responses);
format_lan_msg(smi_msg, msg, lan_addr, msgid,
msgid, source_lun);
/*
* Save the receive message so we can use it
* to deliver the response.
*/
smi_msg->user_data = recv_msg;
} else {
/* It's a command, so get a sequence for it. */
unsigned long flags;
spin_lock_irqsave(&intf->seq_lock, flags);
/*
* Create a sequence number with a 1 second
* timeout and 4 retries.
*/
rv = intf_next_seq(intf,
recv_msg,
retry_time_ms,
retries,
0,
&ipmb_seq,
&seqid);
if (rv)
/*
* We have used up all the sequence numbers,
* probably, so abort.
*/
goto out_err;
ipmi_inc_stat(intf, sent_lan_commands);
/*
* Store the sequence number in the message,
* so that when the send message response
* comes back we can start the timer.
*/
format_lan_msg(smi_msg, msg, lan_addr,
STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
ipmb_seq, source_lun);
/*
* Copy the message into the recv message data, so we
* can retransmit it later if necessary.
*/
memcpy(recv_msg->msg_data, smi_msg->data,
smi_msg->data_size);
recv_msg->msg.data = recv_msg->msg_data;
recv_msg->msg.data_len = smi_msg->data_size;
/*
* We don't unlock until here, because we need
* to copy the completed message into the
* recv_msg before we release the lock.
* Otherwise, race conditions may bite us. I
* know that's pretty paranoid, but I prefer
* to be correct.
*/
out_err:
spin_unlock_irqrestore(&intf->seq_lock, flags);
}
return rv;
}
/*
* Separate from ipmi_request so that the user does not have to be
* supplied in certain circumstances (mainly at panic time). If
* messages are supplied, they will be freed, even if an error
* occurs.
*/
static int i_ipmi_request(struct ipmi_user *user,
struct ipmi_smi *intf,
struct ipmi_addr *addr,
long msgid,
struct kernel_ipmi_msg *msg,
void *user_msg_data,
void *supplied_smi,
struct ipmi_recv_msg *supplied_recv,
int priority,
unsigned char source_address,
unsigned char source_lun,
int retries,
unsigned int retry_time_ms)
{
struct ipmi_smi_msg *smi_msg;
struct ipmi_recv_msg *recv_msg;
int rv = 0;
if (supplied_recv)
recv_msg = supplied_recv;
else {
recv_msg = ipmi_alloc_recv_msg();
if (recv_msg == NULL) {
rv = -ENOMEM;
goto out;
}
}
recv_msg->user_msg_data = user_msg_data;
if (supplied_smi)
smi_msg = (struct ipmi_smi_msg *) supplied_smi;
else {
smi_msg = ipmi_alloc_smi_msg();
if (smi_msg == NULL) {
ipmi_free_recv_msg(recv_msg);
rv = -ENOMEM;
goto out;
}
}
rcu_read_lock();
if (intf->in_shutdown) {
rv = -ENODEV;
goto out_err;
}
recv_msg->user = user;
if (user)
/* The put happens when the message is freed. */
kref_get(&user->refcount);
recv_msg->msgid = msgid;
/*
* Store the message to send in the receive message so timeout
* responses can get the proper response data.
*/
recv_msg->msg = *msg;
if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
rv = i_ipmi_req_sysintf(intf, addr, msgid, msg, smi_msg,
recv_msg, retries, retry_time_ms);
} else if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) {
rv = i_ipmi_req_ipmb(intf, addr, msgid, msg, smi_msg, recv_msg,
source_address, source_lun,
retries, retry_time_ms);
} else if (is_lan_addr(addr)) {
rv = i_ipmi_req_lan(intf, addr, msgid, msg, smi_msg, recv_msg,
source_lun, retries, retry_time_ms);
} else {
/* Unknown address type. */
ipmi_inc_stat(intf, sent_invalid_commands);
rv = -EINVAL;
}
if (rv) {
out_err:
ipmi_free_smi_msg(smi_msg);
ipmi_free_recv_msg(recv_msg);
} else {
ipmi_debug_msg("Send", smi_msg->data, smi_msg->data_size);
smi_send(intf, intf->handlers, smi_msg, priority);
}
rcu_read_unlock();
out:
return rv;
}
static int check_addr(struct ipmi_smi *intf,
struct ipmi_addr *addr,
unsigned char *saddr,
unsigned char *lun)
{
if (addr->channel >= IPMI_MAX_CHANNELS)
return -EINVAL;
addr->channel = array_index_nospec(addr->channel, IPMI_MAX_CHANNELS);
*lun = intf->addrinfo[addr->channel].lun;
*saddr = intf->addrinfo[addr->channel].address;
return 0;
}
int ipmi_request_settime(struct ipmi_user *user,
struct ipmi_addr *addr,
long msgid,
struct kernel_ipmi_msg *msg,
void *user_msg_data,
int priority,
int retries,
unsigned int retry_time_ms)
{
unsigned char saddr = 0, lun = 0;
int rv, index;
if (!user)
return -EINVAL;
user = acquire_ipmi_user(user, &index);
if (!user)
return -ENODEV;
rv = check_addr(user->intf, addr, &saddr, &lun);
if (!rv)
rv = i_ipmi_request(user,
user->intf,
addr,
msgid,
msg,
user_msg_data,
NULL, NULL,
priority,
saddr,
lun,
retries,
retry_time_ms);
release_ipmi_user(user, index);
return rv;
}
EXPORT_SYMBOL(ipmi_request_settime);
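/*
 * Sketch (disabled; the msgid and retry values are illustrative) of
 * sending a request with explicit retry parameters through the function
 * above; compare send_get_device_id_cmd() below, which builds the same
 * request through i_ipmi_request() directly:
 */
#if 0
static int example_send_get_device_id(struct ipmi_user *user)
{
	struct ipmi_system_interface_addr si = {
		.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE,
		.channel   = IPMI_BMC_CHANNEL,
		.lun	   = 0,
	};
	struct kernel_ipmi_msg msg = {
		.netfn	  = IPMI_NETFN_APP_REQUEST,
		.cmd	  = IPMI_GET_DEVICE_ID_CMD,
		.data	  = NULL,
		.data_len = 0,
	};

	/* msgid 0x1234 is echoed back in the response; 3 retries, 2s apart. */
	return ipmi_request_settime(user, (struct ipmi_addr *) &si, 0x1234,
				    &msg, NULL, 0, 3, 2000);
}
#endif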
int ipmi_request_supply_msgs(struct ipmi_user *user,
struct ipmi_addr *addr,
long msgid,
struct kernel_ipmi_msg *msg,
void *user_msg_data,
void *supplied_smi,
struct ipmi_recv_msg *supplied_recv,
int priority)
{
unsigned char saddr = 0, lun = 0;
int rv, index;
if (!user)
return -EINVAL;
user = acquire_ipmi_user(user, &index);
if (!user)
return -ENODEV;
rv = check_addr(user->intf, addr, &saddr, &lun);
if (!rv)
rv = i_ipmi_request(user,
user->intf,
addr,
msgid,
msg,
user_msg_data,
supplied_smi,
supplied_recv,
priority,
saddr,
lun,
-1, 0);
release_ipmi_user(user, index);
return rv;
}
EXPORT_SYMBOL(ipmi_request_supply_msgs);
static void bmc_device_id_handler(struct ipmi_smi *intf,
struct ipmi_recv_msg *msg)
{
int rv;
if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
|| (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
|| (msg->msg.cmd != IPMI_GET_DEVICE_ID_CMD)) {
dev_warn(intf->si_dev,
"invalid device_id msg: addr_type=%d netfn=%x cmd=%x\n",
msg->addr.addr_type, msg->msg.netfn, msg->msg.cmd);
return;
}
rv = ipmi_demangle_device_id(msg->msg.netfn, msg->msg.cmd,
msg->msg.data, msg->msg.data_len, &intf->bmc->fetch_id);
if (rv) {
dev_warn(intf->si_dev, "device id demangle failed: %d\n", rv);
intf->bmc->dyn_id_set = 0;
} else {
/*
* Make sure the id data is available before setting
* dyn_id_set.
*/
smp_wmb();
intf->bmc->dyn_id_set = 1;
}
wake_up(&intf->waitq);
}
static int
send_get_device_id_cmd(struct ipmi_smi *intf)
{
struct ipmi_system_interface_addr si;
struct kernel_ipmi_msg msg;
si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
si.channel = IPMI_BMC_CHANNEL;
si.lun = 0;
msg.netfn = IPMI_NETFN_APP_REQUEST;
msg.cmd = IPMI_GET_DEVICE_ID_CMD;
msg.data = NULL;
msg.data_len = 0;
return i_ipmi_request(NULL,
intf,
(struct ipmi_addr *) &si,
0,
&msg,
intf,
NULL,
NULL,
0,
intf->addrinfo[0].address,
intf->addrinfo[0].lun,
-1, 0);
}
static int __get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc)
{
int rv;
bmc->dyn_id_set = 2;
intf->null_user_handler = bmc_device_id_handler;
rv = send_get_device_id_cmd(intf);
if (rv)
return rv;
wait_event(intf->waitq, bmc->dyn_id_set != 2);
if (!bmc->dyn_id_set)
rv = -EIO; /* Something went wrong in the fetch. */
/* dyn_id_set makes the id data available. */
smp_rmb();
intf->null_user_handler = NULL;
return rv;
}
/*
* Fetch the device id for the bmc/interface. You must pass in either
* bmc or intf, this code will get the other one. If the data has
* been recently fetched, this will just use the cached data. Otherwise
* it will run a new fetch.
*
* Except for the first time this is called (in ipmi_register_smi()),
 * this will always return good data.
*/
static int __bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
struct ipmi_device_id *id,
bool *guid_set, guid_t *guid, int intf_num)
{
int rv = 0;
int prev_dyn_id_set, prev_guid_set;
bool intf_set = intf != NULL;
if (!intf) {
mutex_lock(&bmc->dyn_mutex);
retry_bmc_lock:
if (list_empty(&bmc->intfs)) {
mutex_unlock(&bmc->dyn_mutex);
return -ENOENT;
}
intf = list_first_entry(&bmc->intfs, struct ipmi_smi,
bmc_link);
kref_get(&intf->refcount);
mutex_unlock(&bmc->dyn_mutex);
mutex_lock(&intf->bmc_reg_mutex);
mutex_lock(&bmc->dyn_mutex);
if (intf != list_first_entry(&bmc->intfs, struct ipmi_smi,
bmc_link)) {
mutex_unlock(&intf->bmc_reg_mutex);
kref_put(&intf->refcount, intf_free);
goto retry_bmc_lock;
}
} else {
mutex_lock(&intf->bmc_reg_mutex);
bmc = intf->bmc;
mutex_lock(&bmc->dyn_mutex);
kref_get(&intf->refcount);
}
/* If we have a valid and current ID, just return that. */
if (intf->in_bmc_register ||
(bmc->dyn_id_set && time_is_after_jiffies(bmc->dyn_id_expiry)))
goto out_noprocessing;
prev_guid_set = bmc->dyn_guid_set;
__get_guid(intf);
prev_dyn_id_set = bmc->dyn_id_set;
rv = __get_device_id(intf, bmc);
if (rv)
goto out;
/*
* The guid, device id, manufacturer id, and product id should
* not change on a BMC. If it does we have to do some dancing.
*/
if (!intf->bmc_registered
|| (!prev_guid_set && bmc->dyn_guid_set)
|| (!prev_dyn_id_set && bmc->dyn_id_set)
|| (prev_guid_set && bmc->dyn_guid_set
&& !guid_equal(&bmc->guid, &bmc->fetch_guid))
|| bmc->id.device_id != bmc->fetch_id.device_id
|| bmc->id.manufacturer_id != bmc->fetch_id.manufacturer_id
|| bmc->id.product_id != bmc->fetch_id.product_id) {
struct ipmi_device_id id = bmc->fetch_id;
int guid_set = bmc->dyn_guid_set;
guid_t guid;
guid = bmc->fetch_guid;
mutex_unlock(&bmc->dyn_mutex);
__ipmi_bmc_unregister(intf);
/* Fill in the temporary BMC for good measure. */
intf->bmc->id = id;
intf->bmc->dyn_guid_set = guid_set;
intf->bmc->guid = guid;
if (__ipmi_bmc_register(intf, &id, guid_set, &guid, intf_num))
need_waiter(intf); /* Retry later on an error. */
else
__scan_channels(intf, &id);
if (!intf_set) {
/*
* We weren't given the interface on the
* command line, so restart the operation on
* the next interface for the BMC.
*/
mutex_unlock(&intf->bmc_reg_mutex);
mutex_lock(&bmc->dyn_mutex);
goto retry_bmc_lock;
}
/* We have a new BMC, set it up. */
bmc = intf->bmc;
mutex_lock(&bmc->dyn_mutex);
goto out_noprocessing;
} else if (memcmp(&bmc->fetch_id, &bmc->id, sizeof(bmc->id)))
		/* Version info changed, so scan the channels again. */
__scan_channels(intf, &bmc->fetch_id);
bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY;
out:
if (rv && prev_dyn_id_set) {
rv = 0; /* Ignore failures if we have previous data. */
bmc->dyn_id_set = prev_dyn_id_set;
}
if (!rv) {
bmc->id = bmc->fetch_id;
if (bmc->dyn_guid_set)
bmc->guid = bmc->fetch_guid;
else if (prev_guid_set)
/*
* The guid used to be valid and it failed to fetch,
* just use the cached value.
*/
bmc->dyn_guid_set = prev_guid_set;
}
out_noprocessing:
if (!rv) {
if (id)
*id = bmc->id;
if (guid_set)
*guid_set = bmc->dyn_guid_set;
if (guid && bmc->dyn_guid_set)
*guid = bmc->guid;
}
mutex_unlock(&bmc->dyn_mutex);
mutex_unlock(&intf->bmc_reg_mutex);
kref_put(&intf->refcount, intf_free);
return rv;
}
static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
struct ipmi_device_id *id,
bool *guid_set, guid_t *guid)
{
return __bmc_get_device_id(intf, bmc, id, guid_set, guid, -1);
}
static ssize_t device_id_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct bmc_device *bmc = to_bmc_device(dev);
struct ipmi_device_id id;
int rv;
rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
if (rv)
return rv;
return snprintf(buf, 10, "%u\n", id.device_id);
}
static DEVICE_ATTR_RO(device_id);
static ssize_t provides_device_sdrs_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct bmc_device *bmc = to_bmc_device(dev);
struct ipmi_device_id id;
int rv;
rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
if (rv)
return rv;
return snprintf(buf, 10, "%u\n", (id.device_revision & 0x80) >> 7);
}
static DEVICE_ATTR_RO(provides_device_sdrs);
static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct bmc_device *bmc = to_bmc_device(dev);
struct ipmi_device_id id;
int rv;
rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
if (rv)
return rv;
return snprintf(buf, 20, "%u\n", id.device_revision & 0x0F);
}
static DEVICE_ATTR_RO(revision);
static ssize_t firmware_revision_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct bmc_device *bmc = to_bmc_device(dev);
struct ipmi_device_id id;
int rv;
rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
if (rv)
return rv;
return snprintf(buf, 20, "%u.%x\n", id.firmware_revision_1,
id.firmware_revision_2);
}
static DEVICE_ATTR_RO(firmware_revision);
static ssize_t ipmi_version_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct bmc_device *bmc = to_bmc_device(dev);
struct ipmi_device_id id;
int rv;
rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
if (rv)
return rv;
return snprintf(buf, 20, "%u.%u\n",
ipmi_version_major(&id),
ipmi_version_minor(&id));
}
static DEVICE_ATTR_RO(ipmi_version);
static ssize_t add_dev_support_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct bmc_device *bmc = to_bmc_device(dev);
struct ipmi_device_id id;
int rv;
rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
if (rv)
return rv;
return snprintf(buf, 10, "0x%02x\n", id.additional_device_support);
}
static DEVICE_ATTR(additional_device_support, S_IRUGO, add_dev_support_show,
NULL);
static ssize_t manufacturer_id_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct bmc_device *bmc = to_bmc_device(dev);
struct ipmi_device_id id;
int rv;
rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
if (rv)
return rv;
return snprintf(buf, 20, "0x%6.6x\n", id.manufacturer_id);
}
static DEVICE_ATTR_RO(manufacturer_id);
static ssize_t product_id_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct bmc_device *bmc = to_bmc_device(dev);
struct ipmi_device_id id;
int rv;
rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
if (rv)
return rv;
return snprintf(buf, 10, "0x%4.4x\n", id.product_id);
}
static DEVICE_ATTR_RO(product_id);
static ssize_t aux_firmware_rev_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct bmc_device *bmc = to_bmc_device(dev);
struct ipmi_device_id id;
int rv;
rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
if (rv)
return rv;
return snprintf(buf, 21, "0x%02x 0x%02x 0x%02x 0x%02x\n",
id.aux_firmware_revision[3],
id.aux_firmware_revision[2],
id.aux_firmware_revision[1],
id.aux_firmware_revision[0]);
}
static DEVICE_ATTR(aux_firmware_revision, S_IRUGO, aux_firmware_rev_show, NULL);
static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct bmc_device *bmc = to_bmc_device(dev);
bool guid_set;
guid_t guid;
int rv;
rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, &guid);
if (rv)
return rv;
if (!guid_set)
return -ENOENT;
return snprintf(buf, 38, "%pUl\n", guid.b);
}
static DEVICE_ATTR_RO(guid);
static struct attribute *bmc_dev_attrs[] = {
&dev_attr_device_id.attr,
&dev_attr_provides_device_sdrs.attr,
&dev_attr_revision.attr,
&dev_attr_firmware_revision.attr,
&dev_attr_ipmi_version.attr,
&dev_attr_additional_device_support.attr,
&dev_attr_manufacturer_id.attr,
&dev_attr_product_id.attr,
&dev_attr_aux_firmware_revision.attr,
&dev_attr_guid.attr,
NULL
};
static umode_t bmc_dev_attr_is_visible(struct kobject *kobj,
struct attribute *attr, int idx)
{
struct device *dev = kobj_to_dev(kobj);
struct bmc_device *bmc = to_bmc_device(dev);
umode_t mode = attr->mode;
int rv;
if (attr == &dev_attr_aux_firmware_revision.attr) {
struct ipmi_device_id id;
rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
return (!rv && id.aux_firmware_revision_set) ? mode : 0;
}
if (attr == &dev_attr_guid.attr) {
bool guid_set;
rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, NULL);
return (!rv && guid_set) ? mode : 0;
}
return mode;
}
static const struct attribute_group bmc_dev_attr_group = {
.attrs = bmc_dev_attrs,
.is_visible = bmc_dev_attr_is_visible,
};
static const struct attribute_group *bmc_dev_attr_groups[] = {
&bmc_dev_attr_group,
NULL
};
static const struct device_type bmc_device_type = {
.groups = bmc_dev_attr_groups,
};
static int __find_bmc_guid(struct device *dev, void *data)
{
guid_t *guid = data;
struct bmc_device *bmc;
int rv;
if (dev->type != &bmc_device_type)
return 0;
bmc = to_bmc_device(dev);
rv = bmc->dyn_guid_set && guid_equal(&bmc->guid, guid);
if (rv)
rv = kref_get_unless_zero(&bmc->usecount);
return rv;
}
/*
* Returns with the bmc's usecount incremented, if it is non-NULL.
*/
static struct bmc_device *ipmi_find_bmc_guid(struct device_driver *drv,
guid_t *guid)
{
struct device *dev;
struct bmc_device *bmc = NULL;
dev = driver_find_device(drv, NULL, guid, __find_bmc_guid);
if (dev) {
bmc = to_bmc_device(dev);
put_device(dev);
}
return bmc;
}
struct prod_dev_id {
unsigned int product_id;
unsigned char device_id;
};
static int __find_bmc_prod_dev_id(struct device *dev, void *data)
{
struct prod_dev_id *cid = data;
struct bmc_device *bmc;
int rv;
if (dev->type != &bmc_device_type)
return 0;
bmc = to_bmc_device(dev);
rv = (bmc->id.product_id == cid->product_id
&& bmc->id.device_id == cid->device_id);
if (rv)
rv = kref_get_unless_zero(&bmc->usecount);
return rv;
}
/*
* Returns with the bmc's usecount incremented, if it is non-NULL.
*/
static struct bmc_device *ipmi_find_bmc_prod_dev_id(
struct device_driver *drv,
unsigned int product_id, unsigned char device_id)
{
struct prod_dev_id id = {
.product_id = product_id,
.device_id = device_id,
};
struct device *dev;
struct bmc_device *bmc = NULL;
dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id);
if (dev) {
bmc = to_bmc_device(dev);
put_device(dev);
}
return bmc;
}
static DEFINE_IDA(ipmi_bmc_ida);
static void
release_bmc_device(struct device *dev)
{
kfree(to_bmc_device(dev));
}
static void cleanup_bmc_work(struct work_struct *work)
{
struct bmc_device *bmc = container_of(work, struct bmc_device,
remove_work);
int id = bmc->pdev.id; /* Unregister overwrites id */
platform_device_unregister(&bmc->pdev);
ida_simple_remove(&ipmi_bmc_ida, id);
}
static void
cleanup_bmc_device(struct kref *ref)
{
struct bmc_device *bmc = container_of(ref, struct bmc_device, usecount);
/*
* Remove the platform device in a work queue to avoid issues
* with removing the device attributes while reading a device
* attribute.
*/
schedule_work(&bmc->remove_work);
}
/*
* Must be called with intf->bmc_reg_mutex held.
*/
static void __ipmi_bmc_unregister(struct ipmi_smi *intf)
{
struct bmc_device *bmc = intf->bmc;
if (!intf->bmc_registered)
return;
sysfs_remove_link(&intf->si_dev->kobj, "bmc");
sysfs_remove_link(&bmc->pdev.dev.kobj, intf->my_dev_name);
kfree(intf->my_dev_name);
intf->my_dev_name = NULL;
mutex_lock(&bmc->dyn_mutex);
list_del(&intf->bmc_link);
mutex_unlock(&bmc->dyn_mutex);
intf->bmc = &intf->tmp_bmc;
kref_put(&bmc->usecount, cleanup_bmc_device);
intf->bmc_registered = false;
}
static void ipmi_bmc_unregister(struct ipmi_smi *intf)
{
mutex_lock(&intf->bmc_reg_mutex);
__ipmi_bmc_unregister(intf);
mutex_unlock(&intf->bmc_reg_mutex);
}
/*
* Must be called with intf->bmc_reg_mutex held.
*/
static int __ipmi_bmc_register(struct ipmi_smi *intf,
struct ipmi_device_id *id,
bool guid_set, guid_t *guid, int intf_num)
{
int rv;
struct bmc_device *bmc;
struct bmc_device *old_bmc;
/*
* platform_device_register() can cause bmc_reg_mutex to
* be claimed because of the is_visible functions of
* the attributes. Eliminate possible recursion and
* release the lock.
*/
intf->in_bmc_register = true;
mutex_unlock(&intf->bmc_reg_mutex);
/*
* Try to find if there is a bmc_device struct already
* representing the interfaced BMC.
*/
mutex_lock(&ipmidriver_mutex);
if (guid_set)
old_bmc = ipmi_find_bmc_guid(&ipmidriver.driver, guid);
else
old_bmc = ipmi_find_bmc_prod_dev_id(&ipmidriver.driver,
id->product_id,
id->device_id);
/*
* If there is already a bmc_device, reuse it; otherwise
* allocate and register a new BMC device.
*/
if (old_bmc) {
bmc = old_bmc;
/*
* Note: old_bmc already has usecount incremented by
* the BMC find functions.
*/
intf->bmc = old_bmc;
mutex_lock(&bmc->dyn_mutex);
list_add_tail(&intf->bmc_link, &bmc->intfs);
mutex_unlock(&bmc->dyn_mutex);
dev_info(intf->si_dev,
"interfacing existing BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
bmc->id.manufacturer_id,
bmc->id.product_id,
bmc->id.device_id);
} else {
bmc = kzalloc(sizeof(*bmc), GFP_KERNEL);
if (!bmc) {
rv = -ENOMEM;
goto out;
}
INIT_LIST_HEAD(&bmc->intfs);
mutex_init(&bmc->dyn_mutex);
INIT_WORK(&bmc->remove_work, cleanup_bmc_work);
bmc->id = *id;
bmc->dyn_id_set = 1;
bmc->dyn_guid_set = guid_set;
bmc->guid = *guid;
bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY;
bmc->pdev.name = "ipmi_bmc";
rv = ida_simple_get(&ipmi_bmc_ida, 0, 0, GFP_KERNEL);
if (rv < 0)
goto out;
bmc->pdev.dev.driver = &ipmidriver.driver;
bmc->pdev.id = rv;
bmc->pdev.dev.release = release_bmc_device;
bmc->pdev.dev.type = &bmc_device_type;
kref_init(&bmc->usecount);
intf->bmc = bmc;
mutex_lock(&bmc->dyn_mutex);
list_add_tail(&intf->bmc_link, &bmc->intfs);
mutex_unlock(&bmc->dyn_mutex);
rv = platform_device_register(&bmc->pdev);
if (rv) {
dev_err(intf->si_dev,
"Unable to register bmc device: %d\n",
rv);
goto out_list_del;
}
dev_info(intf->si_dev,
"Found new BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
bmc->id.manufacturer_id,
bmc->id.product_id,
bmc->id.device_id);
}
/*
* Create symlinks from the system interface device to the bmc
* device and back.
*/
rv = sysfs_create_link(&intf->si_dev->kobj, &bmc->pdev.dev.kobj, "bmc");
if (rv) {
dev_err(intf->si_dev, "Unable to create bmc symlink: %d\n", rv);
goto out_put_bmc;
}
if (intf_num == -1)
intf_num = intf->intf_num;
intf->my_dev_name = kasprintf(GFP_KERNEL, "ipmi%d", intf_num);
if (!intf->my_dev_name) {
rv = -ENOMEM;
dev_err(intf->si_dev, "Unable to allocate link from BMC: %d\n",
rv);
goto out_unlink1;
}
rv = sysfs_create_link(&bmc->pdev.dev.kobj, &intf->si_dev->kobj,
intf->my_dev_name);
if (rv) {
kfree(intf->my_dev_name);
intf->my_dev_name = NULL;
dev_err(intf->si_dev, "Unable to create symlink to bmc: %d\n",
rv);
goto out_free_my_dev_name;
}
intf->bmc_registered = true;
out:
mutex_unlock(&ipmidriver_mutex);
mutex_lock(&intf->bmc_reg_mutex);
intf->in_bmc_register = false;
return rv;
out_free_my_dev_name:
kfree(intf->my_dev_name);
intf->my_dev_name = NULL;
out_unlink1:
sysfs_remove_link(&intf->si_dev->kobj, "bmc");
out_put_bmc:
mutex_lock(&bmc->dyn_mutex);
list_del(&intf->bmc_link);
mutex_unlock(&bmc->dyn_mutex);
intf->bmc = &intf->tmp_bmc;
kref_put(&bmc->usecount, cleanup_bmc_device);
goto out;
out_list_del:
mutex_lock(&bmc->dyn_mutex);
list_del(&intf->bmc_link);
mutex_unlock(&bmc->dyn_mutex);
intf->bmc = &intf->tmp_bmc;
put_device(&bmc->pdev.dev);
goto out;
}
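/* Send a Get Device GUID command to the BMC. */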
static int
send_guid_cmd(struct ipmi_smi *intf, int chan)
{
struct kernel_ipmi_msg msg;
struct ipmi_system_interface_addr si;
si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
si.channel = IPMI_BMC_CHANNEL;
si.lun = 0;
msg.netfn = IPMI_NETFN_APP_REQUEST;
msg.cmd = IPMI_GET_DEVICE_GUID_CMD;
msg.data = NULL;
msg.data_len = 0;
return i_ipmi_request(NULL,
intf,
(struct ipmi_addr *) &si,
0,
&msg,
intf,
NULL,
NULL,
0,
intf->addrinfo[0].address,
intf->addrinfo[0].lun,
-1, 0);
}
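/*
* Response handler for the Get Device GUID command. Stores the
* fetched GUID (or marks it unavailable) and wakes up the waiter
* in __get_guid().
*/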
static void guid_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
{
struct bmc_device *bmc = intf->bmc;
if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
|| (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
|| (msg->msg.cmd != IPMI_GET_DEVICE_GUID_CMD))
/* Not for me */
return;
if (msg->msg.data[0] != 0) {
/* Error from getting the GUID, the BMC doesn't have one. */
bmc->dyn_guid_set = 0;
goto out;
}
if (msg->msg.data_len < 17) {
bmc->dyn_guid_set = 0;
dev_warn(intf->si_dev,
"The GUID response from the BMC was too short, it was %d but should have been 17. Assuming GUID is not available.\n",
msg->msg.data_len);
goto out;
}
memcpy(bmc->fetch_guid.b, msg->msg.data + 1, 16);
/*
* Make sure the guid data is available before setting
* dyn_guid_set.
*/
smp_wmb();
bmc->dyn_guid_set = 1;
out:
wake_up(&intf->waitq);
}
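/*
* Synchronously fetch the GUID from the BMC. dyn_guid_set is used
* as a tri-state here: 2 while the request is in flight, then 1
* (GUID available) or 0 (not available).
*/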
static void __get_guid(struct ipmi_smi *intf)
{
int rv;
struct bmc_device *bmc = intf->bmc;
bmc->dyn_guid_set = 2;
intf->null_user_handler = guid_handler;
rv = send_guid_cmd(intf, 0);
if (rv)
/* Send failed, no GUID available. */
bmc->dyn_guid_set = 0;
wait_event(intf->waitq, bmc->dyn_guid_set != 2);
/* dyn_guid_set makes the guid data available. */
smp_rmb();
intf->null_user_handler = NULL;
}
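/* Send a Get Channel Info command for the given channel to the BMC. */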
static int
send_channel_info_cmd(struct ipmi_smi *intf, int chan)
{
struct kernel_ipmi_msg msg;
unsigned char data[1];
struct ipmi_system_interface_addr si;
si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
si.channel = IPMI_BMC_CHANNEL;
si.lun = 0;
msg.netfn = IPMI_NETFN_APP_REQUEST;
msg.cmd = IPMI_GET_CHANNEL_INFO_CMD;
msg.data = data;
msg.data_len = 1;
data[0] = chan;
return i_ipmi_request(NULL,
intf,
(struct ipmi_addr *) &si,
0,
&msg,
intf,
NULL,
NULL,
0,
intf->addrinfo[0].address,
intf->addrinfo[0].lun,
-1, 0);
}
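/*
* Response handler for Get Channel Info. Records the medium and
* protocol of each channel in turn and marks the channel list
* ready once all channels have been scanned.
*/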
static void
channel_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
{
int rv = 0;
int ch;
unsigned int set = intf->curr_working_cset;
struct ipmi_channel *chans;
if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
&& (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
&& (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD)) {
/* It's the one we want */
if (msg->msg.data[0] != 0) {
/* Got an error from the channel, just go on. */
if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) {
/*
* If the MC does not support this
* command, that is legal. We just
* assume it has one IPMB channel at
* channel zero.
*/
intf->wchannels[set].c[0].medium
= IPMI_CHANNEL_MEDIUM_IPMB;
intf->wchannels[set].c[0].protocol
= IPMI_CHANNEL_PROTOCOL_IPMB;
intf->channel_list = intf->wchannels + set;
intf->channels_ready = true;
wake_up(&intf->waitq);
goto out;
}
goto next_channel;
}
if (msg->msg.data_len < 4) {
/* Message not big enough, just go on. */
goto next_channel;
}
ch = intf->curr_channel;
chans = intf->wchannels[set].c;
chans[ch].medium = msg->msg.data[2] & 0x7f;
chans[ch].protocol = msg->msg.data[3] & 0x1f;
next_channel:
intf->curr_channel++;
if (intf->curr_channel >= IPMI_MAX_CHANNELS) {
intf->channel_list = intf->wchannels + set;
intf->channels_ready = true;
wake_up(&intf->waitq);
} else {
intf->channel_list = intf->wchannels + set;
intf->channels_ready = true;
rv = send_channel_info_cmd(intf, intf->curr_channel);
}
if (rv) {
/* Got an error somehow, just give up. */
dev_warn(intf->si_dev,
"Error sending channel information for channel %d: %d\n",
intf->curr_channel, rv);
intf->channel_list = intf->wchannels + set;
intf->channels_ready = true;
wake_up(&intf->waitq);
}
}
out:
return;
}
/*
* Must be holding intf->bmc_reg_mutex to call this.
*/
static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id)
{
int rv;
if (ipmi_version_major(id) > 1
|| (ipmi_version_major(id) == 1
&& ipmi_version_minor(id) >= 5)) {
unsigned int set;
/*
* Start scanning the channels to see what is
* available.
*/
set = !intf->curr_working_cset;
intf->curr_working_cset = set;
memset(&intf->wchannels[set], 0,
sizeof(struct ipmi_channel_set));
intf->null_user_handler = channel_handler;
intf->curr_channel = 0;
rv = send_channel_info_cmd(intf, 0);
if (rv) {
dev_warn(intf->si_dev,
"Error sending channel information for channel 0, %d\n",
rv);
return -EIO;
}
/* Wait for the channel info to be read. */
wait_event(intf->waitq, intf->channels_ready);
intf->null_user_handler = NULL;
} else {
unsigned int set = intf->curr_working_cset;
/* Assume a single IPMB channel at zero. */
intf->wchannels[set].c[0].medium = IPMI_CHANNEL_MEDIUM_IPMB;
intf->wchannels[set].c[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB;
intf->channel_list = intf->wchannels + set;
intf->channels_ready = true;
}
return 0;
}
static void ipmi_poll(struct ipmi_smi *intf)
{
if (intf->handlers->poll)
intf->handlers->poll(intf->send_info);
/* In case something came in */
handle_new_recv_msgs(intf);
}
void ipmi_poll_interface(struct ipmi_user *user)
{
ipmi_poll(user->intf);
}
EXPORT_SYMBOL(ipmi_poll_interface);
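/*
* Work function to redo the BMC registration, scheduled from the
* timeout handler when the BMC is not registered yet.
*/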
static void redo_bmc_reg(struct work_struct *work)
{
struct ipmi_smi *intf = container_of(work, struct ipmi_smi,
bmc_reg_work);
if (!intf->in_shutdown)
bmc_get_device_id(intf, NULL, NULL, NULL, NULL);
kref_put(&intf->refcount, intf_free);
}
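/*
* Register a new system interface (SMI) with the message handler,
* assign it an interface number and scan its channels.
*/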
int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
void *send_info,
struct device *si_dev,
unsigned char slave_addr)
{
int i, j;
int rv;
struct ipmi_smi *intf, *tintf;
struct list_head *link;
struct ipmi_device_id id;
/*
* Make sure the driver is actually initialized, this handles
* problems with initialization order.
*/
if (!initialized) {
rv = ipmi_init_msghandler();
if (rv)
return rv;
/*
* The init code doesn't return an error if it was turned
* off, but it won't initialize. Check that.
*/
if (!initialized)
return -ENODEV;
}
intf = kzalloc(sizeof(*intf), GFP_KERNEL);
if (!intf)
return -ENOMEM;
rv = init_srcu_struct(&intf->users_srcu);
if (rv) {
kfree(intf);
return rv;
}
intf->bmc = &intf->tmp_bmc;
INIT_LIST_HEAD(&intf->bmc->intfs);
mutex_init(&intf->bmc->dyn_mutex);
INIT_LIST_HEAD(&intf->bmc_link);
mutex_init(&intf->bmc_reg_mutex);
intf->intf_num = -1; /* Mark it invalid for now. */
kref_init(&intf->refcount);
INIT_WORK(&intf->bmc_reg_work, redo_bmc_reg);
intf->si_dev = si_dev;
for (j = 0; j < IPMI_MAX_CHANNELS; j++) {
intf->addrinfo[j].address = IPMI_BMC_SLAVE_ADDR;
intf->addrinfo[j].lun = 2;
}
if (slave_addr != 0)
intf->addrinfo[0].address = slave_addr;
INIT_LIST_HEAD(&intf->users);
intf->handlers = handlers;
intf->send_info = send_info;
spin_lock_init(&intf->seq_lock);
for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) {
intf->seq_table[j].inuse = 0;
intf->seq_table[j].seqid = 0;
}
intf->curr_seq = 0;
spin_lock_init(&intf->waiting_rcv_msgs_lock);
INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
tasklet_init(&intf->recv_tasklet,
smi_recv_tasklet,
(unsigned long) intf);
atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0);
spin_lock_init(&intf->xmit_msgs_lock);
INIT_LIST_HEAD(&intf->xmit_msgs);
INIT_LIST_HEAD(&intf->hp_xmit_msgs);
spin_lock_init(&intf->events_lock);
atomic_set(&intf->event_waiters, 0);
intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
INIT_LIST_HEAD(&intf->waiting_events);
intf->waiting_events_count = 0;
mutex_init(&intf->cmd_rcvrs_mutex);
spin_lock_init(&intf->maintenance_mode_lock);
INIT_LIST_HEAD(&intf->cmd_rcvrs);
init_waitqueue_head(&intf->waitq);
for (i = 0; i < IPMI_NUM_STATS; i++)
atomic_set(&intf->stats[i], 0);
mutex_lock(&ipmi_interfaces_mutex);
/* Look for a hole in the numbers. */
i = 0;
link = &ipmi_interfaces;
list_for_each_entry_rcu(tintf, &ipmi_interfaces, link) {
if (tintf->intf_num != i) {
link = &tintf->link;
break;
}
i++;
}
/* Add the new interface in numeric order. */
if (i == 0)
list_add_rcu(&intf->link, &ipmi_interfaces);
else
list_add_tail_rcu(&intf->link, link);
rv = handlers->start_processing(send_info, intf);
if (rv)
goto out_err;
rv = __bmc_get_device_id(intf, NULL, &id, NULL, NULL, i);
if (rv) {
dev_err(si_dev, "Unable to get the device id: %d\n", rv);
goto out_err_started;
}
mutex_lock(&intf->bmc_reg_mutex);
rv = __scan_channels(intf, &id);
mutex_unlock(&intf->bmc_reg_mutex);
if (rv)
goto out_err_bmc_reg;
/*
* Keep memory order straight for RCU readers. Make
* sure everything else is committed to memory before
* setting intf_num to mark the interface valid.
*/
smp_wmb();
intf->intf_num = i;
mutex_unlock(&ipmi_interfaces_mutex);
/* After this point the interface is legal to use. */
call_smi_watchers(i, intf->si_dev);
return 0;
out_err_bmc_reg:
ipmi_bmc_unregister(intf);
out_err_started:
if (intf->handlers->shutdown)
intf->handlers->shutdown(intf->send_info);
out_err:
list_del_rcu(&intf->link);
mutex_unlock(&ipmi_interfaces_mutex);
synchronize_srcu(&ipmi_interfaces_srcu);
cleanup_srcu_struct(&intf->users_srcu);
kref_put(&intf->refcount, intf_free);
return rv;
}
EXPORT_SYMBOL(ipmi_register_smi);
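/*
* Build an error response for a message that could not be sent
* and deliver it through the normal receive path.
*/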
static void deliver_smi_err_response(struct ipmi_smi *intf,
struct ipmi_smi_msg *msg,
unsigned char err)
{
msg->rsp[0] = msg->data[0] | 4;
msg->rsp[1] = msg->data[1];
msg->rsp[2] = err;
msg->rsp_size = 3;
/* It's an error, so it will never requeue, no need to check return. */
handle_one_recv_msg(intf, msg);
}
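/*
* Flush all messages queued on an interface that is going down,
* delivering error responses for everything still pending.
*/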
static void cleanup_smi_msgs(struct ipmi_smi *intf)
{
int i;
struct seq_table *ent;
struct ipmi_smi_msg *msg;
struct list_head *entry;
struct list_head tmplist;
/* Clear out our transmit queues and hold the messages. */
INIT_LIST_HEAD(&tmplist);
list_splice_tail(&intf->hp_xmit_msgs, &tmplist);
list_splice_tail(&intf->xmit_msgs, &tmplist);
/* Current message first, to preserve order */
while (intf->curr_msg && !list_empty(&intf->waiting_rcv_msgs)) {
/* Wait for the message to clear out. */
schedule_timeout(1);
}
/* No need for locks, the interface is down. */
/*
* Return errors for all pending messages in queue and in the
* tables waiting for remote responses.
*/
while (!list_empty(&tmplist)) {
entry = tmplist.next;
list_del(entry);
msg = list_entry(entry, struct ipmi_smi_msg, link);
deliver_smi_err_response(intf, msg, IPMI_ERR_UNSPECIFIED);
}
for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
ent = &intf->seq_table[i];
if (!ent->inuse)
continue;
deliver_err_response(intf, ent->recv_msg, IPMI_ERR_UNSPECIFIED);
}
}
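/*
* Unregister a system interface: take it off the interface list,
* tell the watchers it is gone, destroy all its users, then shut
* down the lower layer and release the BMC.
*/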
void ipmi_unregister_smi(struct ipmi_smi *intf)
{
struct ipmi_smi_watcher *w;
int intf_num = intf->intf_num, index;
mutex_lock(&ipmi_interfaces_mutex);
intf->intf_num = -1;
intf->in_shutdown = true;
list_del_rcu(&intf->link);
mutex_unlock(&ipmi_interfaces_mutex);
synchronize_srcu(&ipmi_interfaces_srcu);
/* At this point no users can be added to the interface. */
/*
* Call all the watcher interfaces to tell them that
* an interface is going away.
*/
mutex_lock(&smi_watchers_mutex);
list_for_each_entry(w, &smi_watchers, link)
w->smi_gone(intf_num);
mutex_unlock(&smi_watchers_mutex);
index = srcu_read_lock(&intf->users_srcu);
while (!list_empty(&intf->users)) {
struct ipmi_user *user =
container_of(list_next_rcu(&intf->users),
struct ipmi_user, link);
_ipmi_destroy_user(user);
}
srcu_read_unlock(&intf->users_srcu, index);
if (intf->handlers->shutdown)
intf->handlers->shutdown(intf->send_info);
cleanup_smi_msgs(intf);
ipmi_bmc_unregister(intf);
cleanup_srcu_struct(&intf->users_srcu);
kref_put(&intf->refcount, intf_free);
}
EXPORT_SYMBOL(ipmi_unregister_smi);
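/*
* Handle a response that came back over IPMB: look up the matching
* sequence table entry and deliver the response to the user that
* sent the original request.
*/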
static int handle_ipmb_get_msg_rsp(struct ipmi_smi *intf,
struct ipmi_smi_msg *msg)
{
struct ipmi_ipmb_addr ipmb_addr;
struct ipmi_recv_msg *recv_msg;
/*
* This is 11, not 10, because the response must contain a
* completion code.
*/
if (msg->rsp_size < 11) {
/* Message not big enough, just ignore it. */
ipmi_inc_stat(intf, invalid_ipmb_responses);
return 0;
}
if (msg->rsp[2] != 0) {
/* An error getting the response, just ignore it. */
return 0;
}
ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE;
ipmb_addr.slave_addr = msg->rsp[6];
ipmb_addr.channel = msg->rsp[3] & 0x0f;
ipmb_addr.lun = msg->rsp[7] & 3;
/*
* It's a response from a remote entity. Look up the sequence
* number and handle the response.
*/
if (intf_find_seq(intf,
msg->rsp[7] >> 2,
msg->rsp[3] & 0x0f,
msg->rsp[8],
(msg->rsp[4] >> 2) & (~1),
(struct ipmi_addr *) &ipmb_addr,
&recv_msg)) {
/*
* We were unable to find the sequence number,
* so just nuke the message.
*/
ipmi_inc_stat(intf, unhandled_ipmb_responses);
return 0;
}
memcpy(recv_msg->msg_data, &msg->rsp[9], msg->rsp_size - 9);
/*
* The other fields matched, so no need to set them, except
* for netfn, which needs to be the response that was
* returned, not the request value.
*/
recv_msg->msg.netfn = msg->rsp[4] >> 2;
recv_msg->msg.data = recv_msg->msg_data;
recv_msg->msg.data_len = msg->rsp_size - 10;
recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
if (deliver_response(intf, recv_msg))
ipmi_inc_stat(intf, unhandled_ipmb_responses);
else
ipmi_inc_stat(intf, handled_ipmb_responses);
return 0;
}
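/*
* Handle a command received over IPMB. If a user has registered
* for the command, deliver it; otherwise send an "invalid command"
* response back to the originator.
*/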
static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf,
struct ipmi_smi_msg *msg)
{
struct cmd_rcvr *rcvr;
int rv = 0;
unsigned char netfn;
unsigned char cmd;
unsigned char chan;
struct ipmi_user *user = NULL;
struct ipmi_ipmb_addr *ipmb_addr;
struct ipmi_recv_msg *recv_msg;
if (msg->rsp_size < 10) {
/* Message not big enough, just ignore it. */
ipmi_inc_stat(intf, invalid_commands);
return 0;
}
if (msg->rsp[2] != 0) {
/* An error getting the response, just ignore it. */
return 0;
}
netfn = msg->rsp[4] >> 2;
cmd = msg->rsp[8];
chan = msg->rsp[3] & 0xf;
rcu_read_lock();
rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
if (rcvr) {
user = rcvr->user;
kref_get(&user->refcount);
} else
user = NULL;
rcu_read_unlock();
if (user == NULL) {
/* We didn't find a user, deliver an error response. */
ipmi_inc_stat(intf, unhandled_commands);
msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
msg->data[1] = IPMI_SEND_MSG_CMD;
msg->data[2] = msg->rsp[3];
msg->data[3] = msg->rsp[6];
msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3);
msg->data[5] = ipmb_checksum(&msg->data[3], 2);
msg->data[6] = intf->addrinfo[msg->rsp[3] & 0xf].address;
/* rqseq/lun */
msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3);
msg->data[8] = msg->rsp[8]; /* cmd */
msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE;
msg->data[10] = ipmb_checksum(&msg->data[6], 4);
msg->data_size = 11;
ipmi_debug_msg("Invalid command:", msg->data, msg->data_size);
rcu_read_lock();
if (!intf->in_shutdown) {
smi_send(intf, intf->handlers, msg, 0);
/*
* We used the message, so return the value
* that causes it to not be freed or
* queued.
*/
rv = -1;
}
rcu_read_unlock();
} else {
recv_msg = ipmi_alloc_recv_msg();
if (!recv_msg) {
/*
* We couldn't allocate memory for the
* message, so requeue it for handling
* later.
*/
rv = 1;
kref_put(&user->refcount, free_user);
} else {
/* Extract the source address from the data. */
ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE;
ipmb_addr->slave_addr = msg->rsp[6];
ipmb_addr->lun = msg->rsp[7] & 3;
ipmb_addr->channel = msg->rsp[3] & 0xf;
/*
* Extract the rest of the message information
* from the IPMB header.
*/
recv_msg->user = user;
recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
recv_msg->msgid = msg->rsp[7] >> 2;
recv_msg->msg.netfn = msg->rsp[4] >> 2;
recv_msg->msg.cmd = msg->rsp[8];
recv_msg->msg.data = recv_msg->msg_data;
/*
* We chop off 10, not 9 bytes because the checksum
* at the end also needs to be removed.
*/
recv_msg->msg.data_len = msg->rsp_size - 10;
memcpy(recv_msg->msg_data, &msg->rsp[9],
msg->rsp_size - 10);
if (deliver_response(intf, recv_msg))
ipmi_inc_stat(intf, unhandled_commands);
else
ipmi_inc_stat(intf, handled_commands);
}
}
return rv;
}
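/*
* Handle a response that came back over a LAN channel, matching it
* to its sequence table entry and delivering it to the requester.
*/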
static int handle_lan_get_msg_rsp(struct ipmi_smi *intf,
struct ipmi_smi_msg *msg)
{
struct ipmi_lan_addr lan_addr;
struct ipmi_recv_msg *recv_msg;
/*
* This is 13, not 12, because the response must contain a
* completion code.
*/
if (msg->rsp_size < 13) {
/* Message not big enough, just ignore it. */
ipmi_inc_stat(intf, invalid_lan_responses);
return 0;
}
if (msg->rsp[2] != 0) {
/* An error getting the response, just ignore it. */
return 0;
}
lan_addr.addr_type = IPMI_LAN_ADDR_TYPE;
lan_addr.session_handle = msg->rsp[4];
lan_addr.remote_SWID = msg->rsp[8];
lan_addr.local_SWID = msg->rsp[5];
lan_addr.channel = msg->rsp[3] & 0x0f;
lan_addr.privilege = msg->rsp[3] >> 4;
lan_addr.lun = msg->rsp[9] & 3;
/*
* It's a response from a remote entity. Look up the sequence
* number and handle the response.
*/
if (intf_find_seq(intf,
msg->rsp[9] >> 2,
msg->rsp[3] & 0x0f,
msg->rsp[10],
(msg->rsp[6] >> 2) & (~1),
(struct ipmi_addr *) &lan_addr,
&recv_msg)) {
/*
* We were unable to find the sequence number,
* so just nuke the message.
*/
ipmi_inc_stat(intf, unhandled_lan_responses);
return 0;
}
memcpy(recv_msg->msg_data, &msg->rsp[11], msg->rsp_size - 11);
/*
* The other fields matched, so no need to set them, except
* for netfn, which needs to be the response that was
* returned, not the request value.
*/
recv_msg->msg.netfn = msg->rsp[6] >> 2;
recv_msg->msg.data = recv_msg->msg_data;
recv_msg->msg.data_len = msg->rsp_size - 12;
recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
if (deliver_response(intf, recv_msg))
ipmi_inc_stat(intf, unhandled_lan_responses);
else
ipmi_inc_stat(intf, handled_lan_responses);
return 0;
}
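/*
* Handle a command received over a LAN channel. Unlike IPMB,
* commands with no registered user are simply dropped.
*/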
static int handle_lan_get_msg_cmd(struct ipmi_smi *intf,
struct ipmi_smi_msg *msg)
{
struct cmd_rcvr *rcvr;
int rv = 0;
unsigned char netfn;
unsigned char cmd;
unsigned char chan;
struct ipmi_user *user = NULL;
struct ipmi_lan_addr *lan_addr;
struct ipmi_recv_msg *recv_msg;
if (msg->rsp_size < 12) {
/* Message not big enough, just ignore it. */
ipmi_inc_stat(intf, invalid_commands);
return 0;
}
if (msg->rsp[2] != 0) {
/* An error getting the response, just ignore it. */
return 0;
}
netfn = msg->rsp[6] >> 2;
cmd = msg->rsp[10];
chan = msg->rsp[3] & 0xf;
rcu_read_lock();
rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
if (rcvr) {
user = rcvr->user;
kref_get(&user->refcount);
} else
user = NULL;
rcu_read_unlock();
if (user == NULL) {
/* We didn't find a user, just give up. */
ipmi_inc_stat(intf, unhandled_commands);
/*
* Don't do anything with these messages, just allow
* them to be freed.
*/
rv = 0;
} else {
recv_msg = ipmi_alloc_recv_msg();
if (!recv_msg) {
/*
* We couldn't allocate memory for the
* message, so requeue it for handling later.
*/
rv = 1;
kref_put(&user->refcount, free_user);
} else {
/* Extract the source address from the data. */
lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
lan_addr->session_handle = msg->rsp[4];
lan_addr->remote_SWID = msg->rsp[8];
lan_addr->local_SWID = msg->rsp[5];
lan_addr->lun = msg->rsp[9] & 3;
lan_addr->channel = msg->rsp[3] & 0xf;
lan_addr->privilege = msg->rsp[3] >> 4;
/*
* Extract the rest of the message information
* from the IPMB header.
*/
recv_msg->user = user;
recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
recv_msg->msgid = msg->rsp[9] >> 2;
recv_msg->msg.netfn = msg->rsp[6] >> 2;
recv_msg->msg.cmd = msg->rsp[10];
recv_msg->msg.data = recv_msg->msg_data;
/*
* We chop off 12, not 11 bytes because the checksum
* at the end also needs to be removed.
*/
recv_msg->msg.data_len = msg->rsp_size - 12;
memcpy(recv_msg->msg_data, &msg->rsp[11],
msg->rsp_size - 12);
if (deliver_response(intf, recv_msg))
ipmi_inc_stat(intf, unhandled_commands);
else
ipmi_inc_stat(intf, handled_commands);
}
}
return rv;
}
/*
* This routine will handle "Get Message" command responses with
* channels that use an OEM Medium. The message format belongs to
* the OEM. See IPMI 2.0 specification, Chapter 6 and
* Chapter 22, sections 22.6 and 22.24 for more details.
*/
static int handle_oem_get_msg_cmd(struct ipmi_smi *intf,
struct ipmi_smi_msg *msg)
{
struct cmd_rcvr *rcvr;
int rv = 0;
unsigned char netfn;
unsigned char cmd;
unsigned char chan;
struct ipmi_user *user = NULL;
struct ipmi_system_interface_addr *smi_addr;
struct ipmi_recv_msg *recv_msg;
/*
* We expect the OEM SW to perform error checking
* so we just do some basic sanity checks
*/
if (msg->rsp_size < 4) {
/* Message not big enough, just ignore it. */
ipmi_inc_stat(intf, invalid_commands);
return 0;
}
if (msg->rsp[2] != 0) {
/* An error getting the response, just ignore it. */
return 0;
}
/*
* This is an OEM Message so the OEM needs to know how
* to handle the message. We do no interpretation.
*/
netfn = msg->rsp[0] >> 2;
cmd = msg->rsp[1];
chan = msg->rsp[3] & 0xf;
rcu_read_lock();
rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
if (rcvr) {
user = rcvr->user;
kref_get(&user->refcount);
} else
user = NULL;
rcu_read_unlock();
if (user == NULL) {
/* We didn't find a user, just give up. */
ipmi_inc_stat(intf, unhandled_commands);
/*
* Don't do anything with these messages, just allow
* them to be freed.
*/
rv = 0;
} else {
recv_msg = ipmi_alloc_recv_msg();
if (!recv_msg) {
/*
* We couldn't allocate memory for the
* message, so requeue it for handling
* later.
*/
rv = 1;
kref_put(&user->refcount, free_user);
} else {
/*
* OEM Messages are expected to be delivered via
* the system interface to SMS software. We might
* need to revisit this depending on OEM
* requirements.
*/
smi_addr = ((struct ipmi_system_interface_addr *)
&recv_msg->addr);
smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
smi_addr->channel = IPMI_BMC_CHANNEL;
smi_addr->lun = msg->rsp[0] & 3;
recv_msg->user = user;
recv_msg->user_msg_data = NULL;
recv_msg->recv_type = IPMI_OEM_RECV_TYPE;
recv_msg->msg.netfn = msg->rsp[0] >> 2;
recv_msg->msg.cmd = msg->rsp[1];
recv_msg->msg.data = recv_msg->msg_data;
/*
* The message starts at byte 4, which follows the
* Channel Byte in the "GET MESSAGE" command.
*/
recv_msg->msg.data_len = msg->rsp_size - 4;
memcpy(recv_msg->msg_data, &msg->rsp[4],
msg->rsp_size - 4);
if (deliver_response(intf, recv_msg))
ipmi_inc_stat(intf, unhandled_commands);
else
ipmi_inc_stat(intf, handled_commands);
}
}
return rv;
}
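/* Copy an asynchronous event from an SMI message into a receive message. */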
static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg,
struct ipmi_smi_msg *msg)
{
struct ipmi_system_interface_addr *smi_addr;
recv_msg->msgid = 0;
smi_addr = (struct ipmi_system_interface_addr *) &recv_msg->addr;
smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
smi_addr->channel = IPMI_BMC_CHANNEL;
smi_addr->lun = msg->rsp[0] & 3;
recv_msg->recv_type = IPMI_ASYNC_EVENT_RECV_TYPE;
recv_msg->msg.netfn = msg->rsp[0] >> 2;
recv_msg->msg.cmd = msg->rsp[1];
memcpy(recv_msg->msg_data, &msg->rsp[3], msg->rsp_size - 3);
recv_msg->msg.data = recv_msg->msg_data;
recv_msg->msg.data_len = msg->rsp_size - 3;
}
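/*
* Handle an asynchronous event from the BMC: deliver a copy to
* every user that gets events, or queue it (up to
* MAX_EVENTS_IN_QUEUE) if no one is listening.
*/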
static int handle_read_event_rsp(struct ipmi_smi *intf,
struct ipmi_smi_msg *msg)
{
struct ipmi_recv_msg *recv_msg, *recv_msg2;
struct list_head msgs;
struct ipmi_user *user;
int rv = 0, deliver_count = 0, index;
unsigned long flags;
if (msg->rsp_size < 19) {
/* Message is too small to be an IPMB event. */
ipmi_inc_stat(intf, invalid_events);
return 0;
}
if (msg->rsp[2] != 0) {
/* An error getting the event, just ignore it. */
return 0;
}
INIT_LIST_HEAD(&msgs);
spin_lock_irqsave(&intf->events_lock, flags);
ipmi_inc_stat(intf, events);
/*
* Allocate and fill in one message for every user that is
* getting events.
*/
index = srcu_read_lock(&intf->users_srcu);
list_for_each_entry_rcu(user, &intf->users, link) {
if (!user->gets_events)
continue;
recv_msg = ipmi_alloc_recv_msg();
if (!recv_msg) {
rcu_read_unlock();
list_for_each_entry_safe(recv_msg, recv_msg2, &msgs,
link) {
list_del(&recv_msg->link);
ipmi_free_recv_msg(recv_msg);
}
/*
* We couldn't allocate memory for the
* message, so requeue it for handling
* later.
*/
rv = 1;
goto out;
}
deliver_count++;
copy_event_into_recv_msg(recv_msg, msg);
recv_msg->user = user;
kref_get(&user->refcount);
list_add_tail(&recv_msg->link, &msgs);
}
srcu_read_unlock(&intf->users_srcu, index);
if (deliver_count) {
/* Now deliver all the messages. */
list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) {
list_del(&recv_msg->link);
deliver_local_response(intf, recv_msg);
}
} else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) {
/*
* No one to receive the message; put it in the queue if
* there aren't already too many things in it.
*/
recv_msg = ipmi_alloc_recv_msg();
if (!recv_msg) {
/*
* We couldn't allocate memory for the
* message, so requeue it for handling
* later.
*/
rv = 1;
goto out;
}
copy_event_into_recv_msg(recv_msg, msg);
list_add_tail(&recv_msg->link, &intf->waiting_events);
intf->waiting_events_count++;
} else if (!intf->event_msg_printed) {
/*
* There are too many things in the queue, discard this
* message.
*/
dev_warn(intf->si_dev,
"Event queue full, discarding incoming events\n");
intf->event_msg_printed = 1;
}
out:
spin_unlock_irqrestore(&intf->events_lock, flags);
return rv;
}
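/* Handle a response from the local BMC and deliver it to its owner. */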
static int handle_bmc_rsp(struct ipmi_smi *intf,
struct ipmi_smi_msg *msg)
{
struct ipmi_recv_msg *recv_msg;
struct ipmi_system_interface_addr *smi_addr;
recv_msg = (struct ipmi_recv_msg *) msg->user_data;
if (recv_msg == NULL) {
dev_warn(intf->si_dev,
"IPMI message received with no owner. This could be because of a malformed message, or because of a hardware error. Contact your hardware vendor for assistance.\n");
return 0;
}
recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
recv_msg->msgid = msg->msgid;
smi_addr = ((struct ipmi_system_interface_addr *)
&recv_msg->addr);
smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
smi_addr->channel = IPMI_BMC_CHANNEL;
smi_addr->lun = msg->rsp[0] & 3;
recv_msg->msg.netfn = msg->rsp[0] >> 2;
recv_msg->msg.cmd = msg->rsp[1];
memcpy(recv_msg->msg_data, &msg->rsp[2], msg->rsp_size - 2);
recv_msg->msg.data = recv_msg->msg_data;
recv_msg->msg.data_len = msg->rsp_size - 2;
deliver_local_response(intf, recv_msg);
return 0;
}
/*
* Handle a received message. Return 1 if the message should be requeued,
* 0 if the message should be freed, or -1 if the message should not
* be freed or requeued.
*/
static int handle_one_recv_msg(struct ipmi_smi *intf,
struct ipmi_smi_msg *msg)
{
int requeue;
int chan;
ipmi_debug_msg("Recv:", msg->rsp, msg->rsp_size);
if (msg->rsp_size < 2) {
/* Message is too small to be correct. */
dev_warn(intf->si_dev,
"BMC returned too small a message for netfn %x cmd %x, got %d bytes\n",
(msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);
/* Generate an error response for the message. */
msg->rsp[0] = msg->data[0] | (1 << 2);
msg->rsp[1] = msg->data[1];
msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
msg->rsp_size = 3;
} else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))
|| (msg->rsp[1] != msg->data[1])) {
/*
* The NetFN and Command in the response is not even
* marginally correct.
*/
dev_warn(intf->si_dev,
"BMC returned incorrect response, expected netfn %x cmd %x, got netfn %x cmd %x\n",
(msg->data[0] >> 2) | 1, msg->data[1],
msg->rsp[0] >> 2, msg->rsp[1]);
/* Generate an error response for the message. */
msg->rsp[0] = msg->data[0] | (1 << 2);
msg->rsp[1] = msg->data[1];
msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
msg->rsp_size = 3;
}
if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
&& (msg->rsp[1] == IPMI_SEND_MSG_CMD)
&& (msg->user_data != NULL)) {
/*
* It's a response to a response we sent. For this we
* deliver a send message response to the user.
*/
struct ipmi_recv_msg *recv_msg = msg->user_data;
requeue = 0;
if (msg->rsp_size < 2)
/* Message is too small to be correct. */
goto out;
chan = msg->data[2] & 0x0f;
if (chan >= IPMI_MAX_CHANNELS)
/* Invalid channel number */
goto out;
if (!recv_msg)
goto out;
recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE;
recv_msg->msg.data = recv_msg->msg_data;
recv_msg->msg.data_len = 1;
recv_msg->msg_data[0] = msg->rsp[2];
deliver_local_response(intf, recv_msg);
} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
&& (msg->rsp[1] == IPMI_GET_MSG_CMD)) {
struct ipmi_channel *chans;
/* It's from the receive queue. */
chan = msg->rsp[3] & 0xf;
if (chan >= IPMI_MAX_CHANNELS) {
/* Invalid channel number */
requeue = 0;
goto out;
}
/*
* We need to make sure the channels have been initialized.
* The channel_handler routine will set channels_ready when
* all the channels for this interface have been
* initialized.
*/
if (!intf->channels_ready) {
requeue = 0; /* Throw the message away */
goto out;
}
chans = READ_ONCE(intf->channel_list)->c;
switch (chans[chan].medium) {
case IPMI_CHANNEL_MEDIUM_IPMB:
if (msg->rsp[4] & 0x04) {
/*
* It's a response, so find the
* requesting message and send it up.
*/
requeue = handle_ipmb_get_msg_rsp(intf, msg);
} else {
/*
* It's a command to the SMS from some other
* entity. Handle that.
*/
requeue = handle_ipmb_get_msg_cmd(intf, msg);
}
break;
case IPMI_CHANNEL_MEDIUM_8023LAN:
case IPMI_CHANNEL_MEDIUM_ASYNC:
if (msg->rsp[6] & 0x04) {
/*
* It's a response, so find the
* requesting message and send it up.
*/
requeue = handle_lan_get_msg_rsp(intf, msg);
} else {
/*
* It's a command to the SMS from some other
* entity. Handle that.
*/
requeue = handle_lan_get_msg_cmd(intf, msg);
}
break;
default:
/*
* Check for OEM Channels. Clients had better
* register for these commands.
*/
if ((chans[chan].medium >= IPMI_CHANNEL_MEDIUM_OEM_MIN)
&& (chans[chan].medium
<= IPMI_CHANNEL_MEDIUM_OEM_MAX)) {
requeue = handle_oem_get_msg_cmd(intf, msg);
} else {
/*
* We don't handle the channel type, so just
* free the message.
*/
requeue = 0;
}
}
} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
&& (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD)) {
/* It's an asynchronous event. */
requeue = handle_read_event_rsp(intf, msg);
} else {
/* It's a response from the local BMC. */
requeue = handle_bmc_rsp(intf, msg);
}
out:
return requeue;
}
/*
* If there are messages in the queue or pretimeouts, handle them.
*/
static void handle_new_recv_msgs(struct ipmi_smi *intf)
{
struct ipmi_smi_msg *smi_msg;
unsigned long flags = 0;
int rv;
int run_to_completion = intf->run_to_completion;
/* See if any waiting messages need to be processed. */
if (!run_to_completion)
spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
while (!list_empty(&intf->waiting_rcv_msgs)) {
smi_msg = list_entry(intf->waiting_rcv_msgs.next,
struct ipmi_smi_msg, link);
list_del(&smi_msg->link);
if (!run_to_completion)
spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
flags);
rv = handle_one_recv_msg(intf, smi_msg);
if (!run_to_completion)
spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
if (rv > 0) {
/*
* To preserve message order, quit if we
* can't handle a message. Add the message
* back at the head; this is safe because this
* tasklet is the only thing that pulls the
* messages.
*/
list_add(&smi_msg->link, &intf->waiting_rcv_msgs);
break;
} else {
if (rv == 0)
/* Message handled */
ipmi_free_smi_msg(smi_msg);
/* If rv < 0, fatal error, del but don't free. */
}
}
if (!run_to_completion)
spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, flags);
/*
* If the pretimeout count is non-zero, decrement one from it and
* deliver pretimeouts to all the users.
*/
if (atomic_add_unless(&intf->watchdog_pretimeouts_to_deliver, -1, 0)) {
struct ipmi_user *user;
int index;
index = srcu_read_lock(&intf->users_srcu);
list_for_each_entry_rcu(user, &intf->users, link) {
if (user->handler->ipmi_watchdog_pretimeout)
user->handler->ipmi_watchdog_pretimeout(
user->handler_data);
}
srcu_read_unlock(&intf->users_srcu, index);
}
}
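/*
* Tasklet that starts the next queued outgoing message, if any,
* and then processes the incoming message queue.
*/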
static void smi_recv_tasklet(unsigned long val)
{
unsigned long flags = 0; /* keep us warning-free. */
struct ipmi_smi *intf = (struct ipmi_smi *) val;
int run_to_completion = intf->run_to_completion;
struct ipmi_smi_msg *newmsg = NULL;
/*
* Start the next message if available.
*
* Do this here, not in the actual receiver, because we may
* deadlock: the lower layer is allowed to hold locks while
* calling message delivery.
*/
rcu_read_lock();
if (!run_to_completion)
spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
if (intf->curr_msg == NULL && !intf->in_shutdown) {
struct list_head *entry = NULL;
/* Pick the high priority queue first. */
if (!list_empty(&intf->hp_xmit_msgs))
entry = intf->hp_xmit_msgs.next;
else if (!list_empty(&intf->xmit_msgs))
entry = intf->xmit_msgs.next;
if (entry) {
list_del(entry);
newmsg = list_entry(entry, struct ipmi_smi_msg, link);
intf->curr_msg = newmsg;
}
}
if (!run_to_completion)
spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
if (newmsg)
intf->handlers->sender(intf->send_info, newmsg);
rcu_read_unlock();
handle_new_recv_msgs(intf);
}
/* Handle a new message from the lower layer. */
void ipmi_smi_msg_received(struct ipmi_smi *intf,
struct ipmi_smi_msg *msg)
{
unsigned long flags = 0; /* keep us warning-free. */
int run_to_completion = intf->run_to_completion;
if ((msg->data_size >= 2)
&& (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
&& (msg->data[1] == IPMI_SEND_MSG_CMD)
&& (msg->user_data == NULL)) {
if (intf->in_shutdown)
goto free_msg;
/*
* This is the local response to a command send, start
* the timer for these. The user_data will not be
* NULL if this is a response send, and we will let
* response sends just go through.
*/
/*
* Check for errors, if we get certain errors (ones
* that mean basically we can try again later), we
* ignore them and start the timer. Otherwise we
* report the error immediately.
*/
if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0)
&& (msg->rsp[2] != IPMI_NODE_BUSY_ERR)
&& (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR)
&& (msg->rsp[2] != IPMI_BUS_ERR)
&& (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR)) {
int ch = msg->rsp[3] & 0xf;
struct ipmi_channel *chans;
/* Got an error sending the message, handle it. */
chans = READ_ONCE(intf->channel_list)->c;
if ((chans[ch].medium == IPMI_CHANNEL_MEDIUM_8023LAN)
|| (chans[ch].medium == IPMI_CHANNEL_MEDIUM_ASYNC))
ipmi_inc_stat(intf, sent_lan_command_errs);
else
ipmi_inc_stat(intf, sent_ipmb_command_errs);
intf_err_seq(intf, msg->msgid, msg->rsp[2]);
} else
/* The message was sent, start the timer. */
intf_start_seq_timer(intf, msg->msgid);
free_msg:
ipmi_free_smi_msg(msg);
} else {
/*
* To preserve message order, we keep a queue and deliver from
* a tasklet.
*/
if (!run_to_completion)
spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
list_add_tail(&msg->link, &intf->waiting_rcv_msgs);
if (!run_to_completion)
spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
flags);
}
if (!run_to_completion)
spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
/*
* We can get an asynchronous event or receive message in addition
* to commands we send.
*/
if (msg == intf->curr_msg)
intf->curr_msg = NULL;
if (!run_to_completion)
spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
if (run_to_completion)
smi_recv_tasklet((unsigned long) intf);
else
tasklet_schedule(&intf->recv_tasklet);
}
EXPORT_SYMBOL(ipmi_smi_msg_received);
void ipmi_smi_watchdog_pretimeout(struct ipmi_smi *intf)
{
if (intf->in_shutdown)
return;
atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1);
tasklet_schedule(&intf->recv_tasklet);
}
EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
static struct ipmi_smi_msg *
smi_from_recv_msg(struct ipmi_smi *intf, struct ipmi_recv_msg *recv_msg,
unsigned char seq, long seqid)
{
struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg();
if (!smi_msg)
/*
* If we can't allocate the message, then just return, we
* get 4 retries, so this should be ok.
*/
return NULL;
memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len);
smi_msg->data_size = recv_msg->msg.data_len;
smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid);
ipmi_debug_msg("Resend: ", smi_msg->data, smi_msg->data_size);
return smi_msg;
}
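/*
* Check one sequence table entry for timeout. Expired entries are
* moved to the timeouts list; entries with retries left are
* resent. Called with intf->seq_lock held; drops and retakes the
* lock around the resend.
*/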
static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent,
struct list_head *timeouts,
unsigned long timeout_period,
int slot, unsigned long *flags,
unsigned int *waiting_msgs)
{
struct ipmi_recv_msg *msg;
if (intf->in_shutdown)
return;
if (!ent->inuse)
return;
if (timeout_period < ent->timeout) {
ent->timeout -= timeout_period;
(*waiting_msgs)++;
return;
}
if (ent->retries_left == 0) {
/* The message has used all its retries. */
ent->inuse = 0;
msg = ent->recv_msg;
list_add_tail(&msg->link, timeouts);
if (ent->broadcast)
ipmi_inc_stat(intf, timed_out_ipmb_broadcasts);
else if (is_lan_addr(&ent->recv_msg->addr))
ipmi_inc_stat(intf, timed_out_lan_commands);
else
ipmi_inc_stat(intf, timed_out_ipmb_commands);
} else {
struct ipmi_smi_msg *smi_msg;
/* More retries, send again. */
(*waiting_msgs)++;
/*
* Start with the max timer, set to normal timer after
* the message is sent.
*/
ent->timeout = MAX_MSG_TIMEOUT;
ent->retries_left--;
smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot,
ent->seqid);
if (!smi_msg) {
if (is_lan_addr(&ent->recv_msg->addr))
ipmi_inc_stat(intf,
dropped_rexmit_lan_commands);
else
ipmi_inc_stat(intf,
dropped_rexmit_ipmb_commands);
return;
}
spin_unlock_irqrestore(&intf->seq_lock, *flags);
/*
* Send the new message. We send with a zero
* priority. It timed out, I doubt time is that
* critical now, and high priority messages are really
* only for messages to the local MC, which don't get
* resent.
*/
if (intf->handlers) {
if (is_lan_addr(&ent->recv_msg->addr))
ipmi_inc_stat(intf,
retransmitted_lan_commands);
else
ipmi_inc_stat(intf,
retransmitted_ipmb_commands);
smi_send(intf, intf->handlers, smi_msg, 0);
} else
ipmi_free_smi_msg(smi_msg);
spin_lock_irqsave(&intf->seq_lock, *flags);
}
}
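/*
* Per-interface periodic timeout processing: retry BMC
* registration, age the sequence table and handle maintenance
* mode expiry. Returns the number of messages still waiting on a
* timer.
*/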
static unsigned int ipmi_timeout_handler(struct ipmi_smi *intf,
unsigned long timeout_period)
{
struct list_head timeouts;
struct ipmi_recv_msg *msg, *msg2;
unsigned long flags;
int i;
unsigned int waiting_msgs = 0;
if (!intf->bmc_registered) {
kref_get(&intf->refcount);
if (!schedule_work(&intf->bmc_reg_work)) {
kref_put(&intf->refcount, intf_free);
waiting_msgs++;
}
}
/*
* Go through the seq table and find any messages that
* have timed out, putting them in the timeouts
* list.
*/
INIT_LIST_HEAD(&timeouts);
spin_lock_irqsave(&intf->seq_lock, flags);
if (intf->ipmb_maintenance_mode_timeout) {
if (intf->ipmb_maintenance_mode_timeout <= timeout_period)
intf->ipmb_maintenance_mode_timeout = 0;
else
intf->ipmb_maintenance_mode_timeout -= timeout_period;
}
for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
check_msg_timeout(intf, &intf->seq_table[i],
&timeouts, timeout_period, i,
&flags, &waiting_msgs);
spin_unlock_irqrestore(&intf->seq_lock, flags);
list_for_each_entry_safe(msg, msg2, &timeouts, link)
deliver_err_response(intf, msg, IPMI_TIMEOUT_COMPLETION_CODE);
/*
* Maintenance mode handling. Check the timeout
* optimistically before we claim the lock. It may
* mean a timeout gets missed occasionally, but that
* only means the timeout gets extended by one period
* in that case. No big deal, and it avoids the lock
* most of the time.
*/
if (intf->auto_maintenance_timeout > 0) {
spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
if (intf->auto_maintenance_timeout > 0) {
intf->auto_maintenance_timeout
-= timeout_period;
if (!intf->maintenance_mode
&& (intf->auto_maintenance_timeout <= 0)) {
intf->maintenance_mode_enable = false;
maintenance_mode_update(intf);
}
}
spin_unlock_irqrestore(&intf->maintenance_mode_lock,
flags);
}
tasklet_schedule(&intf->recv_tasklet);
return waiting_msgs;
}
static void ipmi_request_event(struct ipmi_smi *intf)
{
/* No event requests when in maintenance mode. */
if (intf->maintenance_mode_enable)
return;
if (!intf->in_shutdown)
intf->handlers->request_events(intf->send_info);
}
static struct timer_list ipmi_timer;
static atomic_t stop_operation;
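/*
* Periodic timer function: request events and run the timeout
* handler on every interface, rearming the timer if anything is
* still waiting.
*/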
static void ipmi_timeout(struct timer_list *unused)
{
struct ipmi_smi *intf;
int nt = 0, index;
if (atomic_read(&stop_operation))
return;
index = srcu_read_lock(&ipmi_interfaces_srcu);
list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
int lnt = 0;
if (atomic_read(&intf->event_waiters)) {
intf->ticks_to_req_ev--;
if (intf->ticks_to_req_ev == 0) {
ipmi_request_event(intf);
intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
}
lnt++;
}
lnt += ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME);
lnt = !!lnt;
if (lnt != intf->last_needs_timer &&
intf->handlers->set_need_watch)
intf->handlers->set_need_watch(intf->send_info, lnt);
intf->last_needs_timer = lnt;
nt += lnt;
}
srcu_read_unlock(&ipmi_interfaces_srcu, index);
if (nt)
mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
}
static void need_waiter(struct ipmi_smi *intf)
{
/* Racy, but worst case we start the timer twice. */
if (!timer_pending(&ipmi_timer))
mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
}
static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);
static void free_smi_msg(struct ipmi_smi_msg *msg)
{
atomic_dec(&smi_msg_inuse_count);
kfree(msg);
}
struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
{
struct ipmi_smi_msg *rv;
rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC);
if (rv) {
rv->done = free_smi_msg;
rv->user_data = NULL;
atomic_inc(&smi_msg_inuse_count);
}
return rv;
}
EXPORT_SYMBOL(ipmi_alloc_smi_msg);
static void free_recv_msg(struct ipmi_recv_msg *msg)
{
atomic_dec(&recv_msg_inuse_count);
kfree(msg);
}
static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
{
struct ipmi_recv_msg *rv;
rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC);
if (rv) {
rv->user = NULL;
rv->done = free_recv_msg;
atomic_inc(&recv_msg_inuse_count);
}
return rv;
}
void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
{
if (msg->user)
kref_put(&msg->user->refcount, free_user);
msg->done(msg);
}
EXPORT_SYMBOL(ipmi_free_recv_msg);
static atomic_t panic_done_count = ATOMIC_INIT(0);
static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
{
atomic_dec(&panic_done_count);
}
static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
{
atomic_dec(&panic_done_count);
}
/*
* Inside a panic, send a message and wait for a response.
*/
static void ipmi_panic_request_and_wait(struct ipmi_smi *intf,
struct ipmi_addr *addr,
struct kernel_ipmi_msg *msg)
{
struct ipmi_smi_msg smi_msg;
struct ipmi_recv_msg recv_msg;
int rv;
smi_msg.done = dummy_smi_done_handler;
recv_msg.done = dummy_recv_done_handler;
atomic_add(2, &panic_done_count);
rv = i_ipmi_request(NULL,
intf,
addr,
0,
msg,
intf,
&smi_msg,
&recv_msg,
0,
intf->addrinfo[0].address,
intf->addrinfo[0].lun,
0, 1); /* Don't retry, and don't wait. */
if (rv)
atomic_sub(2, &panic_done_count);
else if (intf->handlers->flush_messages)
intf->handlers->flush_messages(intf->send_info);
while (atomic_read(&panic_done_count) != 0)
ipmi_poll(intf);
}
static void event_receiver_fetcher(struct ipmi_smi *intf,
struct ipmi_recv_msg *msg)
{
if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
&& (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE)
&& (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD)
&& (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
/* A get event receiver command, save it. */
intf->event_receiver = msg->msg.data[1];
intf->event_receiver_lun = msg->msg.data[2] & 0x3;
}
}
static void device_id_fetcher(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
{
if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
&& (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
&& (msg->msg.cmd == IPMI_GET_DEVICE_ID_CMD)
&& (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
/*
* A get device id command, save if we are an event
* receiver or generator.
*/
intf->local_sel_device = (msg->msg.data[6] >> 2) & 1;
intf->local_event_generator = (msg->msg.data[6] >> 5) & 1;
}
}
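/*
* On panic, send a platform event announcing the OS stop and, if
* so configured, log the panic string into the SEL as OEM
* records.
*/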
static void send_panic_events(struct ipmi_smi *intf, char *str)
{
struct kernel_ipmi_msg msg;
unsigned char data[16];
struct ipmi_system_interface_addr *si;
struct ipmi_addr addr;
char *p = str;
struct ipmi_ipmb_addr *ipmb;
int j;
if (ipmi_send_panic_event == IPMI_SEND_PANIC_EVENT_NONE)
return;
si = (struct ipmi_system_interface_addr *) &addr;
si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
si->channel = IPMI_BMC_CHANNEL;
si->lun = 0;
/* Fill in an event telling that we have failed. */
msg.netfn = 0x04; /* Sensor or Event. */
msg.cmd = 2; /* Platform event command. */
msg.data = data;
msg.data_len = 8;
data[0] = 0x41; /* Kernel generator ID, IPMI table 5-4 */
data[1] = 0x03; /* This is for IPMI 1.0. */
data[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */
data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */
data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */
/*
* Put a few breadcrumbs in. Hopefully later we can add more things
* to make the panic events more useful.
*/
if (str) {
data[3] = str[0];
data[6] = str[1];
data[7] = str[2];
}
/* Send the event announcing the panic. */
ipmi_panic_request_and_wait(intf, &addr, &msg);
/*
* On every interface, dump a bunch of OEM events holding the
* string.
*/
if (ipmi_send_panic_event != IPMI_SEND_PANIC_EVENT_STRING || !str)
return;
/*
* intf_num is used as a marker to tell if the
* interface is valid. Thus we need a read barrier to
* make sure data fetched before checking intf_num
* won't be used.
*/
smp_rmb();
/*
* First job here is to figure out where to send the
* OEM events. There's no way in IPMI to send OEM
* events using an event send command, so we have to
* find the SEL to put them in and stick them in
* there.
*/
/* Get capabilities from the get device id. */
intf->local_sel_device = 0;
intf->local_event_generator = 0;
intf->event_receiver = 0;
/* Request the device info from the local MC. */
msg.netfn = IPMI_NETFN_APP_REQUEST;
msg.cmd = IPMI_GET_DEVICE_ID_CMD;
msg.data = NULL;
msg.data_len = 0;
intf->null_user_handler = device_id_fetcher;
ipmi_panic_request_and_wait(intf, &addr, &msg);
if (intf->local_event_generator) {
/* Request the event receiver from the local MC. */
msg.netfn = IPMI_NETFN_SENSOR_EVENT_REQUEST;
msg.cmd = IPMI_GET_EVENT_RECEIVER_CMD;
msg.data = NULL;
msg.data_len = 0;
intf->null_user_handler = event_receiver_fetcher;
ipmi_panic_request_and_wait(intf, &addr, &msg);
}
intf->null_user_handler = NULL;
/*
* Validate the event receiver. The low bit must not
* be 1 (it must be a valid IPMB address), it cannot
* be zero, and it must not be my address.
*/
if (((intf->event_receiver & 1) == 0)
&& (intf->event_receiver != 0)
&& (intf->event_receiver != intf->addrinfo[0].address)) {
/*
* The event receiver is valid, send an IPMB
* message.
*/
ipmb = (struct ipmi_ipmb_addr *) &addr;
ipmb->addr_type = IPMI_IPMB_ADDR_TYPE;
ipmb->channel = 0; /* FIXME - is this right? */
ipmb->lun = intf->event_receiver_lun;
ipmb->slave_addr = intf->event_receiver;
} else if (intf->local_sel_device) {
/*
* The event receiver was not valid (or was
* me), but I am an SEL device, just dump it
* in my SEL.
*/
si = (struct ipmi_system_interface_addr *) &addr;
si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
si->channel = IPMI_BMC_CHANNEL;
si->lun = 0;
} else
return; /* Nowhere to send the event. */
msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */
msg.cmd = IPMI_ADD_SEL_ENTRY_CMD;
msg.data = data;
msg.data_len = 16;
j = 0;
while (*p) {
int size = strlen(p);
if (size > 11)
size = 11;
data[0] = 0;
data[1] = 0;
data[2] = 0xf0; /* OEM event without timestamp. */
data[3] = intf->addrinfo[0].address;
data[4] = j++; /* sequence # */
/*
* Always give 11 bytes, so strncpy will fill
* it with zeroes for me.
*/
strncpy(data+5, p, 11);
p += size;
ipmi_panic_request_and_wait(intf, &addr, &msg);
}
}
static int has_panicked;
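/*
* Panic notifier: switch every ready interface to run-to-completion
* mode, call the users' panic handlers and send the panic events.
*/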
static int panic_event(struct notifier_block *this,
unsigned long event,
void *ptr)
{
struct ipmi_smi *intf;
struct ipmi_user *user;
if (has_panicked)
return NOTIFY_DONE;
has_panicked = 1;
/* For every registered interface, set it to run to completion. */
list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
if (!intf->handlers || intf->intf_num == -1)
/* Interface is not ready. */
continue;
if (!intf->handlers->poll)
continue;
/*
* If we were interrupted while locking xmit_msgs_lock or
* waiting_rcv_msgs_lock, the corresponding list may be
		 * corrupted. In this case, drop items on the list for
		 * safety.
*/
if (!spin_trylock(&intf->xmit_msgs_lock)) {
INIT_LIST_HEAD(&intf->xmit_msgs);
INIT_LIST_HEAD(&intf->hp_xmit_msgs);
} else
spin_unlock(&intf->xmit_msgs_lock);
if (!spin_trylock(&intf->waiting_rcv_msgs_lock))
INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
else
spin_unlock(&intf->waiting_rcv_msgs_lock);
intf->run_to_completion = 1;
if (intf->handlers->set_run_to_completion)
intf->handlers->set_run_to_completion(intf->send_info,
1);
list_for_each_entry_rcu(user, &intf->users, link) {
if (user->handler->ipmi_panic_handler)
user->handler->ipmi_panic_handler(
user->handler_data);
}
send_panic_events(intf, ptr);
}
return NOTIFY_DONE;
}
static struct notifier_block panic_block = {
.notifier_call = panic_event,
.next = NULL,
.priority = 200 /* priority: INT_MAX >= x >= 0 */
};
static int ipmi_init_msghandler(void)
{
int rv;
if (initialized)
return 0;
rv = driver_register(&ipmidriver.driver);
if (rv) {
pr_err("Could not register IPMI driver\n");
return rv;
}
pr_info("version " IPMI_DRIVER_VERSION "\n");
timer_setup(&ipmi_timer, ipmi_timeout, 0);
mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
initialized = 1;
return 0;
}
static int __init ipmi_init_msghandler_mod(void)
{
ipmi_init_msghandler();
return 0;
}
static void __exit cleanup_ipmi(void)
{
int count;
if (!initialized)
return;
atomic_notifier_chain_unregister(&panic_notifier_list, &panic_block);
/*
* This can't be called if any interfaces exist, so no worry
* about shutting down the interfaces.
*/
/*
* Tell the timer to stop, then wait for it to stop. This
* avoids problems with race conditions removing the timer
* here.
*/
atomic_inc(&stop_operation);
del_timer_sync(&ipmi_timer);
driver_unregister(&ipmidriver.driver);
initialized = 0;
/* Check for buffer leaks. */
count = atomic_read(&smi_msg_inuse_count);
if (count != 0)
pr_warn("SMI message count %d at exit\n", count);
count = atomic_read(&recv_msg_inuse_count);
if (count != 0)
pr_warn("recv message count %d at exit\n", count);
}
module_exit(cleanup_ipmi);
module_init(ipmi_init_msghandler_mod);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI"
" interface.");
MODULE_VERSION(IPMI_DRIVER_VERSION);
MODULE_SOFTDEP("post: ipmi_devintf");
./CrossVul/dataset_final_sorted/CWE-416/c/good_1427_0

crossvul-cpp_data_good_1372_0
/*
* GPAC - Multimedia Framework C SDK
*
* Authors: Jean Le Feuvre
* Copyright (c) Telecom ParisTech 2005-2012
*
* This file is part of GPAC / MPEG2-TS sub-project
*
* GPAC is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* GPAC is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
#include <gpac/mpegts.h>
#ifndef GPAC_DISABLE_MPEG2TS
#include <string.h>
#include <gpac/constants.h>
#include <gpac/internal/media_dev.h>
#include <gpac/download.h>
#ifndef GPAC_DISABLE_STREAMING
#include <gpac/internal/ietf_dev.h>
#endif
#ifdef GPAC_CONFIG_LINUX
#include <unistd.h>
#endif
#ifdef GPAC_ENABLE_MPE
#include <gpac/dvb_mpe.h>
#endif
#ifdef GPAC_ENABLE_DSMCC
#include <gpac/ait.h>
#endif
#define DEBUG_TS_PACKET 0
GF_EXPORT
const char *gf_m2ts_get_stream_name(u32 streamType)
{
switch (streamType) {
case GF_M2TS_VIDEO_MPEG1:
return "MPEG-1 Video";
case GF_M2TS_VIDEO_MPEG2:
return "MPEG-2 Video";
case GF_M2TS_AUDIO_MPEG1:
return "MPEG-1 Audio";
case GF_M2TS_AUDIO_MPEG2:
return "MPEG-2 Audio";
case GF_M2TS_PRIVATE_SECTION:
return "Private Section";
case GF_M2TS_PRIVATE_DATA:
return "Private Data";
case GF_M2TS_AUDIO_AAC:
return "AAC Audio";
case GF_M2TS_VIDEO_MPEG4:
return "MPEG-4 Video";
case GF_M2TS_VIDEO_H264:
return "MPEG-4/H264 Video";
case GF_M2TS_VIDEO_SVC:
return "H264-SVC Video";
case GF_M2TS_VIDEO_HEVC:
return "HEVC Video";
case GF_M2TS_VIDEO_SHVC:
return "SHVC Video";
case GF_M2TS_VIDEO_SHVC_TEMPORAL:
return "SHVC Video Temporal Sublayer";
case GF_M2TS_VIDEO_MHVC:
return "MHVC Video";
case GF_M2TS_VIDEO_MHVC_TEMPORAL:
return "MHVC Video Temporal Sublayer";
case GF_M2TS_AUDIO_AC3:
return "Dolby AC3 Audio";
case GF_M2TS_AUDIO_DTS:
return "Dolby DTS Audio";
case GF_M2TS_SUBTITLE_DVB:
return "DVB Subtitle";
case GF_M2TS_SYSTEMS_MPEG4_PES:
return "MPEG-4 SL (PES)";
case GF_M2TS_SYSTEMS_MPEG4_SECTIONS:
return "MPEG-4 SL (Section)";
case GF_M2TS_MPE_SECTIONS:
return "MPE (Section)";
case GF_M2TS_METADATA_PES:
return "Metadata (PES)";
case GF_M2TS_METADATA_ID3_HLS:
return "ID3/HLS Metadata (PES)";
default:
return "Unknown";
}
}
static u32 gf_m2ts_reframe_default(GF_M2TS_Demuxer *ts, GF_M2TS_PES *pes, Bool same_pts, unsigned char *data, u32 data_len, GF_M2TS_PESHeader *pes_hdr)
{
GF_M2TS_PES_PCK pck;
pck.flags = 0;
if (pes->rap) pck.flags |= GF_M2TS_PES_PCK_RAP;
if (!same_pts) pck.flags |= GF_M2TS_PES_PCK_AU_START;
pck.DTS = pes->DTS;
pck.PTS = pes->PTS;
pck.data = (char *)data;
pck.data_len = data_len;
pck.stream = pes;
ts->on_event(ts, GF_M2TS_EVT_PES_PCK, &pck);
/*we consumed all data*/
return 0;
}
static u32 gf_m2ts_reframe_reset(GF_M2TS_Demuxer *ts, GF_M2TS_PES *pes, Bool same_pts, unsigned char *data, u32 data_len, GF_M2TS_PESHeader *pes_hdr)
{
if (pes->pck_data) {
gf_free(pes->pck_data);
pes->pck_data = NULL;
}
pes->pck_data_len = pes->pck_alloc_len = 0;
if (pes->prev_data) {
gf_free(pes->prev_data);
pes->prev_data = NULL;
}
pes->prev_data_len = 0;
pes->pes_len = 0;
pes->prev_PTS = 0;
pes->reframe = NULL;
pes->cc = -1;
pes->temi_tc_desc_len = 0;
return 0;
}
static void add_text(char **buffer, u32 *size, u32 *pos, char *msg, u32 msg_len)
{
if (!msg || !buffer) return;
if (*pos+msg_len>*size) {
		/* grow to at least *pos+msg_len: the previous formula (*pos+msg_len-*size+256)
		could shrink the buffer once *size exceeded 256, overflowing the copy below */
		*size = *pos+msg_len+256;
*buffer = (char *)gf_realloc(*buffer, *size);
}
strncpy((*buffer)+(*pos), msg, msg_len);
*pos += msg_len;
}
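/*
 * Usage sketch for add_text (illustrative only, assuming gf_realloc(NULL, n)
 * allocates like gf_malloc): the buffer grows on demand and *pos tracks the
 * write offset; the result is not NUL-terminated, callers rely on *pos.
 */
#if 0
{
	char *buf = NULL;
	u32 size = 0, pos = 0;
	add_text(&buf, &size, &pos, "hello ", 6);
	add_text(&buf, &size, &pos, "world", 5);
	/* buf now holds the 11 bytes "hello world"; pos == 11, size >= 11 */
	gf_free(buf);
}
#endif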
static GF_Err id3_parse_tag(char *data, u32 length, char **output, u32 *output_size, u32 *output_pos)
{
GF_BitStream *bs;
u32 pos;
if ((data[0] != 'I') || (data[1] != 'D') || (data[2] != '3'))
return GF_NOT_SUPPORTED;
bs = gf_bs_new(data, length, GF_BITSTREAM_READ);
gf_bs_skip_bytes(bs, 3);
/*u8 major = */gf_bs_read_u8(bs);
/*u8 minor = */gf_bs_read_u8(bs);
/*u8 unsync = */gf_bs_read_int(bs, 1);
/*u8 ext_hdr = */ gf_bs_read_int(bs, 1);
gf_bs_read_int(bs, 6);
u32 size = gf_id3_read_size(bs);
pos = (u32) gf_bs_get_position(bs);
if (size != length-pos)
size = length-pos;
while (size && (gf_bs_available(bs)>=10) ) {
u32 ftag = gf_bs_read_u32(bs);
u32 fsize = gf_id3_read_size(bs);
/*u16 fflags = */gf_bs_read_u16(bs);
size -= 10;
//TODO, handle more ID3 tags ?
		if (ftag==ID3V2_FRAME_TXXX) {
			u32 txt_pos = (u32) gf_bs_get_position(bs);
			char *text = data+txt_pos;
			add_text(output, output_size, output_pos, text, fsize);
		} else {
			GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] ID3 tag %s not handled, patch welcome\n", gf_4cc_to_str(ftag) ) );
		}
gf_bs_skip_bytes(bs, fsize);
}
gf_bs_del(bs);
return GF_OK;
}
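/*
 * Note on sizes: ID3v2 headers store sizes as 28-bit "syncsafe" integers
 * (7 useful bits per byte, MSB of each byte zero); assuming gf_id3_read_size
 * decodes that form, the bytes 0x00 0x00 0x02 0x01 yield (2<<7) | 1 = 257.
 */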
static u32 gf_m2ts_reframe_id3_pes(GF_M2TS_Demuxer *ts, GF_M2TS_PES *pes, Bool same_pts, unsigned char *data, u32 data_len, GF_M2TS_PESHeader *pes_hdr)
{
char frame_header[256];
char *output_text = NULL;
u32 output_len = 0;
u32 pos = 0;
GF_M2TS_PES_PCK pck;
pck.flags = 0;
if (pes->rap) pck.flags |= GF_M2TS_PES_PCK_RAP;
if (!same_pts) pck.flags |= GF_M2TS_PES_PCK_AU_START;
pck.DTS = pes->DTS;
pck.PTS = pes->PTS;
sprintf(frame_header, LLU" --> NEXT\n", pes->PTS);
add_text(&output_text, &output_len, &pos, frame_header, (u32)strlen(frame_header));
id3_parse_tag((char *)data, data_len, &output_text, &output_len, &pos);
add_text(&output_text, &output_len, &pos, "\n\n", 2);
pck.data = (char *)output_text;
pck.data_len = pos;
pck.stream = pes;
ts->on_event(ts, GF_M2TS_EVT_PES_PCK, &pck);
gf_free(output_text);
/*we consumed all data*/
return 0;
}
static u32 gf_m2ts_sync(GF_M2TS_Demuxer *ts, char *data, u32 size, Bool simple_check)
{
u32 i=0;
/*if first byte is sync assume we're sync*/
if (simple_check && (data[i]==0x47)) return 0;
	while (i < size) {
		if (i+192 >= size) return size;
		if ((data[i]==0x47) && (data[i+188]==0x47))
			break;
if ((data[i]==0x47) && (data[i+192]==0x47)) {
ts->prefix_present = 1;
break;
}
i++;
}
if (i) {
GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] re-sync skipped %d bytes\n", i) );
}
return i;
}
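/*
 * A 192-byte spacing between sync bytes denotes an M2TS-style mux in which
 * every 188-byte packet is preceded by a 4-byte timestamp header (as found
 * in Blu-ray .m2ts files); prefix_present records this so the demuxer can
 * skip the 4-byte prefix on each subsequent packet.
 */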
GF_EXPORT
Bool gf_m2ts_crc32_check(u8 *data, u32 len)
{
u32 crc = gf_crc_32(data, len);
u32 crc_val = GF_4CC((u8) data[len], (u8) data[len+1], (u8) data[len+2], (u8) data[len+3]);
return (crc==crc_val) ? GF_TRUE : GF_FALSE;
}
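/*
 * Usage note: a PSI section of total size N stores its CRC32 in its last
 * 4 bytes, so callers pass len = N - 4; the CRC computed over data[0..len-1]
 * is then compared with the big-endian value stored at data[len..len+3].
 */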
static GF_M2TS_SectionFilter *gf_m2ts_section_filter_new(gf_m2ts_section_callback process_section_callback, Bool process_individual)
{
GF_M2TS_SectionFilter *sec;
GF_SAFEALLOC(sec, GF_M2TS_SectionFilter);
if (!sec) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] gf_m2ts_section_filter_new : OUT OF MEMORY\n"));
return NULL;
}
sec->cc = -1;
sec->process_section = process_section_callback;
sec->process_individual = process_individual;
return sec;
}
static void gf_m2ts_reset_sections(GF_List *sections)
{
u32 count;
GF_M2TS_Section *section;
//GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] Deleting sections\n"));
count = gf_list_count(sections);
while (count) {
section = gf_list_get(sections, 0);
gf_list_rem(sections, 0);
if (section->data) gf_free(section->data);
gf_free(section);
count--;
}
}
static void gf_m2ts_section_filter_reset(GF_M2TS_SectionFilter *sf)
{
if (sf->section) {
gf_free(sf->section);
sf->section = NULL;
}
while (sf->table) {
GF_M2TS_Table *t = sf->table;
sf->table = t->next;
gf_m2ts_reset_sections(t->sections);
gf_list_del(t->sections);
gf_free(t);
}
sf->cc = -1;
sf->length = sf->received = 0;
sf->demux_restarted = 1;
}
static void gf_m2ts_section_filter_del(GF_M2TS_SectionFilter *sf)
{
gf_m2ts_section_filter_reset(sf);
gf_free(sf);
}
static void gf_m2ts_metadata_descriptor_del(GF_M2TS_MetadataDescriptor *metad)
{
if (metad) {
if (metad->service_id_record) gf_free(metad->service_id_record);
if (metad->decoder_config) gf_free(metad->decoder_config);
if (metad->decoder_config_id) gf_free(metad->decoder_config_id);
gf_free(metad);
}
}
GF_EXPORT
void gf_m2ts_es_del(GF_M2TS_ES *es, GF_M2TS_Demuxer *ts)
{
gf_list_del_item(es->program->streams, es);
if (es->flags & GF_M2TS_ES_IS_SECTION) {
GF_M2TS_SECTION_ES *ses = (GF_M2TS_SECTION_ES *)es;
if (ses->sec) gf_m2ts_section_filter_del(ses->sec);
#ifdef GPAC_ENABLE_MPE
if (es->flags & GF_M2TS_ES_IS_MPE)
gf_dvb_mpe_section_del(es);
#endif
} else if (es->pid!=es->program->pmt_pid) {
GF_M2TS_PES *pes = (GF_M2TS_PES *)es;
if ((pes->flags & GF_M2TS_INHERIT_PCR) && ts->ess[es->program->pcr_pid]==es)
ts->ess[es->program->pcr_pid] = NULL;
if (pes->pck_data) gf_free(pes->pck_data);
if (pes->prev_data) gf_free(pes->prev_data);
if (pes->buf) gf_free(pes->buf);
if (pes->reassemble_buf) gf_free(pes->reassemble_buf);
if (pes->temi_tc_desc) gf_free(pes->temi_tc_desc);
if (pes->metadata_descriptor) gf_m2ts_metadata_descriptor_del(pes->metadata_descriptor);
}
if (es->slcfg) gf_free(es->slcfg);
gf_free(es);
}
static void gf_m2ts_reset_sdt(GF_M2TS_Demuxer *ts)
{
while (gf_list_count(ts->SDTs)) {
GF_M2TS_SDT *sdt = (GF_M2TS_SDT *)gf_list_last(ts->SDTs);
gf_list_rem_last(ts->SDTs);
if (sdt->provider) gf_free(sdt->provider);
if (sdt->service) gf_free(sdt->service);
gf_free(sdt);
}
}
GF_EXPORT
GF_M2TS_SDT *gf_m2ts_get_sdt_info(GF_M2TS_Demuxer *ts, u32 program_id)
{
u32 i;
for (i=0; i<gf_list_count(ts->SDTs); i++) {
GF_M2TS_SDT *sdt = (GF_M2TS_SDT *)gf_list_get(ts->SDTs, i);
if (sdt->service_id==program_id) return sdt;
}
return NULL;
}
static void gf_m2ts_section_complete(GF_M2TS_Demuxer *ts, GF_M2TS_SectionFilter *sec, GF_M2TS_SECTION_ES *ses)
{
//seek mode, only process PAT and PMT
if (ts->seek_mode && (sec->section[0] != GF_M2TS_TABLE_ID_PAT) && (sec->section[0] != GF_M2TS_TABLE_ID_PMT)) {
/*clean-up (including broken sections)*/
if (sec->section) gf_free(sec->section);
sec->section = NULL;
sec->length = sec->received = 0;
return;
}
if (!sec->process_section) {
if ((ts->on_event && (sec->section[0]==GF_M2TS_TABLE_ID_AIT)) ) {
#ifdef GPAC_ENABLE_DSMCC
GF_M2TS_SL_PCK pck;
pck.data_len = sec->length;
pck.data = sec->section;
pck.stream = (GF_M2TS_ES *)ses;
//ts->on_event(ts, GF_M2TS_EVT_AIT_FOUND, &pck);
on_ait_section(ts, GF_M2TS_EVT_AIT_FOUND, &pck);
#endif
} else if ((ts->on_event && (sec->section[0]==GF_M2TS_TABLE_ID_DSM_CC_ENCAPSULATED_DATA || sec->section[0]==GF_M2TS_TABLE_ID_DSM_CC_UN_MESSAGE ||
sec->section[0]==GF_M2TS_TABLE_ID_DSM_CC_DOWNLOAD_DATA_MESSAGE || sec->section[0]==GF_M2TS_TABLE_ID_DSM_CC_STREAM_DESCRIPTION || sec->section[0]==GF_M2TS_TABLE_ID_DSM_CC_PRIVATE)) ) {
#ifdef GPAC_ENABLE_DSMCC
GF_M2TS_SL_PCK pck;
pck.data_len = sec->length;
pck.data = sec->section;
pck.stream = (GF_M2TS_ES *)ses;
on_dsmcc_section(ts,GF_M2TS_EVT_DSMCC_FOUND,&pck);
//ts->on_event(ts, GF_M2TS_EVT_DSMCC_FOUND, &pck);
#endif
}
#ifdef GPAC_ENABLE_MPE
else if (ts->on_mpe_event && ((ses && (ses->flags & GF_M2TS_EVT_DVB_MPE)) || (sec->section[0]==GF_M2TS_TABLE_ID_INT)) ) {
GF_M2TS_SL_PCK pck;
pck.data_len = sec->length;
pck.data = sec->section;
pck.stream = (GF_M2TS_ES *)ses;
ts->on_mpe_event(ts, GF_M2TS_EVT_DVB_MPE, &pck);
}
#endif
else if (ts->on_event) {
GF_M2TS_SL_PCK pck;
pck.data_len = sec->length;
pck.data = sec->section;
pck.stream = (GF_M2TS_ES *)ses;
ts->on_event(ts, GF_M2TS_EVT_DVB_GENERAL, &pck);
}
} else {
Bool has_syntax_indicator;
u8 table_id;
u16 extended_table_id;
u32 status, section_start, i;
GF_M2TS_Table *t, *prev_t;
unsigned char *data;
Bool section_valid = 0;
status = 0;
/*parse header*/
data = (u8 *)sec->section;
/*look for proper table*/
table_id = data[0];
if (ts->on_event) {
switch (table_id) {
case GF_M2TS_TABLE_ID_PAT:
case GF_M2TS_TABLE_ID_SDT_ACTUAL:
case GF_M2TS_TABLE_ID_PMT:
case GF_M2TS_TABLE_ID_NIT_ACTUAL:
case GF_M2TS_TABLE_ID_TDT:
case GF_M2TS_TABLE_ID_TOT:
{
GF_M2TS_SL_PCK pck;
pck.data_len = sec->length;
pck.data = sec->section;
pck.stream = (GF_M2TS_ES *)ses;
ts->on_event(ts, GF_M2TS_EVT_DVB_GENERAL, &pck);
}
}
}
has_syntax_indicator = (data[1] & 0x80) ? 1 : 0;
if (has_syntax_indicator) {
extended_table_id = (data[3]<<8) | data[4];
} else {
extended_table_id = 0;
}
prev_t = NULL;
t = sec->table;
while (t) {
if ((t->table_id==table_id) && (t->ex_table_id == extended_table_id)) break;
prev_t = t;
t = t->next;
}
/*create table*/
if (!t) {
GF_SAFEALLOC(t, GF_M2TS_Table);
if (!t) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] Fail to alloc table %d %d\n", table_id, extended_table_id));
return;
}
GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] Creating table %d %d\n", table_id, extended_table_id));
t->table_id = table_id;
t->ex_table_id = extended_table_id;
t->last_version_number = 0xFF;
t->sections = gf_list_new();
if (prev_t) prev_t->next = t;
else sec->table = t;
}
if (has_syntax_indicator) {
if (sec->length < 4) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] corrupted section length %d less than CRC \n", sec->length));
} else {
/*remove crc32*/
sec->length -= 4;
if (gf_m2ts_crc32_check((char *)data, sec->length)) {
s32 cur_sec_num;
t->version_number = (data[5] >> 1) & 0x1f;
if (t->last_section_number && t->section_number && (t->version_number != t->last_version_number)) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] table transmission interrupted: previous table (v=%d) %d/%d sections - new table (v=%d) %d/%d sections\n", t->last_version_number, t->section_number, t->last_section_number, t->version_number, data[6] + 1, data[7] + 1) );
gf_m2ts_reset_sections(t->sections);
t->section_number = 0;
}
t->current_next_indicator = (data[5] & 0x1) ? 1 : 0;
/*add one to section numbers to detect if we missed or not the first section in the table*/
cur_sec_num = data[6] + 1;
t->last_section_number = data[7] + 1;
section_start = 8;
/*we missed something*/
if (!sec->process_individual && t->section_number + 1 != cur_sec_num) {
/* TODO - Check how to handle sections when the first complete section does
not have its sec num 0 */
section_valid = 0;
if (t->is_init) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] corrupted table (lost section %d)\n", cur_sec_num ? cur_sec_num-1 : 31) );
}
} else {
section_valid = 1;
t->section_number = cur_sec_num;
}
} else {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] corrupted section (CRC32 failed)\n"));
}
}
} else {
section_valid = 1;
section_start = 3;
}
/*process section*/
if (section_valid) {
GF_M2TS_Section *section;
GF_SAFEALLOC(section, GF_M2TS_Section);
if (!section) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] Fail to create section\n"));
return;
}
section->data_size = sec->length - section_start;
section->data = (unsigned char*)gf_malloc(sizeof(unsigned char)*section->data_size);
memcpy(section->data, sec->section + section_start, sizeof(unsigned char)*section->data_size);
gf_list_add(t->sections, section);
if (t->section_number == 1) {
status |= GF_M2TS_TABLE_START;
if (t->last_version_number == t->version_number) {
t->is_repeat = 1;
} else {
t->is_repeat = 0;
}
/*only update version number in the first section of the table*/
t->last_version_number = t->version_number;
}
if (t->is_init) {
if (t->is_repeat) {
status |= GF_M2TS_TABLE_REPEAT;
} else {
status |= GF_M2TS_TABLE_UPDATE;
}
} else {
status |= GF_M2TS_TABLE_FOUND;
}
if (t->last_section_number == t->section_number) {
u32 table_size;
status |= GF_M2TS_TABLE_END;
table_size = 0;
for (i=0; i<gf_list_count(t->sections); i++) {
GF_M2TS_Section *section = gf_list_get(t->sections, i);
table_size += section->data_size;
}
if (t->is_repeat) {
if (t->table_size != table_size) {
status |= GF_M2TS_TABLE_UPDATE;
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] Repeated section found with different sizes (old table %d bytes, new table %d bytes)\n", t->table_size, table_size) );
t->table_size = table_size;
}
} else {
t->table_size = table_size;
}
t->is_init = 1;
/*reset section number*/
t->section_number = 0;
t->is_repeat = 0;
}
if (sec->process_individual) {
/*send each section of the table and not the aggregated table*/
if (sec->process_section)
sec->process_section(ts, ses, t->sections, t->table_id, t->ex_table_id, t->version_number, (u8) (t->last_section_number - 1), status);
gf_m2ts_reset_sections(t->sections);
} else {
if (status&GF_M2TS_TABLE_END) {
if (sec->process_section)
sec->process_section(ts, ses, t->sections, t->table_id, t->ex_table_id, t->version_number, (u8) (t->last_section_number - 1), status);
gf_m2ts_reset_sections(t->sections);
}
}
} else {
sec->cc = -1;
t->section_number = 0;
}
}
/*clean-up (including broken sections)*/
if (sec->section) gf_free(sec->section);
sec->section = NULL;
sec->length = sec->received = 0;
}
static Bool gf_m2ts_is_long_section(u8 table_id)
{
switch (table_id) {
case GF_M2TS_TABLE_ID_MPEG4_BIFS:
case GF_M2TS_TABLE_ID_MPEG4_OD:
case GF_M2TS_TABLE_ID_INT:
case GF_M2TS_TABLE_ID_EIT_ACTUAL_PF:
case GF_M2TS_TABLE_ID_EIT_OTHER_PF:
case GF_M2TS_TABLE_ID_ST:
case GF_M2TS_TABLE_ID_SIT:
case GF_M2TS_TABLE_ID_DSM_CC_PRIVATE:
case GF_M2TS_TABLE_ID_MPE_FEC:
case GF_M2TS_TABLE_ID_DSM_CC_DOWNLOAD_DATA_MESSAGE:
case GF_M2TS_TABLE_ID_DSM_CC_UN_MESSAGE:
return 1;
default:
if (table_id >= GF_M2TS_TABLE_ID_EIT_SCHEDULE_MIN && table_id <= GF_M2TS_TABLE_ID_EIT_SCHEDULE_MAX)
return 1;
else
return 0;
}
}
static u32 gf_m2ts_get_section_length(char byte0, char byte1, char byte2)
{
u32 length;
if (gf_m2ts_is_long_section(byte0)) {
length = 3 + ( ((((u32)byte1)<<8) | (byte2&0xff)) & 0xfff );
} else {
length = 3 + ( ((((u32)byte1)<<8) | (byte2&0xff)) & 0x3ff );
}
return length;
}
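/*
 * Worked example: a section starting with 0x02 0xB0 0x1D (a PMT) has
 * section_length (0xB01D & 0x3ff) = 29, i.e. a total section size of
 * 3 header bytes + 29 = 32 bytes.
 */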
static void gf_m2ts_gather_section(GF_M2TS_Demuxer *ts, GF_M2TS_SectionFilter *sec, GF_M2TS_SECTION_ES *ses, GF_M2TS_Header *hdr, unsigned char *data, u32 data_size)
{
u32 payload_size = data_size;
u8 expect_cc = (sec->cc<0) ? hdr->continuity_counter : (sec->cc + 1) & 0xf;
Bool disc = (expect_cc == hdr->continuity_counter) ? 0 : 1;
sec->cc = expect_cc;
/*may happen if hdr->adaptation_field=2 no payload in TS packet*/
if (!data_size) return;
if (hdr->payload_start) {
u32 ptr_field;
ptr_field = data[0];
if (ptr_field+1>data_size) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] Invalid section start (@ptr_field=%d, @data_size=%d)\n", ptr_field, data_size) );
return;
}
/*end of previous section*/
if (!sec->length && sec->received) {
/* the length of the section could not be determined from the previous TS packet because we had only 1 or 2 bytes */
if (sec->received == 1)
sec->length = gf_m2ts_get_section_length(sec->section[0], data[1], data[2]);
else /* (sec->received == 2) */
sec->length = gf_m2ts_get_section_length(sec->section[0], sec->section[1], data[1]);
sec->section = (char*)gf_realloc(sec->section, sizeof(char)*sec->length);
}
if (sec->length && sec->received + ptr_field >= sec->length) {
u32 len = sec->length - sec->received;
memcpy(sec->section + sec->received, data+1, sizeof(char)*len);
sec->received += len;
if (ptr_field > len)
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] Invalid pointer field (@ptr_field=%d, @remaining=%d)\n", ptr_field, len) );
gf_m2ts_section_complete(ts, sec, ses);
}
data += ptr_field+1;
data_size -= ptr_field+1;
payload_size -= ptr_field+1;
aggregated_section:
if (sec->section) gf_free(sec->section);
sec->length = sec->received = 0;
sec->section = (char*)gf_malloc(sizeof(char)*data_size);
memcpy(sec->section, data, sizeof(char)*data_size);
sec->received = data_size;
} else if (disc) {
if (sec->section) gf_free(sec->section);
sec->section = NULL;
sec->received = sec->length = 0;
return;
} else if (!sec->section) {
return;
} else {
if (sec->length && sec->received+data_size > sec->length)
data_size = sec->length - sec->received;
if (sec->length) {
memcpy(sec->section + sec->received, data, sizeof(char)*data_size);
} else {
sec->section = (char*)gf_realloc(sec->section, sizeof(char)*(sec->received+data_size));
memcpy(sec->section + sec->received, data, sizeof(char)*data_size);
}
sec->received += data_size;
}
/*alloc final buffer*/
if (!sec->length && (sec->received >= 3)) {
sec->length = gf_m2ts_get_section_length(sec->section[0], sec->section[1], sec->section[2]);
sec->section = (char*)gf_realloc(sec->section, sizeof(char)*sec->length);
if (sec->received > sec->length) {
data_size -= sec->received - sec->length;
sec->received = sec->length;
}
}
if (!sec->length || sec->received < sec->length) return;
/*OK done*/
gf_m2ts_section_complete(ts, sec, ses);
if (payload_size > data_size) {
data += data_size;
/* detect padding after previous section */
if (data[0] != 0xFF) {
data_size = payload_size - data_size;
payload_size = data_size;
goto aggregated_section;
}
}
}
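/*
 * pointer_field walkthrough: on a payload-start packet whose first byte is
 * 0x05, bytes 1..5 complete the previous section and the new section header
 * begins at offset 6; a pointer_field of 0 means the new section starts
 * right after the pointer byte.
 */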
static void gf_m2ts_process_sdt(GF_M2TS_Demuxer *ts, GF_M2TS_SECTION_ES *ses, GF_List *sections, u8 table_id, u16 ex_table_id, u8 version_number, u8 last_section_number, u32 status)
{
u32 pos, evt_type;
u32 nb_sections;
u32 data_size;
unsigned char *data;
GF_M2TS_Section *section;
/*wait for the last section */
if (!(status&GF_M2TS_TABLE_END)) return;
/*skip if already received*/
if (status&GF_M2TS_TABLE_REPEAT) {
if (ts->on_event) ts->on_event(ts, GF_M2TS_EVT_SDT_REPEAT, NULL);
return;
}
if (table_id != GF_M2TS_TABLE_ID_SDT_ACTUAL) {
return;
}
gf_m2ts_reset_sdt(ts);
nb_sections = gf_list_count(sections);
if (nb_sections > 1) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] SDT on multiple sections not supported\n"));
}
section = (GF_M2TS_Section *)gf_list_get(sections, 0);
data = section->data;
data_size = section->data_size;
//orig_net_id = (data[0] << 8) | data[1];
pos = 3;
	while (pos < data_size) {
		GF_M2TS_SDT *sdt;
		u32 descs_size, d_pos, ulen;
		/* each SDT entry needs at least its 5-byte fixed part */
		if (pos + 5 > data_size) break;
GF_SAFEALLOC(sdt, GF_M2TS_SDT);
if (!sdt) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] Fail to create SDT\n"));
return;
}
gf_list_add(ts->SDTs, sdt);
sdt->service_id = (data[pos]<<8) + data[pos+1];
sdt->EIT_schedule = (data[pos+2] & 0x2) ? 1 : 0;
sdt->EIT_present_following = (data[pos+2] & 0x1);
sdt->running_status = (data[pos+3]>>5) & 0x7;
sdt->free_CA_mode = (data[pos+3]>>4) & 0x1;
descs_size = ((data[pos+3]&0xf)<<8) | data[pos+4];
pos += 5;
d_pos = 0;
while (d_pos < descs_size) {
u8 d_tag = data[pos+d_pos];
u8 d_len = data[pos+d_pos+1];
switch (d_tag) {
case GF_M2TS_DVB_SERVICE_DESCRIPTOR:
if (sdt->provider) gf_free(sdt->provider);
sdt->provider = NULL;
if (sdt->service) gf_free(sdt->service);
sdt->service = NULL;
d_pos+=2;
sdt->service_type = data[pos+d_pos];
ulen = data[pos+d_pos+1];
d_pos += 2;
sdt->provider = (char*)gf_malloc(sizeof(char)*(ulen+1));
memcpy(sdt->provider, data+pos+d_pos, sizeof(char)*ulen);
sdt->provider[ulen] = 0;
d_pos += ulen;
ulen = data[pos+d_pos];
d_pos += 1;
sdt->service = (char*)gf_malloc(sizeof(char)*(ulen+1));
memcpy(sdt->service, data+pos+d_pos, sizeof(char)*ulen);
sdt->service[ulen] = 0;
d_pos += ulen;
break;
default:
GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] Skipping descriptor (0x%x) not supported\n", d_tag));
d_pos += d_len;
if (d_len == 0) d_pos = descs_size;
break;
}
}
pos += descs_size;
}
evt_type = GF_M2TS_EVT_SDT_FOUND;
if (ts->on_event) ts->on_event(ts, evt_type, NULL);
}
static void gf_m2ts_process_mpeg4section(GF_M2TS_Demuxer *ts, GF_M2TS_SECTION_ES *es, GF_List *sections, u8 table_id, u16 ex_table_id, u8 version_number, u8 last_section_number, u32 status)
{
GF_M2TS_SL_PCK sl_pck;
u32 nb_sections, i;
GF_M2TS_Section *section;
/*skip if already received*/
if (status & GF_M2TS_TABLE_REPEAT)
if (!(es->flags & GF_M2TS_ES_SEND_REPEATED_SECTIONS))
return;
GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] Sections for PID %d\n", es->pid) );
/*send all sections (eg SL-packets)*/
nb_sections = gf_list_count(sections);
for (i=0; i<nb_sections; i++) {
section = (GF_M2TS_Section *)gf_list_get(sections, i);
sl_pck.data = (char *)section->data;
sl_pck.data_len = section->data_size;
sl_pck.stream = (GF_M2TS_ES *)es;
sl_pck.version_number = version_number;
if (ts->on_event) ts->on_event(ts, GF_M2TS_EVT_SL_PCK, &sl_pck);
}
}
static void gf_m2ts_process_nit(GF_M2TS_Demuxer *ts, GF_M2TS_SECTION_ES *nit_es, GF_List *sections, u8 table_id, u16 ex_table_id, u8 version_number, u8 last_section_number, u32 status)
{
	GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] NIT table processing (not yet implemented)\n"));
}
static void gf_m2ts_process_tdt_tot(GF_M2TS_Demuxer *ts, GF_M2TS_SECTION_ES *tdt_tot_es, GF_List *sections, u8 table_id, u16 ex_table_id, u8 version_number, u8 last_section_number, u32 status)
{
unsigned char *data;
u32 data_size, nb_sections;
u32 date, yp, mp, k;
GF_M2TS_Section *section;
GF_M2TS_TDT_TOT *time_table;
const char *table_name;
/*wait for the last section */
if ( !(status & GF_M2TS_TABLE_END) )
return;
switch (table_id) {
case GF_M2TS_TABLE_ID_TDT:
table_name = "TDT";
break;
case GF_M2TS_TABLE_ID_TOT:
table_name = "TOT";
break;
default:
GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] Unimplemented table_id %u for PID %u\n", table_id, GF_M2TS_PID_TDT_TOT_ST));
return;
}
nb_sections = gf_list_count(sections);
if (nb_sections > 1) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] %s on multiple sections not supported\n", table_name));
}
section = (GF_M2TS_Section *)gf_list_get(sections, 0);
data = section->data;
data_size = section->data_size;
	/*TDT only contains 40 bits of UTC_time; TOT adds descriptors and a CRC*/
if ((table_id==GF_M2TS_TABLE_ID_TDT) && (data_size != 5)) {
		GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] Corrupted %s size\n", table_name));
}
GF_SAFEALLOC(time_table, GF_M2TS_TDT_TOT);
if (!time_table) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] Fail to alloc DVB time table\n"));
return;
}
	/* UTC_time: decodes a Modified Julian Date (MJD) into Co-ordinated
	   Universal Time (UTC) - see annex C of DVB-SI ETSI EN 300468 */
date = data[0]*256 + data[1];
yp = (u32)((date - 15078.2)/365.25);
mp = (u32)((date - 14956.1 - (u32)(yp * 365.25))/30.6001);
time_table->day = (u32)(date - 14956 - (u32)(yp * 365.25) - (u32)(mp * 30.6001));
if (mp == 14 || mp == 15) k = 1;
else k = 0;
time_table->year = yp + k + 1900;
time_table->month = mp - 1 - k*12;
time_table->hour = 10*((data[2]&0xf0)>>4) + (data[2]&0x0f);
time_table->minute = 10*((data[3]&0xf0)>>4) + (data[3]&0x0f);
time_table->second = 10*((data[4]&0xf0)>>4) + (data[4]&0x0f);
assert(time_table->hour<24 && time_table->minute<60 && time_table->second<60);
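	/*
	 * Worked example: MJD 45218 gives yp = 82, mp = 10, so
	 * day = 45218 - 14956 - 29950 - 306 = 6, month = 10 - 1 = 9,
	 * year = 82 + 1900 = 1982, i.e. 1982/09/06.
	 */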
GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] Stream UTC time is %u/%02u/%02u %02u:%02u:%02u\n", time_table->year, time_table->month, time_table->day, time_table->hour, time_table->minute, time_table->second));
switch (table_id) {
case GF_M2TS_TABLE_ID_TDT:
if (ts->TDT_time) gf_free(ts->TDT_time);
ts->TDT_time = time_table;
if (ts->on_event) ts->on_event(ts, GF_M2TS_EVT_TDT, time_table);
break;
case GF_M2TS_TABLE_ID_TOT:
#if 0
{
u32 pos, loop_len;
loop_len = ((data[5]&0x0f) << 8) | (data[6] & 0xff);
data += 7;
pos = 0;
while (pos < loop_len) {
u8 tag = data[pos];
pos += 2;
if (tag == GF_M2TS_DVB_LOCAL_TIME_OFFSET_DESCRIPTOR) {
char tmp_time[10];
u16 offset_hours, offset_minutes;
now->country_code[0] = data[pos];
now->country_code[1] = data[pos+1];
now->country_code[2] = data[pos+2];
now->country_region_id = data[pos+3]>>2;
sprintf(tmp_time, "%02x", data[pos+4]);
offset_hours = atoi(tmp_time);
sprintf(tmp_time, "%02x", data[pos+5]);
offset_minutes = atoi(tmp_time);
now->local_time_offset_seconds = (offset_hours * 60 + offset_minutes) * 60;
if (data[pos+3] & 1) now->local_time_offset_seconds *= -1;
dvb_decode_mjd_to_unix_time(data+pos+6, &now->unix_next_toc);
sprintf(tmp_time, "%02x", data[pos+11]);
offset_hours = atoi(tmp_time);
sprintf(tmp_time, "%02x", data[pos+12]);
offset_minutes = atoi(tmp_time);
now->next_time_offset_seconds = (offset_hours * 60 + offset_minutes) * 60;
if (data[pos+3] & 1) now->next_time_offset_seconds *= -1;
pos+= 13;
}
}
/*TODO: check lengths are ok*/
if (ts->on_event) ts->on_event(ts, GF_M2TS_EVT_TOT, time_table);
}
#endif
/*check CRC32*/
if (ts->tdt_tot->length<4) {
			GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] corrupted %s table (less than 4 bytes but CRC32 should be present)\n", table_name));
goto error_exit;
}
if (!gf_m2ts_crc32_check(ts->tdt_tot->section, ts->tdt_tot->length-4)) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] corrupted %s table (CRC32 failed)\n", table_name));
goto error_exit;
}
if (ts->TDT_time) gf_free(ts->TDT_time);
ts->TDT_time = time_table;
if (ts->on_event) ts->on_event(ts, GF_M2TS_EVT_TOT, time_table);
break;
default:
assert(0);
goto error_exit;
}
return; /*success*/
error_exit:
gf_free(time_table);
return;
}
static GF_M2TS_MetadataPointerDescriptor *gf_m2ts_read_metadata_pointer_descriptor(GF_BitStream *bs, u32 length)
{
u32 size;
GF_M2TS_MetadataPointerDescriptor *d;
GF_SAFEALLOC(d, GF_M2TS_MetadataPointerDescriptor);
if (!d) return NULL;
d->application_format = gf_bs_read_u16(bs);
size = 2;
if (d->application_format == 0xFFFF) {
d->application_format_identifier = gf_bs_read_u32(bs);
size += 4;
}
d->format = gf_bs_read_u8(bs);
size += 1;
if (d->format == 0xFF) {
d->format_identifier = gf_bs_read_u32(bs);
size += 4;
}
d->service_id = gf_bs_read_u8(bs);
d->locator_record_flag = (gf_bs_read_int(bs, 1) ? GF_TRUE : GF_FALSE);
d->carriage_flag = (enum metadata_carriage)gf_bs_read_int(bs, 2);
gf_bs_read_int(bs, 5); /*reserved */
size += 2;
if (d->locator_record_flag) {
d->locator_length = gf_bs_read_u8(bs);
d->locator_data = (char *)gf_malloc(d->locator_length);
size += 1 + d->locator_length;
gf_bs_read_data(bs, d->locator_data, d->locator_length);
}
if (d->carriage_flag != 3) {
d->program_number = gf_bs_read_u16(bs);
size += 2;
}
if (d->carriage_flag == 1) {
d->ts_location = gf_bs_read_u16(bs);
d->ts_id = gf_bs_read_u16(bs);
size += 4;
}
if (length-size > 0) {
d->data_size = length-size;
d->data = (char *)gf_malloc(d->data_size);
gf_bs_read_data(bs, d->data, d->data_size);
}
return d;
}
static void gf_m2ts_metadata_pointer_descriptor_del(GF_M2TS_MetadataPointerDescriptor *metapd)
{
if (metapd) {
if (metapd->locator_data) gf_free(metapd->locator_data);
if (metapd->data) gf_free(metapd->data);
gf_free(metapd);
}
}
static GF_M2TS_MetadataDescriptor *gf_m2ts_read_metadata_descriptor(GF_BitStream *bs, u32 length)
{
u32 size;
GF_M2TS_MetadataDescriptor *d;
GF_SAFEALLOC(d, GF_M2TS_MetadataDescriptor);
if (!d) return NULL;
d->application_format = gf_bs_read_u16(bs);
size = 2;
if (d->application_format == 0xFFFF) {
d->application_format_identifier = gf_bs_read_u32(bs);
size += 4;
}
d->format = gf_bs_read_u8(bs);
size += 1;
if (d->format == 0xFF) {
d->format_identifier = gf_bs_read_u32(bs);
size += 4;
}
d->service_id = gf_bs_read_u8(bs);
d->decoder_config_flags = gf_bs_read_int(bs, 3);
d->dsmcc_flag = (gf_bs_read_int(bs, 1) ? GF_TRUE : GF_FALSE);
gf_bs_read_int(bs, 4); /* reserved */
size += 2;
if (d->dsmcc_flag) {
d->service_id_record_length = gf_bs_read_u8(bs);
d->service_id_record = (char *)gf_malloc(d->service_id_record_length);
size += 1 + d->service_id_record_length;
gf_bs_read_data(bs, d->service_id_record, d->service_id_record_length);
}
if (d->decoder_config_flags == 1) {
d->decoder_config_length = gf_bs_read_u8(bs);
d->decoder_config = (char *)gf_malloc(d->decoder_config_length);
size += 1 + d->decoder_config_length;
gf_bs_read_data(bs, d->decoder_config, d->decoder_config_length);
}
if (d->decoder_config_flags == 3) {
d->decoder_config_id_length = gf_bs_read_u8(bs);
d->decoder_config_id = (char *)gf_malloc(d->decoder_config_id_length);
size += 1 + d->decoder_config_id_length;
gf_bs_read_data(bs, d->decoder_config_id, d->decoder_config_id_length);
}
if (d->decoder_config_flags == 4) {
d->decoder_config_service_id = gf_bs_read_u8(bs);
size++;
}
return d;
}
static void gf_m2ts_process_pmt(GF_M2TS_Demuxer *ts, GF_M2TS_SECTION_ES *pmt, GF_List *sections, u8 table_id, u16 ex_table_id, u8 version_number, u8 last_section_number, u32 status)
{
u32 info_length, pos, desc_len, evt_type, nb_es,i;
u32 nb_sections;
u32 data_size;
u32 nb_hevc, nb_hevc_temp, nb_shvc, nb_shvc_temp, nb_mhvc, nb_mhvc_temp;
unsigned char *data;
GF_M2TS_Section *section;
GF_Err e = GF_OK;
/*wait for the last section */
if (!(status&GF_M2TS_TABLE_END)) return;
nb_es = 0;
/*skip if already received but no update detected (eg same data) */
if ((status&GF_M2TS_TABLE_REPEAT) && !(status&GF_M2TS_TABLE_UPDATE)) {
if (ts->on_event) ts->on_event(ts, GF_M2TS_EVT_PMT_REPEAT, pmt->program);
return;
}
if (pmt->sec->demux_restarted) {
pmt->sec->demux_restarted = 0;
return;
}
GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] PMT Found or updated\n"));
nb_sections = gf_list_count(sections);
if (nb_sections > 1) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("PMT on multiple sections not supported\n"));
}
section = (GF_M2TS_Section *)gf_list_get(sections, 0);
data = section->data;
data_size = section->data_size;
pmt->program->pcr_pid = ((data[0] & 0x1f) << 8) | data[1];
info_length = ((data[2]&0xf)<<8) | data[3];
if (info_length + 4 > data_size) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("Broken PMT first loop, %d bytes avail but first loop size %d\n", data_size, info_length));
return;
} else if (info_length != 0) {
/* ...Read Descriptors ... */
u8 tag, len;
u32 first_loop_len = 0;
		while (info_length > first_loop_len) {
			/* re-read tag and length: each descriptor in the loop has its own header */
			tag = data[4 + first_loop_len];
			len = data[5 + first_loop_len];
			if (first_loop_len + 2 + len > info_length) break;
if (tag == GF_M2TS_MPEG4_IOD_DESCRIPTOR) {
if ((len>2) && (len - 2 <= info_length)) {
u32 size;
GF_BitStream *iod_bs;
					iod_bs = gf_bs_new((char *)data + 8 + first_loop_len, len-2, GF_BITSTREAM_READ);
if (pmt->program->pmt_iod) gf_odf_desc_del((GF_Descriptor *)pmt->program->pmt_iod);
e = gf_odf_parse_descriptor(iod_bs , (GF_Descriptor **) &pmt->program->pmt_iod, &size);
gf_bs_del(iod_bs );
if (e==GF_OK) {
/*remember program number for service/program selection*/
if (pmt->program->pmt_iod) pmt->program->pmt_iod->ServiceID = pmt->program->number;
/*if empty IOD (freebox case), discard it and use dynamic declaration of object*/
if (!gf_list_count(pmt->program->pmt_iod->ESDescriptors)) {
gf_odf_desc_del((GF_Descriptor *)pmt->program->pmt_iod);
pmt->program->pmt_iod = NULL;
}
}
} else {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("Broken IOD! len %d less than 2 bytes to declare IOD\n", len));
}
} else if (tag == GF_M2TS_METADATA_POINTER_DESCRIPTOR) {
GF_BitStream *metadatapd_bs;
GF_M2TS_MetadataPointerDescriptor *metapd;
				metadatapd_bs = gf_bs_new((char *)data + 6 + first_loop_len, len, GF_BITSTREAM_READ);
metapd = gf_m2ts_read_metadata_pointer_descriptor(metadatapd_bs, len);
gf_bs_del(metadatapd_bs);
				if (metapd && metapd->application_format_identifier == GF_M2TS_META_ID3 &&
metapd->format_identifier == GF_M2TS_META_ID3 &&
metapd->carriage_flag == METADATA_CARRIAGE_SAME_TS) {
/*HLS ID3 Metadata */
pmt->program->metadata_pointer_descriptor = metapd;
} else {
/* don't know what to do with it for now, delete */
gf_m2ts_metadata_pointer_descriptor_del(metapd);
}
} else {
GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] Skipping descriptor (0x%x) and others not supported\n", tag));
}
first_loop_len += 2 + len;
}
}
if (data_size <= 4 + info_length) return;
data += 4 + info_length;
data_size -= 4 + info_length;
pos = 0;
	/* count the number of program-related PMTs received */
for(i=0; i<gf_list_count(ts->programs); i++) {
GF_M2TS_Program *prog = (GF_M2TS_Program *)gf_list_get(ts->programs,i);
if(prog->pmt_pid == pmt->pid) {
break;
}
}
nb_hevc = nb_hevc_temp = nb_shvc = nb_shvc_temp = nb_mhvc = nb_mhvc_temp = 0;
while (pos<data_size) {
GF_M2TS_PES *pes = NULL;
GF_M2TS_SECTION_ES *ses = NULL;
GF_M2TS_ES *es = NULL;
Bool inherit_pcr = 0;
u32 pid, stream_type, reg_desc_format;
if (pos + 5 > data_size) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("Broken PMT! size %d but position %d and need at least 5 bytes to declare es\n", data_size, pos));
break;
}
stream_type = data[0];
pid = ((data[1] & 0x1f) << 8) | data[2];
desc_len = ((data[3] & 0xf) << 8) | data[4];
GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("stream_type :%d \n",stream_type));
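		/*
		 * Example entry: the 5 bytes 0x1B 0xE1 0x00 0xF0 0x00 declare
		 * stream_type 0x1B (H.264), pid ((0xE1 & 0x1f) << 8) | 0x00 = 0x100,
		 * and an empty descriptor loop (desc_len = 0).
		 */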
switch (stream_type) {
/* PES */
case GF_M2TS_VIDEO_MPEG1:
case GF_M2TS_VIDEO_MPEG2:
case GF_M2TS_VIDEO_DCII:
case GF_M2TS_VIDEO_MPEG4:
case GF_M2TS_SYSTEMS_MPEG4_PES:
case GF_M2TS_VIDEO_H264:
case GF_M2TS_VIDEO_SVC:
case GF_M2TS_VIDEO_MVCD:
case GF_M2TS_VIDEO_HEVC:
case GF_M2TS_VIDEO_HEVC_MCTS:
case GF_M2TS_VIDEO_HEVC_TEMPORAL:
case GF_M2TS_VIDEO_SHVC:
case GF_M2TS_VIDEO_SHVC_TEMPORAL:
case GF_M2TS_VIDEO_MHVC:
case GF_M2TS_VIDEO_MHVC_TEMPORAL:
inherit_pcr = 1;
case GF_M2TS_AUDIO_MPEG1:
case GF_M2TS_AUDIO_MPEG2:
case GF_M2TS_AUDIO_AAC:
case GF_M2TS_AUDIO_LATM_AAC:
case GF_M2TS_AUDIO_AC3:
case GF_M2TS_AUDIO_DTS:
case GF_M2TS_MHAS_MAIN:
case GF_M2TS_MHAS_AUX:
case GF_M2TS_SUBTITLE_DVB:
case GF_M2TS_METADATA_PES:
GF_SAFEALLOC(pes, GF_M2TS_PES);
if (!pes) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG2TS] Failed to allocate ES for pid %d\n", pid));
return;
}
pes->cc = -1;
pes->flags = GF_M2TS_ES_IS_PES;
if (inherit_pcr)
pes->flags |= GF_M2TS_INHERIT_PCR;
es = (GF_M2TS_ES *)pes;
break;
case GF_M2TS_PRIVATE_DATA:
GF_SAFEALLOC(pes, GF_M2TS_PES);
if (!pes) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG2TS] Failed to allocate ES for pid %d\n", pid));
return;
}
pes->cc = -1;
pes->flags = GF_M2TS_ES_IS_PES;
es = (GF_M2TS_ES *)pes;
break;
/* Sections */
case GF_M2TS_SYSTEMS_MPEG4_SECTIONS:
GF_SAFEALLOC(ses, GF_M2TS_SECTION_ES);
if (!ses) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG2TS] Failed to allocate ES for pid %d\n", pid));
return;
}
es = (GF_M2TS_ES *)ses;
es->flags |= GF_M2TS_ES_IS_SECTION;
/* carriage of ISO_IEC_14496 data in sections */
if (stream_type == GF_M2TS_SYSTEMS_MPEG4_SECTIONS) {
/*MPEG-4 sections need to be fully checked: if one section is lost, this means we lost
one SL packet in the AU so we must wait for the complete section again*/
ses->sec = gf_m2ts_section_filter_new(gf_m2ts_process_mpeg4section, 0);
/*create OD container*/
if (!pmt->program->additional_ods) {
pmt->program->additional_ods = gf_list_new();
ts->has_4on2 = 1;
}
}
break;
case GF_M2TS_13818_6_ANNEX_A:
case GF_M2TS_13818_6_ANNEX_B:
case GF_M2TS_13818_6_ANNEX_C:
case GF_M2TS_13818_6_ANNEX_D:
case GF_M2TS_PRIVATE_SECTION:
case GF_M2TS_QUALITY_SEC:
case GF_M2TS_MORE_SEC:
GF_SAFEALLOC(ses, GF_M2TS_SECTION_ES);
if (!ses) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG2TS] Failed to allocate ES for pid %d\n", pid));
return;
}
es = (GF_M2TS_ES *)ses;
es->flags |= GF_M2TS_ES_IS_SECTION;
es->pid = pid;
es->service_id = pmt->program->number;
if (stream_type == GF_M2TS_PRIVATE_SECTION) {
GF_LOG(GF_LOG_INFO, GF_LOG_CONTAINER, ("AIT sections on pid %d\n", pid));
} else if (stream_type == GF_M2TS_QUALITY_SEC) {
GF_LOG(GF_LOG_INFO, GF_LOG_CONTAINER, ("Quality metadata sections on pid %d\n", pid));
} else if (stream_type == GF_M2TS_MORE_SEC) {
GF_LOG(GF_LOG_INFO, GF_LOG_CONTAINER, ("MORE sections on pid %d\n", pid));
} else {
GF_LOG(GF_LOG_INFO, GF_LOG_CONTAINER, ("stream type DSM CC user private sections on pid %d \n", pid));
}
/* NULL means: trigger the call to on_event with DVB_GENERAL type and the raw section as payload */
ses->sec = gf_m2ts_section_filter_new(NULL, 1);
//ses->sec->service_id = pmt->program->number;
break;
case GF_M2TS_MPE_SECTIONS:
if (! ts->prefix_present) {
GF_LOG(GF_LOG_INFO, GF_LOG_CONTAINER, ("stream type MPE found : pid = %d \n", pid));
#ifdef GPAC_ENABLE_MPE
es = gf_dvb_mpe_section_new();
if (es->flags & GF_M2TS_ES_IS_SECTION) {
/* NULL means: trigger the call to on_event with DVB_GENERAL type and the raw section as payload */
((GF_M2TS_SECTION_ES*)es)->sec = gf_m2ts_section_filter_new(NULL, 1);
}
#endif
break;
}
default:
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] Stream type (0x%x) for PID %d not supported\n", stream_type, pid ) );
//GF_LOG(/*GF_LOG_WARNING*/GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] Stream type (0x%x) for PID %d not supported\n", stream_type, pid ) );
break;
}
if (es) {
es->stream_type = (stream_type==GF_M2TS_PRIVATE_DATA) ? 0 : stream_type;
es->program = pmt->program;
es->pid = pid;
es->component_tag = -1;
}
pos += 5;
data += 5;
while (desc_len) {
if (pos + 2 > data_size) {
				GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("Broken PMT descriptor! size %d but position %d and need at least 2 bytes to parse descriptor\n", data_size, pos));
break;
}
u8 tag = data[0];
u32 len = data[1];
if (pos + 2 + len > data_size) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("Broken PMT descriptor! size %d, desc size %d but position %d\n", data_size, len, pos));
break;
}
if (es) {
switch (tag) {
case GF_M2TS_ISO_639_LANGUAGE_DESCRIPTOR:
if (pes && (len>=3) )
pes->lang = GF_4CC(' ', data[2], data[3], data[4]);
break;
case GF_M2TS_MPEG4_SL_DESCRIPTOR:
if (len>=2) {
es->mpeg4_es_id = ( (u32) data[2] & 0x1f) << 8 | data[3];
es->flags |= GF_M2TS_ES_IS_SL;
}
break;
case GF_M2TS_REGISTRATION_DESCRIPTOR:
if (len>=4) {
reg_desc_format = GF_4CC(data[2], data[3], data[4], data[5]);
/*cf http://www.smpte-ra.org/mpegreg/mpegreg.html*/
switch (reg_desc_format) {
case GF_M2TS_RA_STREAM_AC3:
es->stream_type = GF_M2TS_AUDIO_AC3;
break;
case GF_M2TS_RA_STREAM_VC1:
es->stream_type = GF_M2TS_VIDEO_VC1;
break;
case GF_M2TS_RA_STREAM_GPAC:
if (len==8) {
es->stream_type = GF_4CC(data[6], data[7], data[8], data[9]);
es->flags |= GF_M2TS_GPAC_CODEC_ID;
break;
}
default:
GF_LOG(GF_LOG_INFO, GF_LOG_CONTAINER, ("Unknown registration descriptor %s\n", gf_4cc_to_str(reg_desc_format) ));
break;
}
}
break;
case GF_M2TS_DVB_EAC3_DESCRIPTOR:
es->stream_type = GF_M2TS_AUDIO_EC3;
break;
case GF_M2TS_DVB_DATA_BROADCAST_ID_DESCRIPTOR:
if (len>=2) {
u32 id = data[2]<<8 | data[3];
if ((id == 0xB) && ses && !ses->sec) {
ses->sec = gf_m2ts_section_filter_new(NULL, 1);
}
}
break;
case GF_M2TS_DVB_SUBTITLING_DESCRIPTOR:
if (pes && (len>=8)) {
pes->sub.language[0] = data[2];
pes->sub.language[1] = data[3];
pes->sub.language[2] = data[4];
pes->sub.type = data[5];
pes->sub.composition_page_id = (data[6]<<8) | data[7];
pes->sub.ancillary_page_id = (data[8]<<8) | data[9];
}
es->stream_type = GF_M2TS_DVB_SUBTITLE;
break;
case GF_M2TS_DVB_STREAM_IDENTIFIER_DESCRIPTOR:
if (len>=1) {
es->component_tag = data[2];
GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("Component Tag: %d on Program %d\n", es->component_tag, es->program->number));
}
break;
case GF_M2TS_DVB_TELETEXT_DESCRIPTOR:
es->stream_type = GF_M2TS_DVB_TELETEXT;
break;
case GF_M2TS_DVB_VBI_DATA_DESCRIPTOR:
es->stream_type = GF_M2TS_DVB_VBI;
break;
case GF_M2TS_HIERARCHY_DESCRIPTOR:
if (pes && (len>=4)) {
u8 hierarchy_embedded_layer_index;
GF_BitStream *hbs = gf_bs_new((const char *)data, data_size, GF_BITSTREAM_READ);
/*u32 skip = */gf_bs_read_int(hbs, 16);
/*u8 res1 = */gf_bs_read_int(hbs, 1);
/*u8 temp_scal = */gf_bs_read_int(hbs, 1);
/*u8 spatial_scal = */gf_bs_read_int(hbs, 1);
/*u8 quality_scal = */gf_bs_read_int(hbs, 1);
/*u8 hierarchy_type = */gf_bs_read_int(hbs, 4);
/*u8 res2 = */gf_bs_read_int(hbs, 2);
/*u8 hierarchy_layer_index = */gf_bs_read_int(hbs, 6);
/*u8 tref_not_present = */gf_bs_read_int(hbs, 1);
/*u8 res3 = */gf_bs_read_int(hbs, 1);
hierarchy_embedded_layer_index = gf_bs_read_int(hbs, 6);
/*u8 res4 = */gf_bs_read_int(hbs, 2);
/*u8 hierarchy_channel = */gf_bs_read_int(hbs, 6);
gf_bs_del(hbs);
pes->depends_on_pid = 1+hierarchy_embedded_layer_index;
}
break;
case GF_M2TS_METADATA_DESCRIPTOR:
{
GF_BitStream *metadatad_bs;
GF_M2TS_MetadataDescriptor *metad;
metadatad_bs = gf_bs_new((char *)data+2, len, GF_BITSTREAM_READ);
metad = gf_m2ts_read_metadata_descriptor(metadatad_bs, len);
gf_bs_del(metadatad_bs);
					if (metad && metad->application_format_identifier == GF_M2TS_META_ID3 &&
metad->format_identifier == GF_M2TS_META_ID3) {
/*HLS ID3 Metadata */
if (pes) {
pes->metadata_descriptor = metad;
pes->stream_type = GF_M2TS_METADATA_ID3_HLS;
}
} else {
/* don't know what to do with it for now, delete */
gf_m2ts_metadata_descriptor_del(metad);
}
}
break;
default:
GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] skipping descriptor (0x%x) not supported\n", tag));
break;
}
}
data += len+2;
pos += len+2;
if (desc_len < len+2) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] Invalid PMT es descriptor size for PID %d\n", pid ) );
break;
}
desc_len-=len+2;
}
if (es && !es->stream_type) {
gf_free(es);
es = NULL;
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] Private Stream type (0x%x) for PID %d not supported\n", stream_type, pid ) );
}
if (!es) continue;
if (ts->ess[pid]) {
//this is component reuse across programs, overwrite the previously declared stream ...
if (status & GF_M2TS_TABLE_FOUND) {
GF_LOG(GF_LOG_INFO, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d reused across programs %d and %d, not completely supported\n", pid, ts->ess[pid]->program->number, es->program->number ) );
//add stream to program but don't reassign the pid table until the stream is playing (>GF_M2TS_PES_FRAMING_SKIP)
gf_list_add(pmt->program->streams, es);
if (!(es->flags & GF_M2TS_ES_IS_SECTION) ) gf_m2ts_set_pes_framing(pes, GF_M2TS_PES_FRAMING_SKIP);
nb_es++;
//skip assignment below
es = NULL;
}
/*watchout for pmt update - FIXME this likely won't work in most cases*/
else {
GF_M2TS_ES *o_es = ts->ess[es->pid];
if ((o_es->stream_type == es->stream_type)
&& ((o_es->flags & GF_M2TS_ES_STATIC_FLAGS_MASK) == (es->flags & GF_M2TS_ES_STATIC_FLAGS_MASK))
&& (o_es->mpeg4_es_id == es->mpeg4_es_id)
&& ((o_es->flags & GF_M2TS_ES_IS_SECTION) || ((GF_M2TS_PES *)o_es)->lang == ((GF_M2TS_PES *)es)->lang)
) {
gf_free(es);
es = NULL;
} else {
gf_m2ts_es_del(o_es, ts);
ts->ess[es->pid] = NULL;
}
}
}
if (es) {
ts->ess[es->pid] = es;
gf_list_add(pmt->program->streams, es);
if (!(es->flags & GF_M2TS_ES_IS_SECTION) ) gf_m2ts_set_pes_framing(pes, GF_M2TS_PES_FRAMING_SKIP);
nb_es++;
if (es->stream_type == GF_M2TS_VIDEO_HEVC) nb_hevc++;
else if (es->stream_type == GF_M2TS_VIDEO_HEVC_TEMPORAL) nb_hevc_temp++;
else if (es->stream_type == GF_M2TS_VIDEO_SHVC) nb_shvc++;
else if (es->stream_type == GF_M2TS_VIDEO_SHVC_TEMPORAL) nb_shvc_temp++;
else if (es->stream_type == GF_M2TS_VIDEO_MHVC) nb_mhvc++;
else if (es->stream_type == GF_M2TS_VIDEO_MHVC_TEMPORAL) nb_mhvc_temp++;
}
}
//Table 2-139, implied hierarchy indexes
if (nb_hevc_temp + nb_shvc + nb_shvc_temp + nb_mhvc+ nb_mhvc_temp) {
for (i=0; i<gf_list_count(pmt->program->streams); i++) {
GF_M2TS_PES *es = (GF_M2TS_PES *)gf_list_get(pmt->program->streams, i);
if ( !(es->flags & GF_M2TS_ES_IS_PES)) continue;
if (es->depends_on_pid) continue;
switch (es->stream_type) {
case GF_M2TS_VIDEO_HEVC_TEMPORAL:
es->depends_on_pid = 1;
break;
case GF_M2TS_VIDEO_SHVC:
if (!nb_hevc_temp) es->depends_on_pid = 1;
else es->depends_on_pid = 2;
break;
case GF_M2TS_VIDEO_SHVC_TEMPORAL:
es->depends_on_pid = 3;
break;
case GF_M2TS_VIDEO_MHVC:
if (!nb_hevc_temp) es->depends_on_pid = 1;
else es->depends_on_pid = 2;
break;
case GF_M2TS_VIDEO_MHVC_TEMPORAL:
if (!nb_hevc_temp) es->depends_on_pid = 2;
else es->depends_on_pid = 3;
break;
}
}
}
if (nb_es) {
u32 i;
//translate hierarchy descriptors indexes into PIDs - check whether the PMT-index rules are the same for HEVC
for (i=0; i<gf_list_count(pmt->program->streams); i++) {
GF_M2TS_PES *an_es = NULL;
GF_M2TS_PES *es = (GF_M2TS_PES *)gf_list_get(pmt->program->streams, i);
if ( !(es->flags & GF_M2TS_ES_IS_PES)) continue;
if (!es->depends_on_pid) continue;
			//FIXME - we are not always assured that hierarchy_layer_index matches the stream index...
//+1 is because our first stream is the PMT
an_es = (GF_M2TS_PES *)gf_list_get(pmt->program->streams, es->depends_on_pid);
if (an_es) {
es->depends_on_pid = an_es->pid;
} else {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[M2TS] Wrong dependency index in hierarchy descriptor, assuming non-scalable stream\n"));
es->depends_on_pid = 0;
}
}
evt_type = (status&GF_M2TS_TABLE_FOUND) ? GF_M2TS_EVT_PMT_FOUND : GF_M2TS_EVT_PMT_UPDATE;
if (ts->on_event) ts->on_event(ts, evt_type, pmt->program);
} else {
/* if we found no new ES it's simply a repeat of the PMT */
if (ts->on_event) ts->on_event(ts, GF_M2TS_EVT_PMT_REPEAT, pmt->program);
}
}
static void gf_m2ts_process_pat(GF_M2TS_Demuxer *ts, GF_M2TS_SECTION_ES *ses, GF_List *sections, u8 table_id, u16 ex_table_id, u8 version_number, u8 last_section_number, u32 status)
{
GF_M2TS_Program *prog;
GF_M2TS_SECTION_ES *pmt;
u32 i, nb_progs, evt_type;
u32 nb_sections;
u32 data_size;
unsigned char *data;
GF_M2TS_Section *section;
/*wait for the last section */
if (!(status&GF_M2TS_TABLE_END)) return;
/*skip if already received*/
if (status&GF_M2TS_TABLE_REPEAT) {
if (ts->on_event) ts->on_event(ts, GF_M2TS_EVT_PAT_REPEAT, NULL);
return;
}
nb_sections = gf_list_count(sections);
if (nb_sections > 1) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("PAT on multiple sections not supported\n"));
}
section = (GF_M2TS_Section *)gf_list_get(sections, 0);
data = section->data;
data_size = section->data_size;
if (!(status&GF_M2TS_TABLE_UPDATE) && gf_list_count(ts->programs)) {
if (ts->pat->demux_restarted) {
ts->pat->demux_restarted = 0;
} else {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("Multiple different PAT on single TS found, ignoring new PAT declaration (table id %d - extended table id %d)\n", table_id, ex_table_id));
}
return;
}
nb_progs = data_size / 4;
for (i=0; i<nb_progs; i++) {
u16 number, pid;
number = (data[0]<<8) | data[1];
pid = (data[2]&0x1f)<<8 | data[3];
data += 4;
if (number==0) {
if (!ts->nit) {
ts->nit = gf_m2ts_section_filter_new(gf_m2ts_process_nit, 0);
}
} else {
GF_SAFEALLOC(prog, GF_M2TS_Program);
if (!prog) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("Fail to allocate program for pid %d\n", pid));
return;
}
prog->streams = gf_list_new();
prog->pmt_pid = pid;
prog->number = number;
prog->ts = ts;
gf_list_add(ts->programs, prog);
GF_SAFEALLOC(pmt, GF_M2TS_SECTION_ES);
if (!pmt) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("Fail to allocate pmt filter for pid %d\n", pid));
return;
}
pmt->flags = GF_M2TS_ES_IS_SECTION;
gf_list_add(prog->streams, pmt);
pmt->pid = prog->pmt_pid;
pmt->program = prog;
ts->ess[pmt->pid] = (GF_M2TS_ES *)pmt;
pmt->sec = gf_m2ts_section_filter_new(gf_m2ts_process_pmt, 0);
}
}
evt_type = (status&GF_M2TS_TABLE_UPDATE) ? GF_M2TS_EVT_PAT_UPDATE : GF_M2TS_EVT_PAT_FOUND;
if (ts->on_event) ts->on_event(ts, evt_type, NULL);
}
static void gf_m2ts_process_cat(GF_M2TS_Demuxer *ts, GF_M2TS_SECTION_ES *ses, GF_List *sections, u8 table_id, u16 ex_table_id, u8 version_number, u8 last_section_number, u32 status)
{
u32 evt_type;
/*
GF_M2TS_Program *prog;
GF_M2TS_SECTION_ES *pmt;
u32 i, nb_progs;
u32 nb_sections;
u32 data_size;
unsigned char *data;
GF_M2TS_Section *section;
*/
/*wait for the last section */
if (!(status&GF_M2TS_TABLE_END)) return;
/*skip if already received*/
if (status&GF_M2TS_TABLE_REPEAT) {
if (ts->on_event) ts->on_event(ts, GF_M2TS_EVT_CAT_REPEAT, NULL);
return;
}
/*
nb_sections = gf_list_count(sections);
if (nb_sections > 1) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("CAT on multiple sections not supported\n"));
}
section = (GF_M2TS_Section *)gf_list_get(sections, 0);
data = section->data;
data_size = section->data_size;
nb_progs = data_size / 4;
for (i=0; i<nb_progs; i++) {
u16 number, pid;
number = (data[0]<<8) | data[1];
pid = (data[2]&0x1f)<<8 | data[3];
data += 4;
if (number==0) {
if (!ts->nit) {
ts->nit = gf_m2ts_section_filter_new(gf_m2ts_process_nit, 0);
}
} else {
GF_SAFEALLOC(prog, GF_M2TS_Program);
prog->streams = gf_list_new();
prog->pmt_pid = pid;
prog->number = number;
gf_list_add(ts->programs, prog);
GF_SAFEALLOC(pmt, GF_M2TS_SECTION_ES);
pmt->flags = GF_M2TS_ES_IS_SECTION;
gf_list_add(prog->streams, pmt);
pmt->pid = prog->pmt_pid;
pmt->program = prog;
ts->ess[pmt->pid] = (GF_M2TS_ES *)pmt;
pmt->sec = gf_m2ts_section_filter_new(gf_m2ts_process_pmt, 0);
}
}
*/
evt_type = (status&GF_M2TS_TABLE_UPDATE) ? GF_M2TS_EVT_CAT_UPDATE : GF_M2TS_EVT_CAT_FOUND;
if (ts->on_event) ts->on_event(ts, evt_type, NULL);
}
u64 gf_m2ts_get_pts(unsigned char *data)
{
u64 pts;
u32 val;
pts = (u64)((data[0] >> 1) & 0x07) << 30;
val = (data[1] << 8) | data[2];
pts |= (u64)(val >> 1) << 15;
val = (data[3] << 8) | data[4];
pts |= (u64)(val >> 1);
return pts;
}
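/*
 * The 33-bit PTS is spread over 5 bytes around marker bits; for example the
 * bytes 0x21 0x00 0x05 0xBF 0x21 decode as (0 << 30) | (2 << 15) | 24464 =
 * 90000, i.e. one second on the 90 kHz system clock.
 */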
void gf_m2ts_pes_header(GF_M2TS_PES *pes, unsigned char *data, u32 data_size, GF_M2TS_PESHeader *pesh)
{
u32 has_pts, has_dts;
u32 len_check;
memset(pesh, 0, sizeof(GF_M2TS_PESHeader));
len_check = 0;
pesh->id = data[0];
pesh->pck_len = (data[1]<<8) | data[2];
/*
2bits
scrambling_control = gf_bs_read_int(bs,2);
priority = gf_bs_read_int(bs,1);
*/
pesh->data_alignment = (data[3] & 0x4) ? 1 : 0;
/*
copyright = gf_bs_read_int(bs,1);
original = gf_bs_read_int(bs,1);
*/
has_pts = (data[4]&0x80);
has_dts = has_pts ? (data[4]&0x40) : 0;
/*
ESCR_flag = gf_bs_read_int(bs,1);
ES_rate_flag = gf_bs_read_int(bs,1);
DSM_flag = gf_bs_read_int(bs,1);
additional_copy_flag = gf_bs_read_int(bs,1);
prev_crc_flag = gf_bs_read_int(bs,1);
extension_flag = gf_bs_read_int(bs,1);
*/
pesh->hdr_data_len = data[5];
data += 6;
if (has_pts) {
pesh->PTS = gf_m2ts_get_pts(data);
data+=5;
len_check += 5;
}
if (has_dts) {
pesh->DTS = gf_m2ts_get_pts(data);
//data+=5;
len_check += 5;
} else {
pesh->DTS = pesh->PTS;
}
if (len_check < pesh->hdr_data_len) {
GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d Skipping %d bytes in pes header\n", pes->pid, pesh->hdr_data_len - len_check));
} else if (len_check > pesh->hdr_data_len) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d Wrong pes_header_data_length field %d bytes - read %d\n", pes->pid, pesh->hdr_data_len, len_check));
}
if ((pesh->PTS<90000) && ((s32)pesh->DTS<0)) {
		GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d Wrong DTS "LLU" negative for PTS "LLU" - forcing to 0\n", pes->pid, pesh->DTS, pesh->PTS));
pesh->DTS=0;
}
}
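/*
 * Illustrative parse (PTS only): fed the 11 bytes 0xE0 0x00 0x00 0x84 0x80
 * 0x05 0x21 0x00 0x05 0xBF 0x21, the function reports id 0xE0, pck_len 0,
 * data_alignment 1, hdr_data_len 5 and PTS = DTS = 90000.
 */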
static void gf_m2ts_store_temi(GF_M2TS_Demuxer *ts, GF_M2TS_PES *pes)
{
GF_BitStream *bs = gf_bs_new(pes->temi_tc_desc, pes->temi_tc_desc_len, GF_BITSTREAM_READ);
u32 has_timestamp = gf_bs_read_int(bs, 2);
Bool has_ntp = (Bool) gf_bs_read_int(bs, 1);
/*u32 has_ptp = */gf_bs_read_int(bs, 1);
/*u32 has_timecode = */gf_bs_read_int(bs, 2);
memset(&pes->temi_tc, 0, sizeof(GF_M2TS_TemiTimecodeDescriptor));
pes->temi_tc.force_reload = gf_bs_read_int(bs, 1);
pes->temi_tc.is_paused = gf_bs_read_int(bs, 1);
pes->temi_tc.is_discontinuity = gf_bs_read_int(bs, 1);
gf_bs_read_int(bs, 7);
pes->temi_tc.timeline_id = gf_bs_read_int(bs, 8);
if (has_timestamp) {
pes->temi_tc.media_timescale = gf_bs_read_u32(bs);
if (has_timestamp==2)
pes->temi_tc.media_timestamp = gf_bs_read_u64(bs);
else
pes->temi_tc.media_timestamp = gf_bs_read_u32(bs);
}
if (has_ntp) {
pes->temi_tc.ntp = gf_bs_read_u64(bs);
}
gf_bs_del(bs);
pes->temi_tc_desc_len = 0;
pes->temi_pending = 1;
}
void gf_m2ts_flush_pes(GF_M2TS_Demuxer *ts, GF_M2TS_PES *pes)
{
GF_M2TS_PESHeader pesh;
if (!ts) return;
/*we need at least a full, valid start code and PES header !!*/
if ((pes->pck_data_len >= 4) && !pes->pck_data[0] && !pes->pck_data[1] && (pes->pck_data[2] == 0x1)) {
u32 len;
Bool has_pes_header = GF_TRUE;
u32 stream_id = pes->pck_data[3];
Bool same_pts = GF_FALSE;
switch (stream_id) {
case GF_M2_STREAMID_PROGRAM_STREAM_MAP:
case GF_M2_STREAMID_PADDING:
case GF_M2_STREAMID_PRIVATE_2:
case GF_M2_STREAMID_ECM:
case GF_M2_STREAMID_EMM:
case GF_M2_STREAMID_PROGRAM_STREAM_DIRECTORY:
case GF_M2_STREAMID_DSMCC:
case GF_M2_STREAMID_H222_TYPE_E:
has_pes_header = GF_FALSE;
break;
}
if (has_pes_header) {
/*OK read header*/
gf_m2ts_pes_header(pes, pes->pck_data + 3, pes->pck_data_len - 3, &pesh);
/*send PES timing*/
if (ts->notify_pes_timing) {
GF_M2TS_PES_PCK pck;
memset(&pck, 0, sizeof(GF_M2TS_PES_PCK));
pck.PTS = pesh.PTS;
pck.DTS = pesh.DTS;
pck.stream = pes;
if (pes->rap) pck.flags |= GF_M2TS_PES_PCK_RAP;
pes->pes_end_packet_number = ts->pck_number;
if (ts->on_event) ts->on_event(ts, GF_M2TS_EVT_PES_TIMING, &pck);
}
GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d Got PES header DTS %d PTS %d\n", pes->pid, pesh.DTS, pesh.PTS));
if (pesh.PTS) {
if (pesh.PTS == pes->PTS) {
same_pts = GF_TRUE;
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d - same PTS "LLU" for two consecutive PES packets \n", pes->pid, pes->PTS));
}
#ifndef GPAC_DISABLE_LOG
/*FIXME - this test should only be done for non bi-directionally coded media
else if (pesh.PTS < pes->PTS) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d - PTS "LLU" less than previous packet PTS "LLU"\n", pes->pid, pesh.PTS, pes->PTS) );
}
*/
#endif
pes->PTS = pesh.PTS;
#ifndef GPAC_DISABLE_LOG
{
if (pes->DTS && (pesh.DTS == pes->DTS)) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d - same DTS "LLU" for two consecutive PES packets \n", pes->pid, pes->DTS));
}
if (pesh.DTS < pes->DTS) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d - DTS "LLU" less than previous DTS "LLU"\n", pes->pid, pesh.DTS, pes->DTS));
}
}
#endif
pes->DTS = pesh.DTS;
}
/*no PTSs were coded, same time*/
else if (!pesh.hdr_data_len) {
same_pts = GF_TRUE;
}
/*3-byte start-code + 6 bytes header + hdr extensions*/
len = 9 + pesh.hdr_data_len;
} else {
/*3-byte start-code + 1 byte streamid*/
len = 4;
memset(&pesh, 0, sizeof(pesh));
}
if ((u8) pes->pck_data[3]==0xfa) {
GF_M2TS_SL_PCK sl_pck;
GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] SL Packet in PES for %d - ES ID %d\n", pes->pid, pes->mpeg4_es_id));
if (pes->pck_data_len > len) {
sl_pck.data = (char *)pes->pck_data + len;
sl_pck.data_len = pes->pck_data_len - len;
sl_pck.stream = (GF_M2TS_ES *)pes;
if (ts->on_event) ts->on_event(ts, GF_M2TS_EVT_SL_PCK, &sl_pck);
} else {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] Bad SL Packet size: (%d indicated < %d header)\n", pes->pid, pes->pck_data_len, len));
}
} else if (pes->reframe) {
u32 remain = 0;
u32 offset = len;
if (pesh.pck_len && (pesh.pck_len-3-pesh.hdr_data_len != pes->pck_data_len-len)) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d PES payload size %d but received %d bytes\n", pes->pid, (u32) ( pesh.pck_len-3-pesh.hdr_data_len), pes->pck_data_len-len));
}
//copy over the remainder of the previous PES payload before the start of this PES payload
if (pes->prev_data_len) {
if (pes->prev_data_len < len) {
offset = len - pes->prev_data_len;
memcpy(pes->pck_data + offset, pes->prev_data, pes->prev_data_len);
} else {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d PES reassembly buffer overflow (%d bytes not processed from previous PES) - discarding prev data\n", pes->pid, pes->prev_data_len ));
}
}
if (!pes->temi_pending && pes->temi_tc_desc_len) {
gf_m2ts_store_temi(ts, pes);
}
if (pes->temi_pending) {
pes->temi_pending = 0;
pes->temi_tc.pes_pts = pes->PTS;
if (ts->on_event)
ts->on_event(ts, GF_M2TS_EVT_TEMI_TIMECODE, &pes->temi_tc);
}
if (! ts->seek_mode)
remain = pes->reframe(ts, pes, same_pts, pes->pck_data+offset, pes->pck_data_len-offset, &pesh);
//CLEANUP alloc stuff
if (pes->prev_data) gf_free(pes->prev_data);
pes->prev_data = NULL;
pes->prev_data_len = 0;
if (remain) {
pes->prev_data = gf_malloc(sizeof(char)*remain);
assert(pes->pck_data_len >= remain);
memcpy(pes->prev_data, pes->pck_data + pes->pck_data_len - remain, remain);
pes->prev_data_len = remain;
}
}
} else if (pes->pck_data_len) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] PES %d: Bad PES Header, discarding packet (maybe stream is encrypted ?)\n", pes->pid));
}
pes->pck_data_len = 0;
pes->pes_len = 0;
pes->rap = 0;
}
static void gf_m2ts_process_pes(GF_M2TS_Demuxer *ts, GF_M2TS_PES *pes, GF_M2TS_Header *hdr, unsigned char *data, u32 data_size, GF_M2TS_AdaptationField *paf)
{
u8 expect_cc;
Bool disc=0;
Bool flush_pes = 0;
/*duplicated packet, NOT A DISCONTINUITY, we should discard the packet - however we may encounter this configuration in DASH at segment boundaries.
If payload start is set, ignore duplication*/
if (hdr->continuity_counter==pes->cc) {
if (!hdr->payload_start || (hdr->adaptation_field!=3) ) {
GF_LOG(GF_LOG_INFO, GF_LOG_CONTAINER, ("[MPEG-2 TS] PES %d: Duplicated Packet found (CC %d) - skipping\n", pes->pid, pes->cc));
return;
}
} else {
expect_cc = (pes->cc<0) ? hdr->continuity_counter : (pes->cc + 1) & 0xf;
if (expect_cc != hdr->continuity_counter)
disc = 1;
}
pes->cc = hdr->continuity_counter;
if (disc) {
if (pes->flags & GF_M2TS_ES_IGNORE_NEXT_DISCONTINUITY) {
pes->flags &= ~GF_M2TS_ES_IGNORE_NEXT_DISCONTINUITY;
disc = 0;
}
if (disc) {
if (hdr->payload_start) {
if (pes->pck_data_len) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] PES %d: Packet discontinuity (%d expected - got %d) - may have lost end of previous PES\n", pes->pid, expect_cc, hdr->continuity_counter));
}
} else {
if (pes->pck_data_len) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] PES %d: Packet discontinuity (%d expected - got %d) - trashing PES packet\n", pes->pid, expect_cc, hdr->continuity_counter));
}
pes->pck_data_len = 0;
pes->pes_len = 0;
pes->cc = -1;
return;
}
}
}
if (!pes->reframe) return;
if (hdr->payload_start) {
flush_pes = 1;
pes->pes_start_packet_number = ts->pck_number;
pes->before_last_pcr_value = pes->program->before_last_pcr_value;
pes->before_last_pcr_value_pck_number = pes->program->before_last_pcr_value_pck_number;
pes->last_pcr_value = pes->program->last_pcr_value;
pes->last_pcr_value_pck_number = pes->program->last_pcr_value_pck_number;
} else if (pes->pes_len && (pes->pck_data_len + data_size == pes->pes_len + 6)) {
/* 6 = startcode+stream_id+length*/
/*reassemble pes*/
if (pes->pck_data_len + data_size > pes->pck_alloc_len) {
pes->pck_alloc_len = pes->pck_data_len + data_size;
pes->pck_data = (u8*)gf_realloc(pes->pck_data, pes->pck_alloc_len);
}
memcpy(pes->pck_data+pes->pck_data_len, data, data_size);
pes->pck_data_len += data_size;
/*force discard*/
data_size = 0;
flush_pes = 1;
}
/*PES first fragment: flush previous packet*/
if (flush_pes && pes->pck_data_len) {
gf_m2ts_flush_pes(ts, pes);
if (!data_size) return;
}
/*we need to wait for first packet of PES*/
if (!pes->pck_data_len && !hdr->payload_start) {
GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d: Waiting for PES header, trashing data\n", hdr->pid));
return;
}
/*reassemble*/
if (pes->pck_data_len + data_size > pes->pck_alloc_len ) {
pes->pck_alloc_len = pes->pck_data_len + data_size;
pes->pck_data = (u8*)gf_realloc(pes->pck_data, pes->pck_alloc_len);
}
memcpy(pes->pck_data + pes->pck_data_len, data, data_size);
pes->pck_data_len += data_size;
if (paf && paf->random_access_indicator) pes->rap = 1;
if (hdr->payload_start && !pes->pes_len && (pes->pck_data_len>=6)) {
pes->pes_len = (pes->pck_data[4]<<8) | pes->pck_data[5];
GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d: Got PES packet len %d\n", pes->pid, pes->pes_len));
if (pes->pes_len + 6 == pes->pck_data_len) {
gf_m2ts_flush_pes(ts, pes);
}
}
}
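//illustrative sketch, not part of the demuxer: the continuity counter checked
//above is a 4-bit field, so the expected next value wraps from 15 back to 0
#if 0
static u8 example_next_cc(u8 cc)
{
return (cc + 1) & 0xf; /*e.g. 15 -> 0*/
}
#endif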
static void gf_m2ts_get_adaptation_field(GF_M2TS_Demuxer *ts, GF_M2TS_AdaptationField *paf, unsigned char *data, u32 size, u32 pid)
{
unsigned char *af_extension;
paf->discontinuity_indicator = (data[0] & 0x80) ? 1 : 0;
paf->random_access_indicator = (data[0] & 0x40) ? 1 : 0;
paf->priority_indicator = (data[0] & 0x20) ? 1 : 0;
paf->PCR_flag = (data[0] & 0x10) ? 1 : 0;
paf->OPCR_flag = (data[0] & 0x8) ? 1 : 0;
paf->splicing_point_flag = (data[0] & 0x4) ? 1 : 0;
paf->transport_private_data_flag = (data[0] & 0x2) ? 1 : 0;
paf->adaptation_field_extension_flag = (data[0] & 0x1) ? 1 : 0;
af_extension = data + 1;
if (paf->PCR_flag == 1) {
u32 base = ((u32)data[1] << 24) | ((u32)data[2] << 16) | ((u32)data[3] << 8) | (u32)data[4];
u64 PCR = (u64) base;
paf->PCR_base = (PCR << 1) | (data[5] >> 7);
paf->PCR_ext = ((data[5] & 1) << 8) | data[6];
af_extension += 6;
}
if (paf->adaptation_field_extension_flag) {
u32 afext_bytes;
Bool ltw_flag, pwr_flag, seamless_flag, af_desc_not_present;
if (paf->OPCR_flag) {
af_extension += 6;
}
if (paf->splicing_point_flag) {
af_extension += 1;
}
if (paf->transport_private_data_flag) {
u32 priv_bytes = af_extension[0];
af_extension += 1 + priv_bytes;
}
afext_bytes = af_extension[0];
ltw_flag = af_extension[1] & 0x80 ? 1 : 0;
pwr_flag = af_extension[1] & 0x40 ? 1 : 0;
seamless_flag = af_extension[1] & 0x20 ? 1 : 0;
af_desc_not_present = af_extension[1] & 0x10 ? 1 : 0;
af_extension += 2;
if (!afext_bytes) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d: Bad Adaptation Extension found\n", pid));
return;
}
afext_bytes-=1;
if (ltw_flag) {
af_extension += 2;
if (afext_bytes<2) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d: Bad Adaptation Extension found\n", pid));
return;
}
afext_bytes-=2;
}
if (pwr_flag) {
af_extension += 3;
if (afext_bytes<3) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d: Bad Adaptation Extension found\n", pid));
return;
}
afext_bytes-=3;
}
if (seamless_flag) {
af_extension += 3;
if (afext_bytes<3) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d: Bad Adaptation Extension found\n", pid));
return;
}
afext_bytes-=3;
}
if (! af_desc_not_present) {
while (afext_bytes) {
GF_BitStream *bs;
char *desc;
u8 desc_tag = af_extension[0];
u8 desc_len = af_extension[1];
if (!desc_len || (u32) desc_len+2 > afext_bytes) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d: Bad Adaptation Descriptor found (tag %d) size is %d but only %d bytes available\n", pid, desc_tag, desc_len, afext_bytes));
break;
}
desc = (char *) af_extension+2;
bs = gf_bs_new(desc, desc_len, GF_BITSTREAM_READ);
switch (desc_tag) {
case GF_M2TS_AFDESC_LOCATION_DESCRIPTOR:
{
Bool use_base_temi_url;
char URL[255];
GF_M2TS_TemiLocationDescriptor temi_loc;
memset(&temi_loc, 0, sizeof(GF_M2TS_TemiLocationDescriptor) );
temi_loc.reload_external = gf_bs_read_int(bs, 1);
temi_loc.is_announce = gf_bs_read_int(bs, 1);
temi_loc.is_splicing = gf_bs_read_int(bs, 1);
use_base_temi_url = gf_bs_read_int(bs, 1);
gf_bs_read_int(bs, 5); //reserved
temi_loc.timeline_id = gf_bs_read_int(bs, 7);
if (!use_base_temi_url) {
char *_url = URL;
u8 scheme = gf_bs_read_int(bs, 8);
u8 url_len = gf_bs_read_int(bs, 8);
switch (scheme) {
case 1:
strcpy(URL, "http://");
_url = URL+7;
break;
case 2:
strcpy(URL, "https://");
_url = URL+8;
break;
}
gf_bs_read_data(bs, _url, url_len);
_url[url_len] = 0;
}
temi_loc.external_URL = URL;
GF_LOG(GF_LOG_INFO, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d AF Location descriptor found - URL %s\n", pid, URL));
if (ts->on_event) ts->on_event(ts, GF_M2TS_EVT_TEMI_LOCATION, &temi_loc);
}
break;
case GF_M2TS_AFDESC_TIMELINE_DESCRIPTOR:
if (ts->ess[pid] && (ts->ess[pid]->flags & GF_M2TS_ES_IS_PES)) {
GF_M2TS_PES *pes = (GF_M2TS_PES *) ts->ess[pid];
if (pes->temi_tc_desc_len)
gf_m2ts_store_temi(ts, pes);
if (pes->temi_tc_desc_alloc_size < desc_len) {
pes->temi_tc_desc = gf_realloc(pes->temi_tc_desc, desc_len);
pes->temi_tc_desc_alloc_size = desc_len;
}
memcpy(pes->temi_tc_desc, desc, desc_len);
pes->temi_tc_desc_len = desc_len;
GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d AF Timeline descriptor found\n", pid));
}
break;
}
gf_bs_del(bs);
af_extension += 2+desc_len;
afext_bytes -= 2+desc_len;
}
}
}
GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d: Adaptation Field found: Discontinuity %d - RAP %d - PCR: "LLD"\n", pid, paf->discontinuity_indicator, paf->random_access_indicator, paf->PCR_flag ? paf->PCR_base * 300 + paf->PCR_ext : 0));
}
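//illustrative sketch, not part of the demuxer: a minimal adaptation field
//carrying only a PCR. PCR_base is 33 bits (32 bits in data[1..4] plus the top
//bit of data[5]) and PCR_ext is 9 bits spread over data[5] and data[6]. All
//bytes and the PID are assumed example values
#if 0
static void example_parse_af(GF_M2TS_Demuxer *ts)
{
GF_M2TS_AdaptationField af;
unsigned char buf[7] = {
0x10, /*PCR_flag only*/
0x00, 0x00, 0x00, 0x01, /*high 32 bits of PCR_base*/
0x7E, /*PCR_base LSB=0, reserved bits, PCR_ext MSB=0*/
0x2A /*low 8 bits of PCR_ext*/
};
memset(&af, 0, sizeof(af));
gf_m2ts_get_adaptation_field(ts, &af, buf, 7, 100);
assert(af.PCR_base == 2);
assert(af.PCR_ext == 0x2A);
}
#endif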
static GF_Err gf_m2ts_process_packet(GF_M2TS_Demuxer *ts, unsigned char *data)
{
GF_M2TS_ES *es;
GF_M2TS_Header hdr;
GF_M2TS_AdaptationField af, *paf;
u32 payload_size, af_size;
u32 pos = 0;
ts->pck_number++;
/* read TS packet header*/
hdr.sync = data[0];
if (hdr.sync != 0x47) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] TS Packet %d does not start with sync marker\n", ts->pck_number));
return GF_CORRUPTED_DATA;
}
hdr.error = (data[1] & 0x80) ? 1 : 0;
hdr.payload_start = (data[1] & 0x40) ? 1 : 0;
hdr.priority = (data[1] & 0x20) ? 1 : 0;
hdr.pid = ( (data[1]&0x1f) << 8) | data[2];
hdr.scrambling_ctrl = (data[3] >> 6) & 0x3;
hdr.adaptation_field = (data[3] >> 4) & 0x3;
hdr.continuity_counter = data[3] & 0xf;
if (hdr.error) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] TS Packet %d has error (PID could be %d)\n", ts->pck_number, hdr.pid));
return GF_CORRUPTED_DATA;
}
//#if DEBUG_TS_PACKET
GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] TS Packet %d PID %d CC %d Encrypted %d\n", ts->pck_number, hdr.pid, hdr.continuity_counter, hdr.scrambling_ctrl));
//#endif
if (hdr.scrambling_ctrl) {
//TODO add deciphering
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] TS Packet %d (PID %d) is scrambled - not supported\n", ts->pck_number, hdr.pid));
return GF_NOT_SUPPORTED;
}
paf = NULL;
payload_size = 184;
pos = 4;
switch (hdr.adaptation_field) {
/*adaptation+data*/
case 3:
af_size = data[4];
if (af_size>183) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] TS Packet %d AF field larger than 183 !\n", ts->pck_number));
//error
return GF_CORRUPTED_DATA;
}
paf = &af;
memset(paf, 0, sizeof(GF_M2TS_AdaptationField));
//in debug builds, this assert stops on invalid (yet existing in the wild) MPEG-2 TS streams
assert(af_size<=183);
if (af_size) gf_m2ts_get_adaptation_field(ts, paf, data+5, af_size, hdr.pid);
pos += 1+af_size;
payload_size = 183 - af_size;
break;
/*adaptation only - still process in case of PCR*/
case 2:
af_size = data[4];
if (af_size != 183) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] TS Packet %d AF size is %d when it must be 183 for AF type 2\n", ts->pck_number, af_size));
return GF_CORRUPTED_DATA;
}
paf = &af;
memset(paf, 0, sizeof(GF_M2TS_AdaptationField));
gf_m2ts_get_adaptation_field(ts, paf, data+5, af_size, hdr.pid);
payload_size = 0;
/*no payload and no PCR, return*/
if (!paf->PCR_flag)
return GF_OK;
break;
/*reserved*/
case 0:
return GF_OK;
default:
break;
}
data += pos;
/*PAT*/
if (hdr.pid == GF_M2TS_PID_PAT) {
gf_m2ts_gather_section(ts, ts->pat, NULL, &hdr, data, payload_size);
return GF_OK;
} else if (hdr.pid == GF_M2TS_PID_CAT) {
gf_m2ts_gather_section(ts, ts->cat, NULL, &hdr, data, payload_size);
return GF_OK;
}
es = ts->ess[hdr.pid];
if (paf && paf->PCR_flag) {
if (!es) {
u32 i, j;
for(i=0; i<gf_list_count(ts->programs); i++) {
GF_M2TS_PES *first_pes = NULL;
GF_M2TS_Program *program = (GF_M2TS_Program *)gf_list_get(ts->programs,i);
if(program->pcr_pid != hdr.pid) continue;
for (j=0; j<gf_list_count(program->streams); j++) {
GF_M2TS_PES *pes = (GF_M2TS_PES *) gf_list_get(program->streams, j);
if (pes->flags & GF_M2TS_INHERIT_PCR) {
ts->ess[hdr.pid] = (GF_M2TS_ES *) pes;
pes->flags |= GF_M2TS_FAKE_PCR;
break;
}
if (pes->flags & GF_M2TS_ES_IS_PES) {
first_pes = pes;
}
}
//none found: use the first media stream as a PCR destination - Q: is it legal to have PCR-only streams not declared in the PMT?
if (!es && first_pes) {
es = (GF_M2TS_ES *) first_pes;
first_pes->flags |= GF_M2TS_FAKE_PCR;
}
break;
}
if (!es)
es = ts->ess[hdr.pid];
}
if (es) {
GF_M2TS_PES_PCK pck;
s64 prev_diff_in_us;
Bool discontinuity;
s32 cc = -1;
if (es->flags & GF_M2TS_FAKE_PCR) {
cc = es->program->pcr_cc;
es->program->pcr_cc = hdr.continuity_counter;
}
else if (es->flags & GF_M2TS_ES_IS_PES) cc = ((GF_M2TS_PES*)es)->cc;
else if (((GF_M2TS_SECTION_ES*)es)->sec) cc = ((GF_M2TS_SECTION_ES*)es)->sec->cc;
discontinuity = paf->discontinuity_indicator;
if ((cc>=0) && es->program->before_last_pcr_value) {
//no increment of CC if AF only packet
if (hdr.adaptation_field == 2) {
if (hdr.continuity_counter != cc) {
discontinuity = GF_TRUE;
}
} else if (hdr.continuity_counter != ((cc + 1) & 0xF)) {
discontinuity = GF_TRUE;
}
}
memset(&pck, 0, sizeof(GF_M2TS_PES_PCK));
prev_diff_in_us = (s64) (es->program->last_pcr_value/27 - es->program->before_last_pcr_value/27);
es->program->before_last_pcr_value = es->program->last_pcr_value;
es->program->before_last_pcr_value_pck_number = es->program->last_pcr_value_pck_number;
es->program->last_pcr_value_pck_number = ts->pck_number;
es->program->last_pcr_value = paf->PCR_base * 300 + paf->PCR_ext;
if (!es->program->last_pcr_value) es->program->last_pcr_value = 1;
GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d PCR found "LLU" ("LLU" at 90kHz) - PCR diff is %d us\n", hdr.pid, es->program->last_pcr_value, es->program->last_pcr_value/300, (s32) (es->program->last_pcr_value - es->program->before_last_pcr_value)/27 ));
pck.PTS = es->program->last_pcr_value;
pck.stream = (GF_M2TS_PES *)es;
//try to ignore all discontinuities that are less than 200 ms (seen in some HLS setups ...)
if (discontinuity) {
s64 diff_in_us = (s64) (es->program->last_pcr_value - es->program->before_last_pcr_value) / 27;
u64 diff = ABS(diff_in_us - prev_diff_in_us);
if ((diff_in_us<0) && (diff_in_us >= -200000)) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d new PCR, with discontinuity signaled, is less than previously received PCR (diff %d us) but not too large, trying to ignore discontinuity\n", hdr.pid, diff_in_us));
}
//ignore the PCR discontinuity indicator if the new PCR is larger than the previously received one and the PCR delta deviates from the previous delta by less than 200 ms
else if ((diff_in_us > 0) && (diff < 200000)) {
GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d PCR discontinuity signaled but diff is small (diff %d us - PCR diff %d vs prev PCR diff %d) - ignore it\n", hdr.pid, diff, diff_in_us, prev_diff_in_us));
} else if (paf->discontinuity_indicator) {
GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d PCR discontinuity signaled (diff %d us - PCR diff %d vs prev PCR diff %d)\n", hdr.pid, diff, diff_in_us, prev_diff_in_us));
pck.flags = GF_M2TS_PES_PCK_DISCONTINUITY;
} else {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d PCR discontinuity not signaled (diff %d us - PCR diff %d vs prev PCR diff %d)\n", hdr.pid, diff, diff_in_us, prev_diff_in_us));
pck.flags = GF_M2TS_PES_PCK_DISCONTINUITY;
}
}
else if ( (es->program->last_pcr_value < es->program->before_last_pcr_value) ) {
s64 diff_in_us = (s64) (es->program->last_pcr_value - es->program->before_last_pcr_value) / 27;
//if the previous PCR was within 200 ms of the PCR wrap-around value, this is a PCR loop
if (GF_M2TS_MAX_PCR - es->program->before_last_pcr_value < 5400000 /*2*2700000*/) {
GF_LOG(GF_LOG_INFO, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d PCR loop found from "LLU" to "LLU" \n", hdr.pid, es->program->before_last_pcr_value, es->program->last_pcr_value));
} else if ((diff_in_us<0) && (diff_in_us >= -200000)) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d new PCR, without discontinuity signaled, is less than previously received PCR (diff %d us) but not too large, trying to ignore discontinuity\n", hdr.pid, diff_in_us));
} else {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d PCR found "LLU" is less than previously received PCR "LLU" (PCR diff %g sec) but no discontinuity signaled\n", hdr.pid, es->program->last_pcr_value, es->program->before_last_pcr_value, (GF_M2TS_MAX_PCR - es->program->before_last_pcr_value + es->program->last_pcr_value) / 27000000.0));
pck.flags = GF_M2TS_PES_PCK_DISCONTINUITY;
}
}
if (pck.flags & GF_M2TS_PES_PCK_DISCONTINUITY) {
gf_m2ts_reset_parsers_for_program(ts, es->program);
}
if (ts->on_event) {
ts->on_event(ts, GF_M2TS_EVT_PES_PCR, &pck);
}
}
}
/*check for DVB reserved PIDs*/
if (!es) {
if (hdr.pid == GF_M2TS_PID_SDT_BAT_ST) {
gf_m2ts_gather_section(ts, ts->sdt, NULL, &hdr, data, payload_size);
return GF_OK;
} else if (hdr.pid == GF_M2TS_PID_NIT_ST) {
/*ignore them, unused at application level*/
gf_m2ts_gather_section(ts, ts->nit, NULL, &hdr, data, payload_size);
return GF_OK;
} else if (hdr.pid == GF_M2TS_PID_EIT_ST_CIT) {
/* ignore EIT messages for the moment */
gf_m2ts_gather_section(ts, ts->eit, NULL, &hdr, data, payload_size);
return GF_OK;
} else if (hdr.pid == GF_M2TS_PID_TDT_TOT_ST) {
gf_m2ts_gather_section(ts, ts->tdt_tot, NULL, &hdr, data, payload_size);
} else {
/* ignore packet */
}
} else if (es->flags & GF_M2TS_ES_IS_SECTION) { /* The stream uses sections to carry its payload */
GF_M2TS_SECTION_ES *ses = (GF_M2TS_SECTION_ES *)es;
if (ses->sec) gf_m2ts_gather_section(ts, ses->sec, ses, &hdr, data, payload_size);
} else {
GF_M2TS_PES *pes = (GF_M2TS_PES *)es;
/* regular stream using PES packets */
if (pes->reframe && payload_size) gf_m2ts_process_pes(ts, pes, &hdr, data, payload_size, paf);
}
return GF_OK;
}
GF_EXPORT
GF_Err gf_m2ts_process_data(GF_M2TS_Demuxer *ts, u8 *data, u32 data_size)
{
GF_Err e=GF_OK;
u32 pos, pck_size;
Bool is_align = 1;
if (ts->buffer_size) {
//we are in sync, complete the pending partial packet with the new bytes
if ( (ts->buffer[0]==0x47) && (ts->buffer_size<200)) {
u32 pck_size = ts->prefix_present ? 192 : 188;
if (ts->alloc_size < 200) {
ts->alloc_size = 200;
ts->buffer = (char*)gf_realloc(ts->buffer, sizeof(char)*ts->alloc_size);
}
memcpy(ts->buffer + ts->buffer_size, data, pck_size - ts->buffer_size);
e |= gf_m2ts_process_packet(ts, (unsigned char *)ts->buffer);
data += (pck_size - ts->buffer_size);
data_size = data_size - (pck_size - ts->buffer_size);
}
//not in sync, append the whole new buffer to the pending bytes
else {
if (ts->alloc_size < ts->buffer_size+data_size) {
ts->alloc_size = ts->buffer_size+data_size;
ts->buffer = (char*)gf_realloc(ts->buffer, sizeof(char)*ts->alloc_size);
}
memcpy(ts->buffer + ts->buffer_size, data, sizeof(char)*data_size);
ts->buffer_size += data_size;
is_align = 0;
data = ts->buffer;
data_size = ts->buffer_size;
}
}
/*sync input data*/
pos = gf_m2ts_sync(ts, data, data_size, is_align);
if (pos==data_size) {
if (is_align) {
if (ts->alloc_size<data_size) {
ts->buffer = (char*)gf_realloc(ts->buffer, sizeof(char)*data_size);
ts->alloc_size = data_size;
}
memcpy(ts->buffer, data, sizeof(char)*data_size);
ts->buffer_size = data_size;
}
return GF_OK;
}
pck_size = ts->prefix_present ? 192 : 188;
for (;;) {
/*wait for a complete packet*/
if (data_size < pos + pck_size) {
ts->buffer_size = data_size - pos;
data += pos;
if (!ts->buffer_size) {
return e;
}
assert(ts->buffer_size<pck_size);
if (is_align) {
u32 s = ts->buffer_size;
if (s<200) s = 200;
if (ts->alloc_size < s) {
ts->alloc_size = s;
ts->buffer = (char*)gf_realloc(ts->buffer, sizeof(char)*ts->alloc_size);
}
memcpy(ts->buffer, data, sizeof(char)*ts->buffer_size);
} else {
memmove(ts->buffer, data, sizeof(char)*ts->buffer_size);
}
return e;
}
/*process*/
e |= gf_m2ts_process_packet(ts, (unsigned char *)data + pos);
pos += pck_size;
}
return e;
}
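//illustrative usage sketch, not part of the library: feed a TS file to the
//demuxer in arbitrary chunks - gf_m2ts_process_data() buffers any trailing
//partial packet internally. The file name is an assumed example
#if 0
static void example_demux_file()
{
char buf[188*100];
u32 nb_read;
GF_M2TS_Demuxer *ts = gf_m2ts_demux_new();
FILE *f = gf_fopen("movie.ts", "rb");
if (ts && f) {
while ((nb_read = (u32) fread(buf, 1, sizeof(buf), f)) != 0) {
gf_m2ts_process_data(ts, (u8 *) buf, nb_read);
}
}
if (f) gf_fclose(f);
if (ts) gf_m2ts_demux_del(ts);
}
#endif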
//unused
#if 0
GF_ESD *gf_m2ts_get_esd(GF_M2TS_ES *es)
{
GF_ESD *esd;
u32 k, esd_count;
esd = NULL;
if (es->program->pmt_iod && es->program->pmt_iod->ESDescriptors) {
esd_count = gf_list_count(es->program->pmt_iod->ESDescriptors);
for (k = 0; k < esd_count; k++) {
GF_ESD *esd_tmp = (GF_ESD *)gf_list_get(es->program->pmt_iod->ESDescriptors, k);
if (esd_tmp->ESID != es->mpeg4_es_id) continue;
esd = esd_tmp;
break;
}
}
if (!esd && es->program->additional_ods) {
u32 od_count, od_index;
od_count = gf_list_count(es->program->additional_ods);
for (od_index = 0; od_index < od_count; od_index++) {
GF_ObjectDescriptor *od = (GF_ObjectDescriptor *)gf_list_get(es->program->additional_ods, od_index);
esd_count = gf_list_count(od->ESDescriptors);
for (k = 0; k < esd_count; k++) {
GF_ESD *esd_tmp = (GF_ESD *)gf_list_get(od->ESDescriptors, k);
if (esd_tmp->ESID != es->mpeg4_es_id) continue;
esd = esd_tmp;
break;
}
}
}
return esd;
}
void gf_m2ts_set_segment_switch(GF_M2TS_Demuxer *ts)
{
u32 i;
for (i=0; i<GF_M2TS_MAX_STREAMS; i++) {
GF_M2TS_ES *es = (GF_M2TS_ES *) ts->ess[i];
if (!es) continue;
es->flags |= GF_M2TS_ES_IGNORE_NEXT_DISCONTINUITY;
}
}
#endif
GF_EXPORT
void gf_m2ts_reset_parsers_for_program(GF_M2TS_Demuxer *ts, GF_M2TS_Program *prog)
{
u32 i;
for (i=0; i<GF_M2TS_MAX_STREAMS; i++) {
GF_M2TS_ES *es = (GF_M2TS_ES *) ts->ess[i];
if (!es) continue;
if (prog && (es->program != prog) ) continue;
if (es->flags & GF_M2TS_ES_IS_SECTION) {
GF_M2TS_SECTION_ES *ses = (GF_M2TS_SECTION_ES *)es;
gf_m2ts_section_filter_reset(ses->sec);
} else {
GF_M2TS_PES *pes = (GF_M2TS_PES *)es;
if (!pes || (pes->pid==pes->program->pmt_pid)) continue;
pes->cc = -1;
pes->frame_state = 0;
pes->pck_data_len = 0;
if (pes->prev_data) gf_free(pes->prev_data);
pes->prev_data = NULL;
pes->prev_data_len = 0;
pes->PTS = pes->DTS = 0;
// pes->prev_PTS = 0;
// pes->first_dts = 0;
pes->pes_len = pes->pes_end_packet_number = pes->pes_start_packet_number = 0;
if (pes->buf) gf_free(pes->buf);
pes->buf = NULL;
if (pes->temi_tc_desc) gf_free(pes->temi_tc_desc);
pes->temi_tc_desc = NULL;
pes->temi_tc_desc_len = pes->temi_tc_desc_alloc_size = 0;
pes->before_last_pcr_value = pes->before_last_pcr_value_pck_number = 0;
pes->last_pcr_value = pes->last_pcr_value_pck_number = 0;
if (pes->program->pcr_pid==pes->pid) {
pes->program->last_pcr_value = pes->program->last_pcr_value_pck_number = 0;
pes->program->before_last_pcr_value = pes->program->before_last_pcr_value_pck_number = 0;
}
}
}
}
GF_EXPORT
void gf_m2ts_reset_parsers(GF_M2TS_Demuxer *ts)
{
gf_m2ts_reset_parsers_for_program(ts, NULL);
ts->pck_number = 0;
gf_m2ts_section_filter_reset(ts->cat);
gf_m2ts_section_filter_reset(ts->pat);
gf_m2ts_section_filter_reset(ts->sdt);
gf_m2ts_section_filter_reset(ts->nit);
gf_m2ts_section_filter_reset(ts->eit);
gf_m2ts_section_filter_reset(ts->tdt_tot);
}
#if 0 //unused
u32 gf_m2ts_pes_get_framing_mode(GF_M2TS_PES *pes)
{
if (pes->flags & GF_M2TS_ES_IS_SECTION) {
if (pes->flags & GF_M2TS_ES_IS_SL) {
if ( ((GF_M2TS_SECTION_ES *)pes)->sec->process_section == NULL)
return GF_M2TS_PES_FRAMING_DEFAULT;
}
return GF_M2TS_PES_FRAMING_SKIP_NO_RESET;
}
if (!pes->reframe ) return GF_M2TS_PES_FRAMING_SKIP_NO_RESET;
if (pes->reframe == gf_m2ts_reframe_default) return GF_M2TS_PES_FRAMING_RAW;
if (pes->reframe == gf_m2ts_reframe_reset) return GF_M2TS_PES_FRAMING_SKIP;
return GF_M2TS_PES_FRAMING_DEFAULT;
}
#endif
GF_EXPORT
GF_Err gf_m2ts_set_pes_framing(GF_M2TS_PES *pes, u32 mode)
{
if (!pes) return GF_BAD_PARAM;
GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] Setting pes framing mode of PID %d to %d\n", pes->pid, mode) );
/*ignore request for section PIDs*/
if (pes->flags & GF_M2TS_ES_IS_SECTION) {
if (pes->flags & GF_M2TS_ES_IS_SL) {
if (mode==GF_M2TS_PES_FRAMING_DEFAULT) {
((GF_M2TS_SECTION_ES *)pes)->sec->process_section = gf_m2ts_process_mpeg4section;
} else {
((GF_M2TS_SECTION_ES *)pes)->sec->process_section = NULL;
}
}
return GF_OK;
}
if (pes->pid==pes->program->pmt_pid) return GF_BAD_PARAM;
//if component reuse, disable previous pes
if ((mode > GF_M2TS_PES_FRAMING_SKIP) && (pes->program->ts->ess[pes->pid] != (GF_M2TS_ES *) pes)) {
GF_M2TS_PES *o_pes = (GF_M2TS_PES *) pes->program->ts->ess[pes->pid];
if (o_pes->flags & GF_M2TS_ES_IS_PES)
gf_m2ts_set_pes_framing(o_pes, GF_M2TS_PES_FRAMING_SKIP);
GF_LOG(GF_LOG_INFO, GF_LOG_CONTAINER, ("[MPEG-2 TS] Reassinging PID %d from program %d to program %d\n", pes->pid, o_pes->program->number, pes->program->number) );
pes->program->ts->ess[pes->pid] = (GF_M2TS_ES *) pes;
}
switch (mode) {
case GF_M2TS_PES_FRAMING_RAW:
pes->reframe = gf_m2ts_reframe_default;
break;
case GF_M2TS_PES_FRAMING_SKIP:
pes->reframe = gf_m2ts_reframe_reset;
break;
case GF_M2TS_PES_FRAMING_SKIP_NO_RESET:
pes->reframe = NULL;
break;
case GF_M2TS_PES_FRAMING_DEFAULT:
default:
switch (pes->stream_type) {
case GF_M2TS_VIDEO_MPEG1:
case GF_M2TS_VIDEO_MPEG2:
case GF_M2TS_VIDEO_H264:
case GF_M2TS_VIDEO_SVC:
case GF_M2TS_VIDEO_HEVC:
case GF_M2TS_VIDEO_HEVC_TEMPORAL:
case GF_M2TS_VIDEO_HEVC_MCTS:
case GF_M2TS_VIDEO_SHVC:
case GF_M2TS_VIDEO_SHVC_TEMPORAL:
case GF_M2TS_VIDEO_MHVC:
case GF_M2TS_VIDEO_MHVC_TEMPORAL:
case GF_M2TS_AUDIO_MPEG1:
case GF_M2TS_AUDIO_MPEG2:
case GF_M2TS_AUDIO_AAC:
case GF_M2TS_AUDIO_LATM_AAC:
case GF_M2TS_AUDIO_AC3:
case GF_M2TS_AUDIO_EC3:
//for all our supported codec types, use a reframer filter
pes->reframe = gf_m2ts_reframe_default;
break;
case GF_M2TS_PRIVATE_DATA:
/* TODO: handle DVB subtitle streams */
break;
case GF_M2TS_METADATA_ID3_HLS:
//TODO
pes->reframe = gf_m2ts_reframe_id3_pes;
break;
default:
pes->reframe = gf_m2ts_reframe_default;
break;
}
break;
}
return GF_OK;
}
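//illustrative sketch, not part of the library: once the PMT has been parsed,
//default reframing can be enabled on every PES stream of the demuxer
#if 0
static void example_enable_framing(GF_M2TS_Demuxer *ts)
{
u32 i;
for (i=0; i<GF_M2TS_MAX_STREAMS; i++) {
GF_M2TS_ES *es = ts->ess[i];
if (es && (es->flags & GF_M2TS_ES_IS_PES))
gf_m2ts_set_pes_framing((GF_M2TS_PES *)es, GF_M2TS_PES_FRAMING_DEFAULT);
}
}
#endif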
GF_EXPORT
GF_M2TS_Demuxer *gf_m2ts_demux_new()
{
GF_M2TS_Demuxer *ts;
GF_SAFEALLOC(ts, GF_M2TS_Demuxer);
if (!ts) return NULL;
ts->programs = gf_list_new();
ts->SDTs = gf_list_new();
ts->pat = gf_m2ts_section_filter_new(gf_m2ts_process_pat, 0);
ts->cat = gf_m2ts_section_filter_new(gf_m2ts_process_cat, 0);
ts->sdt = gf_m2ts_section_filter_new(gf_m2ts_process_sdt, 1);
ts->nit = gf_m2ts_section_filter_new(gf_m2ts_process_nit, 0);
ts->eit = gf_m2ts_section_filter_new(NULL/*gf_m2ts_process_eit*/, 1);
ts->tdt_tot = gf_m2ts_section_filter_new(gf_m2ts_process_tdt_tot, 1);
#ifdef GPAC_ENABLE_MPE
gf_dvb_mpe_init(ts);
#endif
ts->nb_prog_pmt_received = 0;
ts->ChannelAppList = gf_list_new();
return ts;
}
GF_EXPORT
void gf_m2ts_demux_dmscc_init(GF_M2TS_Demuxer *ts) {
char temp_dir[GF_MAX_PATH];
u32 length;
GF_Err e;
ts->dsmcc_controler = gf_list_new();
ts->process_dmscc = 1;
strcpy(temp_dir, gf_get_default_cache_directory() );
length = (u32) strlen(temp_dir);
if(temp_dir[length-1] == GF_PATH_SEPARATOR) {
temp_dir[length-1] = 0;
}
ts->dsmcc_root_dir = (char*)gf_calloc(strlen(temp_dir)+strlen("CarouselData")+2,sizeof(char));
sprintf(ts->dsmcc_root_dir,"%s%cCarouselData",temp_dir,GF_PATH_SEPARATOR);
e = gf_mkdir(ts->dsmcc_root_dir);
if(e) {
GF_LOG(GF_LOG_INFO, GF_LOG_CONTAINER, ("[Process DSMCC] Error during the creation of the directory %s \n",ts->dsmcc_root_dir));
}
}
GF_EXPORT
void gf_m2ts_demux_del(GF_M2TS_Demuxer *ts)
{
u32 i;
if (ts->pat) gf_m2ts_section_filter_del(ts->pat);
if (ts->cat) gf_m2ts_section_filter_del(ts->cat);
if (ts->sdt) gf_m2ts_section_filter_del(ts->sdt);
if (ts->nit) gf_m2ts_section_filter_del(ts->nit);
if (ts->eit) gf_m2ts_section_filter_del(ts->eit);
if (ts->tdt_tot) gf_m2ts_section_filter_del(ts->tdt_tot);
for (i=0; i<GF_M2TS_MAX_STREAMS; i++) {
//because of pure PCR streams, an ES might be assigned to 2 PIDs, one for the ES and one for the PCR
if (ts->ess[i] && (ts->ess[i]->pid==i)) gf_m2ts_es_del(ts->ess[i], ts);
}
if (ts->buffer) gf_free(ts->buffer);
while (gf_list_count(ts->programs)) {
GF_M2TS_Program *p = (GF_M2TS_Program *)gf_list_last(ts->programs);
gf_list_rem_last(ts->programs);
gf_list_del(p->streams);
/*reset OD list*/
if (p->additional_ods) {
gf_odf_desc_list_del(p->additional_ods);
gf_list_del(p->additional_ods);
}
if (p->pmt_iod) gf_odf_desc_del((GF_Descriptor *)p->pmt_iod);
if (p->metadata_pointer_descriptor) gf_m2ts_metadata_pointer_descriptor_del(p->metadata_pointer_descriptor);
gf_free(p);
}
gf_list_del(ts->programs);
if (ts->TDT_time) gf_free(ts->TDT_time);
gf_m2ts_reset_sdt(ts);
gf_list_del(ts->SDTs);
#ifdef GPAC_ENABLE_MPE
gf_dvb_mpe_shutdown(ts);
#endif
if (ts->dsmcc_controler) {
if (gf_list_count(ts->dsmcc_controler)) {
#ifdef GPAC_ENABLE_DSMCC
GF_M2TS_DSMCC_OVERLORD* dsmcc_overlord = (GF_M2TS_DSMCC_OVERLORD*)gf_list_get(ts->dsmcc_controler,0);
gf_cleanup_dir(dsmcc_overlord->root_dir);
gf_rmdir(dsmcc_overlord->root_dir);
gf_m2ts_delete_dsmcc_overlord(dsmcc_overlord);
if(ts->dsmcc_root_dir) {
gf_free(ts->dsmcc_root_dir);
}
#endif
}
gf_list_del(ts->dsmcc_controler);
}
while(gf_list_count(ts->ChannelAppList)) {
#ifdef GPAC_ENABLE_DSMCC
GF_M2TS_CHANNEL_APPLICATION_INFO* ChanAppInfo = (GF_M2TS_CHANNEL_APPLICATION_INFO*)gf_list_get(ts->ChannelAppList,0);
gf_m2ts_delete_channel_application_info(ChanAppInfo);
gf_list_rem(ts->ChannelAppList,0);
#endif
}
gf_list_del(ts->ChannelAppList);
if (ts->dsmcc_root_dir) gf_free(ts->dsmcc_root_dir);
gf_free(ts);
}
#if 0//unused
void gf_m2ts_print_info(GF_M2TS_Demuxer *ts)
{
#ifdef GPAC_ENABLE_MPE
gf_m2ts_print_mpe_info(ts);
#endif
}
#endif
#define M2TS_PROBE_SIZE 188000
static Bool gf_m2ts_probe_buffer(char *buf, u32 size)
{
GF_Err e;
GF_M2TS_Demuxer *ts;
u32 lt;
lt = gf_log_get_tool_level(GF_LOG_CONTAINER);
gf_log_set_tool_level(GF_LOG_CONTAINER, GF_LOG_QUIET);
ts = gf_m2ts_demux_new();
e = gf_m2ts_process_data(ts, buf, size);
if (!ts->pck_number) e = GF_BAD_PARAM;
gf_m2ts_demux_del(ts);
gf_log_set_tool_level(GF_LOG_CONTAINER, lt);
if (e) return GF_FALSE;
return GF_TRUE;
}
GF_EXPORT
Bool gf_m2ts_probe_file(const char *fileName)
{
char buf[M2TS_PROBE_SIZE];
u32 size;
FILE *t;
if (!strncmp(fileName, "gmem://", 7)) {
u8 *mem_address;
if (gf_blob_get_data(fileName, &mem_address, &size) != GF_OK) {
return GF_FALSE;
}
if (size>M2TS_PROBE_SIZE) size = M2TS_PROBE_SIZE;
memcpy(buf, mem_address, size);
} else {
t = gf_fopen(fileName, "rb");
if (!t) return 0;
size = (u32) fread(buf, 1, M2TS_PROBE_SIZE, t);
gf_fclose(t);
if ((s32) size <= 0) return 0;
}
return gf_m2ts_probe_buffer(buf, size);
}
GF_EXPORT
Bool gf_m2ts_probe_data(const u8 *data, u32 size)
{
size /= 188;
size *= 188;
return gf_m2ts_probe_buffer((char *) data, size);
}
static void rewrite_pts_dts(unsigned char *ptr, u64 TS)
{
ptr[0] &= 0xf1;
ptr[0] |= (unsigned char)((TS&0x1c0000000ULL)>>29);
ptr[1] = (unsigned char)((TS&0x03fc00000ULL)>>22);
ptr[2] &= 0x1;
ptr[2] |= (unsigned char)((TS&0x0003f8000ULL)>>14);
ptr[3] = (unsigned char)((TS&0x000007f80ULL)>>7);
ptr[4] &= 0x1;
ptr[4] |= (unsigned char)((TS&0x00000007fULL)<<1);
assert(((u64)(ptr[0]&0xe)<<29) + ((u64)ptr[1]<<22) + ((u64)(ptr[2]&0xfe)<<14) + ((u64)ptr[3]<<7) + ((ptr[4]&0xfe)>>1) == TS);
}
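//illustrative sketch, not part of the library: rewrite_pts_dts() patches a
//timestamp in place while preserving the marker bits, so reading the field
//back with gf_m2ts_get_pts() must return the new value
#if 0
static void example_rewrite(unsigned char *pts_field /*5 bytes inside a valid PES header*/)
{
rewrite_pts_dts(pts_field, 90000);
assert(gf_m2ts_get_pts(pts_field) == 90000);
}
#endif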
#define ADJUST_TIMESTAMP(_TS) \
if (_TS < (u64) -ts_shift) _TS = pcr_mod + _TS + ts_shift; \
else _TS = _TS + ts_shift; \
while (_TS > pcr_mod) _TS -= pcr_mod;
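//illustrative sketch, not part of the library: ADJUST_TIMESTAMP shifts a
//timestamp modulo 2^33, the wrap-around period of 90 kHz PES timestamps.
//E.g. shifting PTS 1000 by ts_shift -3000 yields 2^33 - 2000
#if 0
static u64 example_adjust(u64 pts, s64 ts_shift)
{
u64 pcr_mod = 0x200000000ULL; /*2^33*/
if (pts < (u64) -ts_shift) pts = pcr_mod + pts + ts_shift;
else pts = pts + ts_shift;
while (pts > pcr_mod) pts -= pcr_mod;
return pts;
}
#endif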
GF_EXPORT
GF_Err gf_m2ts_restamp(u8 *buffer, u32 size, s64 ts_shift, u8 *is_pes)
{
u32 done = 0;
u64 pcr_mod;
// if (!ts_shift) return GF_OK;
pcr_mod = 0x80000000;
pcr_mod*=4;
while (done + 188 <= size) {
u8 *pesh;
u8 *pck;
u64 pcr_base=0, pcr_ext=0;
u16 pid;
u8 adaptation_field, adaptation_field_length;
pck = (u8*) buffer+done;
if (pck[0]!=0x47) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[M2TS Restamp] Invalid sync byte %X\n", pck[0]));
return GF_NON_COMPLIANT_BITSTREAM;
}
pid = ((pck[1] & 0x1f) <<8 ) + pck[2];
adaptation_field_length = 0;
adaptation_field = (pck[3] >> 4) & 0x3;
if ((adaptation_field==2) || (adaptation_field==3)) {
adaptation_field_length = pck[4];
if ( pck[5]&0x10 /*PCR_flag*/) {
pcr_base = (((u64)pck[6])<<25) + (pck[7]<<17) + (pck[8]<<9) + (pck[9]<<1) + (pck[10]>>7);
pcr_ext = ((pck[10]&1)<<8) + pck[11];
ADJUST_TIMESTAMP(pcr_base);
pck[6] = (unsigned char)(0xff&(pcr_base>>25));
pck[7] = (unsigned char)(0xff&(pcr_base>>17));
pck[8] = (unsigned char)(0xff&(pcr_base>>9));
pck[9] = (unsigned char)(0xff&(pcr_base>>1));
pck[10] = (unsigned char)(((0x1&pcr_base)<<7) | 0x7e | ((0x100&pcr_ext)>>8));
if (pcr_ext != ((pck[10]&1)<<8) + pck[11]) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[M2TS Restamp] Sanity check failed for PCR restamping\n"));
return GF_IO_ERR;
}
pck[11] = (unsigned char)(0xff&pcr_ext);
}
/*account for the adaptation_field_length byte itself*/
adaptation_field_length++;
}
if (!is_pes[pid] || !(pck[1]&0x40)) {
done+=188;
continue;
}
pesh = &pck[4+adaptation_field_length];
if ((pesh[0]==0x00) && (pesh[1]==0x00) && (pesh[2]==0x01)) {
Bool has_pts, has_dts;
if ((pesh[6]&0xc0)!=0x80) {
done+=188;
continue;
}
has_pts = (pesh[7]&0x80);
has_dts = has_pts ? (pesh[7]&0x40) : 0;
if (has_pts) {
u64 PTS;
if (((pesh[9]&0xe0)>>4)!=0x2) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[M2TS Restamp] PID %4d: Wrong PES header, PTS decoding: '0010' expected\n", pid));
done+=188;
continue;
}
PTS = gf_m2ts_get_pts(pesh + 9);
ADJUST_TIMESTAMP(PTS);
rewrite_pts_dts(pesh+9, PTS);
}
if (has_dts) {
u64 DTS = gf_m2ts_get_pts(pesh + 14);
ADJUST_TIMESTAMP(DTS);
rewrite_pts_dts(pesh+14, DTS);
}
} else {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[M2TS Restamp] PID %4d: Wrong PES not beginning with start code\n", pid));
}
done+=188;
}
return GF_OK;
}
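//illustrative usage sketch, not part of the library: shift all PCR/PTS/DTS in
//an aligned buffer of 188-byte packets forward by one second (90000 ticks at
//90 kHz). is_pes[] is indexed by PID and flags PIDs carrying PES data; it
//would typically be filled from the PMT - PID 100 is an assumed example
#if 0
static GF_Err example_restamp(u8 *buffer, u32 size)
{
u8 is_pes[GF_M2TS_MAX_STREAMS];
memset(is_pes, 0, sizeof(is_pes));
is_pes[100] = 1;
return gf_m2ts_restamp(buffer, size, 90000, is_pes);
}
#endif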
#endif /*GPAC_DISABLE_MPEG2TS*/
| ./CrossVul/dataset_final_sorted/CWE-416/c/good_1372_0 |
crossvul-cpp_data_good_1342_2 | /* vi:set ts=8 sts=4 sw=4 noet:
*
* VIM - Vi IMproved by Bram Moolenaar
*
* Do ":help uganda" in Vim to read a list of people who contributed.
* Do ":help credits" in Vim to see a list of people who contributed.
* See README.txt for an overview of the Vim source code.
*/
#include "vim.h"
static void cmd_with_count(char *cmd, char_u *bufp, size_t bufsize, long Prenum);
static void win_init(win_T *newp, win_T *oldp, int flags);
static void win_init_some(win_T *newp, win_T *oldp);
static void frame_comp_pos(frame_T *topfrp, int *row, int *col);
static void frame_setheight(frame_T *curfrp, int height);
static void frame_setwidth(frame_T *curfrp, int width);
static void win_exchange(long);
static void win_rotate(int, int);
static void win_totop(int size, int flags);
static void win_equal_rec(win_T *next_curwin, int current, frame_T *topfr, int dir, int col, int row, int width, int height);
static win_T *win_free_mem(win_T *win, int *dirp, tabpage_T *tp);
static frame_T *win_altframe(win_T *win, tabpage_T *tp);
static tabpage_T *alt_tabpage(void);
static win_T *frame2win(frame_T *frp);
static int frame_has_win(frame_T *frp, win_T *wp);
static void frame_new_height(frame_T *topfrp, int height, int topfirst, int wfh);
static int frame_fixed_height(frame_T *frp);
static int frame_fixed_width(frame_T *frp);
static void frame_add_statusline(frame_T *frp);
static void frame_new_width(frame_T *topfrp, int width, int leftfirst, int wfw);
static void frame_add_vsep(frame_T *frp);
static int frame_minwidth(frame_T *topfrp, win_T *next_curwin);
static void frame_fix_width(win_T *wp);
static int win_alloc_firstwin(win_T *oldwin);
static void new_frame(win_T *wp);
static tabpage_T *alloc_tabpage(void);
static int leave_tabpage(buf_T *new_curbuf, int trigger_leave_autocmds);
static void enter_tabpage(tabpage_T *tp, buf_T *old_curbuf, int trigger_enter_autocmds, int trigger_leave_autocmds);
static void frame_fix_height(win_T *wp);
static int frame_minheight(frame_T *topfrp, win_T *next_curwin);
static int may_open_tabpage(void);
static void win_enter_ext(win_T *wp, int undo_sync, int no_curwin, int trigger_new_autocmds, int trigger_enter_autocmds, int trigger_leave_autocmds);
static void win_free(win_T *wp, tabpage_T *tp);
static int win_unlisted(win_T *wp);
static void win_append(win_T *after, win_T *wp);
static void frame_append(frame_T *after, frame_T *frp);
static void frame_insert(frame_T *before, frame_T *frp);
static void frame_remove(frame_T *frp);
static void win_goto_ver(int up, long count);
static void win_goto_hor(int left, long count);
static void frame_add_height(frame_T *frp, int n);
static void last_status_rec(frame_T *fr, int statusline);
static void make_snapshot_rec(frame_T *fr, frame_T **frp);
static void clear_snapshot(tabpage_T *tp, int idx);
static void clear_snapshot_rec(frame_T *fr);
static int check_snapshot_rec(frame_T *sn, frame_T *fr);
static win_T *restore_snapshot_rec(frame_T *sn, frame_T *fr);
static int frame_check_height(frame_T *topfrp, int height);
static int frame_check_width(frame_T *topfrp, int width);
static win_T *win_alloc(win_T *after, int hidden);
#define NOWIN (win_T *)-1 /* non-existing window */
#define ROWS_AVAIL (Rows - p_ch - tabline_height())
static char *m_onlyone = N_("Already only one window");
// When non-zero, splitting a window is forbidden.  Used to avoid nasty
// autocommands messing up the window structure.
static int split_disallowed = 0;
// #define WIN_DEBUG
#ifdef WIN_DEBUG
/*
* Call this method to log the current window layout.
*/
static void
log_frame_layout(frame_T *frame)
{
ch_log(NULL, "layout %s, wi: %d, he: %d, wwi: %d, whe: %d, id: %d",
frame->fr_layout == FR_LEAF ? "LEAF"
: frame->fr_layout == FR_ROW ? "ROW" : "COL",
frame->fr_width,
frame->fr_height,
frame->fr_win == NULL ? -1 : frame->fr_win->w_width,
frame->fr_win == NULL ? -1 : frame->fr_win->w_height,
frame->fr_win == NULL ? -1 : frame->fr_win->w_id);
if (frame->fr_child != NULL)
{
ch_log(NULL, "children");
log_frame_layout(frame->fr_child);
if (frame->fr_next != NULL)
ch_log(NULL, "END of children");
}
if (frame->fr_next != NULL)
log_frame_layout(frame->fr_next);
}
#endif
/*
* All CTRL-W window commands are handled here, called from normal_cmd().
*/
void
do_window(
int nchar,
long Prenum,
int xchar) /* extra char from ":wincmd gx" or NUL */
{
long Prenum1;
win_T *wp;
#if defined(FEAT_SEARCHPATH) || defined(FEAT_FIND_ID)
char_u *ptr;
linenr_T lnum = -1;
#endif
#ifdef FEAT_FIND_ID
int type = FIND_DEFINE;
int len;
#endif
char_u cbuf[40];
if (ERROR_IF_POPUP_WINDOW)
return;
#ifdef FEAT_CMDWIN
# define CHECK_CMDWIN \
do { \
if (cmdwin_type != 0) \
{ \
emsg(_(e_cmdwin)); \
return; \
} \
} while (0)
#else
# define CHECK_CMDWIN do { /**/ } while (0)
#endif
Prenum1 = Prenum == 0 ? 1 : Prenum;
switch (nchar)
{
/* split current window in two parts, horizontally */
case 'S':
case Ctrl_S:
case 's':
CHECK_CMDWIN;
reset_VIsual_and_resel(); /* stop Visual mode */
#ifdef FEAT_QUICKFIX
/* When splitting the quickfix window open a new buffer in it,
* don't replicate the quickfix buffer. */
if (bt_quickfix(curbuf))
goto newwindow;
#endif
#ifdef FEAT_GUI
need_mouse_correct = TRUE;
#endif
(void)win_split((int)Prenum, 0);
break;
/* split current window in two parts, vertically */
case Ctrl_V:
case 'v':
CHECK_CMDWIN;
reset_VIsual_and_resel(); /* stop Visual mode */
#ifdef FEAT_QUICKFIX
/* When splitting the quickfix window open a new buffer in it,
* don't replicate the quickfix buffer. */
if (bt_quickfix(curbuf))
goto newwindow;
#endif
#ifdef FEAT_GUI
need_mouse_correct = TRUE;
#endif
(void)win_split((int)Prenum, WSP_VERT);
break;
/* split current window and edit alternate file */
case Ctrl_HAT:
case '^':
CHECK_CMDWIN;
reset_VIsual_and_resel(); /* stop Visual mode */
if (buflist_findnr(Prenum == 0
? curwin->w_alt_fnum : Prenum) == NULL)
{
if (Prenum == 0)
emsg(_(e_noalt));
else
semsg(_("E92: Buffer %ld not found"), Prenum);
break;
}
if (!curbuf_locked() && win_split(0, 0) == OK)
(void)buflist_getfile(
Prenum == 0 ? curwin->w_alt_fnum : Prenum,
(linenr_T)0, GETF_ALT, FALSE);
break;
/* open new window */
case Ctrl_N:
case 'n':
CHECK_CMDWIN;
reset_VIsual_and_resel(); /* stop Visual mode */
#ifdef FEAT_QUICKFIX
newwindow:
#endif
if (Prenum)
/* window height */
vim_snprintf((char *)cbuf, sizeof(cbuf) - 5, "%ld", Prenum);
else
cbuf[0] = NUL;
#if defined(FEAT_QUICKFIX)
if (nchar == 'v' || nchar == Ctrl_V)
STRCAT(cbuf, "v");
#endif
STRCAT(cbuf, "new");
do_cmdline_cmd(cbuf);
break;
/* quit current window */
case Ctrl_Q:
case 'q':
reset_VIsual_and_resel(); /* stop Visual mode */
cmd_with_count("quit", cbuf, sizeof(cbuf), Prenum);
do_cmdline_cmd(cbuf);
break;
/* close current window */
case Ctrl_C:
case 'c':
reset_VIsual_and_resel(); /* stop Visual mode */
cmd_with_count("close", cbuf, sizeof(cbuf), Prenum);
do_cmdline_cmd(cbuf);
break;
#if defined(FEAT_QUICKFIX)
/* close preview window */
case Ctrl_Z:
case 'z':
CHECK_CMDWIN;
reset_VIsual_and_resel(); /* stop Visual mode */
do_cmdline_cmd((char_u *)"pclose");
break;
/* cursor to preview window */
case 'P':
FOR_ALL_WINDOWS(wp)
if (wp->w_p_pvw)
break;
if (wp == NULL)
emsg(_("E441: There is no preview window"));
else
win_goto(wp);
break;
#endif
/* close all but current window */
case Ctrl_O:
case 'o':
CHECK_CMDWIN;
reset_VIsual_and_resel(); /* stop Visual mode */
cmd_with_count("only", cbuf, sizeof(cbuf), Prenum);
do_cmdline_cmd(cbuf);
break;
/* cursor to next window with wrap around */
case Ctrl_W:
case 'w':
/* cursor to previous window with wrap around */
case 'W':
CHECK_CMDWIN;
if (ONE_WINDOW && Prenum != 1) /* just one window */
beep_flush();
else
{
if (Prenum) /* go to specified window */
{
for (wp = firstwin; --Prenum > 0; )
{
if (wp->w_next == NULL)
break;
else
wp = wp->w_next;
}
}
else
{
if (nchar == 'W') /* go to previous window */
{
wp = curwin->w_prev;
if (wp == NULL)
wp = lastwin; /* wrap around */
}
else /* go to next window */
{
wp = curwin->w_next;
if (wp == NULL)
wp = firstwin; /* wrap around */
}
}
win_goto(wp);
}
break;
/* cursor to window below */
case 'j':
case K_DOWN:
case Ctrl_J:
CHECK_CMDWIN;
win_goto_ver(FALSE, Prenum1);
break;
/* cursor to window above */
case 'k':
case K_UP:
case Ctrl_K:
CHECK_CMDWIN;
win_goto_ver(TRUE, Prenum1);
break;
/* cursor to left window */
case 'h':
case K_LEFT:
case Ctrl_H:
case K_BS:
CHECK_CMDWIN;
win_goto_hor(TRUE, Prenum1);
break;
/* cursor to right window */
case 'l':
case K_RIGHT:
case Ctrl_L:
CHECK_CMDWIN;
win_goto_hor(FALSE, Prenum1);
break;
/* move window to new tab page */
case 'T':
if (one_window())
msg(_(m_onlyone));
else
{
tabpage_T *oldtab = curtab;
tabpage_T *newtab;
/* First create a new tab with the window, then go back to
* the old tab and close the window there. */
wp = curwin;
if (win_new_tabpage((int)Prenum) == OK
&& valid_tabpage(oldtab))
{
newtab = curtab;
goto_tabpage_tp(oldtab, TRUE, TRUE);
if (curwin == wp)
win_close(curwin, FALSE);
if (valid_tabpage(newtab))
goto_tabpage_tp(newtab, TRUE, TRUE);
}
}
break;
/* cursor to top-left window */
case 't':
case Ctrl_T:
win_goto(firstwin);
break;
/* cursor to bottom-right window */
case 'b':
case Ctrl_B:
win_goto(lastwin);
break;
/* cursor to last accessed (previous) window */
case 'p':
case Ctrl_P:
if (!win_valid(prevwin))
beep_flush();
else
win_goto(prevwin);
break;
/* exchange current and next window */
case 'x':
case Ctrl_X:
CHECK_CMDWIN;
win_exchange(Prenum);
break;
/* rotate windows downwards */
case Ctrl_R:
case 'r':
CHECK_CMDWIN;
reset_VIsual_and_resel(); /* stop Visual mode */
win_rotate(FALSE, (int)Prenum1); /* downwards */
break;
/* rotate windows upwards */
case 'R':
CHECK_CMDWIN;
reset_VIsual_and_resel(); /* stop Visual mode */
win_rotate(TRUE, (int)Prenum1); /* upwards */
break;
/* move window to the very top/bottom/left/right */
case 'K':
case 'J':
case 'H':
case 'L':
CHECK_CMDWIN;
win_totop((int)Prenum,
((nchar == 'H' || nchar == 'L') ? WSP_VERT : 0)
| ((nchar == 'H' || nchar == 'K') ? WSP_TOP : WSP_BOT));
break;
/* make all windows the same height */
case '=':
#ifdef FEAT_GUI
need_mouse_correct = TRUE;
#endif
win_equal(NULL, FALSE, 'b');
break;
/* increase current window height */
case '+':
#ifdef FEAT_GUI
need_mouse_correct = TRUE;
#endif
win_setheight(curwin->w_height + (int)Prenum1);
break;
/* decrease current window height */
case '-':
#ifdef FEAT_GUI
need_mouse_correct = TRUE;
#endif
win_setheight(curwin->w_height - (int)Prenum1);
break;
/* set current window height */
case Ctrl__:
case '_':
#ifdef FEAT_GUI
need_mouse_correct = TRUE;
#endif
win_setheight(Prenum ? (int)Prenum : 9999);
break;
/* increase current window width */
case '>':
#ifdef FEAT_GUI
need_mouse_correct = TRUE;
#endif
win_setwidth(curwin->w_width + (int)Prenum1);
break;
/* decrease current window width */
case '<':
#ifdef FEAT_GUI
need_mouse_correct = TRUE;
#endif
win_setwidth(curwin->w_width - (int)Prenum1);
break;
/* set current window width */
case '|':
#ifdef FEAT_GUI
need_mouse_correct = TRUE;
#endif
win_setwidth(Prenum != 0 ? (int)Prenum : 9999);
break;
/* jump to tag and split window if tag exists (in preview window) */
#if defined(FEAT_QUICKFIX)
case '}':
CHECK_CMDWIN;
if (Prenum)
g_do_tagpreview = Prenum;
else
g_do_tagpreview = p_pvh;
#endif
/* FALLTHROUGH */
case ']':
case Ctrl_RSB:
CHECK_CMDWIN;
/* keep Visual mode, can select words to use as a tag */
if (Prenum)
postponed_split = Prenum;
else
postponed_split = -1;
#ifdef FEAT_QUICKFIX
if (nchar != '}')
g_do_tagpreview = 0;
#endif
/* Execute the command right here, required when "wincmd ]"
* was used in a function. */
do_nv_ident(Ctrl_RSB, NUL);
break;
#ifdef FEAT_SEARCHPATH
/* edit file name under cursor in a new window */
case 'f':
case 'F':
case Ctrl_F:
wingotofile:
CHECK_CMDWIN;
ptr = grab_file_name(Prenum1, &lnum);
if (ptr != NULL)
{
tabpage_T *oldtab = curtab;
win_T *oldwin = curwin;
# ifdef FEAT_GUI
need_mouse_correct = TRUE;
# endif
setpcmark();
if (win_split(0, 0) == OK)
{
RESET_BINDING(curwin);
if (do_ecmd(0, ptr, NULL, NULL, ECMD_LASTL,
ECMD_HIDE, NULL) == FAIL)
{
/* Failed to open the file, close the window
* opened for it. */
win_close(curwin, FALSE);
goto_tabpage_win(oldtab, oldwin);
}
else if (nchar == 'F' && lnum >= 0)
{
curwin->w_cursor.lnum = lnum;
check_cursor_lnum();
beginline(BL_SOL | BL_FIX);
}
}
vim_free(ptr);
}
break;
#endif
#ifdef FEAT_FIND_ID
/* Go to the first occurrence of the identifier under cursor along path in a
* new window -- webb
*/
case 'i': /* Go to any match */
case Ctrl_I:
type = FIND_ANY;
/* FALLTHROUGH */
case 'd': /* Go to definition, using 'define' */
case Ctrl_D:
CHECK_CMDWIN;
if ((len = find_ident_under_cursor(&ptr, FIND_IDENT)) == 0)
break;
find_pattern_in_path(ptr, 0, len, TRUE,
Prenum == 0 ? TRUE : FALSE, type,
Prenum1, ACTION_SPLIT, (linenr_T)1, (linenr_T)MAXLNUM);
curwin->w_set_curswant = TRUE;
break;
#endif
/* Quickfix window only: view the result under the cursor in a new split. */
#if defined(FEAT_QUICKFIX)
case K_KENTER:
case CAR:
if (bt_quickfix(curbuf))
qf_view_result(TRUE);
break;
#endif
/* CTRL-W g extended commands */
case 'g':
case Ctrl_G:
CHECK_CMDWIN;
#ifdef USE_ON_FLY_SCROLL
dont_scroll = TRUE; /* disallow scrolling here */
#endif
++no_mapping;
++allow_keys; /* no mapping for xchar, but allow key codes */
if (xchar == NUL)
xchar = plain_vgetc();
LANGMAP_ADJUST(xchar, TRUE);
--no_mapping;
--allow_keys;
#ifdef FEAT_CMDL_INFO
(void)add_to_showcmd(xchar);
#endif
switch (xchar)
{
#if defined(FEAT_QUICKFIX)
case '}':
xchar = Ctrl_RSB;
if (Prenum)
g_do_tagpreview = Prenum;
else
g_do_tagpreview = p_pvh;
#endif
/* FALLTHROUGH */
case ']':
case Ctrl_RSB:
/* keep Visual mode, can select words to use as a tag */
if (Prenum)
postponed_split = Prenum;
else
postponed_split = -1;
/* Execute the command right here, required when
* "wincmd g}" was used in a function. */
do_nv_ident('g', xchar);
break;
#ifdef FEAT_SEARCHPATH
case 'f': /* CTRL-W gf: "gf" in a new tab page */
case 'F': /* CTRL-W gF: "gF" in a new tab page */
cmdmod.tab = tabpage_index(curtab) + 1;
nchar = xchar;
goto wingotofile;
#endif
case 't': // CTRL-W gt: go to next tab page
goto_tabpage((int)Prenum);
break;
case 'T': // CTRL-W gT: go to previous tab page
goto_tabpage(-(int)Prenum1);
break;
default:
beep_flush();
break;
}
break;
default: beep_flush();
break;
}
}
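/*
* Illustrative sketch, not part of Vim: do_window() implements the CTRL-W
* commands, e.g. "nchar" '+' with a count grows the current window; the call
* below behaves like typing CTRL-W 10 +.
*/
#if 0
static void
example_do_window(void)
{
do_window('+', 10L, NUL);
}
#endif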
/*
* Figure out the address type for ":wincmd".
*/
void
get_wincmd_addr_type(char_u *arg, exarg_T *eap)
{
switch (*arg)
{
case 'S':
case Ctrl_S:
case 's':
case Ctrl_N:
case 'n':
case 'j':
case Ctrl_J:
case 'k':
case Ctrl_K:
case 'T':
case Ctrl_R:
case 'r':
case 'R':
case 'K':
case 'J':
case '+':
case '-':
case Ctrl__:
case '_':
case '|':
case ']':
case Ctrl_RSB:
case 'g':
case Ctrl_G:
case Ctrl_V:
case 'v':
case 'h':
case Ctrl_H:
case 'l':
case Ctrl_L:
case 'H':
case 'L':
case '>':
case '<':
#if defined(FEAT_QUICKFIX)
case '}':
#endif
#ifdef FEAT_SEARCHPATH
case 'f':
case 'F':
case Ctrl_F:
#endif
#ifdef FEAT_FIND_ID
case 'i':
case Ctrl_I:
case 'd':
case Ctrl_D:
#endif
// window size or any count
eap->addr_type = ADDR_OTHER;
break;
case Ctrl_HAT:
case '^':
// buffer number
eap->addr_type = ADDR_BUFFERS;
break;
case Ctrl_Q:
case 'q':
case Ctrl_C:
case 'c':
case Ctrl_O:
case 'o':
case Ctrl_W:
case 'w':
case 'W':
case 'x':
case Ctrl_X:
// window number
eap->addr_type = ADDR_WINDOWS;
break;
#if defined(FEAT_QUICKFIX)
case Ctrl_Z:
case 'z':
case 'P':
#endif
case 't':
case Ctrl_T:
case 'b':
case Ctrl_B:
case 'p':
case Ctrl_P:
case '=':
case CAR:
// no count
eap->addr_type = ADDR_NONE;
break;
}
}
static void
cmd_with_count(
char *cmd,
char_u *bufp,
size_t bufsize,
long Prenum)
{
size_t len = STRLEN(cmd);
STRCPY(bufp, cmd);
if (Prenum > 0)
vim_snprintf((char *)bufp + len, bufsize - len, "%ld", Prenum);
}
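/*
* Illustrative sketch, not part of Vim: cmd_with_count() appends the count to
* the command name, so "close" with Prenum 3 yields "close3", which
* do_cmdline_cmd() then executes (":close3").
*/
#if 0
static void
example_cmd_with_count(void)
{
char_u buf[40];
cmd_with_count("close", buf, sizeof(buf), 3);
do_cmdline_cmd(buf);
}
#endif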
/*
* If "split_disallowed" is set given an error and return FAIL.
* Otherwise return OK.
*/
static int
check_split_disallowed()
{
if (split_disallowed > 0)
{
emsg(_("E242: Can't split a window while closing another"));
return FAIL;
}
return OK;
}
/*
* split the current window, implements CTRL-W s and :split
*
* "size" is the height or width for the new window, 0 to use half of current
* height or width.
*
* "flags":
* WSP_ROOM: require enough room for new window
* WSP_VERT: vertical split.
* WSP_TOP: open window at the top-left of the shell (help window).
* WSP_BOT: open window at the bottom-right of the shell (quickfix window).
* WSP_HELP: creating the help window, keep layout snapshot
*
* return FAIL for failure, OK otherwise
*/
int
win_split(int size, int flags)
{
if (ERROR_IF_POPUP_WINDOW)
return FAIL;
/* When the ":tab" modifier was used open a new tab page instead. */
if (may_open_tabpage() == OK)
return OK;
/* Add flags from ":vertical", ":topleft" and ":botright". */
flags |= cmdmod.split;
if ((flags & WSP_TOP) && (flags & WSP_BOT))
{
emsg(_("E442: Can't split topleft and botright at the same time"));
return FAIL;
}
if (check_split_disallowed() == FAIL)
return FAIL;
/* When creating the help window make a snapshot of the window layout.
* Otherwise clear the snapshot, it's now invalid. */
if (flags & WSP_HELP)
make_snapshot(SNAP_HELP_IDX);
else
clear_snapshot(curtab, SNAP_HELP_IDX);
return win_split_ins(size, flags, NULL, 0);
}
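/*
* Illustrative sketch, not part of Vim: open a help-style split at the top of
* the shell, 10 lines high and requiring enough room, by combining the WSP_
* flags documented above.
*/
#if 0
static int
example_split(void)
{
return win_split(10, WSP_TOP | WSP_ROOM | WSP_HELP);
}
#endif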
/*
* When "new_wp" is NULL: split the current window in two.
* When "new_wp" is not NULL: insert this window at the far
* top/left/right/bottom.
* return FAIL for failure, OK otherwise
*/
int
win_split_ins(
int size,
int flags,
win_T *new_wp,
int dir)
{
win_T *wp = new_wp;
win_T *oldwin;
int new_size = size;
int i;
int need_status = 0;
int do_equal = FALSE;
int needed;
int available;
int oldwin_height = 0;
int layout;
frame_T *frp, *curfrp, *frp2, *prevfrp;
int before;
int minheight;
int wmh1;
int did_set_fraction = FALSE;
if (flags & WSP_TOP)
oldwin = firstwin;
else if (flags & WSP_BOT)
oldwin = lastwin;
else
oldwin = curwin;
/* add a status line when p_ls == 1 and splitting the first window */
if (ONE_WINDOW && p_ls == 1 && oldwin->w_status_height == 0)
{
if (VISIBLE_HEIGHT(oldwin) <= p_wmh && new_wp == NULL)
{
emsg(_(e_noroom));
return FAIL;
}
need_status = STATUS_HEIGHT;
}
#ifdef FEAT_GUI
/* May be needed for the scrollbars that are going to change. */
if (gui.in_use)
out_flush();
#endif
if (flags & WSP_VERT)
{
int wmw1;
int minwidth;
layout = FR_ROW;
/*
* Check if we are able to split the current window and compute its
* width.
*/
/* Current window requires at least 1 space. */
wmw1 = (p_wmw == 0 ? 1 : p_wmw);
needed = wmw1 + 1;
if (flags & WSP_ROOM)
needed += p_wiw - wmw1;
if (flags & (WSP_BOT | WSP_TOP))
{
minwidth = frame_minwidth(topframe, NOWIN);
available = topframe->fr_width;
needed += minwidth;
}
else if (p_ea)
{
minwidth = frame_minwidth(oldwin->w_frame, NOWIN);
prevfrp = oldwin->w_frame;
for (frp = oldwin->w_frame->fr_parent; frp != NULL;
frp = frp->fr_parent)
{
if (frp->fr_layout == FR_ROW)
FOR_ALL_FRAMES(frp2, frp->fr_child)
if (frp2 != prevfrp)
minwidth += frame_minwidth(frp2, NOWIN);
prevfrp = frp;
}
available = topframe->fr_width;
needed += minwidth;
}
else
{
minwidth = frame_minwidth(oldwin->w_frame, NOWIN);
available = oldwin->w_frame->fr_width;
needed += minwidth;
}
if (available < needed && new_wp == NULL)
{
emsg(_(e_noroom));
return FAIL;
}
if (new_size == 0)
new_size = oldwin->w_width / 2;
if (new_size > available - minwidth - 1)
new_size = available - minwidth - 1;
if (new_size < wmw1)
new_size = wmw1;
/* if it doesn't fit in the current window, need win_equal() */
if (oldwin->w_width - new_size - 1 < p_wmw)
do_equal = TRUE;
        /* We don't like to take columns for the new window from a
         * 'winfixwidth' window.  Take them from a window to the left or
         * right instead, if possible.  Add one for the separator. */
if (oldwin->w_p_wfw)
win_setwidth_win(oldwin->w_width + new_size + 1, oldwin);
/* Only make all windows the same width if one of them (except oldwin)
* is wider than one of the split windows. */
if (!do_equal && p_ea && size == 0 && *p_ead != 'v'
&& oldwin->w_frame->fr_parent != NULL)
{
frp = oldwin->w_frame->fr_parent->fr_child;
while (frp != NULL)
{
if (frp->fr_win != oldwin && frp->fr_win != NULL
&& (frp->fr_win->w_width > new_size
|| frp->fr_win->w_width > oldwin->w_width
- new_size - 1))
{
do_equal = TRUE;
break;
}
frp = frp->fr_next;
}
}
}
else
{
layout = FR_COL;
/*
* Check if we are able to split the current window and compute its
* height.
*/
/* Current window requires at least 1 space. */
wmh1 = (p_wmh == 0 ? 1 : p_wmh) + WINBAR_HEIGHT(curwin);
needed = wmh1 + STATUS_HEIGHT;
if (flags & WSP_ROOM)
needed += p_wh - wmh1;
if (flags & (WSP_BOT | WSP_TOP))
{
minheight = frame_minheight(topframe, NOWIN) + need_status;
available = topframe->fr_height;
needed += minheight;
}
else if (p_ea)
{
minheight = frame_minheight(oldwin->w_frame, NOWIN) + need_status;
prevfrp = oldwin->w_frame;
for (frp = oldwin->w_frame->fr_parent; frp != NULL;
frp = frp->fr_parent)
{
if (frp->fr_layout == FR_COL)
FOR_ALL_FRAMES(frp2, frp->fr_child)
if (frp2 != prevfrp)
minheight += frame_minheight(frp2, NOWIN);
prevfrp = frp;
}
available = topframe->fr_height;
needed += minheight;
}
else
{
minheight = frame_minheight(oldwin->w_frame, NOWIN) + need_status;
available = oldwin->w_frame->fr_height;
needed += minheight;
}
if (available < needed && new_wp == NULL)
{
emsg(_(e_noroom));
return FAIL;
}
oldwin_height = oldwin->w_height;
if (need_status)
{
oldwin->w_status_height = STATUS_HEIGHT;
oldwin_height -= STATUS_HEIGHT;
}
if (new_size == 0)
new_size = oldwin_height / 2;
if (new_size > available - minheight - STATUS_HEIGHT)
new_size = available - minheight - STATUS_HEIGHT;
if (new_size < wmh1)
new_size = wmh1;
/* if it doesn't fit in the current window, need win_equal() */
if (oldwin_height - new_size - STATUS_HEIGHT < p_wmh)
do_equal = TRUE;
/* We don't like to take lines for the new window from a
* 'winfixheight' window. Take them from a window above or below
* instead, if possible. */
if (oldwin->w_p_wfh)
{
/* Set w_fraction now so that the cursor keeps the same relative
* vertical position using the old height. */
set_fraction(oldwin);
did_set_fraction = TRUE;
win_setheight_win(oldwin->w_height + new_size + STATUS_HEIGHT,
oldwin);
oldwin_height = oldwin->w_height;
if (need_status)
oldwin_height -= STATUS_HEIGHT;
}
/* Only make all windows the same height if one of them (except oldwin)
* is higher than one of the split windows. */
if (!do_equal && p_ea && size == 0 && *p_ead != 'h'
&& oldwin->w_frame->fr_parent != NULL)
{
frp = oldwin->w_frame->fr_parent->fr_child;
while (frp != NULL)
{
if (frp->fr_win != oldwin && frp->fr_win != NULL
&& (frp->fr_win->w_height > new_size
|| frp->fr_win->w_height > oldwin_height - new_size
- STATUS_HEIGHT))
{
do_equal = TRUE;
break;
}
frp = frp->fr_next;
}
}
}
/*
* allocate new window structure and link it in the window list
*/
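    /* The new window goes after the current one when WSP_BOT or WSP_BELOW is
     * given, or when no side was requested and 'splitright' (vertical) resp.
     * 'splitbelow' (horizontal) is set; otherwise it goes before it. */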
if ((flags & WSP_TOP) == 0
&& ((flags & WSP_BOT)
|| (flags & WSP_BELOW)
|| (!(flags & WSP_ABOVE)
&& ( (flags & WSP_VERT) ? p_spr : p_sb))))
{
/* new window below/right of current one */
if (new_wp == NULL)
wp = win_alloc(oldwin, FALSE);
else
win_append(oldwin, wp);
}
else
{
if (new_wp == NULL)
wp = win_alloc(oldwin->w_prev, FALSE);
else
win_append(oldwin->w_prev, wp);
}
if (new_wp == NULL)
{
if (wp == NULL)
return FAIL;
new_frame(wp);
if (wp->w_frame == NULL)
{
win_free(wp, NULL);
return FAIL;
}
/* make the contents of the new window the same as the current one */
win_init(wp, curwin, flags);
}
/*
* Reorganise the tree of frames to insert the new window.
*/
if (flags & (WSP_TOP | WSP_BOT))
{
if ((topframe->fr_layout == FR_COL && (flags & WSP_VERT) == 0)
|| (topframe->fr_layout == FR_ROW && (flags & WSP_VERT) != 0))
{
curfrp = topframe->fr_child;
if (flags & WSP_BOT)
while (curfrp->fr_next != NULL)
curfrp = curfrp->fr_next;
}
else
curfrp = topframe;
before = (flags & WSP_TOP);
}
else
{
curfrp = oldwin->w_frame;
if (flags & WSP_BELOW)
before = FALSE;
else if (flags & WSP_ABOVE)
before = TRUE;
else if (flags & WSP_VERT)
before = !p_spr;
else
before = !p_sb;
}
if (curfrp->fr_parent == NULL || curfrp->fr_parent->fr_layout != layout)
{
/* Need to create a new frame in the tree to make a branch. */
frp = ALLOC_CLEAR_ONE(frame_T);
*frp = *curfrp;
curfrp->fr_layout = layout;
frp->fr_parent = curfrp;
frp->fr_next = NULL;
frp->fr_prev = NULL;
curfrp->fr_child = frp;
curfrp->fr_win = NULL;
curfrp = frp;
if (frp->fr_win != NULL)
oldwin->w_frame = frp;
else
FOR_ALL_FRAMES(frp, frp->fr_child)
frp->fr_parent = curfrp;
}
if (new_wp == NULL)
frp = wp->w_frame;
else
frp = new_wp->w_frame;
frp->fr_parent = curfrp->fr_parent;
/* Insert the new frame at the right place in the frame list. */
if (before)
frame_insert(curfrp, frp);
else
frame_append(curfrp, frp);
/* Set w_fraction now so that the cursor keeps the same relative
* vertical position. */
if (!did_set_fraction)
set_fraction(oldwin);
wp->w_fraction = oldwin->w_fraction;
if (flags & WSP_VERT)
{
wp->w_p_scr = curwin->w_p_scr;
if (need_status)
{
win_new_height(oldwin, oldwin->w_height - 1);
oldwin->w_status_height = need_status;
}
if (flags & (WSP_TOP | WSP_BOT))
{
/* set height and row of new window to full height */
wp->w_winrow = tabline_height();
win_new_height(wp, curfrp->fr_height - (p_ls > 0)
- WINBAR_HEIGHT(wp));
wp->w_status_height = (p_ls > 0);
}
else
{
            /* height and row of new window are the same as the current window */
wp->w_winrow = oldwin->w_winrow;
win_new_height(wp, VISIBLE_HEIGHT(oldwin));
wp->w_status_height = oldwin->w_status_height;
}
frp->fr_height = curfrp->fr_height;
/* "new_size" of the current window goes to the new window, use
* one column for the vertical separator */
win_new_width(wp, new_size);
if (before)
wp->w_vsep_width = 1;
else
{
wp->w_vsep_width = oldwin->w_vsep_width;
oldwin->w_vsep_width = 1;
}
if (flags & (WSP_TOP | WSP_BOT))
{
if (flags & WSP_BOT)
frame_add_vsep(curfrp);
/* Set width of neighbor frame */
frame_new_width(curfrp, curfrp->fr_width
- (new_size + ((flags & WSP_TOP) != 0)), flags & WSP_TOP,
FALSE);
}
else
win_new_width(oldwin, oldwin->w_width - (new_size + 1));
if (before) /* new window left of current one */
{
wp->w_wincol = oldwin->w_wincol;
oldwin->w_wincol += new_size + 1;
}
else /* new window right of current one */
wp->w_wincol = oldwin->w_wincol + oldwin->w_width + 1;
frame_fix_width(oldwin);
frame_fix_width(wp);
}
else
{
        /* width and column of new window are the same as the current window */
if (flags & (WSP_TOP | WSP_BOT))
{
wp->w_wincol = 0;
win_new_width(wp, Columns);
wp->w_vsep_width = 0;
}
else
{
wp->w_wincol = oldwin->w_wincol;
win_new_width(wp, oldwin->w_width);
wp->w_vsep_width = oldwin->w_vsep_width;
}
frp->fr_width = curfrp->fr_width;
/* "new_size" of the current window goes to the new window, use
* one row for the status line */
win_new_height(wp, new_size);
if (flags & (WSP_TOP | WSP_BOT))
{
            int new_fr_height = curfrp->fr_height - new_size
                                                       + WINBAR_HEIGHT(wp);
if (!((flags & WSP_BOT) && p_ls == 0))
new_fr_height -= STATUS_HEIGHT;
frame_new_height(curfrp, new_fr_height, flags & WSP_TOP, FALSE);
}
else
win_new_height(oldwin, oldwin_height - (new_size + STATUS_HEIGHT));
if (before) /* new window above current one */
{
wp->w_winrow = oldwin->w_winrow;
wp->w_status_height = STATUS_HEIGHT;
oldwin->w_winrow += wp->w_height + STATUS_HEIGHT;
}
else /* new window below current one */
{
wp->w_winrow = oldwin->w_winrow + VISIBLE_HEIGHT(oldwin)
+ STATUS_HEIGHT;
wp->w_status_height = oldwin->w_status_height;
if (!(flags & WSP_BOT))
oldwin->w_status_height = STATUS_HEIGHT;
}
if (flags & WSP_BOT)
frame_add_statusline(curfrp);
frame_fix_height(wp);
frame_fix_height(oldwin);
}
if (flags & (WSP_TOP | WSP_BOT))
(void)win_comp_pos();
/*
* Both windows need redrawing
*/
redraw_win_later(wp, NOT_VALID);
wp->w_redr_status = TRUE;
redraw_win_later(oldwin, NOT_VALID);
oldwin->w_redr_status = TRUE;
if (need_status)
{
msg_row = Rows - 1;
msg_col = sc_col;
msg_clr_eos_force(); /* Old command/ruler may still be there */
comp_col();
msg_row = Rows - 1;
msg_col = 0; /* put position back at start of line */
}
/*
* equalize the window sizes.
*/
if (do_equal || dir != 0)
win_equal(wp, TRUE,
(flags & WSP_VERT) ? (dir == 'v' ? 'b' : 'h')
: dir == 'h' ? 'b' : 'v');
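    /* For a vertical split only the widths need equalizing ('h'); when the
     * inserted window gave up its space vertically (dir == 'v') both
     * dimensions are equalized ('b').  The horizontal case mirrors this. */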
/* Don't change the window height/width to 'winheight' / 'winwidth' if a
* size was given. */
if (flags & WSP_VERT)
{
i = p_wiw;
if (size != 0)
p_wiw = size;
# ifdef FEAT_GUI
/* When 'guioptions' includes 'L' or 'R' may have to add scrollbars. */
if (gui.in_use)
gui_init_which_components(NULL);
# endif
}
else
{
i = p_wh;
if (size != 0)
p_wh = size;
}
#ifdef FEAT_JUMPLIST
/* Keep same changelist position in new window. */
wp->w_changelistidx = oldwin->w_changelistidx;
#endif
/*
* make the new window the current window
*/
win_enter_ext(wp, FALSE, FALSE, TRUE, TRUE, TRUE);
if (flags & WSP_VERT)
p_wiw = i;
else
p_wh = i;
return OK;
}
/*
* Initialize window "newp" from window "oldp".
* Used when splitting a window and when creating a new tab page.
* The windows will both edit the same buffer.
* WSP_NEWLOC may be specified in flags to prevent the location list from
* being copied.
*/
static void
win_init(win_T *newp, win_T *oldp, int flags UNUSED)
{
int i;
newp->w_buffer = oldp->w_buffer;
#ifdef FEAT_SYN_HL
newp->w_s = &(oldp->w_buffer->b_s);
#endif
oldp->w_buffer->b_nwindows++;
newp->w_cursor = oldp->w_cursor;
newp->w_valid = 0;
newp->w_curswant = oldp->w_curswant;
newp->w_set_curswant = oldp->w_set_curswant;
newp->w_topline = oldp->w_topline;
#ifdef FEAT_DIFF
newp->w_topfill = oldp->w_topfill;
#endif
newp->w_leftcol = oldp->w_leftcol;
newp->w_pcmark = oldp->w_pcmark;
newp->w_prev_pcmark = oldp->w_prev_pcmark;
newp->w_alt_fnum = oldp->w_alt_fnum;
newp->w_wrow = oldp->w_wrow;
newp->w_fraction = oldp->w_fraction;
newp->w_prev_fraction_row = oldp->w_prev_fraction_row;
#ifdef FEAT_JUMPLIST
copy_jumplist(oldp, newp);
#endif
#ifdef FEAT_QUICKFIX
if (flags & WSP_NEWLOC)
{
/* Don't copy the location list. */
newp->w_llist = NULL;
newp->w_llist_ref = NULL;
}
else
copy_loclist_stack(oldp, newp);
#endif
newp->w_localdir = (oldp->w_localdir == NULL)
? NULL : vim_strsave(oldp->w_localdir);
/* copy tagstack and folds */
for (i = 0; i < oldp->w_tagstacklen; i++)
{
taggy_T *tag = &newp->w_tagstack[i];
*tag = oldp->w_tagstack[i];
if (tag->tagname != NULL)
tag->tagname = vim_strsave(tag->tagname);
if (tag->user_data != NULL)
tag->user_data = vim_strsave(tag->user_data);
}
newp->w_tagstackidx = oldp->w_tagstackidx;
newp->w_tagstacklen = oldp->w_tagstacklen;
#ifdef FEAT_FOLDING
copyFoldingState(oldp, newp);
#endif
win_init_some(newp, oldp);
#ifdef FEAT_SYN_HL
check_colorcolumn(newp);
#endif
}
/*
 * Initialize window "newp" from window "oldp".
* Only the essential things are copied.
*/
static void
win_init_some(win_T *newp, win_T *oldp)
{
/* Use the same argument list. */
newp->w_alist = oldp->w_alist;
++newp->w_alist->al_refcount;
newp->w_arg_idx = oldp->w_arg_idx;
/* copy options from existing window */
win_copy_options(oldp, newp);
}
/*
* Return TRUE if "win" is a global popup or a popup in the current tab page.
*/
int
win_valid_popup(win_T *win UNUSED)
{
#ifdef FEAT_TEXT_PROP
win_T *wp;
for (wp = first_popupwin; wp != NULL; wp = wp->w_next)
if (wp == win)
return TRUE;
for (wp = curtab->tp_first_popupwin; wp != NULL; wp = wp->w_next)
if (wp == win)
return TRUE;
#endif
return FALSE;
}
/*
* Check if "win" is a pointer to an existing window in the current tab page.
*/
int
win_valid(win_T *win)
{
win_T *wp;
if (win == NULL)
return FALSE;
FOR_ALL_WINDOWS(wp)
if (wp == win)
return TRUE;
return win_valid_popup(win);
}
/*
* Check if "win" is a pointer to an existing window in any tab page.
*/
int
win_valid_any_tab(win_T *win)
{
win_T *wp;
tabpage_T *tp;
if (win == NULL)
return FALSE;
FOR_ALL_TABPAGES(tp)
{
FOR_ALL_WINDOWS_IN_TAB(tp, wp)
{
if (wp == win)
return TRUE;
}
#ifdef FEAT_TEXT_PROP
for (wp = tp->tp_first_popupwin; wp != NULL; wp = wp->w_next)
if (wp == win)
return TRUE;
#endif
}
return win_valid_popup(win);
}
/*
* Return the number of windows.
*/
int
win_count(void)
{
win_T *wp;
int count = 0;
FOR_ALL_WINDOWS(wp)
++count;
return count;
}
/*
* Make "count" windows on the screen.
* Return actual number of windows on the screen.
* Must be called when there is just one window, filling the whole screen
* (excluding the command line).
*/
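/*
 * Illustrative example: make_windows(3, FALSE) turns the single window into
 * up to three horizontally split ones, as used when starting "vim -o3".
 */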
int
make_windows(
int count,
int vertical UNUSED) /* split windows vertically if TRUE */
{
int maxcount;
int todo;
if (vertical)
{
        /* Each window needs at least 'winminwidth' columns and a separator
         * column. */
maxcount = (curwin->w_width + curwin->w_vsep_width
- (p_wiw - p_wmw)) / (p_wmw + 1);
}
else
{
/* Each window needs at least 'winminheight' lines and a status line. */
maxcount = (VISIBLE_HEIGHT(curwin) + curwin->w_status_height
- (p_wh - p_wmh)) / (p_wmh + STATUS_HEIGHT);
}
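    /* Rough arithmetic, assuming the defaults p_wh = p_wmh = 1 and
     * STATUS_HEIGHT = 1: a 24-line window without a status line allows
     * (24 + 0 - 0) / (1 + 1) = 12 windows at most. */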
if (maxcount < 2)
maxcount = 2;
if (count > maxcount)
count = maxcount;
/*
* add status line now, otherwise first window will be too big
*/
if (count > 1)
last_status(TRUE);
/*
* Don't execute autocommands while creating the windows. Must do that
* when putting the buffers in the windows.
*/
block_autocmds();
/* todo is number of windows left to create */
for (todo = count - 1; todo > 0; --todo)
if (vertical)
{
if (win_split(curwin->w_width - (curwin->w_width - todo)
/ (todo + 1) - 1, WSP_VERT | WSP_ABOVE) == FAIL)
break;
}
else
{
if (win_split(curwin->w_height - (curwin->w_height - todo
* STATUS_HEIGHT) / (todo + 1)
- STATUS_HEIGHT, WSP_ABOVE) == FAIL)
break;
}
unblock_autocmds();
/* return actual number of windows */
return (count - todo);
}
/*
* Exchange current and next window
*/
static void
win_exchange(long Prenum)
{
frame_T *frp;
frame_T *frp2;
win_T *wp;
win_T *wp2;
int temp;
if (ERROR_IF_POPUP_WINDOW)
return;
if (ONE_WINDOW) // just one window
{
beep_flush();
return;
}
#ifdef FEAT_GUI
need_mouse_correct = TRUE;
#endif
/*
* find window to exchange with
*/
if (Prenum)
{
frp = curwin->w_frame->fr_parent->fr_child;
while (frp != NULL && --Prenum > 0)
frp = frp->fr_next;
}
else if (curwin->w_frame->fr_next != NULL) /* Swap with next */
frp = curwin->w_frame->fr_next;
else /* Swap last window in row/col with previous */
frp = curwin->w_frame->fr_prev;
/* We can only exchange a window with another window, not with a frame
* containing windows. */
if (frp == NULL || frp->fr_win == NULL || frp->fr_win == curwin)
return;
wp = frp->fr_win;
/*
* 1. remove curwin from the list. Remember after which window it was in wp2
* 2. insert curwin before wp in the list
* if wp != wp2
* 3. remove wp from the list
* 4. insert wp after wp2
* 5. exchange the status line height and vsep width.
*/
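    /* Illustratively: with curwin A in a row [A][B][C] the result is
     * [B][A][C]; A and B trade list positions, status line heights and
     * separator widths. */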
wp2 = curwin->w_prev;
frp2 = curwin->w_frame->fr_prev;
if (wp->w_prev != curwin)
{
win_remove(curwin, NULL);
frame_remove(curwin->w_frame);
win_append(wp->w_prev, curwin);
frame_insert(frp, curwin->w_frame);
}
if (wp != wp2)
{
win_remove(wp, NULL);
frame_remove(wp->w_frame);
win_append(wp2, wp);
if (frp2 == NULL)
frame_insert(wp->w_frame->fr_parent->fr_child, wp->w_frame);
else
frame_append(frp2, wp->w_frame);
}
temp = curwin->w_status_height;
curwin->w_status_height = wp->w_status_height;
wp->w_status_height = temp;
temp = curwin->w_vsep_width;
curwin->w_vsep_width = wp->w_vsep_width;
wp->w_vsep_width = temp;
/* If the windows are not in the same frame, exchange the sizes to avoid
* messing up the window layout. Otherwise fix the frame sizes. */
if (curwin->w_frame->fr_parent != wp->w_frame->fr_parent)
{
temp = curwin->w_height;
curwin->w_height = wp->w_height;
wp->w_height = temp;
temp = curwin->w_width;
curwin->w_width = wp->w_width;
wp->w_width = temp;
}
else
{
frame_fix_height(curwin);
frame_fix_height(wp);
frame_fix_width(curwin);
frame_fix_width(wp);
}
(void)win_comp_pos(); /* recompute window positions */
win_enter(wp, TRUE);
redraw_all_later(NOT_VALID);
}
/*
 * Rotate windows: if "upwards" is TRUE the second window becomes the first
 * one, if "upwards" is FALSE the first window becomes the second one.
*/
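/*
 * Illustrative example: for a column [A][B][C], one rotation upwards gives
 * [B][C][A], one rotation downwards gives [C][A][B].
 */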
static void
win_rotate(int upwards, int count)
{
win_T *wp1;
win_T *wp2;
frame_T *frp;
int n;
if (ONE_WINDOW) /* nothing to do */
{
beep_flush();
return;
}
#ifdef FEAT_GUI
need_mouse_correct = TRUE;
#endif
/* Check if all frames in this row/col have one window. */
FOR_ALL_FRAMES(frp, curwin->w_frame->fr_parent->fr_child)
if (frp->fr_win == NULL)
{
emsg(_("E443: Cannot rotate when another window is split"));
return;
}
while (count--)
{
if (upwards) /* first window becomes last window */
{
/* remove first window/frame from the list */
frp = curwin->w_frame->fr_parent->fr_child;
wp1 = frp->fr_win;
win_remove(wp1, NULL);
frame_remove(frp);
/* find last frame and append removed window/frame after it */
for ( ; frp->fr_next != NULL; frp = frp->fr_next)
;
win_append(frp->fr_win, wp1);
frame_append(frp, wp1->w_frame);
wp2 = frp->fr_win; /* previously last window */
}
else /* last window becomes first window */
{
/* find last window/frame in the list and remove it */
for (frp = curwin->w_frame; frp->fr_next != NULL;
frp = frp->fr_next)
;
wp1 = frp->fr_win;
wp2 = wp1->w_prev; /* will become last window */
win_remove(wp1, NULL);
frame_remove(frp);
/* append the removed window/frame before the first in the list */
win_append(frp->fr_parent->fr_child->fr_win->w_prev, wp1);
frame_insert(frp->fr_parent->fr_child, frp);
}
/* exchange status height and vsep width of old and new last window */
n = wp2->w_status_height;
wp2->w_status_height = wp1->w_status_height;
wp1->w_status_height = n;
frame_fix_height(wp1);
frame_fix_height(wp2);
n = wp2->w_vsep_width;
wp2->w_vsep_width = wp1->w_vsep_width;
wp1->w_vsep_width = n;
frame_fix_width(wp1);
frame_fix_width(wp2);
/* recompute w_winrow and w_wincol for all windows */
(void)win_comp_pos();
}
redraw_all_later(NOT_VALID);
}
/*
* Move the current window to the very top/bottom/left/right of the screen.
*/
static void
win_totop(int size, int flags)
{
int dir;
int height = curwin->w_height;
if (ONE_WINDOW)
{
beep_flush();
return;
}
if (check_split_disallowed() == FAIL)
return;
/* Remove the window and frame from the tree of frames. */
(void)winframe_remove(curwin, &dir, NULL);
win_remove(curwin, NULL);
last_status(FALSE); /* may need to remove last status line */
(void)win_comp_pos(); /* recompute window positions */
/* Split a window on the desired side and put the window there. */
(void)win_split_ins(size, flags, curwin, dir);
if (!(flags & WSP_VERT))
{
win_setheight(height);
if (p_ea)
win_equal(curwin, TRUE, 'v');
}
#if defined(FEAT_GUI)
/* When 'guioptions' includes 'L' or 'R' may have to remove or add
* scrollbars. Have to update them anyway. */
gui_may_update_scrollbars();
#endif
}
/*
* Move window "win1" to below/right of "win2" and make "win1" the current
* window. Only works within the same frame!
*/
void
win_move_after(win_T *win1, win_T *win2)
{
int height;
/* check if the arguments are reasonable */
if (win1 == win2)
return;
/* check if there is something to do */
if (win2->w_next != win1)
{
if (win1->w_frame->fr_parent != win2->w_frame->fr_parent)
{
iemsg("INTERNAL: trying to move a window into another frame");
return;
}
        /* May need to move the status line/vertical separator of the last
         * window. */
if (win1 == lastwin)
{
height = win1->w_prev->w_status_height;
win1->w_prev->w_status_height = win1->w_status_height;
win1->w_status_height = height;
if (win1->w_prev->w_vsep_width == 1)
{
/* Remove the vertical separator from the last-but-one window,
* add it to the last window. Adjust the frame widths. */
win1->w_prev->w_vsep_width = 0;
win1->w_prev->w_frame->fr_width -= 1;
win1->w_vsep_width = 1;
win1->w_frame->fr_width += 1;
}
}
else if (win2 == lastwin)
{
height = win1->w_status_height;
win1->w_status_height = win2->w_status_height;
win2->w_status_height = height;
if (win1->w_vsep_width == 1)
{
/* Remove the vertical separator from win1, add it to the last
* window, win2. Adjust the frame widths. */
win2->w_vsep_width = 1;
win2->w_frame->fr_width += 1;
win1->w_vsep_width = 0;
win1->w_frame->fr_width -= 1;
}
}
win_remove(win1, NULL);
frame_remove(win1->w_frame);
win_append(win2, win1);
frame_append(win2->w_frame, win1->w_frame);
(void)win_comp_pos(); /* recompute w_winrow for all windows */
redraw_later(NOT_VALID);
}
win_enter(win1, FALSE);
}
/*
* Make all windows the same height.
* 'next_curwin' will soon be the current window, make sure it has enough
* rows.
*/
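/*
 * Illustrative use: win_equal(NULL, FALSE, 'b') equalizes both dimensions of
 * all windows (essentially what CTRL-W = does); passing 0 for "dir" follows
 * 'eadirection' instead.
 */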
void
win_equal(
win_T *next_curwin, /* pointer to current window to be or NULL */
int current, /* do only frame with current window */
int dir) /* 'v' for vertically, 'h' for horizontally,
'b' for both, 0 for using p_ead */
{
if (dir == 0)
dir = *p_ead;
win_equal_rec(next_curwin == NULL ? curwin : next_curwin, current,
topframe, dir, 0, tabline_height(),
(int)Columns, topframe->fr_height);
}
/*
* Set a frame to a new position and height, spreading the available room
* equally over contained frames.
* The window "next_curwin" (if not NULL) should at least get the size from
* 'winheight' and 'winwidth' if possible.
*/
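/*
 * Sketch of the recursion: FR_LEAF frames simply take the given geometry,
 * FR_ROW frames divide "width" over their children and recurse with the full
 * height, and FR_COL frames do the same with "height" and the full width.
 */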
static void
win_equal_rec(
win_T *next_curwin, /* pointer to current window to be or NULL */
int current, /* do only frame with current window */
    frame_T *topfr, /* frame to set size of */
int dir, /* 'v', 'h' or 'b', see win_equal() */
int col, /* horizontal position for frame */
int row, /* vertical position for frame */
int width, /* new width of frame */
int height) /* new height of frame */
{
int n, m;
int extra_sep = 0;
int wincount, totwincount = 0;
frame_T *fr;
int next_curwin_size = 0;
int room = 0;
int new_size;
int has_next_curwin = 0;
int hnc;
if (topfr->fr_layout == FR_LEAF)
{
/* Set the width/height of this frame.
* Redraw when size or position changes */
if (topfr->fr_height != height || topfr->fr_win->w_winrow != row
|| topfr->fr_width != width || topfr->fr_win->w_wincol != col
)
{
topfr->fr_win->w_winrow = row;
frame_new_height(topfr, height, FALSE, FALSE);
topfr->fr_win->w_wincol = col;
frame_new_width(topfr, width, FALSE, FALSE);
redraw_all_later(NOT_VALID);
}
}
else if (topfr->fr_layout == FR_ROW)
{
topfr->fr_width = width;
topfr->fr_height = height;
if (dir != 'v') /* equalize frame widths */
{
/* Compute the maximum number of windows horizontally in this
* frame. */
n = frame_minwidth(topfr, NOWIN);
/* add one for the rightmost window, it doesn't have a separator */
if (col + width == Columns)
extra_sep = 1;
else
extra_sep = 0;
totwincount = (n + extra_sep) / (p_wmw + 1);
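            /* frame_minwidth() counts p_wmw columns plus one separator per
             * window ("extra_sep" compensates for the rightmost window), so
             * the division above yields the number of windows. */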
has_next_curwin = frame_has_win(topfr, next_curwin);
/*
* Compute width for "next_curwin" window and room available for
* other windows.
* "m" is the minimal width when counting p_wiw for "next_curwin".
*/
m = frame_minwidth(topfr, next_curwin);
room = width - m;
if (room < 0)
{
next_curwin_size = p_wiw + room;
room = 0;
}
else
{
next_curwin_size = -1;
FOR_ALL_FRAMES(fr, topfr->fr_child)
{
/* If 'winfixwidth' set keep the window width if
* possible.
* Watch out for this window being the next_curwin. */
if (frame_fixed_width(fr))
{
n = frame_minwidth(fr, NOWIN);
new_size = fr->fr_width;
if (frame_has_win(fr, next_curwin))
{
room += p_wiw - p_wmw;
next_curwin_size = 0;
if (new_size < p_wiw)
new_size = p_wiw;
}
else
/* These windows don't use up room. */
totwincount -= (n + (fr->fr_next == NULL
? extra_sep : 0)) / (p_wmw + 1);
room -= new_size - n;
if (room < 0)
{
new_size += room;
room = 0;
}
fr->fr_newwidth = new_size;
}
}
if (next_curwin_size == -1)
{
if (!has_next_curwin)
next_curwin_size = 0;
else if (totwincount > 1
&& (room + (totwincount - 2))
/ (totwincount - 1) > p_wiw)
{
/* Can make all windows wider than 'winwidth', spread
* the room equally. */
next_curwin_size = (room + p_wiw
+ (totwincount - 1) * p_wmw
+ (totwincount - 1)) / totwincount;
room -= next_curwin_size - p_wiw;
}
else
next_curwin_size = p_wiw;
}
}
if (has_next_curwin)
--totwincount; /* don't count curwin */
}
FOR_ALL_FRAMES(fr, topfr->fr_child)
{
wincount = 1;
if (fr->fr_next == NULL)
/* last frame gets all that remains (avoid roundoff error) */
new_size = width;
else if (dir == 'v')
new_size = fr->fr_width;
else if (frame_fixed_width(fr))
{
new_size = fr->fr_newwidth;
wincount = 0; /* doesn't count as a sizeable window */
}
else
{
/* Compute the maximum number of windows horiz. in "fr". */
n = frame_minwidth(fr, NOWIN);
wincount = (n + (fr->fr_next == NULL ? extra_sep : 0))
/ (p_wmw + 1);
m = frame_minwidth(fr, next_curwin);
if (has_next_curwin)
hnc = frame_has_win(fr, next_curwin);
else
hnc = FALSE;
if (hnc) /* don't count next_curwin */
--wincount;
if (totwincount == 0)
new_size = room;
else
new_size = (wincount * room + ((unsigned)totwincount >> 1))
/ totwincount;
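                /* (Adding totwincount / 2 above makes the division round to
                 * the nearest integer instead of truncating.) */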
if (hnc) /* add next_curwin size */
{
next_curwin_size -= p_wiw - (m - n);
new_size += next_curwin_size;
room -= new_size - next_curwin_size;
}
else
room -= new_size;
new_size += n;
}
/* Skip frame that is full width when splitting or closing a
* window, unless equalizing all frames. */
if (!current || dir != 'v' || topfr->fr_parent != NULL
|| (new_size != fr->fr_width)
|| frame_has_win(fr, next_curwin))
win_equal_rec(next_curwin, current, fr, dir, col, row,
new_size, height);
col += new_size;
width -= new_size;
totwincount -= wincount;
}
}
else /* topfr->fr_layout == FR_COL */
{
topfr->fr_width = width;
topfr->fr_height = height;
if (dir != 'h') /* equalize frame heights */
{
/* Compute maximum number of windows vertically in this frame. */
n = frame_minheight(topfr, NOWIN);
/* add one for the bottom window if it doesn't have a statusline */
if (row + height == cmdline_row && p_ls == 0)
extra_sep = 1;
else
extra_sep = 0;
totwincount = (n + extra_sep) / (p_wmh + 1);
has_next_curwin = frame_has_win(topfr, next_curwin);
/*
* Compute height for "next_curwin" window and room available for
* other windows.
* "m" is the minimal height when counting p_wh for "next_curwin".
*/
m = frame_minheight(topfr, next_curwin);
room = height - m;
if (room < 0)
{
            /* The room is less than 'winheight', use all space for the
             * current window. */
next_curwin_size = p_wh + room;
room = 0;
}
else
{
next_curwin_size = -1;
FOR_ALL_FRAMES(fr, topfr->fr_child)
{
/* If 'winfixheight' set keep the window height if
* possible.
* Watch out for this window being the next_curwin. */
if (frame_fixed_height(fr))
{
n = frame_minheight(fr, NOWIN);
new_size = fr->fr_height;
if (frame_has_win(fr, next_curwin))
{
room += p_wh - p_wmh;
next_curwin_size = 0;
if (new_size < p_wh)
new_size = p_wh;
}
else
/* These windows don't use up room. */
totwincount -= (n + (fr->fr_next == NULL
? extra_sep : 0)) / (p_wmh + 1);
room -= new_size - n;
if (room < 0)
{
new_size += room;
room = 0;
}
fr->fr_newheight = new_size;
}
}
if (next_curwin_size == -1)
{
if (!has_next_curwin)
next_curwin_size = 0;
else if (totwincount > 1
&& (room + (totwincount - 2))
/ (totwincount - 1) > p_wh)
{
/* can make all windows higher than 'winheight',
* spread the room equally. */
next_curwin_size = (room + p_wh
+ (totwincount - 1) * p_wmh
+ (totwincount - 1)) / totwincount;
room -= next_curwin_size - p_wh;
}
else
next_curwin_size = p_wh;
}
}
if (has_next_curwin)
--totwincount; /* don't count curwin */
}
FOR_ALL_FRAMES(fr, topfr->fr_child)
{
wincount = 1;
if (fr->fr_next == NULL)
/* last frame gets all that remains (avoid roundoff error) */
new_size = height;
else if (dir == 'h')
new_size = fr->fr_height;
else if (frame_fixed_height(fr))
{
new_size = fr->fr_newheight;
wincount = 0; /* doesn't count as a sizeable window */
}
else
{
/* Compute the maximum number of windows vert. in "fr". */
n = frame_minheight(fr, NOWIN);
wincount = (n + (fr->fr_next == NULL ? extra_sep : 0))
/ (p_wmh + 1);
m = frame_minheight(fr, next_curwin);
if (has_next_curwin)
hnc = frame_has_win(fr, next_curwin);
else
hnc = FALSE;
if (hnc) /* don't count next_curwin */
--wincount;
if (totwincount == 0)
new_size = room;
else
new_size = (wincount * room + ((unsigned)totwincount >> 1))
/ totwincount;
if (hnc) /* add next_curwin size */
{
next_curwin_size -= p_wh - (m - n);
new_size += next_curwin_size;
room -= new_size - next_curwin_size;
}
else
room -= new_size;
new_size += n;
}
            /* Skip frame that is full height when splitting or closing a
             * window, unless equalizing all frames. */
if (!current || dir != 'h' || topfr->fr_parent != NULL
|| (new_size != fr->fr_height)
|| frame_has_win(fr, next_curwin))
win_equal_rec(next_curwin, current, fr, dir, col, row,
width, new_size);
row += new_size;
height -= new_size;
totwincount -= wincount;
}
}
}
#ifdef FEAT_JOB_CHANNEL
static void
leaving_window(win_T *win)
{
// Only matters for a prompt window.
if (!bt_prompt(win->w_buffer))
return;
// When leaving a prompt window stop Insert mode and perhaps restart
// it when entering that window again.
win->w_buffer->b_prompt_insert = restart_edit;
if (restart_edit != 0 && mode_displayed)
clear_cmdline = TRUE; /* unshow mode later */
restart_edit = NUL;
    // When leaving the window (or closing it) from a callback we need to
    // break out of the Insert mode loop and restart Insert mode when
    // entering the window again.
if (State & INSERT)
{
stop_insert_mode = TRUE;
if (win->w_buffer->b_prompt_insert == NUL)
win->w_buffer->b_prompt_insert = 'A';
}
}
static void
entering_window(win_T *win)
{
// Only matters for a prompt window.
if (!bt_prompt(win->w_buffer))
return;
// When switching to a prompt buffer that was in Insert mode, don't stop
// Insert mode, it may have been set in leaving_window().
if (win->w_buffer->b_prompt_insert != NUL)
stop_insert_mode = FALSE;
// When entering the prompt window restart Insert mode if we were in Insert
// mode when we left it.
restart_edit = win->w_buffer->b_prompt_insert;
}
#endif
/*
* Close all windows for buffer "buf".
*/
void
close_windows(
buf_T *buf,
int keep_curwin) /* don't close "curwin" */
{
win_T *wp;
tabpage_T *tp, *nexttp;
int h = tabline_height();
int count = tabpage_index(NULL);
++RedrawingDisabled;
for (wp = firstwin; wp != NULL && !ONE_WINDOW; )
{
if (wp->w_buffer == buf && (!keep_curwin || wp != curwin)
&& !(wp->w_closing || wp->w_buffer->b_locked > 0))
{
if (win_close(wp, FALSE) == FAIL)
/* If closing the window fails give up, to avoid looping
* forever. */
break;
/* Start all over, autocommands may change the window layout. */
wp = firstwin;
}
else
wp = wp->w_next;
}
/* Also check windows in other tab pages. */
for (tp = first_tabpage; tp != NULL; tp = nexttp)
{
nexttp = tp->tp_next;
if (tp != curtab)
for (wp = tp->tp_firstwin; wp != NULL; wp = wp->w_next)
if (wp->w_buffer == buf
&& !(wp->w_closing || wp->w_buffer->b_locked > 0))
{
win_close_othertab(wp, FALSE, tp);
/* Start all over, the tab page may be closed and
* autocommands may change the window layout. */
nexttp = first_tabpage;
break;
}
}
--RedrawingDisabled;
if (count != tabpage_index(NULL))
apply_autocmds(EVENT_TABCLOSED, NULL, NULL, FALSE, curbuf);
redraw_tabline = TRUE;
if (h != tabline_height())
shell_new_rows();
}
/*
* Return TRUE if the current window is the only window that exists (ignoring
* "aucmd_win").
 * Returns FALSE if there is another window, possibly in another tab page.
*/
static int
last_window(void)
{
return (one_window() && first_tabpage->tp_next == NULL);
}
/*
* Return TRUE if there is only one window other than "aucmd_win" in the
* current tab page.
*/
int
one_window(void)
{
win_T *wp;
int seen_one = FALSE;
FOR_ALL_WINDOWS(wp)
{
if (wp != aucmd_win)
{
if (seen_one)
return FALSE;
seen_one = TRUE;
}
}
return TRUE;
}
/*
* Close the possibly last window in a tab page.
* Returns TRUE when the window was closed already.
*/
static int
close_last_window_tabpage(
win_T *win,
int free_buf,
tabpage_T *prev_curtab)
{
if (ONE_WINDOW)
{
buf_T *old_curbuf = curbuf;
/*
* Closing the last window in a tab page. First go to another tab
         * page and then close the window and the tab page.  This avoids
         * curwin and curtab becoming invalid while we are freeing memory;
         * they may be used in GUI events.
* Don't trigger autocommands yet, they may use wrong values, so do
* that below.
*/
goto_tabpage_tp(alt_tabpage(), FALSE, TRUE);
redraw_tabline = TRUE;
/* Safety check: Autocommands may have closed the window when jumping
* to the other tab page. */
if (valid_tabpage(prev_curtab) && prev_curtab->tp_firstwin == win)
{
int h = tabline_height();
win_close_othertab(win, free_buf, prev_curtab);
if (h != tabline_height())
shell_new_rows();
}
#ifdef FEAT_JOB_CHANNEL
entering_window(curwin);
#endif
/* Since goto_tabpage_tp above did not trigger *Enter autocommands, do
* that now. */
apply_autocmds(EVENT_TABCLOSED, NULL, NULL, FALSE, curbuf);
apply_autocmds(EVENT_WINENTER, NULL, NULL, FALSE, curbuf);
apply_autocmds(EVENT_TABENTER, NULL, NULL, FALSE, curbuf);
if (old_curbuf != curbuf)
apply_autocmds(EVENT_BUFENTER, NULL, NULL, FALSE, curbuf);
return TRUE;
}
return FALSE;
}
/*
* Close the buffer of "win" and unload it if "action" is DOBUF_UNLOAD.
* "action" can also be zero (do nothing) or DOBUF_WIPE.
* "abort_if_last" is passed to close_buffer(): abort closing if all other
* windows are closed.
*/
static void
win_close_buffer(win_T *win, int action, int abort_if_last)
{
#ifdef FEAT_SYN_HL
// Free independent synblock before the buffer is freed.
if (win->w_buffer != NULL)
reset_synblock(win);
#endif
#ifdef FEAT_QUICKFIX
// When the quickfix/location list window is closed, unlist the buffer.
if (win->w_buffer != NULL && bt_quickfix(win->w_buffer))
win->w_buffer->b_p_bl = FALSE;
#endif
// Close the link to the buffer.
if (win->w_buffer != NULL)
{
bufref_T bufref;
set_bufref(&bufref, curbuf);
win->w_closing = TRUE;
close_buffer(win, win->w_buffer, action, abort_if_last);
if (win_valid_any_tab(win))
win->w_closing = FALSE;
// Make sure curbuf is valid. It can become invalid if 'bufhidden' is
// "wipe".
if (!bufref_valid(&bufref))
curbuf = firstbuf;
}
}
/*
* Close window "win". Only works for the current tab page.
* If "free_buf" is TRUE related buffer may be unloaded.
*
* Called by :quit, :close, :xit, :wq and findtag().
* Returns FAIL when the window was not closed.
*/
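/*
 * Illustrative use (cf. close_others() below): win_close(wp,
 * !buf_hide(wp->w_buffer) && !bufIsChanged(wp->w_buffer)) closes "wp" and
 * unloads its buffer only when it is neither hidden nor modified.
 */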
int
win_close(win_T *win, int free_buf)
{
win_T *wp;
int other_buffer = FALSE;
int close_curwin = FALSE;
int dir;
int help_window = FALSE;
tabpage_T *prev_curtab = curtab;
frame_T *win_frame = win->w_frame->fr_parent;
if (ERROR_IF_POPUP_WINDOW)
return FAIL;
if (last_window())
{
emsg(_("E444: Cannot close last window"));
return FAIL;
}
if (win->w_closing || (win->w_buffer != NULL
&& win->w_buffer->b_locked > 0))
return FAIL; /* window is already being closed */
if (win_unlisted(win))
{
emsg(_("E813: Cannot close autocmd or popup window"));
return FAIL;
}
if ((firstwin == aucmd_win || lastwin == aucmd_win) && one_window())
{
emsg(_("E814: Cannot close window, only autocmd window would remain"));
return FAIL;
}
    /* When closing the last window in a tab page first go to another tab page
     * and then close the window and the tab page, to avoid curwin and curtab
     * becoming invalid while we are freeing memory. */
if (close_last_window_tabpage(win, free_buf, prev_curtab))
return FAIL;
/* When closing the help window, try restoring a snapshot after closing
* the window. Otherwise clear the snapshot, it's now invalid. */
if (bt_help(win->w_buffer))
help_window = TRUE;
else
clear_snapshot(curtab, SNAP_HELP_IDX);
if (win == curwin)
{
#ifdef FEAT_JOB_CHANNEL
leaving_window(curwin);
#endif
/*
* Guess which window is going to be the new current window.
* This may change because of the autocommands (sigh).
*/
wp = frame2win(win_altframe(win, NULL));
/*
* Be careful: If autocommands delete the window or cause this window
* to be the last one left, return now.
*/
if (wp->w_buffer != curbuf)
{
other_buffer = TRUE;
win->w_closing = TRUE;
apply_autocmds(EVENT_BUFLEAVE, NULL, NULL, FALSE, curbuf);
if (!win_valid(win))
return FAIL;
win->w_closing = FALSE;
if (last_window())
return FAIL;
}
win->w_closing = TRUE;
apply_autocmds(EVENT_WINLEAVE, NULL, NULL, FALSE, curbuf);
if (!win_valid(win))
return FAIL;
win->w_closing = FALSE;
if (last_window())
return FAIL;
#ifdef FEAT_EVAL
/* autocmds may abort script processing */
if (aborting())
return FAIL;
#endif
}
#ifdef FEAT_GUI
// Avoid trouble with scrollbars that are going to be deleted in
// win_free().
if (gui.in_use)
out_flush();
#endif
#ifdef FEAT_TEXT_PROP
if (popup_win_closed(win) && !win_valid(win))
return FAIL;
#endif
win_close_buffer(win, free_buf ? DOBUF_UNLOAD : 0, TRUE);
if (only_one_window() && win_valid(win) && win->w_buffer == NULL
&& (last_window() || curtab != prev_curtab
|| close_last_window_tabpage(win, free_buf, prev_curtab)))
{
/* Autocommands have closed all windows, quit now. Restore
* curwin->w_buffer, otherwise writing viminfo may fail. */
if (curwin->w_buffer == NULL)
curwin->w_buffer = curbuf;
getout(0);
}
/* Autocommands may have moved to another tab page. */
if (curtab != prev_curtab && win_valid_any_tab(win)
&& win->w_buffer == NULL)
{
/* Need to close the window anyway, since the buffer is NULL. */
win_close_othertab(win, FALSE, prev_curtab);
return FAIL;
}
/* Autocommands may have closed the window already or closed the only
* other window. */
if (!win_valid(win) || last_window()
|| close_last_window_tabpage(win, free_buf, prev_curtab))
return FAIL;
// Now we are really going to close the window. Disallow any autocommand
// to split a window to avoid trouble.
++split_disallowed;
/* Free the memory used for the window and get the window that received
* the screen space. */
wp = win_free_mem(win, &dir, NULL);
/* Make sure curwin isn't invalid. It can cause severe trouble when
* printing an error message. For win_equal() curbuf needs to be valid
* too. */
if (win == curwin)
{
curwin = wp;
#ifdef FEAT_QUICKFIX
if (wp->w_p_pvw || bt_quickfix(wp->w_buffer))
{
/*
* If the cursor goes to the preview or the quickfix window, try
* finding another window to go to.
*/
for (;;)
{
if (wp->w_next == NULL)
wp = firstwin;
else
wp = wp->w_next;
if (wp == curwin)
break;
if (!wp->w_p_pvw && !bt_quickfix(wp->w_buffer))
{
curwin = wp;
break;
}
}
}
#endif
curbuf = curwin->w_buffer;
close_curwin = TRUE;
/* The cursor position may be invalid if the buffer changed after last
* using the window. */
check_cursor();
}
if (p_ea && (*p_ead == 'b' || *p_ead == dir))
/* If the frame of the closed window contains the new current window,
* only resize that frame. Otherwise resize all windows. */
win_equal(curwin, curwin->w_frame->fr_parent == win_frame, dir);
else
win_comp_pos();
if (close_curwin)
{
win_enter_ext(wp, FALSE, TRUE, FALSE, TRUE, TRUE);
if (other_buffer)
/* careful: after this wp and win may be invalid! */
apply_autocmds(EVENT_BUFENTER, NULL, NULL, FALSE, curbuf);
}
--split_disallowed;
/*
* If last window has a status line now and we don't want one,
* remove the status line.
*/
last_status(FALSE);
/* After closing the help window, try restoring the window layout from
* before it was opened. */
if (help_window)
restore_snapshot(SNAP_HELP_IDX, close_curwin);
#if defined(FEAT_GUI)
/* When 'guioptions' includes 'L' or 'R' may have to remove scrollbars. */
if (gui.in_use && !win_hasvertsplit())
gui_init_which_components(NULL);
#endif
redraw_all_later(NOT_VALID);
return OK;
}
/*
* Close window "win" in tab page "tp", which is not the current tab page.
* This may be the last window in that tab page and result in closing the tab,
* thus "tp" may become invalid!
* Caller must check if buffer is hidden and whether the tabline needs to be
* updated.
*/
void
win_close_othertab(win_T *win, int free_buf, tabpage_T *tp)
{
win_T *wp;
int dir;
tabpage_T *ptp = NULL;
int free_tp = FALSE;
/* Get here with win->w_buffer == NULL when win_close() detects the tab
* page changed. */
if (win->w_closing || (win->w_buffer != NULL
&& win->w_buffer->b_locked > 0))
return; /* window is already being closed */
if (win->w_buffer != NULL)
/* Close the link to the buffer. */
close_buffer(win, win->w_buffer, free_buf ? DOBUF_UNLOAD : 0, FALSE);
/* Careful: Autocommands may have closed the tab page or made it the
* current tab page. */
for (ptp = first_tabpage; ptp != NULL && ptp != tp; ptp = ptp->tp_next)
;
if (ptp == NULL || tp == curtab)
return;
/* Autocommands may have closed the window already. */
for (wp = tp->tp_firstwin; wp != NULL && wp != win; wp = wp->w_next)
;
if (wp == NULL)
return;
/* When closing the last window in a tab page remove the tab page. */
if (tp->tp_firstwin == tp->tp_lastwin)
{
if (tp == first_tabpage)
first_tabpage = tp->tp_next;
else
{
for (ptp = first_tabpage; ptp != NULL && ptp->tp_next != tp;
ptp = ptp->tp_next)
;
if (ptp == NULL)
{
internal_error("win_close_othertab()");
return;
}
ptp->tp_next = tp->tp_next;
}
free_tp = TRUE;
}
/* Free the memory used for the window. */
win_free_mem(win, &dir, tp);
if (free_tp)
free_tabpage(tp);
}
/*
* Free the memory used for a window.
* Returns a pointer to the window that got the freed up space.
*/
static win_T *
win_free_mem(
win_T *win,
int *dirp, /* set to 'v' or 'h' for direction if 'ea' */
tabpage_T *tp) /* tab page "win" is in, NULL for current */
{
frame_T *frp;
win_T *wp;
/* Remove the window and its frame from the tree of frames. */
frp = win->w_frame;
wp = winframe_remove(win, dirp, tp);
vim_free(frp);
win_free(win, tp);
/* When deleting the current window of another tab page select a new
* current window. */
if (tp != NULL && win == tp->tp_curwin)
tp->tp_curwin = wp;
return wp;
}
#if defined(EXITFREE) || defined(PROTO)
void
win_free_all(void)
{
int dummy;
while (first_tabpage->tp_next != NULL)
tabpage_close(TRUE);
if (aucmd_win != NULL)
{
(void)win_free_mem(aucmd_win, &dummy, NULL);
aucmd_win = NULL;
}
# ifdef FEAT_TEXT_PROP
close_all_popups();
# endif
while (firstwin != NULL)
(void)win_free_mem(firstwin, &dummy, NULL);
/* No window should be used after this. Set curwin to NULL to crash
* instead of using freed memory. */
curwin = NULL;
}
#endif
/*
* Remove a window and its frame from the tree of frames.
* Returns a pointer to the window that got the freed up space.
*/
win_T *
winframe_remove(
win_T *win,
int *dirp UNUSED, /* set to 'v' or 'h' for direction if 'ea' */
tabpage_T *tp) /* tab page "win" is in, NULL for current */
{
frame_T *frp, *frp2, *frp3;
frame_T *frp_close = win->w_frame;
win_T *wp;
/*
* If there is only one window there is nothing to remove.
*/
if (tp == NULL ? ONE_WINDOW : tp->tp_firstwin == tp->tp_lastwin)
return NULL;
/*
* Remove the window from its frame.
*/
frp2 = win_altframe(win, tp);
wp = frame2win(frp2);
/* Remove this frame from the list of frames. */
frame_remove(frp_close);
if (frp_close->fr_parent->fr_layout == FR_COL)
{
/* When 'winfixheight' is set, try to find another frame in the column
* (as close to the closed frame as possible) to distribute the height
* to. */
if (frp2->fr_win != NULL && frp2->fr_win->w_p_wfh)
{
frp = frp_close->fr_prev;
frp3 = frp_close->fr_next;
while (frp != NULL || frp3 != NULL)
{
if (frp != NULL)
{
if (!frame_fixed_height(frp))
{
frp2 = frp;
wp = frame2win(frp2);
break;
}
frp = frp->fr_prev;
}
if (frp3 != NULL)
{
if (frp3->fr_win != NULL && !frp3->fr_win->w_p_wfh)
{
frp2 = frp3;
wp = frp3->fr_win;
break;
}
frp3 = frp3->fr_next;
}
}
}
frame_new_height(frp2, frp2->fr_height + frp_close->fr_height,
frp2 == frp_close->fr_next ? TRUE : FALSE, FALSE);
*dirp = 'v';
}
else
{
        /* When 'winfixwidth' is set, try to find another frame in the row
         * (as close to the closed frame as possible) to distribute the width
         * to. */
if (frp2->fr_win != NULL && frp2->fr_win->w_p_wfw)
{
frp = frp_close->fr_prev;
frp3 = frp_close->fr_next;
while (frp != NULL || frp3 != NULL)
{
if (frp != NULL)
{
if (!frame_fixed_width(frp))
{
frp2 = frp;
wp = frame2win(frp2);
break;
}
frp = frp->fr_prev;
}
if (frp3 != NULL)
{
if (frp3->fr_win != NULL && !frp3->fr_win->w_p_wfw)
{
frp2 = frp3;
wp = frp3->fr_win;
break;
}
frp3 = frp3->fr_next;
}
}
}
frame_new_width(frp2, frp2->fr_width + frp_close->fr_width,
frp2 == frp_close->fr_next ? TRUE : FALSE, FALSE);
*dirp = 'h';
}
    /* If rows/columns go to a window below/right, its positions need to be
     * updated.  Can only be done after the sizes have been updated. */
if (frp2 == frp_close->fr_next)
{
int row = win->w_winrow;
int col = win->w_wincol;
frame_comp_pos(frp2, &row, &col);
}
if (frp2->fr_next == NULL && frp2->fr_prev == NULL)
{
/* There is no other frame in this list, move its info to the parent
* and remove it. */
frp2->fr_parent->fr_layout = frp2->fr_layout;
frp2->fr_parent->fr_child = frp2->fr_child;
FOR_ALL_FRAMES(frp, frp2->fr_child)
frp->fr_parent = frp2->fr_parent;
frp2->fr_parent->fr_win = frp2->fr_win;
if (frp2->fr_win != NULL)
frp2->fr_win->w_frame = frp2->fr_parent;
frp = frp2->fr_parent;
if (topframe->fr_child == frp2)
topframe->fr_child = frp;
vim_free(frp2);
frp2 = frp->fr_parent;
if (frp2 != NULL && frp2->fr_layout == frp->fr_layout)
{
/* The frame above the parent has the same layout, have to merge
* the frames into this list. */
if (frp2->fr_child == frp)
frp2->fr_child = frp->fr_child;
frp->fr_child->fr_prev = frp->fr_prev;
if (frp->fr_prev != NULL)
frp->fr_prev->fr_next = frp->fr_child;
for (frp3 = frp->fr_child; ; frp3 = frp3->fr_next)
{
frp3->fr_parent = frp2;
if (frp3->fr_next == NULL)
{
frp3->fr_next = frp->fr_next;
if (frp->fr_next != NULL)
frp->fr_next->fr_prev = frp3;
break;
}
}
if (topframe->fr_child == frp)
topframe->fr_child = frp2;
vim_free(frp);
}
}
return wp;
}
/*
* Return a pointer to the frame that will receive the empty screen space that
* is left over after "win" is closed.
*
* If 'splitbelow' or 'splitright' is set, the space goes above or to the left
* by default. Otherwise, the free space goes below or to the right. The
* result is that opening a window and then immediately closing it will
* preserve the initial window layout. The 'wfh' and 'wfw' settings are
* respected when possible.
*/
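/*
 * Illustrative example: with both 'splitbelow' and 'splitright' off, closing
 * the upper one of two horizontally split windows hands its rows to the
 * window below it (the frame's fr_next).
 */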
static frame_T *
win_altframe(
win_T *win,
tabpage_T *tp) /* tab page "win" is in, NULL for current */
{
frame_T *frp;
frame_T *other_fr, *target_fr;
if (tp == NULL ? ONE_WINDOW : tp->tp_firstwin == tp->tp_lastwin)
return alt_tabpage()->tp_curwin->w_frame;
frp = win->w_frame;
if (frp->fr_prev == NULL)
return frp->fr_next;
if (frp->fr_next == NULL)
return frp->fr_prev;
target_fr = frp->fr_next;
other_fr = frp->fr_prev;
if (p_spr || p_sb)
{
target_fr = frp->fr_prev;
other_fr = frp->fr_next;
}
/* If 'wfh' or 'wfw' is set for the target and not for the alternate
* window, reverse the selection. */
if (frp->fr_parent != NULL && frp->fr_parent->fr_layout == FR_ROW)
{
if (frame_fixed_width(target_fr) && !frame_fixed_width(other_fr))
target_fr = other_fr;
}
else
{
if (frame_fixed_height(target_fr) && !frame_fixed_height(other_fr))
target_fr = other_fr;
}
return target_fr;
}
/*
* Return the tabpage that will be used if the current one is closed.
*/
static tabpage_T *
alt_tabpage(void)
{
tabpage_T *tp;
/* Use the next tab page if possible. */
if (curtab->tp_next != NULL)
return curtab->tp_next;
/* Find the last but one tab page. */
for (tp = first_tabpage; tp->tp_next != curtab; tp = tp->tp_next)
;
return tp;
}
/*
* Find the left-upper window in frame "frp".
*/
static win_T *
frame2win(frame_T *frp)
{
while (frp->fr_win == NULL)
frp = frp->fr_child;
return frp->fr_win;
}
/*
* Return TRUE if frame "frp" contains window "wp".
*/
static int
frame_has_win(frame_T *frp, win_T *wp)
{
frame_T *p;
if (frp->fr_layout == FR_LEAF)
return frp->fr_win == wp;
FOR_ALL_FRAMES(p, frp->fr_child)
if (frame_has_win(p, wp))
return TRUE;
return FALSE;
}
/*
* Set a new height for a frame. Recursively sets the height for contained
* frames and windows. Caller must take care of positions.
*/
static void
frame_new_height(
frame_T *topfrp,
int height,
int topfirst, /* resize topmost contained frame first */
int wfh) /* obey 'winfixheight' when there is a choice;
may cause the height not to be set */
{
frame_T *frp;
int extra_lines;
int h;
if (topfrp->fr_win != NULL)
{
/* Simple case: just one window. */
win_new_height(topfrp->fr_win,
height - topfrp->fr_win->w_status_height
- WINBAR_HEIGHT(topfrp->fr_win));
}
else if (topfrp->fr_layout == FR_ROW)
{
do
{
/* All frames in this row get the same new height. */
FOR_ALL_FRAMES(frp, topfrp->fr_child)
{
frame_new_height(frp, height, topfirst, wfh);
if (frp->fr_height > height)
{
/* Could not fit the windows, make the whole row higher. */
height = frp->fr_height;
break;
}
}
}
while (frp != NULL);
}
else /* fr_layout == FR_COL */
{
/* Complicated case: Resize a column of frames. Resize the bottom
* frame first, frames above that when needed. */
frp = topfrp->fr_child;
if (wfh)
/* Advance past frames with one window with 'wfh' set. */
while (frame_fixed_height(frp))
{
frp = frp->fr_next;
if (frp == NULL)
return; /* no frame without 'wfh', give up */
}
if (!topfirst)
{
/* Find the bottom frame of this column */
while (frp->fr_next != NULL)
frp = frp->fr_next;
if (wfh)
/* Advance back for frames with one window with 'wfh' set. */
while (frame_fixed_height(frp))
frp = frp->fr_prev;
}
extra_lines = height - topfrp->fr_height;
if (extra_lines < 0)
{
/* reduce height of contained frames, bottom or top frame first */
while (frp != NULL)
{
h = frame_minheight(frp, NULL);
if (frp->fr_height + extra_lines < h)
{
extra_lines += frp->fr_height - h;
frame_new_height(frp, h, topfirst, wfh);
}
else
{
frame_new_height(frp, frp->fr_height + extra_lines,
topfirst, wfh);
break;
}
if (topfirst)
{
do
frp = frp->fr_next;
while (wfh && frp != NULL && frame_fixed_height(frp));
}
else
{
do
frp = frp->fr_prev;
while (wfh && frp != NULL && frame_fixed_height(frp));
}
/* Increase "height" if we could not reduce enough frames. */
if (frp == NULL)
height -= extra_lines;
}
}
else if (extra_lines > 0)
{
/* increase height of bottom or top frame */
frame_new_height(frp, frp->fr_height + extra_lines, topfirst, wfh);
}
}
topfrp->fr_height = height;
}
/*
* Return TRUE if height of frame "frp" should not be changed because of
* the 'winfixheight' option.
*/
static int
frame_fixed_height(frame_T *frp)
{
/* frame with one window: fixed height if 'winfixheight' set. */
if (frp->fr_win != NULL)
return frp->fr_win->w_p_wfh;
if (frp->fr_layout == FR_ROW)
{
/* The frame is fixed height if one of the frames in the row is fixed
* height. */
FOR_ALL_FRAMES(frp, frp->fr_child)
if (frame_fixed_height(frp))
return TRUE;
return FALSE;
}
    /* frp->fr_layout == FR_COL: The frame is fixed height if all of the
     * frames in the column are fixed height. */
FOR_ALL_FRAMES(frp, frp->fr_child)
if (!frame_fixed_height(frp))
return FALSE;
return TRUE;
}
/*
* Return TRUE if width of frame "frp" should not be changed because of
* the 'winfixwidth' option.
*/
static int
frame_fixed_width(frame_T *frp)
{
/* frame with one window: fixed width if 'winfixwidth' set. */
if (frp->fr_win != NULL)
return frp->fr_win->w_p_wfw;
if (frp->fr_layout == FR_COL)
{
        /* The frame is fixed width if one of the frames in the column is
         * fixed width. */
FOR_ALL_FRAMES(frp, frp->fr_child)
if (frame_fixed_width(frp))
return TRUE;
return FALSE;
}
/* frp->fr_layout == FR_ROW: The frame is fixed width if all of the
* frames in the row are fixed width. */
FOR_ALL_FRAMES(frp, frp->fr_child)
if (!frame_fixed_width(frp))
return FALSE;
return TRUE;
}
/*
* Add a status line to windows at the bottom of "frp".
* Note: Does not check if there is room!
*/
static void
frame_add_statusline(frame_T *frp)
{
win_T *wp;
if (frp->fr_layout == FR_LEAF)
{
wp = frp->fr_win;
if (wp->w_status_height == 0)
{
if (wp->w_height > 0) /* don't make it negative */
--wp->w_height;
wp->w_status_height = STATUS_HEIGHT;
}
}
else if (frp->fr_layout == FR_ROW)
{
/* Handle all the frames in the row. */
FOR_ALL_FRAMES(frp, frp->fr_child)
frame_add_statusline(frp);
}
else /* frp->fr_layout == FR_COL */
{
/* Only need to handle the last frame in the column. */
for (frp = frp->fr_child; frp->fr_next != NULL; frp = frp->fr_next)
;
frame_add_statusline(frp);
}
}
/*
* Set width of a frame. Handles recursively going through contained frames.
* May remove separator line for windows at the right side (for win_close()).
*/
static void
frame_new_width(
frame_T *topfrp,
int width,
int leftfirst, /* resize leftmost contained frame first */
int wfw) /* obey 'winfixwidth' when there is a choice;
may cause the width not to be set */
{
frame_T *frp;
int extra_cols;
int w;
win_T *wp;
if (topfrp->fr_layout == FR_LEAF)
{
/* Simple case: just one window. */
wp = topfrp->fr_win;
/* Find out if there are any windows right of this one. */
for (frp = topfrp; frp->fr_parent != NULL; frp = frp->fr_parent)
if (frp->fr_parent->fr_layout == FR_ROW && frp->fr_next != NULL)
break;
if (frp->fr_parent == NULL)
wp->w_vsep_width = 0;
win_new_width(wp, width - wp->w_vsep_width);
}
else if (topfrp->fr_layout == FR_COL)
{
do
{
/* All frames in this column get the same new width. */
FOR_ALL_FRAMES(frp, topfrp->fr_child)
{
frame_new_width(frp, width, leftfirst, wfw);
if (frp->fr_width > width)
{
/* Could not fit the windows, make whole column wider. */
width = frp->fr_width;
break;
}
}
} while (frp != NULL);
}
else /* fr_layout == FR_ROW */
{
/* Complicated case: Resize a row of frames. Resize the rightmost
* frame first, frames left of it when needed. */
frp = topfrp->fr_child;
if (wfw)
/* Advance past frames with one window with 'wfw' set. */
while (frame_fixed_width(frp))
{
frp = frp->fr_next;
if (frp == NULL)
return; /* no frame without 'wfw', give up */
}
if (!leftfirst)
{
/* Find the rightmost frame of this row */
while (frp->fr_next != NULL)
frp = frp->fr_next;
if (wfw)
/* Advance back for frames with one window with 'wfw' set. */
while (frame_fixed_width(frp))
frp = frp->fr_prev;
}
extra_cols = width - topfrp->fr_width;
if (extra_cols < 0)
{
/* reduce frame width, rightmost frame first */
while (frp != NULL)
{
w = frame_minwidth(frp, NULL);
if (frp->fr_width + extra_cols < w)
{
extra_cols += frp->fr_width - w;
frame_new_width(frp, w, leftfirst, wfw);
}
else
{
frame_new_width(frp, frp->fr_width + extra_cols,
leftfirst, wfw);
break;
}
if (leftfirst)
{
do
frp = frp->fr_next;
while (wfw && frp != NULL && frame_fixed_width(frp));
}
else
{
do
frp = frp->fr_prev;
while (wfw && frp != NULL && frame_fixed_width(frp));
}
/* Increase "width" if we could not reduce enough frames. */
if (frp == NULL)
width -= extra_cols;
}
}
else if (extra_cols > 0)
{
/* increase width of rightmost frame */
frame_new_width(frp, frp->fr_width + extra_cols, leftfirst, wfw);
}
}
topfrp->fr_width = width;
}
/*
* Add the vertical separator to windows at the right side of "frp".
* Note: Does not check if there is room!
*/
static void
frame_add_vsep(frame_T *frp)
{
win_T *wp;
if (frp->fr_layout == FR_LEAF)
{
wp = frp->fr_win;
if (wp->w_vsep_width == 0)
{
if (wp->w_width > 0) /* don't make it negative */
--wp->w_width;
wp->w_vsep_width = 1;
}
}
else if (frp->fr_layout == FR_COL)
{
/* Handle all the frames in the column. */
FOR_ALL_FRAMES(frp, frp->fr_child)
frame_add_vsep(frp);
}
else /* frp->fr_layout == FR_ROW */
{
/* Only need to handle the last frame in the row. */
frp = frp->fr_child;
while (frp->fr_next != NULL)
frp = frp->fr_next;
frame_add_vsep(frp);
}
}
/*
* Set frame width from the window it contains.
*/
static void
frame_fix_width(win_T *wp)
{
wp->w_frame->fr_width = wp->w_width + wp->w_vsep_width;
}
/*
* Set frame height from the window it contains.
*/
static void
frame_fix_height(win_T *wp)
{
wp->w_frame->fr_height = VISIBLE_HEIGHT(wp) + wp->w_status_height;
}
/*
* Compute the minimal height for frame "topfrp".
* Uses the 'winminheight' option.
* When "next_curwin" isn't NULL, use p_wh for this window.
* When "next_curwin" is NOWIN, don't use at least one line for the current
* window.
*/
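/*
 * Illustrative arithmetic (ignoring the current-window special cases): a
 * column of two windows, each with a status line, has a minimal height of
 * 2 * (p_wmh + STATUS_HEIGHT), i.e. 4 lines with the default p_wmh = 1.
 */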
static int
frame_minheight(frame_T *topfrp, win_T *next_curwin)
{
frame_T *frp;
int m;
int n;
if (topfrp->fr_win != NULL)
{
if (topfrp->fr_win == next_curwin)
m = p_wh + topfrp->fr_win->w_status_height;
else
{
/* window: minimal height of the window plus status line */
m = p_wmh + topfrp->fr_win->w_status_height;
if (topfrp->fr_win == curwin && next_curwin == NULL)
{
                /* The current window must be at least one line high, plus
                 * room for the WinBar when it is visible. */
if (p_wmh == 0)
++m;
m += WINBAR_HEIGHT(curwin);
}
}
}
else if (topfrp->fr_layout == FR_ROW)
{
/* get the minimal height from each frame in this row */
m = 0;
FOR_ALL_FRAMES(frp, topfrp->fr_child)
{
n = frame_minheight(frp, next_curwin);
if (n > m)
m = n;
}
}
else
{
/* Add up the minimal heights for all frames in this column. */
m = 0;
FOR_ALL_FRAMES(frp, topfrp->fr_child)
m += frame_minheight(frp, next_curwin);
}
return m;
}
/*
* Compute the minimal width for frame "topfrp".
* When "next_curwin" isn't NULL, use p_wiw for this window.
* When "next_curwin" is NOWIN, don't use at least one column for the current
* window.
*/
static int
frame_minwidth(
frame_T *topfrp,
win_T *next_curwin) /* use p_wh and p_wiw for next_curwin */
{
frame_T *frp;
int m, n;
if (topfrp->fr_win != NULL)
{
if (topfrp->fr_win == next_curwin)
m = p_wiw + topfrp->fr_win->w_vsep_width;
else
{
/* window: minimal width of the window plus separator column */
m = p_wmw + topfrp->fr_win->w_vsep_width;
/* The current window must be at least one column wide */
if (p_wmw == 0 && topfrp->fr_win == curwin && next_curwin == NULL)
++m;
}
}
else if (topfrp->fr_layout == FR_COL)
{
/* get the minimal width from each frame in this column */
m = 0;
FOR_ALL_FRAMES(frp, topfrp->fr_child)
{
n = frame_minwidth(frp, next_curwin);
if (n > m)
m = n;
}
}
else
{
/* Add up the minimal widths for all frames in this row. */
m = 0;
FOR_ALL_FRAMES(frp, topfrp->fr_child)
m += frame_minwidth(frp, next_curwin);
}
return m;
}
/*
* Try to close all windows except current one.
* Buffers in the other windows become hidden if 'hidden' is set, or '!' is
* used and the buffer was modified.
*
* Used by ":bdel" and ":only".
*/
void
close_others(
int message,
int forceit) /* always hide all other windows */
{
win_T *wp;
win_T *nextwp;
int r;
if (one_window())
{
if (message && !autocmd_busy)
msg(_(m_onlyone));
return;
}
/* Be very careful here: autocommands may change the window layout. */
for (wp = firstwin; win_valid(wp); wp = nextwp)
{
nextwp = wp->w_next;
if (wp != curwin) /* don't close current window */
{
/* Check if it's allowed to abandon this window */
r = can_abandon(wp->w_buffer, forceit);
if (!win_valid(wp)) /* autocommands messed wp up */
{
nextwp = firstwin;
continue;
}
if (!r)
{
#if defined(FEAT_GUI_DIALOG) || defined(FEAT_CON_DIALOG)
if (message && (p_confirm || cmdmod.confirm) && p_write)
{
dialog_changed(wp->w_buffer, FALSE);
if (!win_valid(wp)) /* autocommands messed wp up */
{
nextwp = firstwin;
continue;
}
}
if (bufIsChanged(wp->w_buffer))
#endif
continue;
}
win_close(wp, !buf_hide(wp->w_buffer)
&& !bufIsChanged(wp->w_buffer));
}
}
if (message && !ONE_WINDOW)
emsg(_("E445: Other window contains changes"));
}
static void
win_init_empty(win_T *wp)
{
redraw_win_later(wp, NOT_VALID);
wp->w_lines_valid = 0;
wp->w_cursor.lnum = 1;
wp->w_curswant = wp->w_cursor.col = 0;
wp->w_cursor.coladd = 0;
wp->w_pcmark.lnum = 1; /* pcmark not cleared but set to line 1 */
wp->w_pcmark.col = 0;
wp->w_prev_pcmark.lnum = 0;
wp->w_prev_pcmark.col = 0;
wp->w_topline = 1;
#ifdef FEAT_DIFF
wp->w_topfill = 0;
#endif
wp->w_botline = 2;
#if defined(FEAT_SYN_HL) || defined(FEAT_SPELL)
wp->w_s = &wp->w_buffer->b_s;
#endif
}
/*
* Init the current window "curwin".
* Called when a new file is being edited.
*/
void
curwin_init(void)
{
win_init_empty(curwin);
}
/*
* Allocate the first window and put an empty buffer in it.
* Called from main().
* Return FAIL when something goes wrong (out of memory).
*/
int
win_alloc_first(void)
{
if (win_alloc_firstwin(NULL) == FAIL)
return FAIL;
first_tabpage = alloc_tabpage();
if (first_tabpage == NULL)
return FAIL;
first_tabpage->tp_topframe = topframe;
curtab = first_tabpage;
return OK;
}
/*
* Allocate and init a window that is not a regular window.
* This can only be done after the first window is fully initialized, thus it
* can't be in win_alloc_first().
*/
win_T *
win_alloc_popup_win(void)
{
win_T *wp;
wp = win_alloc(NULL, TRUE);
if (wp != NULL)
{
// We need to initialize options with something, using the current
// window makes most sense.
win_init_some(wp, curwin);
RESET_BINDING(wp);
new_frame(wp);
}
return wp;
}
/*
* Initialize window "wp" to display buffer "buf".
*/
void
win_init_popup_win(win_T *wp, buf_T *buf)
{
wp->w_buffer = buf;
++buf->b_nwindows;
win_init_empty(wp); // set cursor and topline to safe values
// Make sure w_localdir and globaldir are NULL to avoid a chdir() in
// win_enter_ext().
VIM_CLEAR(wp->w_localdir);
}
/*
* Allocate the first window or the first window in a new tab page.
* When "oldwin" is NULL create an empty buffer for it.
* When "oldwin" is not NULL copy info from it to the new window.
* Return FAIL when something goes wrong (out of memory).
*/
static int
win_alloc_firstwin(win_T *oldwin)
{
curwin = win_alloc(NULL, FALSE);
if (oldwin == NULL)
{
/* Very first window, need to create an empty buffer for it and
* initialize from scratch. */
curbuf = buflist_new(NULL, NULL, 1L, BLN_LISTED);
if (curwin == NULL || curbuf == NULL)
return FAIL;
curwin->w_buffer = curbuf;
#ifdef FEAT_SYN_HL
curwin->w_s = &(curbuf->b_s);
#endif
curbuf->b_nwindows = 1; /* there is one window */
curwin->w_alist = &global_alist;
curwin_init(); /* init current window */
}
else
{
/* First window in new tab page, initialize it from "oldwin". */
win_init(curwin, oldwin, 0);
/* We don't want cursor- and scroll-binding in the first window. */
RESET_BINDING(curwin);
}
new_frame(curwin);
if (curwin->w_frame == NULL)
return FAIL;
topframe = curwin->w_frame;
topframe->fr_width = Columns;
topframe->fr_height = Rows - p_ch;
return OK;
}
/*
* Create a frame for window "wp".
*/
static void
new_frame(win_T *wp)
{
frame_T *frp = ALLOC_CLEAR_ONE(frame_T);
wp->w_frame = frp;
if (frp != NULL)
{
frp->fr_layout = FR_LEAF;
frp->fr_win = wp;
}
}
/*
* Initialize the window and frame size to the maximum.
*/
void
win_init_size(void)
{
firstwin->w_height = ROWS_AVAIL;
topframe->fr_height = ROWS_AVAIL;
firstwin->w_width = Columns;
topframe->fr_width = Columns;
}
/*
* Allocate a new tabpage_T and init the values.
* Returns NULL when out of memory.
*/
static tabpage_T *
alloc_tabpage(void)
{
tabpage_T *tp;
# ifdef FEAT_GUI
int i;
# endif
tp = ALLOC_CLEAR_ONE(tabpage_T);
if (tp == NULL)
return NULL;
# ifdef FEAT_EVAL
/* init t: variables */
tp->tp_vars = dict_alloc();
if (tp->tp_vars == NULL)
{
vim_free(tp);
return NULL;
}
init_var_dict(tp->tp_vars, &tp->tp_winvar, VAR_SCOPE);
# endif
# ifdef FEAT_GUI
for (i = 0; i < 3; i++)
tp->tp_prev_which_scrollbars[i] = -1;
# endif
# ifdef FEAT_DIFF
tp->tp_diff_invalid = TRUE;
# endif
tp->tp_ch_used = p_ch;
return tp;
}
void
free_tabpage(tabpage_T *tp)
{
int idx;
# ifdef FEAT_DIFF
diff_clear(tp);
# endif
# ifdef FEAT_TEXT_PROP
while (tp->tp_first_popupwin != NULL)
popup_close_tabpage(tp, tp->tp_first_popupwin->w_id);
#endif
for (idx = 0; idx < SNAP_COUNT; ++idx)
clear_snapshot(tp, idx);
#ifdef FEAT_EVAL
vars_clear(&tp->tp_vars->dv_hashtab); /* free all t: variables */
hash_init(&tp->tp_vars->dv_hashtab);
unref_var_dict(tp->tp_vars);
#endif
vim_free(tp->tp_localdir);
#ifdef FEAT_PYTHON
python_tabpage_free(tp);
#endif
#ifdef FEAT_PYTHON3
python3_tabpage_free(tp);
#endif
vim_free(tp);
}
/*
* Create a new Tab page with one window.
* It will edit the current buffer, like after ":split".
* When "after" is 0 put it just after the current Tab page.
* Otherwise put it just before tab page "after".
* Return FAIL or OK.
*/
int
win_new_tabpage(int after)
{
tabpage_T *tp = curtab;
tabpage_T *newtp;
int n;
newtp = alloc_tabpage();
if (newtp == NULL)
return FAIL;
/* Remember the current windows in this Tab page. */
if (leave_tabpage(curbuf, TRUE) == FAIL)
{
vim_free(newtp);
return FAIL;
}
curtab = newtp;
newtp->tp_localdir = (tp->tp_localdir == NULL)
? NULL : vim_strsave(tp->tp_localdir);
/* Create a new empty window. */
if (win_alloc_firstwin(tp->tp_curwin) == OK)
{
/* Link the new tab page in the list of tab pages. */
if (after == 1)
{
/* New tab page becomes the first one. */
newtp->tp_next = first_tabpage;
first_tabpage = newtp;
}
else
{
if (after > 0)
{
/* Put new tab page before tab page "after". */
n = 2;
for (tp = first_tabpage; tp->tp_next != NULL
&& n < after; tp = tp->tp_next)
++n;
}
newtp->tp_next = tp->tp_next;
tp->tp_next = newtp;
}
win_init_size();
firstwin->w_winrow = tabline_height();
win_comp_scroll(curwin);
newtp->tp_topframe = topframe;
last_status(FALSE);
#if defined(FEAT_GUI)
/* When 'guioptions' includes 'L' or 'R' may have to remove or add
* scrollbars. Have to update them anyway. */
gui_may_update_scrollbars();
#endif
#ifdef FEAT_JOB_CHANNEL
entering_window(curwin);
#endif
redraw_all_later(NOT_VALID);
apply_autocmds(EVENT_WINNEW, NULL, NULL, FALSE, curbuf);
apply_autocmds(EVENT_WINENTER, NULL, NULL, FALSE, curbuf);
apply_autocmds(EVENT_TABNEW, NULL, NULL, FALSE, curbuf);
apply_autocmds(EVENT_TABENTER, NULL, NULL, FALSE, curbuf);
return OK;
}
/* Failed, get back the previous Tab page */
enter_tabpage(curtab, curbuf, TRUE, TRUE);
return FAIL;
}
/*
* Open a new tab page if ":tab cmd" was used. It will edit the same buffer,
* like with ":split".
* Returns OK if a new tab page was created, FAIL otherwise.
*/
static int
may_open_tabpage(void)
{
int n = (cmdmod.tab == 0) ? postponed_split_tab : cmdmod.tab;
if (n != 0)
{
cmdmod.tab = 0; /* reset it to avoid doing it twice */
postponed_split_tab = 0;
return win_new_tabpage(n);
}
return FAIL;
}
/*
* Create up to "maxcount" tabpages with empty windows.
* Returns the number of resulting tab pages.
*/
int
make_tabpages(int maxcount)
{
int count = maxcount;
int todo;
/* Limit to 'tabpagemax' tabs. */
if (count > p_tpm)
count = p_tpm;
/*
* Don't execute autocommands while creating the tab pages. Must do that
* when putting the buffers in the windows.
*/
block_autocmds();
for (todo = count - 1; todo > 0; --todo)
if (win_new_tabpage(0) == FAIL)
break;
unblock_autocmds();
/* return actual number of tab pages */
return (count - todo);
}
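/*
 * Illustrative use, assuming a caller like the "-p" startup argument:
 * make_tabpages(3) creates two more tab pages (the first one already
 * exists) and returns 3, or less when 'tabpagemax' or an allocation
 * failure gets in the way.
 */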
/*
* Return TRUE when "tpc" points to a valid tab page.
*/
int
valid_tabpage(tabpage_T *tpc)
{
tabpage_T *tp;
FOR_ALL_TABPAGES(tp)
if (tp == tpc)
return TRUE;
return FALSE;
}
/*
* Return TRUE when "tpc" points to a valid tab page and at least one window is
* valid.
*/
int
valid_tabpage_win(tabpage_T *tpc)
{
tabpage_T *tp;
win_T *wp;
FOR_ALL_TABPAGES(tp)
{
if (tp == tpc)
{
FOR_ALL_WINDOWS_IN_TAB(tp, wp)
{
if (win_valid_any_tab(wp))
return TRUE;
}
return FALSE;
}
}
/* shouldn't happen */
return FALSE;
}
/*
* Close tabpage "tab", assuming it has no windows in it.
* There must be another tabpage or this will crash.
*/
void
close_tabpage(tabpage_T *tab)
{
tabpage_T *ptp;
if (tab == first_tabpage)
{
first_tabpage = tab->tp_next;
ptp = first_tabpage;
}
else
{
for (ptp = first_tabpage; ptp != NULL && ptp->tp_next != tab;
ptp = ptp->tp_next)
;
assert(ptp != NULL);
ptp->tp_next = tab->tp_next;
}
goto_tabpage_tp(ptp, FALSE, FALSE);
free_tabpage(tab);
}
/*
* Find tab page "n" (first one is 1). Returns NULL when not found.
*/
tabpage_T *
find_tabpage(int n)
{
tabpage_T *tp;
int i = 1;
if (n == 0)
return curtab;
for (tp = first_tabpage; tp != NULL && i != n; tp = tp->tp_next)
++i;
return tp;
}
/*
* Get index of tab page "tp". First one has index 1.
* When not found returns number of tab pages plus one.
*/
int
tabpage_index(tabpage_T *ftp)
{
int i = 1;
tabpage_T *tp;
for (tp = first_tabpage; tp != NULL && tp != ftp; tp = tp->tp_next)
++i;
return i;
}
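/*
 * Illustrative example: with three tab pages A -> B -> C,
 * find_tabpage(2) returns B and tabpage_index(C) returns 3; for a
 * pointer that is not in the list, tabpage_index() returns 4, the
 * number of tab pages plus one.
 */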
/*
* Prepare for leaving the current tab page.
* When autocommands change "curtab" we don't leave the tab page and return
* FAIL.
* Careful: When OK is returned need to get a new tab page very very soon!
*/
static int
leave_tabpage(
buf_T *new_curbuf UNUSED, /* what is going to be the new curbuf,
NULL if unknown */
int trigger_leave_autocmds UNUSED)
{
tabpage_T *tp = curtab;
#ifdef FEAT_JOB_CHANNEL
leaving_window(curwin);
#endif
reset_VIsual_and_resel(); /* stop Visual mode */
if (trigger_leave_autocmds)
{
if (new_curbuf != curbuf)
{
apply_autocmds(EVENT_BUFLEAVE, NULL, NULL, FALSE, curbuf);
if (curtab != tp)
return FAIL;
}
apply_autocmds(EVENT_WINLEAVE, NULL, NULL, FALSE, curbuf);
if (curtab != tp)
return FAIL;
apply_autocmds(EVENT_TABLEAVE, NULL, NULL, FALSE, curbuf);
if (curtab != tp)
return FAIL;
}
#if defined(FEAT_GUI)
/* Remove the scrollbars. They may be added back later. */
if (gui.in_use)
gui_remove_scrollbars();
#endif
tp->tp_curwin = curwin;
tp->tp_prevwin = prevwin;
tp->tp_firstwin = firstwin;
tp->tp_lastwin = lastwin;
tp->tp_old_Rows = Rows;
tp->tp_old_Columns = Columns;
firstwin = NULL;
lastwin = NULL;
return OK;
}
/*
* Start using tab page "tp".
* Only to be used after leave_tabpage() or freeing the current tab page.
* Only trigger *Enter autocommands when trigger_enter_autocmds is TRUE.
* Only trigger *Leave autocommands when trigger_leave_autocmds is TRUE.
*/
static void
enter_tabpage(
tabpage_T *tp,
buf_T *old_curbuf UNUSED,
int trigger_enter_autocmds,
int trigger_leave_autocmds)
{
int old_off = tp->tp_firstwin->w_winrow;
win_T *next_prevwin = tp->tp_prevwin;
curtab = tp;
firstwin = tp->tp_firstwin;
lastwin = tp->tp_lastwin;
topframe = tp->tp_topframe;
/* We would like doing the TabEnter event first, but we don't have a
* valid current window yet, which may break some commands.
* This triggers autocommands, thus may make "tp" invalid. */
win_enter_ext(tp->tp_curwin, FALSE, TRUE, FALSE,
trigger_enter_autocmds, trigger_leave_autocmds);
prevwin = next_prevwin;
last_status(FALSE); /* status line may appear or disappear */
(void)win_comp_pos(); /* recompute w_winrow for all windows */
#ifdef FEAT_DIFF
diff_need_scrollbind = TRUE;
#endif
/* The tabpage line may have appeared or disappeared, may need to resize
* the frames for that. When the Vim window was resized need to update
* frame sizes too. Use the stored value of p_ch, so that it can be
* different for each tab page. */
if (p_ch != curtab->tp_ch_used)
clear_cmdline = TRUE;
p_ch = curtab->tp_ch_used;
if (curtab->tp_old_Rows != Rows || (old_off != firstwin->w_winrow
#ifdef FEAT_GUI_TABLINE
&& !gui_use_tabline()
#endif
))
shell_new_rows();
if (curtab->tp_old_Columns != Columns && starting == 0)
shell_new_columns(); /* update window widths */
#if defined(FEAT_GUI)
/* When 'guioptions' includes 'L' or 'R' may have to remove or add
* scrollbars. Have to update them anyway. */
gui_may_update_scrollbars();
#endif
/* Apply autocommands after updating the display, when 'rows' and
* 'columns' have been set correctly. */
if (trigger_enter_autocmds)
{
apply_autocmds(EVENT_TABENTER, NULL, NULL, FALSE, curbuf);
if (old_curbuf != curbuf)
apply_autocmds(EVENT_BUFENTER, NULL, NULL, FALSE, curbuf);
}
redraw_all_later(NOT_VALID);
}
/*
* Go to tab page "n". For ":tab N" and "Ngt".
* When "n" is 9999 go to the last tab page.
*/
void
goto_tabpage(int n)
{
tabpage_T *tp = NULL; // shut up compiler
tabpage_T *ttp;
int i;
if (text_locked())
{
/* Not allowed when editing the command line. */
text_locked_msg();
return;
}
/* If there is only one it can't work. */
if (first_tabpage->tp_next == NULL)
{
if (n > 1)
beep_flush();
return;
}
if (n == 0)
{
/* No count, go to next tab page, wrap around end. */
if (curtab->tp_next == NULL)
tp = first_tabpage;
else
tp = curtab->tp_next;
}
else if (n < 0)
{
/* "gT": go to previous tab page, wrap around end. "N gT" repeats
* this N times. */
ttp = curtab;
for (i = n; i < 0; ++i)
{
for (tp = first_tabpage; tp->tp_next != ttp && tp->tp_next != NULL;
tp = tp->tp_next)
;
ttp = tp;
}
}
else if (n == 9999)
{
/* Go to last tab page. */
for (tp = first_tabpage; tp->tp_next != NULL; tp = tp->tp_next)
;
}
else
{
/* Go to tab page "n". */
tp = find_tabpage(n);
if (tp == NULL)
{
beep_flush();
return;
}
}
goto_tabpage_tp(tp, TRUE, TRUE);
#ifdef FEAT_GUI_TABLINE
if (gui_use_tabline())
gui_mch_set_curtab(tabpage_index(curtab));
#endif
}
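/*
 * Illustrative summary of the count handling above (a sketch, not an
 * exhaustive list of callers):
 *	goto_tabpage(0)     next tab page, wrapping at the end ("gt")
 *	goto_tabpage(-2)    two tab pages back, wrapping ("2gT")
 *	goto_tabpage(5)     fifth tab page ("5gt")
 *	goto_tabpage(9999)  last tab page
 */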
/*
* Go to tabpage "tp".
* Only trigger *Enter autocommands when trigger_enter_autocmds is TRUE.
* Only trigger *Leave autocommands when trigger_leave_autocmds is TRUE.
* Note: doesn't update the GUI tab.
*/
void
goto_tabpage_tp(
tabpage_T *tp,
int trigger_enter_autocmds,
int trigger_leave_autocmds)
{
/* Don't repeat a message in another tab page. */
set_keep_msg(NULL, 0);
if (tp != curtab && leave_tabpage(tp->tp_curwin->w_buffer,
trigger_leave_autocmds) == OK)
{
if (valid_tabpage(tp))
enter_tabpage(tp, curbuf, trigger_enter_autocmds,
trigger_leave_autocmds);
else
enter_tabpage(curtab, curbuf, trigger_enter_autocmds,
trigger_leave_autocmds);
}
}
/*
* Enter window "wp" in tab page "tp".
* Also updates the GUI tab.
*/
void
goto_tabpage_win(tabpage_T *tp, win_T *wp)
{
goto_tabpage_tp(tp, TRUE, TRUE);
if (curtab == tp && win_valid(wp))
{
win_enter(wp, TRUE);
# ifdef FEAT_GUI_TABLINE
if (gui_use_tabline())
gui_mch_set_curtab(tabpage_index(curtab));
# endif
}
}
/*
* Move the current tab page to after tab page "nr".
*/
void
tabpage_move(int nr)
{
int n = 1;
tabpage_T *tp, *tp_dst;
if (first_tabpage->tp_next == NULL)
return;
for (tp = first_tabpage; tp->tp_next != NULL && n < nr; tp = tp->tp_next)
++n;
if (tp == curtab || (nr > 0 && tp->tp_next != NULL
&& tp->tp_next == curtab))
return;
tp_dst = tp;
/* Remove the current tab page from the list of tab pages. */
if (curtab == first_tabpage)
first_tabpage = curtab->tp_next;
else
{
FOR_ALL_TABPAGES(tp)
if (tp->tp_next == curtab)
break;
if (tp == NULL) /* "cannot happen" */
return;
tp->tp_next = curtab->tp_next;
}
/* Re-insert it at the specified position. */
if (nr <= 0)
{
curtab->tp_next = first_tabpage;
first_tabpage = curtab;
}
else
{
curtab->tp_next = tp_dst->tp_next;
tp_dst->tp_next = curtab;
}
/* Need to redraw the tabline. Tab page contents don't change. */
redraw_tabline = TRUE;
}
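/*
 * Illustrative example: with tab pages A B C and C current,
 * tabpage_move(0) yields C A B (insert at the front), while
 * tabpage_move(1) yields A C B (insert after the first tab page).
 */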
/*
* Go to another window.
* When jumping to another buffer, stop Visual mode. Do this before
* changing windows so we can yank the selection into the '*' register.
* When jumping to another window on the same buffer, adjust its cursor
* position to keep the same Visual area.
*/
void
win_goto(win_T *wp)
{
#ifdef FEAT_CONCEAL
win_T *owp = curwin;
#endif
if (ERROR_IF_POPUP_WINDOW)
return;
if (text_locked())
{
beep_flush();
text_locked_msg();
return;
}
if (curbuf_locked())
return;
if (wp->w_buffer != curbuf)
reset_VIsual_and_resel();
else if (VIsual_active)
wp->w_cursor = curwin->w_cursor;
#ifdef FEAT_GUI
need_mouse_correct = TRUE;
#endif
win_enter(wp, TRUE);
#ifdef FEAT_CONCEAL
// Conceal cursor line in previous window, unconceal in current window.
if (win_valid(owp) && owp->w_p_cole > 0 && !msg_scrolled)
redrawWinline(owp, owp->w_cursor.lnum);
if (curwin->w_p_cole > 0 && !msg_scrolled)
need_cursor_line_redraw = TRUE;
#endif
}
#if defined(FEAT_PERL) || defined(PROTO)
/*
* Find window number "winnr" (counting top to bottom).
*/
win_T *
win_find_nr(int winnr)
{
win_T *wp;
FOR_ALL_WINDOWS(wp)
if (--winnr == 0)
break;
return wp;
}
#endif
#if ((defined(FEAT_PYTHON) || defined(FEAT_PYTHON3))) || defined(PROTO)
/*
* Find the tabpage for window "win".
*/
tabpage_T *
win_find_tabpage(win_T *win)
{
win_T *wp;
tabpage_T *tp;
FOR_ALL_TAB_WINDOWS(tp, wp)
if (wp == win)
return tp;
return NULL;
}
#endif
/*
* Get the above or below neighbor window of the specified window.
* up - TRUE for the above neighbor
* count - nth neighbor window
* Returns the specified window if the neighbor is not found.
*/
win_T *
win_vert_neighbor(tabpage_T *tp, win_T *wp, int up, long count)
{
frame_T *fr;
frame_T *nfr;
frame_T *foundfr;
foundfr = wp->w_frame;
while (count--)
{
/*
* First go upwards in the tree of frames until we find a upwards or
* downwards neighbor.
*/
fr = foundfr;
for (;;)
{
if (fr == tp->tp_topframe)
goto end;
if (up)
nfr = fr->fr_prev;
else
nfr = fr->fr_next;
if (fr->fr_parent->fr_layout == FR_COL && nfr != NULL)
break;
fr = fr->fr_parent;
}
/*
* Now go downwards to find the bottom or top frame in it.
*/
for (;;)
{
if (nfr->fr_layout == FR_LEAF)
{
foundfr = nfr;
break;
}
fr = nfr->fr_child;
if (nfr->fr_layout == FR_ROW)
{
/* Find the frame at the cursor column. */
while (fr->fr_next != NULL
&& frame2win(fr)->w_wincol + fr->fr_width
<= wp->w_wincol + wp->w_wcol)
fr = fr->fr_next;
}
if (nfr->fr_layout == FR_COL && up)
while (fr->fr_next != NULL)
fr = fr->fr_next;
nfr = fr;
}
}
end:
return foundfr != NULL ? foundfr->fr_win : NULL;
}
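/*
 * Illustrative example: win_goto_ver() below calls
 * win_vert_neighbor(curtab, curwin, FALSE, 1) for a move downwards; in
 * a layout with one window on top of two side-by-side windows, moving
 * down from the top window lands in the bottom window whose columns
 * contain the cursor column.
 */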
/*
* Move to window above or below "count" times.
*/
static void
win_goto_ver(
int up, // TRUE to go to win above
long count)
{
win_T *win;
win = win_vert_neighbor(curtab, curwin, up, count);
if (win != NULL)
win_goto(win);
}
/*
* Get the left or right neighbor window of the specified window.
* left - TRUE for the left neighbor
* count - nth neighbor window
* Returns the specified window if the neighbor is not found.
*/
win_T *
win_horz_neighbor(tabpage_T *tp, win_T *wp, int left, long count)
{
frame_T *fr;
frame_T *nfr;
frame_T *foundfr;
foundfr = wp->w_frame;
while (count--)
{
/*
* First go upwards in the tree of frames until we find a left or
* right neighbor.
*/
fr = foundfr;
for (;;)
{
if (fr == tp->tp_topframe)
goto end;
if (left)
nfr = fr->fr_prev;
else
nfr = fr->fr_next;
if (fr->fr_parent->fr_layout == FR_ROW && nfr != NULL)
break;
fr = fr->fr_parent;
}
/*
* Now go downwards to find the leftmost or rightmost frame in it.
*/
for (;;)
{
if (nfr->fr_layout == FR_LEAF)
{
foundfr = nfr;
break;
}
fr = nfr->fr_child;
if (nfr->fr_layout == FR_COL)
{
/* Find the frame at the cursor row. */
while (fr->fr_next != NULL
&& frame2win(fr)->w_winrow + fr->fr_height
<= wp->w_winrow + wp->w_wrow)
fr = fr->fr_next;
}
if (nfr->fr_layout == FR_ROW && left)
while (fr->fr_next != NULL)
fr = fr->fr_next;
nfr = fr;
}
}
end:
return foundfr != NULL ? foundfr->fr_win : NULL;
}
/*
* Move to left or right window.
*/
static void
win_goto_hor(
int left, // TRUE to go to left win
long count)
{
win_T *win;
win = win_horz_neighbor(curtab, curwin, left, count);
if (win != NULL)
win_goto(win);
}
/*
* Make window "wp" the current window.
*/
void
win_enter(win_T *wp, int undo_sync)
{
win_enter_ext(wp, undo_sync, FALSE, FALSE, TRUE, TRUE);
}
/*
* Make window wp the current window.
* Can be called with "curwin_invalid" TRUE, which means that curwin has just
* been closed and isn't valid.
*/
static void
win_enter_ext(
win_T *wp,
int undo_sync,
int curwin_invalid,
int trigger_new_autocmds,
int trigger_enter_autocmds,
int trigger_leave_autocmds)
{
int other_buffer = FALSE;
if (wp == curwin && !curwin_invalid) /* nothing to do */
return;
#ifdef FEAT_JOB_CHANNEL
if (!curwin_invalid)
leaving_window(curwin);
#endif
if (!curwin_invalid && trigger_leave_autocmds)
{
/*
* Be careful: If autocommands delete the window, return now.
*/
if (wp->w_buffer != curbuf)
{
apply_autocmds(EVENT_BUFLEAVE, NULL, NULL, FALSE, curbuf);
other_buffer = TRUE;
if (!win_valid(wp))
return;
}
apply_autocmds(EVENT_WINLEAVE, NULL, NULL, FALSE, curbuf);
if (!win_valid(wp))
return;
#ifdef FEAT_EVAL
/* autocmds may abort script processing */
if (aborting())
return;
#endif
}
/* sync undo before leaving the current buffer */
if (undo_sync && curbuf != wp->w_buffer)
u_sync(FALSE);
/* Might need to scroll the old window before switching, e.g., when the
* cursor was moved. */
update_topline();
/* may have to copy the buffer options when 'cpo' contains 'S' */
if (wp->w_buffer != curbuf)
buf_copy_options(wp->w_buffer, BCO_ENTER | BCO_NOHELP);
if (!curwin_invalid)
{
prevwin = curwin; /* remember for CTRL-W p */
curwin->w_redr_status = TRUE;
}
curwin = wp;
curbuf = wp->w_buffer;
check_cursor();
if (!virtual_active())
curwin->w_cursor.coladd = 0;
changed_line_abv_curs(); /* assume cursor position needs updating */
if (curwin->w_localdir != NULL || curtab->tp_localdir != NULL)
{
char_u *dirname;
// Window or tab has a local directory: Save current directory as
// global directory (unless that was done already) and change to the
// local directory.
if (globaldir == NULL)
{
char_u cwd[MAXPATHL];
if (mch_dirname(cwd, MAXPATHL) == OK)
globaldir = vim_strsave(cwd);
}
if (curwin->w_localdir != NULL)
dirname = curwin->w_localdir;
else
dirname = curtab->tp_localdir;
if (mch_chdir((char *)dirname) == 0)
shorten_fnames(TRUE);
}
else if (globaldir != NULL)
{
/* Window doesn't have a local directory and we are not in the global
* directory: Change to the global directory. */
vim_ignored = mch_chdir((char *)globaldir);
VIM_CLEAR(globaldir);
shorten_fnames(TRUE);
}
#ifdef FEAT_JOB_CHANNEL
entering_window(curwin);
#endif
// Careful: autocommands may close the window and make "wp" invalid
if (trigger_new_autocmds)
apply_autocmds(EVENT_WINNEW, NULL, NULL, FALSE, curbuf);
if (trigger_enter_autocmds)
{
apply_autocmds(EVENT_WINENTER, NULL, NULL, FALSE, curbuf);
if (other_buffer)
apply_autocmds(EVENT_BUFENTER, NULL, NULL, FALSE, curbuf);
}
#ifdef FEAT_TITLE
maketitle();
#endif
curwin->w_redr_status = TRUE;
#ifdef FEAT_TERMINAL
if (bt_terminal(curwin->w_buffer))
// terminal is likely in another mode
redraw_mode = TRUE;
#endif
redraw_tabline = TRUE;
if (restart_edit)
redraw_later(VALID); /* causes status line redraw */
/* set window height to desired minimal value */
if (curwin->w_height < p_wh && !curwin->w_p_wfh
#ifdef FEAT_TEXT_PROP
&& !popup_is_popup(curwin)
#endif
)
win_setheight((int)p_wh);
else if (curwin->w_height == 0)
win_setheight(1);
/* set window width to desired minimal value */
if (curwin->w_width < p_wiw && !curwin->w_p_wfw)
win_setwidth((int)p_wiw);
setmouse(); // in case jumped to/from help buffer
/* Change directories when the 'acd' option is set. */
DO_AUTOCHDIR;
}
/*
* Jump to the first open window that contains buffer "buf", if one exists.
* Returns a pointer to the window found, otherwise NULL.
*/
win_T *
buf_jump_open_win(buf_T *buf)
{
win_T *wp = NULL;
if (curwin->w_buffer == buf)
wp = curwin;
else
FOR_ALL_WINDOWS(wp)
if (wp->w_buffer == buf)
break;
if (wp != NULL)
win_enter(wp, FALSE);
return wp;
}
/*
* Jump to the first open window in any tab page that contains buffer "buf",
* if one exists.
* Returns a pointer to the window found, otherwise NULL.
*/
win_T *
buf_jump_open_tab(buf_T *buf)
{
win_T *wp = buf_jump_open_win(buf);
tabpage_T *tp;
if (wp != NULL)
return wp;
FOR_ALL_TABPAGES(tp)
if (tp != curtab)
{
for (wp = tp->tp_firstwin; wp != NULL; wp = wp->w_next)
if (wp->w_buffer == buf)
break;
if (wp != NULL)
{
goto_tabpage_win(tp, wp);
if (curwin != wp)
wp = NULL; /* something went wrong */
break;
}
}
return wp;
}
static int last_win_id = LOWEST_WIN_ID - 1;
/*
* Allocate a window structure and link it in the window list when "hidden" is
* FALSE.
*/
static win_T *
win_alloc(win_T *after UNUSED, int hidden UNUSED)
{
win_T *new_wp;
/*
* allocate window structure and linesizes arrays
*/
new_wp = ALLOC_CLEAR_ONE(win_T);
if (new_wp == NULL)
return NULL;
if (win_alloc_lines(new_wp) == FAIL)
{
vim_free(new_wp);
return NULL;
}
new_wp->w_id = ++last_win_id;
#ifdef FEAT_EVAL
/* init w: variables */
new_wp->w_vars = dict_alloc();
if (new_wp->w_vars == NULL)
{
win_free_lsize(new_wp);
vim_free(new_wp);
return NULL;
}
init_var_dict(new_wp->w_vars, &new_wp->w_winvar, VAR_SCOPE);
#endif
/* Don't execute autocommands while the window is not properly
* initialized yet. gui_create_scrollbar() may trigger a FocusGained
* event. */
block_autocmds();
/*
* link the window in the window list
*/
if (!hidden)
win_append(after, new_wp);
new_wp->w_wincol = 0;
new_wp->w_width = Columns;
/* position the display and the cursor at the top of the file. */
new_wp->w_topline = 1;
#ifdef FEAT_DIFF
new_wp->w_topfill = 0;
#endif
new_wp->w_botline = 2;
new_wp->w_cursor.lnum = 1;
new_wp->w_scbind_pos = 1;
// use global option value for global-local options
new_wp->w_p_so = -1;
new_wp->w_p_siso = -1;
/* We won't calculate w_fraction until resizing the window */
new_wp->w_fraction = 0;
new_wp->w_prev_fraction_row = -1;
#ifdef FEAT_GUI
if (gui.in_use)
{
gui_create_scrollbar(&new_wp->w_scrollbars[SBAR_LEFT],
SBAR_LEFT, new_wp);
gui_create_scrollbar(&new_wp->w_scrollbars[SBAR_RIGHT],
SBAR_RIGHT, new_wp);
}
#endif
#ifdef FEAT_FOLDING
foldInitWin(new_wp);
#endif
unblock_autocmds();
#ifdef FEAT_SEARCH_EXTRA
new_wp->w_match_head = NULL;
new_wp->w_next_match_id = 4;
#endif
return new_wp;
}
/*
* Remove window 'wp' from the window list and free the structure.
*/
static void
win_free(
win_T *wp,
tabpage_T *tp) /* tab page "win" is in, NULL for current */
{
int i;
buf_T *buf;
wininfo_T *wip;
#ifdef FEAT_FOLDING
clearFolding(wp);
#endif
/* reduce the reference count to the argument list. */
alist_unlink(wp->w_alist);
/* Don't execute autocommands while the window is halfway being deleted.
* gui_mch_destroy_scrollbar() may trigger a FocusGained event. */
block_autocmds();
#ifdef FEAT_LUA
lua_window_free(wp);
#endif
#ifdef FEAT_MZSCHEME
mzscheme_window_free(wp);
#endif
#ifdef FEAT_PERL
perl_win_free(wp);
#endif
#ifdef FEAT_PYTHON
python_window_free(wp);
#endif
#ifdef FEAT_PYTHON3
python3_window_free(wp);
#endif
#ifdef FEAT_TCL
tcl_window_free(wp);
#endif
#ifdef FEAT_RUBY
ruby_window_free(wp);
#endif
clear_winopt(&wp->w_onebuf_opt);
clear_winopt(&wp->w_allbuf_opt);
#ifdef FEAT_EVAL
vars_clear(&wp->w_vars->dv_hashtab); /* free all w: variables */
hash_init(&wp->w_vars->dv_hashtab);
unref_var_dict(wp->w_vars);
#endif
{
tabpage_T *ttp;
if (prevwin == wp)
prevwin = NULL;
FOR_ALL_TABPAGES(ttp)
if (ttp->tp_prevwin == wp)
ttp->tp_prevwin = NULL;
}
win_free_lsize(wp);
for (i = 0; i < wp->w_tagstacklen; ++i)
{
vim_free(wp->w_tagstack[i].tagname);
vim_free(wp->w_tagstack[i].user_data);
}
vim_free(wp->w_localdir);
/* Remove the window from the b_wininfo lists, it may happen that the
* freed memory is re-used for another window. */
FOR_ALL_BUFFERS(buf)
for (wip = buf->b_wininfo; wip != NULL; wip = wip->wi_next)
if (wip->wi_win == wp)
wip->wi_win = NULL;
#ifdef FEAT_SEARCH_EXTRA
clear_matches(wp);
#endif
#ifdef FEAT_JUMPLIST
free_jumplist(wp);
#endif
#ifdef FEAT_QUICKFIX
qf_free_all(wp);
#endif
#ifdef FEAT_GUI
if (gui.in_use)
{
gui_mch_destroy_scrollbar(&wp->w_scrollbars[SBAR_LEFT]);
gui_mch_destroy_scrollbar(&wp->w_scrollbars[SBAR_RIGHT]);
}
#endif /* FEAT_GUI */
#ifdef FEAT_MENU
remove_winbar(wp);
#endif
#ifdef FEAT_TEXT_PROP
free_callback(&wp->w_close_cb);
free_callback(&wp->w_filter_cb);
for (i = 0; i < 4; ++i)
VIM_CLEAR(wp->w_border_highlight[i]);
vim_free(wp->w_scrollbar_highlight);
vim_free(wp->w_thumb_highlight);
vim_free(wp->w_popup_title);
list_unref(wp->w_popup_mask);
vim_free(wp->w_popup_mask_cells);
#endif
#ifdef FEAT_SYN_HL
vim_free(wp->w_p_cc_cols);
#endif
if (win_valid_any_tab(wp))
win_remove(wp, tp);
if (autocmd_busy)
{
wp->w_next = au_pending_free_win;
au_pending_free_win = wp;
}
else
vim_free(wp);
unblock_autocmds();
}
/*
* Return TRUE if "wp" is not in the list of windows: the autocmd window or a
* popup window.
*/
static int
win_unlisted(win_T *wp)
{
return wp == aucmd_win || WIN_IS_POPUP(wp);
}
#if defined(FEAT_TEXT_PROP) || defined(PROTO)
/*
* Free a popup window. This does not take the window out of the window list
* and assumes there is only one toplevel frame, no split.
*/
void
win_free_popup(win_T *win)
{
if (bt_popup(win->w_buffer))
win_close_buffer(win, DOBUF_WIPE_REUSE, FALSE);
else
close_buffer(win, win->w_buffer, 0, FALSE);
# if defined(FEAT_TIMERS)
if (win->w_popup_timer != NULL)
stop_timer(win->w_popup_timer);
# endif
vim_free(win->w_frame);
win_free(win, NULL);
}
#endif
/*
* Append window "wp" in the window list after window "after".
*/
static void
win_append(win_T *after, win_T *wp)
{
win_T *before;
if (after == NULL) /* a NULL "after" means in front of the first */
before = firstwin;
else
before = after->w_next;
wp->w_next = before;
wp->w_prev = after;
if (after == NULL)
firstwin = wp;
else
after->w_next = wp;
if (before == NULL)
lastwin = wp;
else
before->w_prev = wp;
}
/*
* Remove a window from the window list.
*/
void
win_remove(
win_T *wp,
tabpage_T *tp) /* tab page "win" is in, NULL for current */
{
if (wp->w_prev != NULL)
wp->w_prev->w_next = wp->w_next;
else if (tp == NULL)
firstwin = curtab->tp_firstwin = wp->w_next;
else
tp->tp_firstwin = wp->w_next;
if (wp->w_next != NULL)
wp->w_next->w_prev = wp->w_prev;
else if (tp == NULL)
lastwin = curtab->tp_lastwin = wp->w_prev;
else
tp->tp_lastwin = wp->w_prev;
}
/*
* Append frame "frp" in a frame list after frame "after".
*/
static void
frame_append(frame_T *after, frame_T *frp)
{
frp->fr_next = after->fr_next;
after->fr_next = frp;
if (frp->fr_next != NULL)
frp->fr_next->fr_prev = frp;
frp->fr_prev = after;
}
/*
* Insert frame "frp" in a frame list before frame "before".
*/
static void
frame_insert(frame_T *before, frame_T *frp)
{
frp->fr_next = before;
frp->fr_prev = before->fr_prev;
before->fr_prev = frp;
if (frp->fr_prev != NULL)
frp->fr_prev->fr_next = frp;
else
frp->fr_parent->fr_child = frp;
}
/*
* Remove a frame from a frame list.
*/
static void
frame_remove(frame_T *frp)
{
if (frp->fr_prev != NULL)
frp->fr_prev->fr_next = frp->fr_next;
else
{
frp->fr_parent->fr_child = frp->fr_next;
/* special case: topframe->fr_child == frp */
if (topframe->fr_child == frp)
topframe->fr_child = frp->fr_next;
}
if (frp->fr_next != NULL)
frp->fr_next->fr_prev = frp->fr_prev;
}
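/*
 * Illustrative example: in a sibling list A <-> B <-> C,
 * frame_remove(B) relinks A <-> C; removing A instead advances the
 * parent's fr_child pointer to B.
 */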
/*
* Allocate w_lines[] for window "wp".
* Return FAIL for failure, OK for success.
*/
int
win_alloc_lines(win_T *wp)
{
wp->w_lines_valid = 0;
wp->w_lines = ALLOC_CLEAR_MULT(wline_T, Rows);
if (wp->w_lines == NULL)
return FAIL;
return OK;
}
/*
* free lsize arrays for a window
*/
void
win_free_lsize(win_T *wp)
{
/* TODO: why would wp be NULL here? */
if (wp != NULL)
VIM_CLEAR(wp->w_lines);
}
/*
* Called from win_new_shellsize() after Rows changed.
* This only does the current tab page, others must be done when made active.
*/
void
shell_new_rows(void)
{
int h = (int)ROWS_AVAIL;
if (firstwin == NULL) /* not initialized yet */
return;
if (h < frame_minheight(topframe, NULL))
h = frame_minheight(topframe, NULL);
/* First try setting the heights of windows with 'winfixheight'. If
* that doesn't result in the right height, forget about that option. */
frame_new_height(topframe, h, FALSE, TRUE);
if (!frame_check_height(topframe, h))
frame_new_height(topframe, h, FALSE, FALSE);
(void)win_comp_pos(); /* recompute w_winrow and w_wincol */
compute_cmdrow();
curtab->tp_ch_used = p_ch;
#if 0
/* Disabled: don't want making the screen smaller make a window larger. */
if (p_ea)
win_equal(curwin, FALSE, 'v');
#endif
}
/*
* Called from win_new_shellsize() after Columns changed.
*/
void
shell_new_columns(void)
{
if (firstwin == NULL) /* not initialized yet */
return;
/* First try setting the widths of windows with 'winfixwidth'. If that
* doesn't result in the right width, forget about that option. */
frame_new_width(topframe, (int)Columns, FALSE, TRUE);
if (!frame_check_width(topframe, Columns))
frame_new_width(topframe, (int)Columns, FALSE, FALSE);
(void)win_comp_pos(); /* recompute w_winrow and w_wincol */
#if 0
/* Disabled: don't want making the screen smaller make a window larger. */
if (p_ea)
win_equal(curwin, FALSE, 'h');
#endif
}
#if defined(FEAT_CMDWIN) || defined(PROTO)
/*
* Save the size of all windows in "gap".
*/
void
win_size_save(garray_T *gap)
{
win_T *wp;
ga_init2(gap, (int)sizeof(int), 1);
if (ga_grow(gap, win_count() * 2) == OK)
FOR_ALL_WINDOWS(wp)
{
((int *)gap->ga_data)[gap->ga_len++] =
wp->w_width + wp->w_vsep_width;
((int *)gap->ga_data)[gap->ga_len++] = wp->w_height;
}
}
/*
* Restore window sizes, but only if the number of windows is still the same.
* Does not free the growarray.
*/
void
win_size_restore(garray_T *gap)
{
win_T *wp;
int i, j;
if (win_count() * 2 == gap->ga_len)
{
/* The order matters, because frames contain other frames, but it's
* difficult to get right. The easy way out is to do it twice. */
for (j = 0; j < 2; ++j)
{
i = 0;
FOR_ALL_WINDOWS(wp)
{
frame_setwidth(wp->w_frame, ((int *)gap->ga_data)[i++]);
win_setheight_win(((int *)gap->ga_data)[i++], wp);
}
}
/* recompute the window positions */
(void)win_comp_pos();
}
}
#endif /* FEAT_CMDWIN */
/*
* Update the position for all windows, using the width and height of the
* frames.
* Returns the row just after the last window.
*/
int
win_comp_pos(void)
{
int row = tabline_height();
int col = 0;
frame_comp_pos(topframe, &row, &col);
return row;
}
/*
* Update the position of the windows in frame "topfrp", using the width and
* height of the frames.
* "*row" and "*col" are the top-left position of the frame. They are updated
* to the bottom-right position plus one.
*/
static void
frame_comp_pos(frame_T *topfrp, int *row, int *col)
{
win_T *wp;
frame_T *frp;
int startcol;
int startrow;
int h;
wp = topfrp->fr_win;
if (wp != NULL)
{
if (wp->w_winrow != *row || wp->w_wincol != *col)
{
/* position changed, redraw */
wp->w_winrow = *row;
wp->w_wincol = *col;
redraw_win_later(wp, NOT_VALID);
wp->w_redr_status = TRUE;
}
/* WinBar will not show if the window height is zero */
h = VISIBLE_HEIGHT(wp) + wp->w_status_height;
*row += h > topfrp->fr_height ? topfrp->fr_height : h;
*col += wp->w_width + wp->w_vsep_width;
}
else
{
startrow = *row;
startcol = *col;
FOR_ALL_FRAMES(frp, topfrp->fr_child)
{
if (topfrp->fr_layout == FR_ROW)
*row = startrow; /* all frames are at the same row */
else
*col = startcol; /* all frames are at the same col */
frame_comp_pos(frp, row, col);
}
}
}
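/*
 * Illustrative example: for a row frame whose children start at
 * *row == 1 (below the tab line), every child window gets
 * w_winrow == 1 while *col advances by each child's width plus
 * separator; for a column frame the roles of row and column are
 * swapped.
 */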
/*
* Set current window height and take care of repositioning other windows to
* fit around it.
*/
void
win_setheight(int height)
{
win_setheight_win(height, curwin);
}
/*
* Set the window height of window "win" and take care of repositioning other
* windows to fit around it.
*/
void
win_setheight_win(int height, win_T *win)
{
int row;
if (win == curwin)
{
/* Always keep current window at least one line high, even when
* 'winminheight' is zero. */
if (height < p_wmh)
height = p_wmh;
if (height == 0)
height = 1;
height += WINBAR_HEIGHT(curwin);
}
frame_setheight(win->w_frame, height + win->w_status_height);
/* recompute the window positions */
row = win_comp_pos();
/*
* If there is extra space created between the last window and the command
* line, clear it.
*/
if (full_screen && msg_scrolled == 0 && row < cmdline_row)
screen_fill(row, cmdline_row, 0, (int)Columns, ' ', ' ', 0);
cmdline_row = row;
msg_row = row;
msg_col = 0;
redraw_all_later(NOT_VALID);
}
/*
* Set the height of a frame to "height" and take care that all frames and
* windows inside it are resized. Also resize frames on the left and right if
* they are in the same FR_ROW frame.
*
* Strategy:
* If the frame is part of a FR_COL frame, try fitting the frame in that
* frame. If that doesn't work (the FR_COL frame is too small), recursively
* go to containing frames to resize them and make room.
* If the frame is part of a FR_ROW frame, all frames must be resized as well.
* Check for the minimal height of the FR_ROW frame.
* At the top level we can also change the command line height.
*/
static void
frame_setheight(frame_T *curfrp, int height)
{
int room; /* total number of lines available */
int take; /* number of lines taken from other windows */
int room_cmdline; /* lines available from cmdline */
int run;
frame_T *frp;
int h;
int room_reserved;
/* If the height already is the desired value, nothing to do. */
if (curfrp->fr_height == height)
return;
if (curfrp->fr_parent == NULL)
{
/* topframe: can only change the command line */
if (height > ROWS_AVAIL)
height = ROWS_AVAIL;
if (height > 0)
frame_new_height(curfrp, height, FALSE, FALSE);
}
else if (curfrp->fr_parent->fr_layout == FR_ROW)
{
/* Row of frames: Also need to resize frames left and right of this
* one. First check for the minimal height of these. */
h = frame_minheight(curfrp->fr_parent, NULL);
if (height < h)
height = h;
frame_setheight(curfrp->fr_parent, height);
}
else
{
/*
* Column of frames: try to change only frames in this column.
*/
/*
* Do this twice:
* 1: compute room available, if it's not enough try resizing the
* containing frame.
* 2: compute the room available and adjust the height to it.
* Try not to reduce the height of a window with 'winfixheight' set.
*/
for (run = 1; run <= 2; ++run)
{
room = 0;
room_reserved = 0;
FOR_ALL_FRAMES(frp, curfrp->fr_parent->fr_child)
{
if (frp != curfrp
&& frp->fr_win != NULL
&& frp->fr_win->w_p_wfh)
room_reserved += frp->fr_height;
room += frp->fr_height;
if (frp != curfrp)
room -= frame_minheight(frp, NULL);
}
if (curfrp->fr_width != Columns)
room_cmdline = 0;
else
{
room_cmdline = Rows - p_ch - (lastwin->w_winrow
+ VISIBLE_HEIGHT(lastwin)
+ lastwin->w_status_height);
if (room_cmdline < 0)
room_cmdline = 0;
}
if (height <= room + room_cmdline)
break;
if (run == 2 || curfrp->fr_width == Columns)
{
if (height > room + room_cmdline)
height = room + room_cmdline;
break;
}
frame_setheight(curfrp->fr_parent, height
+ frame_minheight(curfrp->fr_parent, NOWIN) - (int)p_wmh - 1);
}
/*
* Compute the number of lines we will take from other frames (can be
* negative!).
*/
take = height - curfrp->fr_height;
/* If there is not enough room, also reduce the height of a window
* with 'winfixheight' set. */
if (height > room + room_cmdline - room_reserved)
room_reserved = room + room_cmdline - height;
/* If there is only a 'winfixheight' window and making the
* window smaller, need to make the other window taller. */
if (take < 0 && room - curfrp->fr_height < room_reserved)
room_reserved = 0;
if (take > 0 && room_cmdline > 0)
{
/* use lines from cmdline first */
if (take < room_cmdline)
room_cmdline = take;
take -= room_cmdline;
topframe->fr_height += room_cmdline;
}
/*
* set the current frame to the new height
*/
frame_new_height(curfrp, height, FALSE, FALSE);
/*
* First take lines from the frames after the current frame. If
* that is not enough, take lines from frames above the current
* frame.
*/
for (run = 0; run < 2; ++run)
{
if (run == 0)
frp = curfrp->fr_next; /* 1st run: start with next window */
else
frp = curfrp->fr_prev; /* 2nd run: start with prev window */
while (frp != NULL && take != 0)
{
h = frame_minheight(frp, NULL);
if (room_reserved > 0
&& frp->fr_win != NULL
&& frp->fr_win->w_p_wfh)
{
if (room_reserved >= frp->fr_height)
room_reserved -= frp->fr_height;
else
{
if (frp->fr_height - room_reserved > take)
room_reserved = frp->fr_height - take;
take -= frp->fr_height - room_reserved;
frame_new_height(frp, room_reserved, FALSE, FALSE);
room_reserved = 0;
}
}
else
{
if (frp->fr_height - take < h)
{
take -= frp->fr_height - h;
frame_new_height(frp, h, FALSE, FALSE);
}
else
{
frame_new_height(frp, frp->fr_height - take,
FALSE, FALSE);
take = 0;
}
}
if (run == 0)
frp = frp->fr_next;
else
frp = frp->fr_prev;
}
}
}
}
/*
* Set current window width and take care of repositioning other windows to
* fit around it.
*/
void
win_setwidth(int width)
{
win_setwidth_win(width, curwin);
}
void
win_setwidth_win(int width, win_T *wp)
{
/* Always keep current window at least one column wide, even when
* 'winminwidth' is zero. */
if (wp == curwin)
{
if (width < p_wmw)
width = p_wmw;
if (width == 0)
width = 1;
}
frame_setwidth(wp->w_frame, width + wp->w_vsep_width);
/* recompute the window positions */
(void)win_comp_pos();
redraw_all_later(NOT_VALID);
}
/*
* Set the width of a frame to "width" and take care that all frames and
* windows inside it are resized. Also resize frames above and below if they
* are in the same FR_COL frame.
*
* Strategy is similar to frame_setheight().
*/
static void
frame_setwidth(frame_T *curfrp, int width)
{
int room; /* total number of columns available */
int take; /* number of columns taken from other windows */
int run;
frame_T *frp;
int w;
int room_reserved;
/* If the width already is the desired value, nothing to do. */
if (curfrp->fr_width == width)
return;
if (curfrp->fr_parent == NULL)
/* topframe: can't change width */
return;
if (curfrp->fr_parent->fr_layout == FR_COL)
{
/* Column of frames: Also need to resize frames above and below of
* this one. First check for the minimal width of these. */
w = frame_minwidth(curfrp->fr_parent, NULL);
if (width < w)
width = w;
frame_setwidth(curfrp->fr_parent, width);
}
else
{
/*
* Row of frames: try to change only frames in this row.
*
* Do this twice:
* 1: compute room available, if it's not enough try resizing the
* containing frame.
* 2: compute the room available and adjust the width to it.
*/
for (run = 1; run <= 2; ++run)
{
room = 0;
room_reserved = 0;
FOR_ALL_FRAMES(frp, curfrp->fr_parent->fr_child)
{
if (frp != curfrp
&& frp->fr_win != NULL
&& frp->fr_win->w_p_wfw)
room_reserved += frp->fr_width;
room += frp->fr_width;
if (frp != curfrp)
room -= frame_minwidth(frp, NULL);
}
if (width <= room)
break;
if (run == 2 || curfrp->fr_height >= ROWS_AVAIL)
{
if (width > room)
width = room;
break;
}
frame_setwidth(curfrp->fr_parent, width
+ frame_minwidth(curfrp->fr_parent, NOWIN) - (int)p_wmw - 1);
}
/*
* Compute the number of columns we will take from other frames (can be
* negative!).
*/
take = width - curfrp->fr_width;
/* If there is not enough room, also reduce the width of a window
* with 'winfixwidth' set. */
if (width > room - room_reserved)
room_reserved = room - width;
/* If there is only a 'winfixwidth' window and making the
* window smaller, need to make the other window narrower. */
if (take < 0 && room - curfrp->fr_width < room_reserved)
room_reserved = 0;
/*
* set the current frame to the new width
*/
frame_new_width(curfrp, width, FALSE, FALSE);
/*
* First take columns from the frames right of the current frame. If
* that is not enough, take columns from frames left of the current
* frame.
*/
for (run = 0; run < 2; ++run)
{
if (run == 0)
frp = curfrp->fr_next; /* 1st run: start with next window */
else
frp = curfrp->fr_prev; /* 2nd run: start with prev window */
while (frp != NULL && take != 0)
{
w = frame_minwidth(frp, NULL);
if (room_reserved > 0
&& frp->fr_win != NULL
&& frp->fr_win->w_p_wfw)
{
if (room_reserved >= frp->fr_width)
room_reserved -= frp->fr_width;
else
{
if (frp->fr_width - room_reserved > take)
room_reserved = frp->fr_width - take;
take -= frp->fr_width - room_reserved;
frame_new_width(frp, room_reserved, FALSE, FALSE);
room_reserved = 0;
}
}
else
{
if (frp->fr_width - take < w)
{
take -= frp->fr_width - w;
frame_new_width(frp, w, FALSE, FALSE);
}
else
{
frame_new_width(frp, frp->fr_width - take,
FALSE, FALSE);
take = 0;
}
}
if (run == 0)
frp = frp->fr_next;
else
frp = frp->fr_prev;
}
}
}
}
/*
* Check 'winminheight' for a valid value and reduce it if needed.
*/
void
win_setminheight(void)
{
int room;
int needed;
int first = TRUE;
// loop until there is a 'winminheight' that is possible
while (p_wmh > 0)
{
room = Rows - p_ch;
needed = frame_minheight(topframe, NULL);
if (room >= needed)
break;
--p_wmh;
if (first)
{
emsg(_(e_noroom));
first = FALSE;
}
}
}
/*
* Check 'winminwidth' for a valid value and reduce it if needed.
*/
void
win_setminwidth(void)
{
int room;
int needed;
int first = TRUE;
// loop until there is a 'winminwidth' that is possible
while (p_wmw > 0)
{
room = Columns;
needed = frame_minwidth(topframe, NULL);
if (room >= needed)
break;
--p_wmw;
if (first)
{
emsg(_(e_noroom));
first = FALSE;
}
}
}
#if defined(FEAT_MOUSE) || defined(PROTO)
/*
* Status line of dragwin is dragged "offset" lines down (negative is up).
*/
void
win_drag_status_line(win_T *dragwin, int offset)
{
frame_T *curfr;
frame_T *fr;
int room;
int row;
int up; /* if TRUE, drag status line up, otherwise down */
int n;
fr = dragwin->w_frame;
curfr = fr;
if (fr != topframe) /* more than one window */
{
fr = fr->fr_parent;
/* When the parent frame is not a column of frames, its parent should
* be. */
if (fr->fr_layout != FR_COL)
{
curfr = fr;
if (fr != topframe) /* only a row of windows, may drag statusline */
fr = fr->fr_parent;
}
}
/* If this is the last frame in a column, may want to resize the parent
* frame instead (go two up to skip a row of frames). */
while (curfr != topframe && curfr->fr_next == NULL)
{
if (fr != topframe)
fr = fr->fr_parent;
curfr = fr;
if (fr != topframe)
fr = fr->fr_parent;
}
if (offset < 0) /* drag up */
{
up = TRUE;
offset = -offset;
/* sum up the room of the current frame and above it */
if (fr == curfr)
{
/* only one window */
room = fr->fr_height - frame_minheight(fr, NULL);
}
else
{
room = 0;
for (fr = fr->fr_child; ; fr = fr->fr_next)
{
room += fr->fr_height - frame_minheight(fr, NULL);
if (fr == curfr)
break;
}
}
fr = curfr->fr_next; /* put fr at frame that grows */
}
else /* drag down */
{
up = FALSE;
/*
* Only dragging the last status line can reduce p_ch.
*/
room = Rows - cmdline_row;
if (curfr->fr_next == NULL)
room -= 1;
else
room -= p_ch;
if (room < 0)
room = 0;
/* sum up the room of frames below of the current one */
FOR_ALL_FRAMES(fr, curfr->fr_next)
room += fr->fr_height - frame_minheight(fr, NULL);
fr = curfr; /* put fr at window that grows */
}
if (room < offset) /* Not enough room */
offset = room; /* Move as far as we can */
if (offset <= 0)
return;
/*
* Grow frame fr by "offset" lines.
* Doesn't happen when dragging the last status line up.
*/
if (fr != NULL)
frame_new_height(fr, fr->fr_height + offset, up, FALSE);
if (up)
fr = curfr; /* current frame gets smaller */
else
fr = curfr->fr_next; /* next frame gets smaller */
/*
* Now make the other frames smaller.
*/
while (fr != NULL && offset > 0)
{
n = frame_minheight(fr, NULL);
if (fr->fr_height - offset <= n)
{
offset -= fr->fr_height - n;
frame_new_height(fr, n, !up, FALSE);
}
else
{
frame_new_height(fr, fr->fr_height - offset, !up, FALSE);
break;
}
if (up)
fr = fr->fr_prev;
else
fr = fr->fr_next;
}
row = win_comp_pos();
screen_fill(row, cmdline_row, 0, (int)Columns, ' ', ' ', 0);
cmdline_row = row;
p_ch = Rows - cmdline_row;
if (p_ch < 1)
p_ch = 1;
curtab->tp_ch_used = p_ch;
redraw_all_later(SOME_VALID);
showmode();
}
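/*
 * Illustrative example: with two windows stacked vertically, dragging
 * the upper window's status line down by two lines (offset == 2) grows
 * the upper frame by two lines and shrinks the lower one, limited by
 * frame_minheight() of the frames below.
 */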
/*
* Separator line of dragwin is dragged "offset" columns right (negative is left).
*/
void
win_drag_vsep_line(win_T *dragwin, int offset)
{
frame_T *curfr;
frame_T *fr;
int room;
int left; /* if TRUE, drag separator line left, otherwise right */
int n;
fr = dragwin->w_frame;
if (fr == topframe) /* only one window (cannot happen?) */
return;
curfr = fr;
fr = fr->fr_parent;
/* When the parent frame is not a row of frames, its parent should be. */
if (fr->fr_layout != FR_ROW)
{
if (fr == topframe) /* only a column of windows (cannot happen?) */
return;
curfr = fr;
fr = fr->fr_parent;
}
/* If this is the last frame in a row, may want to resize a parent
* frame instead. */
while (curfr->fr_next == NULL)
{
if (fr == topframe)
break;
curfr = fr;
fr = fr->fr_parent;
if (fr != topframe)
{
curfr = fr;
fr = fr->fr_parent;
}
}
if (offset < 0) /* drag left */
{
left = TRUE;
offset = -offset;
/* sum up the room of the current frame and left of it */
room = 0;
for (fr = fr->fr_child; ; fr = fr->fr_next)
{
room += fr->fr_width - frame_minwidth(fr, NULL);
if (fr == curfr)
break;
}
fr = curfr->fr_next; /* put fr at frame that grows */
}
else /* drag right */
{
left = FALSE;
/* sum up the room of frames right of the current one */
room = 0;
FOR_ALL_FRAMES(fr, curfr->fr_next)
room += fr->fr_width - frame_minwidth(fr, NULL);
fr = curfr; /* put fr at window that grows */
}
if (room < offset) /* Not enough room */
offset = room; /* Move as far as we can */
if (offset <= 0) /* No room at all, quit. */
return;
if (fr == NULL)
return; /* Safety check, should not happen. */
/* grow frame fr by offset lines */
frame_new_width(fr, fr->fr_width + offset, left, FALSE);
/* shrink other frames: current and at the left or at the right */
if (left)
fr = curfr; /* current frame gets smaller */
else
fr = curfr->fr_next; /* next frame gets smaller */
while (fr != NULL && offset > 0)
{
n = frame_minwidth(fr, NULL);
if (fr->fr_width - offset <= n)
{
offset -= fr->fr_width - n;
frame_new_width(fr, n, !left, FALSE);
}
else
{
frame_new_width(fr, fr->fr_width - offset, !left, FALSE);
break;
}
if (left)
fr = fr->fr_prev;
else
fr = fr->fr_next;
}
(void)win_comp_pos();
redraw_all_later(NOT_VALID);
}
#endif /* FEAT_MOUSE */
#define FRACTION_MULT 16384L
/*
* Set wp->w_fraction for the current w_wrow and w_height.
* Has no effect when the window is less than two lines.
*/
void
set_fraction(win_T *wp)
{
if (wp->w_height > 1)
// When cursor is in the first line the percentage is computed as if
// it's halfway that line. Thus with two lines it is 25%, with three
// lines 17%, etc. Similarly for the last line: 75%, 83%, etc.
wp->w_fraction = ((long)wp->w_wrow * FRACTION_MULT
+ FRACTION_MULT / 2) / (long)wp->w_height;
}
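/*
 * Worked example (illustrative): with w_wrow == 5 and w_height == 20,
 * w_fraction = (5 * 16384 + 8192) / 20 == 4505, i.e. the cursor sits at
 * about 27% of the window height. scroll_to_fraction() below inverts
 * this to recompute w_wrow when the height changes.
 */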
/*
* Set the height of a window.
* "height" excludes any window toolbar.
* This takes care of the things inside the window, not what happens to the
* window position, the frame or to other windows.
*/
void
win_new_height(win_T *wp, int height)
{
int prev_height = wp->w_height;
/* Don't want a negative height. Happens when splitting a tiny window.
* Will equalize heights soon to fix it. */
if (height < 0)
height = 0;
if (wp->w_height == height)
return; /* nothing to do */
if (wp->w_height > 0)
{
if (wp == curwin)
/* w_wrow needs to be valid. When setting 'laststatus' this may
* call win_new_height() recursively. */
validate_cursor();
if (wp->w_height != prev_height)
return; /* Recursive call already changed the size, bail out here
to avoid the code below messing things up. */
if (wp->w_wrow != wp->w_prev_fraction_row)
set_fraction(wp);
}
wp->w_height = height;
wp->w_skipcol = 0;
/* There is no point in adjusting the scroll position when exiting. Some
* values might be invalid. */
if (!exiting)
scroll_to_fraction(wp, prev_height);
}
void
scroll_to_fraction(win_T *wp, int prev_height)
{
linenr_T lnum;
int sline, line_size;
int height = wp->w_height;
// Don't change w_topline in any of these cases:
// - window height is 0
// - 'scrollbind' is set and this isn't the current window
// - window height is sufficient to display the whole buffer and first line
// is visible.
if (height > 0
&& (!wp->w_p_scb || wp == curwin)
&& (height < wp->w_buffer->b_ml.ml_line_count || wp->w_topline > 1))
{
/*
* Find a value for w_topline that shows the cursor at the same
* relative position in the window as before (more or less).
*/
lnum = wp->w_cursor.lnum;
if (lnum < 1) /* can happen when starting up */
lnum = 1;
wp->w_wrow = ((long)wp->w_fraction * (long)height - 1L)
/ FRACTION_MULT;
line_size = plines_win_col(wp, lnum, (long)(wp->w_cursor.col)) - 1;
sline = wp->w_wrow - line_size;
if (sline >= 0)
{
/* Make sure the whole cursor line is visible, if possible. */
int rows = plines_win(wp, lnum, FALSE);
if (sline > wp->w_height - rows)
{
sline = wp->w_height - rows;
wp->w_wrow -= rows - line_size;
}
}
if (sline < 0)
{
/*
* Cursor line would go off top of screen if w_wrow was this high.
* Make cursor line the first line in the window. If not enough
* room, use w_skipcol.
*/
wp->w_wrow = line_size;
if (wp->w_wrow >= wp->w_height
&& (wp->w_width - win_col_off(wp)) > 0)
{
wp->w_skipcol += wp->w_width - win_col_off(wp);
--wp->w_wrow;
while (wp->w_wrow >= wp->w_height)
{
wp->w_skipcol += wp->w_width - win_col_off(wp)
+ win_col_off2(wp);
--wp->w_wrow;
}
}
}
else if (sline > 0)
{
while (sline > 0 && lnum > 1)
{
#ifdef FEAT_FOLDING
hasFoldingWin(wp, lnum, &lnum, NULL, TRUE, NULL);
if (lnum == 1)
{
/* first line in buffer is folded */
line_size = 1;
--sline;
break;
}
#endif
--lnum;
#ifdef FEAT_DIFF
if (lnum == wp->w_topline)
line_size = plines_win_nofill(wp, lnum, TRUE)
+ wp->w_topfill;
else
#endif
line_size = plines_win(wp, lnum, TRUE);
sline -= line_size;
}
if (sline < 0)
{
/*
* Line we want at top would go off top of screen. Use next
* line instead.
*/
#ifdef FEAT_FOLDING
hasFoldingWin(wp, lnum, NULL, &lnum, TRUE, NULL);
#endif
lnum++;
wp->w_wrow -= line_size + sline;
}
else if (sline > 0)
{
// First line of file reached, use that as topline.
lnum = 1;
wp->w_wrow -= sline;
}
}
set_topline(wp, lnum);
}
if (wp == curwin)
{
if (get_scrolloff_value())
update_topline();
curs_columns(FALSE); /* validate w_wrow */
}
if (prev_height > 0)
wp->w_prev_fraction_row = wp->w_wrow;
win_comp_scroll(wp);
redraw_win_later(wp, SOME_VALID);
wp->w_redr_status = TRUE;
invalidate_botline_win(wp);
}
/*
* Set the width of a window.
*/
void
win_new_width(win_T *wp, int width)
{
wp->w_width = width;
wp->w_lines_valid = 0;
changed_line_abv_curs_win(wp);
invalidate_botline_win(wp);
if (wp == curwin)
{
update_topline();
curs_columns(TRUE); /* validate w_wrow */
}
redraw_win_later(wp, NOT_VALID);
wp->w_redr_status = TRUE;
}
void
win_comp_scroll(win_T *wp)
{
wp->w_p_scr = ((unsigned)wp->w_height >> 1);
if (wp->w_p_scr == 0)
wp->w_p_scr = 1;
}
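/*
 * Illustrative example: a 13 line window gets w_p_scr ('scroll') set to
 * 6, so CTRL-U and CTRL-D move by half a window; a zero or one line
 * window still gets the minimum of 1.
 */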
/*
* command_height: called whenever p_ch has been changed
*/
void
command_height(void)
{
int h;
frame_T *frp;
int old_p_ch = curtab->tp_ch_used;
/* Use the value of p_ch that we remembered. This is needed for when the
* GUI starts up, we can't be sure in what order things happen. And when
* p_ch was changed in another tab page. */
curtab->tp_ch_used = p_ch;
/* Find bottom frame with width of screen. */
frp = lastwin->w_frame;
while (frp->fr_width != Columns && frp->fr_parent != NULL)
frp = frp->fr_parent;
/* Avoid changing the height of a window with 'winfixheight' set. */
while (frp->fr_prev != NULL && frp->fr_layout == FR_LEAF
&& frp->fr_win->w_p_wfh)
frp = frp->fr_prev;
if (starting != NO_SCREEN)
{
cmdline_row = Rows - p_ch;
if (p_ch > old_p_ch) /* p_ch got bigger */
{
while (p_ch > old_p_ch)
{
if (frp == NULL)
{
emsg(_(e_noroom));
p_ch = old_p_ch;
curtab->tp_ch_used = p_ch;
cmdline_row = Rows - p_ch;
break;
}
h = frp->fr_height - frame_minheight(frp, NULL);
if (h > p_ch - old_p_ch)
h = p_ch - old_p_ch;
old_p_ch += h;
frame_add_height(frp, -h);
frp = frp->fr_prev;
}
/* Recompute window positions. */
(void)win_comp_pos();
/* clear the lines added to cmdline */
if (full_screen)
screen_fill((int)(cmdline_row), (int)Rows, 0,
(int)Columns, ' ', ' ', 0);
msg_row = cmdline_row;
redraw_cmdline = TRUE;
return;
}
if (msg_row < cmdline_row)
msg_row = cmdline_row;
redraw_cmdline = TRUE;
}
frame_add_height(frp, (int)(old_p_ch - p_ch));
/* Recompute window positions. */
if (frp != lastwin->w_frame)
(void)win_comp_pos();
}
/*
* Resize frame "frp" to be "n" lines higher (negative for less high).
* Also resize the frames it is contained in.
*/
static void
frame_add_height(frame_T *frp, int n)
{
frame_new_height(frp, frp->fr_height + n, FALSE, FALSE);
for (;;)
{
frp = frp->fr_parent;
if (frp == NULL)
break;
frp->fr_height += n;
}
}
/*
* Add or remove a status line for the bottom window(s), according to the
* value of 'laststatus'.
*/
void
last_status(
int morewin) /* pretend there are two or more windows */
{
/* Don't make a difference between horizontal or vertical split. */
last_status_rec(topframe, (p_ls == 2
|| (p_ls == 1 && (morewin || !ONE_WINDOW))));
}
static void
last_status_rec(frame_T *fr, int statusline)
{
frame_T *fp;
win_T *wp;
if (fr->fr_layout == FR_LEAF)
{
wp = fr->fr_win;
if (wp->w_status_height != 0 && !statusline)
{
/* remove status line */
win_new_height(wp, wp->w_height + 1);
wp->w_status_height = 0;
comp_col();
}
else if (wp->w_status_height == 0 && statusline)
{
/* Find a frame to take a line from. */
fp = fr;
while (fp->fr_height <= frame_minheight(fp, NULL))
{
if (fp == topframe)
{
emsg(_(e_noroom));
return;
}
/* In a column of frames: go to frame above. If already at
* the top or in a row of frames: go to parent. */
if (fp->fr_parent->fr_layout == FR_COL && fp->fr_prev != NULL)
fp = fp->fr_prev;
else
fp = fp->fr_parent;
}
wp->w_status_height = 1;
if (fp != fr)
{
frame_new_height(fp, fp->fr_height - 1, FALSE, FALSE);
frame_fix_height(wp);
(void)win_comp_pos();
}
else
win_new_height(wp, wp->w_height - 1);
comp_col();
redraw_all_later(SOME_VALID);
}
}
else if (fr->fr_layout == FR_ROW)
{
/* vertically split windows, set status line for each one */
FOR_ALL_FRAMES(fp, fr->fr_child)
last_status_rec(fp, statusline);
}
else
{
/* horizontally split window, set status line for last one */
for (fp = fr->fr_child; fp->fr_next != NULL; fp = fp->fr_next)
;
last_status_rec(fp, statusline);
}
}
/*
* Return the number of lines used by the tab page line.
*/
int
tabline_height(void)
{
#ifdef FEAT_GUI_TABLINE
/* When the GUI has the tabline then this always returns zero. */
if (gui_use_tabline())
return 0;
#endif
switch (p_stal)
{
case 0: return 0;
case 1: return (first_tabpage->tp_next == NULL) ? 0 : 1;
}
return 1;
}
/*
* Return the minimal number of rows that is needed on the screen to display
* the current number of windows.
*/
int
min_rows(void)
{
int total;
tabpage_T *tp;
int n;
if (firstwin == NULL) /* not initialized yet */
return MIN_LINES;
total = 0;
FOR_ALL_TABPAGES(tp)
{
n = frame_minheight(tp->tp_topframe, NULL);
if (total < n)
total = n;
}
total += tabline_height();
total += 1; /* count the room for the command line */
return total;
}
/*
* Return TRUE if there is only one window (in the current tab page), not
* counting a help or preview window, unless it is the current window.
* Does not count unlisted windows.
*/
int
only_one_window(void)
{
int count = 0;
win_T *wp;
/* If there is another tab page there always is another window. */
if (first_tabpage->tp_next != NULL)
return FALSE;
FOR_ALL_WINDOWS(wp)
if (wp->w_buffer != NULL
&& (!((bt_help(wp->w_buffer) && !bt_help(curbuf))
# ifdef FEAT_QUICKFIX
|| wp->w_p_pvw
# endif
) || wp == curwin) && wp != aucmd_win)
++count;
return (count <= 1);
}
/*
* Correct the cursor line number in other windows. Used after changing the
* current buffer, and before applying autocommands.
* When "do_curwin" is TRUE, also check current window.
*/
void
check_lnums(int do_curwin)
{
win_T *wp;
tabpage_T *tp;
FOR_ALL_TAB_WINDOWS(tp, wp)
if ((do_curwin || wp != curwin) && wp->w_buffer == curbuf)
{
// save the original cursor position and topline
wp->w_save_cursor.w_cursor_save = wp->w_cursor;
wp->w_save_cursor.w_topline_save = wp->w_topline;
if (wp->w_cursor.lnum > curbuf->b_ml.ml_line_count)
wp->w_cursor.lnum = curbuf->b_ml.ml_line_count;
if (wp->w_topline > curbuf->b_ml.ml_line_count)
wp->w_topline = curbuf->b_ml.ml_line_count;
// save the corrected cursor position and topline
wp->w_save_cursor.w_cursor_corr = wp->w_cursor;
wp->w_save_cursor.w_topline_corr = wp->w_topline;
}
}
/*
* Reset cursor and topline to its stored values from check_lnums().
* check_lnums() must have been called first!
*/
void
reset_lnums()
{
win_T *wp;
tabpage_T *tp;
FOR_ALL_TAB_WINDOWS(tp, wp)
if (wp->w_buffer == curbuf)
{
// Restore the value if the autocommand didn't change it.
if (EQUAL_POS(wp->w_save_cursor.w_cursor_corr, wp->w_cursor))
wp->w_cursor = wp->w_save_cursor.w_cursor_save;
if (wp->w_save_cursor.w_topline_corr == wp->w_topline)
wp->w_topline = wp->w_save_cursor.w_topline_save;
}
}
/*
* A snapshot of the window sizes, to restore them after closing the help
* window.
* Only these fields are used:
* fr_layout
* fr_width
* fr_height
* fr_next
* fr_child
* fr_win (only valid for the old curwin, NULL otherwise)
*/
/*
* Create a snapshot of the current frame sizes.
*/
void
make_snapshot(int idx)
{
clear_snapshot(curtab, idx);
make_snapshot_rec(topframe, &curtab->tp_snapshot[idx]);
}
static void
make_snapshot_rec(frame_T *fr, frame_T **frp)
{
*frp = ALLOC_CLEAR_ONE(frame_T);
if (*frp == NULL)
return;
(*frp)->fr_layout = fr->fr_layout;
(*frp)->fr_width = fr->fr_width;
(*frp)->fr_height = fr->fr_height;
if (fr->fr_next != NULL)
make_snapshot_rec(fr->fr_next, &((*frp)->fr_next));
if (fr->fr_child != NULL)
make_snapshot_rec(fr->fr_child, &((*frp)->fr_child));
if (fr->fr_layout == FR_LEAF && fr->fr_win == curwin)
(*frp)->fr_win = curwin;
}
/*
* Remove any existing snapshot.
*/
static void
clear_snapshot(tabpage_T *tp, int idx)
{
clear_snapshot_rec(tp->tp_snapshot[idx]);
tp->tp_snapshot[idx] = NULL;
}
static void
clear_snapshot_rec(frame_T *fr)
{
if (fr != NULL)
{
clear_snapshot_rec(fr->fr_next);
clear_snapshot_rec(fr->fr_child);
vim_free(fr);
}
}
/*
* Restore a previously created snapshot, if there is any.
* This is only done if the screen size didn't change and the window layout is
* still the same.
*/
void
restore_snapshot(
int idx,
int close_curwin) /* closing current window */
{
win_T *wp;
if (curtab->tp_snapshot[idx] != NULL
&& curtab->tp_snapshot[idx]->fr_width == topframe->fr_width
&& curtab->tp_snapshot[idx]->fr_height == topframe->fr_height
&& check_snapshot_rec(curtab->tp_snapshot[idx], topframe) == OK)
{
wp = restore_snapshot_rec(curtab->tp_snapshot[idx], topframe);
win_comp_pos();
if (wp != NULL && close_curwin)
win_goto(wp);
redraw_all_later(NOT_VALID);
}
clear_snapshot(curtab, idx);
}
/*
* Check if frames "sn" and "fr" have the same layout, same following frames
* and same children. And the window pointer is valid.
*/
static int
check_snapshot_rec(frame_T *sn, frame_T *fr)
{
if (sn->fr_layout != fr->fr_layout
|| (sn->fr_next == NULL) != (fr->fr_next == NULL)
|| (sn->fr_child == NULL) != (fr->fr_child == NULL)
|| (sn->fr_next != NULL
&& check_snapshot_rec(sn->fr_next, fr->fr_next) == FAIL)
|| (sn->fr_child != NULL
&& check_snapshot_rec(sn->fr_child, fr->fr_child) == FAIL)
|| (sn->fr_win != NULL && !win_valid(sn->fr_win)))
return FAIL;
return OK;
}
/*
* Copy the size of snapshot frame "sn" to frame "fr". Do the same for all
* following frames and children.
* Returns a pointer to the old current window, or NULL.
*/
static win_T *
restore_snapshot_rec(frame_T *sn, frame_T *fr)
{
win_T *wp = NULL;
win_T *wp2;
fr->fr_height = sn->fr_height;
fr->fr_width = sn->fr_width;
if (fr->fr_layout == FR_LEAF)
{
frame_new_height(fr, fr->fr_height, FALSE, FALSE);
frame_new_width(fr, fr->fr_width, FALSE, FALSE);
wp = sn->fr_win;
}
if (sn->fr_next != NULL)
{
wp2 = restore_snapshot_rec(sn->fr_next, fr->fr_next);
if (wp2 != NULL)
wp = wp2;
}
if (sn->fr_child != NULL)
{
wp2 = restore_snapshot_rec(sn->fr_child, fr->fr_child);
if (wp2 != NULL)
wp = wp2;
}
return wp;
}
#if defined(FEAT_GUI) || defined(PROTO)
/*
* Return TRUE if there is any vertically split window.
*/
int
win_hasvertsplit(void)
{
frame_T *fr;
if (topframe->fr_layout == FR_ROW)
return TRUE;
if (topframe->fr_layout == FR_COL)
FOR_ALL_FRAMES(fr, topframe->fr_child)
if (fr->fr_layout == FR_ROW)
return TRUE;
return FALSE;
}
#endif
#if defined(FEAT_PYTHON) || defined(FEAT_PYTHON3) || defined(PROTO)
int
get_win_number(win_T *wp, win_T *first_win)
{
int i = 1;
win_T *w;
for (w = first_win; w != NULL && w != wp; w = W_NEXT(w))
++i;
if (w == NULL)
return 0;
else
return i;
}
int
get_tab_number(tabpage_T *tp UNUSED)
{
int i = 1;
tabpage_T *t;
for (t = first_tabpage; t != NULL && t != tp; t = t->tp_next)
++i;
if (t == NULL)
return 0;
else
return i;
}
#endif
/*
* Return TRUE if "topfrp" and its children are at the right height.
*/
static int
frame_check_height(frame_T *topfrp, int height)
{
frame_T *frp;
if (topfrp->fr_height != height)
return FALSE;
if (topfrp->fr_layout == FR_ROW)
FOR_ALL_FRAMES(frp, topfrp->fr_child)
if (frp->fr_height != height)
return FALSE;
return TRUE;
}
/*
* Return TRUE if "topfrp" and its children are at the right width.
*/
static int
frame_check_width(frame_T *topfrp, int width)
{
frame_T *frp;
if (topfrp->fr_width != width)
return FALSE;
if (topfrp->fr_layout == FR_COL)
FOR_ALL_FRAMES(frp, topfrp->fr_child)
if (frp->fr_width != width)
return FALSE;
return TRUE;
}
#if defined(FEAT_SYN_HL) || defined(PROTO)
/*
* Simple int comparison function for use with qsort()
*/
static int
int_cmp(const void *a, const void *b)
{
return *(const int *)a - *(const int *)b;
}
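/*
* Note: the subtraction idiom can in theory overflow for operands near
* INT_MIN/INT_MAX; it is fine in practice for the 0-based screen columns
* that check_colorcolumn() below feeds to qsort().
*/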
/*
* Handle setting 'colorcolumn' or 'textwidth' in window "wp".
* Returns error message, NULL if it's OK.
*/
char *
check_colorcolumn(win_T *wp)
{
char_u *s;
int col;
int count = 0;
int color_cols[256];
int i;
int j = 0;
if (wp->w_buffer == NULL)
return NULL; // buffer was closed
for (s = wp->w_p_cc; *s != NUL && count < 255;)
{
if (*s == '-' || *s == '+')
{
// -N and +N: add to 'textwidth'
col = (*s == '-') ? -1 : 1;
++s;
if (!VIM_ISDIGIT(*s))
return e_invarg;
col = col * getdigits(&s);
if (wp->w_buffer->b_p_tw == 0)
goto skip; // 'textwidth' not set, skip this item
col += wp->w_buffer->b_p_tw;
if (col < 0)
goto skip;
}
else if (VIM_ISDIGIT(*s))
col = getdigits(&s);
else
return e_invarg;
color_cols[count++] = col - 1; // 1-based to 0-based
skip:
if (*s == NUL)
break;
if (*s != ',')
return e_invarg;
if (*++s == NUL)
return e_invarg; // illegal trailing comma as in "set cc=80,"
}
vim_free(wp->w_p_cc_cols);
if (count == 0)
wp->w_p_cc_cols = NULL;
else
{
wp->w_p_cc_cols = ALLOC_MULT(int, count + 1);
if (wp->w_p_cc_cols != NULL)
{
// sort the columns for faster usage on screen redraw inside
// win_line()
qsort(color_cols, count, sizeof(int), int_cmp);
for (i = 0; i < count; ++i)
// skip duplicates
if (j == 0 || wp->w_p_cc_cols[j - 1] != color_cols[i])
wp->w_p_cc_cols[j++] = color_cols[i];
wp->w_p_cc_cols[j] = -1; // end marker
}
}
return NULL; // no error
}
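/*
* Worked example: with 'textwidth'=80, ":set colorcolumn=+1,-2,40" parses
* to the 1-based columns 81, 78 and 40, which are stored sorted and
* 0-based as { 39, 77, 80 } followed by the -1 end marker.
*/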
#endif
| ./CrossVul/dataset_final_sorted/CWE-416/c/good_1342_2 |
crossvul-cpp_data_good_2851_1 | /*
* ALSA sequencer Ports
* Copyright (c) 1998 by Frank van de Pol <fvdpol@coil.demon.nl>
* Jaroslav Kysela <perex@perex.cz>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <sound/core.h>
#include <linux/slab.h>
#include <linux/module.h>
#include "seq_system.h"
#include "seq_ports.h"
#include "seq_clientmgr.h"
/*
registration of client ports
*/
/*
NOTE: the current implementation of the port structure as a linked list is
not optimal for clients that have many ports. For sending messages to all
subscribers of a port we first need to find the address of the port
structure, which means we have to traverse the list. A direct access table
(array) would be better, but big preallocated arrays waste memory.
Possible actions:
1) leave it this way; a client normally does not have more than a few
ports
2) replace the linked list of ports by an array of pointers which is
dynamically kmalloced. When a port is added or deleted we can simply
allocate a new array, copy the corresponding pointers, and delete the old
one. We then only need a pointer to this array, and an integer that tells
us how many elements are in the array. (See the illustrative sketch below.)
*/
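/*
* Purely illustrative sketch of option 2) above -- not used anywhere in
* this file. The names (struct port_array, port_array_grow) are made up
* for the example, and a real conversion would still have to sort out
* locking against the readers that currently walk ports_list_head.
*/
#if 0
struct port_array {
struct snd_seq_client_port **ports; /* kmalloc'ed pointer table */
int nports; /* number of valid entries */
};
/* Replace the table by a copy that has room for one more port. */
static int port_array_grow(struct port_array *pa,
struct snd_seq_client_port *port)
{
struct snd_seq_client_port **tbl;
tbl = kmalloc_array(pa->nports + 1, sizeof(*tbl), GFP_KERNEL);
if (!tbl)
return -ENOMEM;
if (pa->nports)
memcpy(tbl, pa->ports, pa->nports * sizeof(*tbl));
tbl[pa->nports] = port;
kfree(pa->ports);
pa->ports = tbl;
pa->nports++;
return 0;
}
#endif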
/* return pointer to port structure - port is locked if found */
struct snd_seq_client_port *snd_seq_port_use_ptr(struct snd_seq_client *client,
int num)
{
struct snd_seq_client_port *port;
if (client == NULL)
return NULL;
read_lock(&client->ports_lock);
list_for_each_entry(port, &client->ports_list_head, list) {
if (port->addr.port == num) {
if (port->closing)
break; /* deleting now */
snd_use_lock_use(&port->use_lock);
read_unlock(&client->ports_lock);
return port;
}
}
read_unlock(&client->ports_lock);
return NULL; /* not found */
}
/* search for the next port - port is locked if found */
struct snd_seq_client_port *snd_seq_port_query_nearest(struct snd_seq_client *client,
struct snd_seq_port_info *pinfo)
{
int num;
struct snd_seq_client_port *port, *found;
num = pinfo->addr.port;
found = NULL;
read_lock(&client->ports_lock);
list_for_each_entry(port, &client->ports_list_head, list) {
if (port->addr.port < num)
continue;
if (port->addr.port == num) {
found = port;
break;
}
if (found == NULL || port->addr.port < found->addr.port)
found = port;
}
if (found) {
if (found->closing)
found = NULL;
else
snd_use_lock_use(&found->use_lock);
}
read_unlock(&client->ports_lock);
return found;
}
/* initialize snd_seq_port_subs_info */
static void port_subs_info_init(struct snd_seq_port_subs_info *grp)
{
INIT_LIST_HEAD(&grp->list_head);
grp->count = 0;
grp->exclusive = 0;
rwlock_init(&grp->list_lock);
init_rwsem(&grp->list_mutex);
grp->open = NULL;
grp->close = NULL;
}
/* create a port; a pointer to the new port is returned (NULL on failure);
* the caller needs to unref the port via snd_seq_port_unlock() appropriately
*/
struct snd_seq_client_port *snd_seq_create_port(struct snd_seq_client *client,
int port)
{
unsigned long flags;
struct snd_seq_client_port *new_port, *p;
int num = -1;
/* sanity check */
if (snd_BUG_ON(!client))
return NULL;
if (client->num_ports >= SNDRV_SEQ_MAX_PORTS) {
pr_warn("ALSA: seq: too many ports for client %d\n", client->number);
return NULL;
}
/* create a new port */
new_port = kzalloc(sizeof(*new_port), GFP_KERNEL);
if (!new_port)
return NULL; /* failure, out of memory */
/* init port data */
new_port->addr.client = client->number;
new_port->addr.port = -1;
new_port->owner = THIS_MODULE;
snd_use_lock_init(&new_port->use_lock);
port_subs_info_init(&new_port->c_src);
port_subs_info_init(&new_port->c_dest);
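/* Hold a use reference on behalf of the caller: a concurrent delete must
* not free the port before the caller has finished setting it up and
* dropped the reference via snd_seq_port_unlock(). */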
snd_use_lock_use(&new_port->use_lock);
num = port >= 0 ? port : 0;
mutex_lock(&client->ports_mutex);
write_lock_irqsave(&client->ports_lock, flags);
list_for_each_entry(p, &client->ports_list_head, list) {
if (p->addr.port > num)
break;
if (port < 0) /* auto-probe mode */
num = p->addr.port + 1;
}
/* insert the new port */
list_add_tail(&new_port->list, &p->list);
client->num_ports++;
new_port->addr.port = num; /* store the port number in the port */
sprintf(new_port->name, "port-%d", num);
write_unlock_irqrestore(&client->ports_lock, flags);
mutex_unlock(&client->ports_mutex);
return new_port;
}
/* */
static int subscribe_port(struct snd_seq_client *client,
struct snd_seq_client_port *port,
struct snd_seq_port_subs_info *grp,
struct snd_seq_port_subscribe *info, int send_ack);
static int unsubscribe_port(struct snd_seq_client *client,
struct snd_seq_client_port *port,
struct snd_seq_port_subs_info *grp,
struct snd_seq_port_subscribe *info, int send_ack);
static struct snd_seq_client_port *get_client_port(struct snd_seq_addr *addr,
struct snd_seq_client **cp)
{
struct snd_seq_client_port *p;
*cp = snd_seq_client_use_ptr(addr->client);
if (*cp) {
p = snd_seq_port_use_ptr(*cp, addr->port);
if (! p) {
snd_seq_client_unlock(*cp);
*cp = NULL;
}
return p;
}
return NULL;
}
static void delete_and_unsubscribe_port(struct snd_seq_client *client,
struct snd_seq_client_port *port,
struct snd_seq_subscribers *subs,
bool is_src, bool ack);
static inline struct snd_seq_subscribers *
get_subscriber(struct list_head *p, bool is_src)
{
if (is_src)
return list_entry(p, struct snd_seq_subscribers, src_list);
else
return list_entry(p, struct snd_seq_subscribers, dest_list);
}
/*
* remove all subscribers on the list
* this is called from port_delete, for each src and dest list.
*/
static void clear_subscriber_list(struct snd_seq_client *client,
struct snd_seq_client_port *port,
struct snd_seq_port_subs_info *grp,
int is_src)
{
struct list_head *p, *n;
list_for_each_safe(p, n, &grp->list_head) {
struct snd_seq_subscribers *subs;
struct snd_seq_client *c;
struct snd_seq_client_port *aport;
subs = get_subscriber(p, is_src);
if (is_src)
aport = get_client_port(&subs->info.dest, &c);
else
aport = get_client_port(&subs->info.sender, &c);
delete_and_unsubscribe_port(client, port, subs, is_src, false);
if (!aport) {
/* looks like the connected port is being deleted.
* we decrease the counter, and when both ports are deleted
* remove the subscriber info
*/
if (atomic_dec_and_test(&subs->ref_count))
kfree(subs);
continue;
}
/* ok we got the connected port */
delete_and_unsubscribe_port(c, aport, subs, !is_src, true);
kfree(subs);
snd_seq_port_unlock(aport);
snd_seq_client_unlock(c);
}
}
/* delete port data */
static int port_delete(struct snd_seq_client *client,
struct snd_seq_client_port *port)
{
/* set closing flag and wait for all port access are gone */
port->closing = 1;
snd_use_lock_sync(&port->use_lock);
/* clear subscribers info */
clear_subscriber_list(client, port, &port->c_src, true);
clear_subscriber_list(client, port, &port->c_dest, false);
if (port->private_free)
port->private_free(port->private_data);
snd_BUG_ON(port->c_src.count != 0);
snd_BUG_ON(port->c_dest.count != 0);
kfree(port);
return 0;
}
/* delete a port with the given port id */
int snd_seq_delete_port(struct snd_seq_client *client, int port)
{
unsigned long flags;
struct snd_seq_client_port *found = NULL, *p;
mutex_lock(&client->ports_mutex);
write_lock_irqsave(&client->ports_lock, flags);
list_for_each_entry(p, &client->ports_list_head, list) {
if (p->addr.port == port) {
/* ok found. delete from the list at first */
list_del(&p->list);
client->num_ports--;
found = p;
break;
}
}
write_unlock_irqrestore(&client->ports_lock, flags);
mutex_unlock(&client->ports_mutex);
if (found)
return port_delete(client, found);
else
return -ENOENT;
}
/* delete all the ports belonging to the given client */
int snd_seq_delete_all_ports(struct snd_seq_client *client)
{
unsigned long flags;
struct list_head deleted_list;
struct snd_seq_client_port *port, *tmp;
/* move the port list to deleted_list, and
* clear the port list in the client data.
*/
mutex_lock(&client->ports_mutex);
write_lock_irqsave(&client->ports_lock, flags);
if (! list_empty(&client->ports_list_head)) {
list_add(&deleted_list, &client->ports_list_head);
list_del_init(&client->ports_list_head);
} else {
INIT_LIST_HEAD(&deleted_list);
}
client->num_ports = 0;
write_unlock_irqrestore(&client->ports_lock, flags);
/* remove each port in deleted_list */
list_for_each_entry_safe(port, tmp, &deleted_list, list) {
list_del(&port->list);
snd_seq_system_client_ev_port_exit(port->addr.client, port->addr.port);
port_delete(client, port);
}
mutex_unlock(&client->ports_mutex);
return 0;
}
/* set port info fields */
int snd_seq_set_port_info(struct snd_seq_client_port * port,
struct snd_seq_port_info * info)
{
if (snd_BUG_ON(!port || !info))
return -EINVAL;
/* set port name */
if (info->name[0])
strlcpy(port->name, info->name, sizeof(port->name));
/* set capabilities */
port->capability = info->capability;
/* get port type */
port->type = info->type;
/* information about supported channels/voices */
port->midi_channels = info->midi_channels;
port->midi_voices = info->midi_voices;
port->synth_voices = info->synth_voices;
/* timestamping */
port->timestamping = (info->flags & SNDRV_SEQ_PORT_FLG_TIMESTAMP) ? 1 : 0;
port->time_real = (info->flags & SNDRV_SEQ_PORT_FLG_TIME_REAL) ? 1 : 0;
port->time_queue = info->time_queue;
return 0;
}
/* get port info fields */
int snd_seq_get_port_info(struct snd_seq_client_port * port,
struct snd_seq_port_info * info)
{
if (snd_BUG_ON(!port || !info))
return -EINVAL;
/* get port name */
strlcpy(info->name, port->name, sizeof(info->name));
/* get capabilities */
info->capability = port->capability;
/* get port type */
info->type = port->type;
/* information about supported channels/voices */
info->midi_channels = port->midi_channels;
info->midi_voices = port->midi_voices;
info->synth_voices = port->synth_voices;
/* get subscriber counts */
info->read_use = port->c_src.count;
info->write_use = port->c_dest.count;
/* timestamping */
info->flags = 0;
if (port->timestamping) {
info->flags |= SNDRV_SEQ_PORT_FLG_TIMESTAMP;
if (port->time_real)
info->flags |= SNDRV_SEQ_PORT_FLG_TIME_REAL;
info->time_queue = port->time_queue;
}
return 0;
}
/*
* call callback functions (if any):
* the callbacks are invoked only when the first (for connection) or
* the last subscription (for disconnection) is done. Second or later
* subscription results in increment of counter, but no callback is
* invoked.
* This feature is useful if these callbacks are associated with
* initialization or termination of devices (see seq_midi.c).
*/
static int subscribe_port(struct snd_seq_client *client,
struct snd_seq_client_port *port,
struct snd_seq_port_subs_info *grp,
struct snd_seq_port_subscribe *info,
int send_ack)
{
int err = 0;
if (!try_module_get(port->owner))
return -EFAULT;
grp->count++;
if (grp->open && grp->count == 1) {
err = grp->open(port->private_data, info);
if (err < 0) {
module_put(port->owner);
grp->count--;
}
}
if (err >= 0 && send_ack && client->type == USER_CLIENT)
snd_seq_client_notify_subscription(port->addr.client, port->addr.port,
info, SNDRV_SEQ_EVENT_PORT_SUBSCRIBED);
return err;
}
static int unsubscribe_port(struct snd_seq_client *client,
struct snd_seq_client_port *port,
struct snd_seq_port_subs_info *grp,
struct snd_seq_port_subscribe *info,
int send_ack)
{
int err = 0;
if (! grp->count)
return -EINVAL;
grp->count--;
if (grp->close && grp->count == 0)
err = grp->close(port->private_data, info);
if (send_ack && client->type == USER_CLIENT)
snd_seq_client_notify_subscription(port->addr.client, port->addr.port,
info, SNDRV_SEQ_EVENT_PORT_UNSUBSCRIBED);
module_put(port->owner);
return err;
}
/* check if both addresses are identical */
static inline int addr_match(struct snd_seq_addr *r, struct snd_seq_addr *s)
{
return (r->client == s->client) && (r->port == s->port);
}
/* check the two subscribe info match */
/* if flags is zero, checks only sender and destination addresses */
static int match_subs_info(struct snd_seq_port_subscribe *r,
struct snd_seq_port_subscribe *s)
{
if (addr_match(&r->sender, &s->sender) &&
addr_match(&r->dest, &s->dest)) {
if (r->flags && r->flags == s->flags)
return r->queue == s->queue;
else if (! r->flags)
return 1;
}
return 0;
}
static int check_and_subscribe_port(struct snd_seq_client *client,
struct snd_seq_client_port *port,
struct snd_seq_subscribers *subs,
bool is_src, bool exclusive, bool ack)
{
struct snd_seq_port_subs_info *grp;
struct list_head *p;
struct snd_seq_subscribers *s;
int err;
grp = is_src ? &port->c_src : &port->c_dest;
err = -EBUSY;
down_write(&grp->list_mutex);
if (exclusive) {
if (!list_empty(&grp->list_head))
goto __error;
} else {
if (grp->exclusive)
goto __error;
/* check whether already exists */
list_for_each(p, &grp->list_head) {
s = get_subscriber(p, is_src);
if (match_subs_info(&subs->info, &s->info))
goto __error;
}
}
err = subscribe_port(client, port, grp, &subs->info, ack);
if (err < 0) {
grp->exclusive = 0;
goto __error;
}
/* add to list */
write_lock_irq(&grp->list_lock);
if (is_src)
list_add_tail(&subs->src_list, &grp->list_head);
else
list_add_tail(&subs->dest_list, &grp->list_head);
grp->exclusive = exclusive;
atomic_inc(&subs->ref_count);
write_unlock_irq(&grp->list_lock);
err = 0;
__error:
up_write(&grp->list_mutex);
return err;
}
static void delete_and_unsubscribe_port(struct snd_seq_client *client,
struct snd_seq_client_port *port,
struct snd_seq_subscribers *subs,
bool is_src, bool ack)
{
struct snd_seq_port_subs_info *grp;
struct list_head *list;
bool empty;
grp = is_src ? &port->c_src : &port->c_dest;
list = is_src ? &subs->src_list : &subs->dest_list;
down_write(&grp->list_mutex);
write_lock_irq(&grp->list_lock);
empty = list_empty(list);
if (!empty)
list_del_init(list);
grp->exclusive = 0;
write_unlock_irq(&grp->list_lock);
up_write(&grp->list_mutex);
if (!empty)
unsubscribe_port(client, port, grp, &subs->info, ack);
}
/* connect two ports */
int snd_seq_port_connect(struct snd_seq_client *connector,
struct snd_seq_client *src_client,
struct snd_seq_client_port *src_port,
struct snd_seq_client *dest_client,
struct snd_seq_client_port *dest_port,
struct snd_seq_port_subscribe *info)
{
struct snd_seq_subscribers *subs;
bool exclusive;
int err;
subs = kzalloc(sizeof(*subs), GFP_KERNEL);
if (!subs)
return -ENOMEM;
subs->info = *info;
atomic_set(&subs->ref_count, 0);
INIT_LIST_HEAD(&subs->src_list);
INIT_LIST_HEAD(&subs->dest_list);
exclusive = !!(info->flags & SNDRV_SEQ_PORT_SUBS_EXCLUSIVE);
err = check_and_subscribe_port(src_client, src_port, subs, true,
exclusive,
connector->number != src_client->number);
if (err < 0)
goto error;
err = check_and_subscribe_port(dest_client, dest_port, subs, false,
exclusive,
connector->number != dest_client->number);
if (err < 0)
goto error_dest;
return 0;
error_dest:
delete_and_unsubscribe_port(src_client, src_port, subs, true,
connector->number != src_client->number);
error:
kfree(subs);
return err;
}
/* remove the connection */
int snd_seq_port_disconnect(struct snd_seq_client *connector,
struct snd_seq_client *src_client,
struct snd_seq_client_port *src_port,
struct snd_seq_client *dest_client,
struct snd_seq_client_port *dest_port,
struct snd_seq_port_subscribe *info)
{
struct snd_seq_port_subs_info *src = &src_port->c_src;
struct snd_seq_subscribers *subs;
int err = -ENOENT;
down_write(&src->list_mutex);
/* look for the connection */
list_for_each_entry(subs, &src->list_head, src_list) {
if (match_subs_info(info, &subs->info)) {
atomic_dec(&subs->ref_count); /* mark as not ready */
err = 0;
break;
}
}
up_write(&src->list_mutex);
if (err < 0)
return err;
delete_and_unsubscribe_port(src_client, src_port, subs, true,
connector->number != src_client->number);
delete_and_unsubscribe_port(dest_client, dest_port, subs, false,
connector->number != dest_client->number);
kfree(subs);
return 0;
}
/* get matched subscriber */
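/* NOTE: the returned pointer is not reference counted; it may only be
* dereferenced for as long as the caller keeps the subscription from being
* removed concurrently. */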
struct snd_seq_subscribers *snd_seq_port_get_subscription(struct snd_seq_port_subs_info *src_grp,
struct snd_seq_addr *dest_addr)
{
struct snd_seq_subscribers *s, *found = NULL;
down_read(&src_grp->list_mutex);
list_for_each_entry(s, &src_grp->list_head, src_list) {
if (addr_match(dest_addr, &s->info.dest)) {
found = s;
break;
}
}
up_read(&src_grp->list_mutex);
return found;
}
/*
* Attach a device driver that wants to receive events from the
* sequencer. Returns the new port number on success.
* A driver that wants to receive the events converted to midi, will
* use snd_seq_midisynth_register_port().
*/
/* exported */
int snd_seq_event_port_attach(int client,
struct snd_seq_port_callback *pcbp,
int cap, int type, int midi_channels,
int midi_voices, char *portname)
{
struct snd_seq_port_info portinfo;
int ret;
/* Set up the port */
memset(&portinfo, 0, sizeof(portinfo));
portinfo.addr.client = client;
strlcpy(portinfo.name, portname ? portname : "Unnamed port",
sizeof(portinfo.name));
portinfo.capability = cap;
portinfo.type = type;
portinfo.kernel = pcbp;
portinfo.midi_channels = midi_channels;
portinfo.midi_voices = midi_voices;
/* Create it */
ret = snd_seq_kernel_client_ctl(client,
SNDRV_SEQ_IOCTL_CREATE_PORT,
&portinfo);
if (ret >= 0)
ret = portinfo.addr.port;
return ret;
}
EXPORT_SYMBOL(snd_seq_event_port_attach);
/*
* Detach the driver from a port.
*/
/* exported */
int snd_seq_event_port_detach(int client, int port)
{
struct snd_seq_port_info portinfo;
int err;
memset(&portinfo, 0, sizeof(portinfo));
portinfo.addr.client = client;
portinfo.addr.port = port;
err = snd_seq_kernel_client_ctl(client,
SNDRV_SEQ_IOCTL_DELETE_PORT,
&portinfo);
return err;
}
EXPORT_SYMBOL(snd_seq_event_port_detach);
| ./CrossVul/dataset_final_sorted/CWE-416/c/good_2851_1 |
crossvul-cpp_data_good_5021_8 | /*
* RAW sockets for IPv6
* Linux INET6 implementation
*
* Authors:
* Pedro Roque <roque@di.fc.ul.pt>
*
* Adapted from linux/net/ipv4/raw.c
*
* Fixes:
* Hideaki YOSHIFUJI : sin6_scope_id support
* YOSHIFUJI,H.@USAGI : raw checksum (RFC2292(bis) compliance)
* Kazunori MIYAZAWA @USAGI: change process style to use ip6_append_data
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/slab.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/icmpv6.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>
#include <linux/skbuff.h>
#include <linux/compat.h>
#include <linux/uaccess.h>
#include <asm/ioctls.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ipv6.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/addrconf.h>
#include <net/transp_v6.h>
#include <net/udp.h>
#include <net/inet_common.h>
#include <net/tcp_states.h>
#if IS_ENABLED(CONFIG_IPV6_MIP6)
#include <net/mip6.h>
#endif
#include <linux/mroute6.h>
#include <net/raw.h>
#include <net/rawv6.h>
#include <net/xfrm.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/export.h>
#define ICMPV6_HDRLEN 4 /* ICMPv6 header, RFC 4443 Section 2.1 */
static struct raw_hashinfo raw_v6_hashinfo = {
.lock = __RW_LOCK_UNLOCKED(raw_v6_hashinfo.lock),
};
static struct sock *__raw_v6_lookup(struct net *net, struct sock *sk,
unsigned short num, const struct in6_addr *loc_addr,
const struct in6_addr *rmt_addr, int dif)
{
bool is_multicast = ipv6_addr_is_multicast(loc_addr);
sk_for_each_from(sk)
if (inet_sk(sk)->inet_num == num) {
if (!net_eq(sock_net(sk), net))
continue;
if (!ipv6_addr_any(&sk->sk_v6_daddr) &&
!ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr))
continue;
if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)
continue;
if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr)) {
if (ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr))
goto found;
if (is_multicast &&
inet6_mc_check(sk, loc_addr, rmt_addr))
goto found;
continue;
}
goto found;
}
sk = NULL;
found:
return sk;
}
/*
* 0 - deliver
* 1 - block
*/
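/* The filter is a 256-bit per-type bitmap: data[type >> 5] selects one of
* eight 32-bit words and (1U << (type & 31)) the bit within it, e.g.
* ICMPV6_ECHO_REQUEST (type 128) is word 4, bit 0. */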
static int icmpv6_filter(const struct sock *sk, const struct sk_buff *skb)
{
struct icmp6hdr _hdr;
const struct icmp6hdr *hdr;
/* We require only the four bytes of the ICMPv6 header, not any
* additional bytes of message body in "struct icmp6hdr".
*/
hdr = skb_header_pointer(skb, skb_transport_offset(skb),
ICMPV6_HDRLEN, &_hdr);
if (hdr) {
const __u32 *data = &raw6_sk(sk)->filter.data[0];
unsigned int type = hdr->icmp6_type;
return (data[type >> 5] & (1U << (type & 31))) != 0;
}
return 1;
}
#if IS_ENABLED(CONFIG_IPV6_MIP6)
typedef int mh_filter_t(struct sock *sock, struct sk_buff *skb);
static mh_filter_t __rcu *mh_filter __read_mostly;
int rawv6_mh_filter_register(mh_filter_t filter)
{
rcu_assign_pointer(mh_filter, filter);
return 0;
}
EXPORT_SYMBOL(rawv6_mh_filter_register);
int rawv6_mh_filter_unregister(mh_filter_t filter)
{
RCU_INIT_POINTER(mh_filter, NULL);
synchronize_rcu();
return 0;
}
EXPORT_SYMBOL(rawv6_mh_filter_unregister);
#endif
/*
* demultiplex raw sockets.
* (should consider queueing the skb in the sock receive_queue
* without calling rawv6.c)
*
* Caller owns SKB so we must make clones.
*/
static bool ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
{
const struct in6_addr *saddr;
const struct in6_addr *daddr;
struct sock *sk;
bool delivered = false;
__u8 hash;
struct net *net;
saddr = &ipv6_hdr(skb)->saddr;
daddr = saddr + 1;
hash = nexthdr & (RAW_HTABLE_SIZE - 1);
read_lock(&raw_v6_hashinfo.lock);
sk = sk_head(&raw_v6_hashinfo.ht[hash]);
if (!sk)
goto out;
net = dev_net(skb->dev);
sk = __raw_v6_lookup(net, sk, nexthdr, daddr, saddr, inet6_iif(skb));
while (sk) {
int filtered;
delivered = true;
switch (nexthdr) {
case IPPROTO_ICMPV6:
filtered = icmpv6_filter(sk, skb);
break;
#if IS_ENABLED(CONFIG_IPV6_MIP6)
case IPPROTO_MH:
{
/* XXX: To validate MH only once for each packet, the
* check is placed here, although it really belongs
* after the xfrm policy check. That check is done
* in rawv6_rcv() instead, because it is required
* for each socket.
*/
mh_filter_t *filter;
filter = rcu_dereference(mh_filter);
filtered = filter ? (*filter)(sk, skb) : 0;
break;
}
#endif
default:
filtered = 0;
break;
}
if (filtered < 0)
break;
if (filtered == 0) {
struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
/* Not releasing hash table! */
if (clone) {
nf_reset(clone);
rawv6_rcv(sk, clone);
}
}
sk = __raw_v6_lookup(net, sk_next(sk), nexthdr, daddr, saddr,
inet6_iif(skb));
}
out:
read_unlock(&raw_v6_hashinfo.lock);
return delivered;
}
bool raw6_local_deliver(struct sk_buff *skb, int nexthdr)
{
struct sock *raw_sk;
raw_sk = sk_head(&raw_v6_hashinfo.ht[nexthdr & (RAW_HTABLE_SIZE - 1)]);
if (raw_sk && !ipv6_raw_deliver(skb, nexthdr))
raw_sk = NULL;
return raw_sk != NULL;
}
/* This cleans up af_inet6 a bit. -DaveM */
static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
struct inet_sock *inet = inet_sk(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
struct sockaddr_in6 *addr = (struct sockaddr_in6 *) uaddr;
__be32 v4addr = 0;
int addr_type;
int err;
if (addr_len < SIN6_LEN_RFC2133)
return -EINVAL;
if (addr->sin6_family != AF_INET6)
return -EINVAL;
addr_type = ipv6_addr_type(&addr->sin6_addr);
/* Raw sockets are IPv6 only */
if (addr_type == IPV6_ADDR_MAPPED)
return -EADDRNOTAVAIL;
lock_sock(sk);
err = -EINVAL;
if (sk->sk_state != TCP_CLOSE)
goto out;
rcu_read_lock();
/* Check if the address belongs to the host. */
if (addr_type != IPV6_ADDR_ANY) {
struct net_device *dev = NULL;
if (__ipv6_addr_needs_scope_id(addr_type)) {
if (addr_len >= sizeof(struct sockaddr_in6) &&
addr->sin6_scope_id) {
/* Override any existing binding, if another
* one is supplied by user.
*/
sk->sk_bound_dev_if = addr->sin6_scope_id;
}
/* Binding to link-local address requires an interface */
if (!sk->sk_bound_dev_if)
goto out_unlock;
err = -ENODEV;
dev = dev_get_by_index_rcu(sock_net(sk),
sk->sk_bound_dev_if);
if (!dev)
goto out_unlock;
}
/* ipv4 addr of the socket is invalid. Only the
* unspecified and mapped addresses have a v4 equivalent.
*/
v4addr = LOOPBACK4_IPV6;
if (!(addr_type & IPV6_ADDR_MULTICAST) &&
!sock_net(sk)->ipv6.sysctl.ip_nonlocal_bind) {
err = -EADDRNOTAVAIL;
if (!ipv6_chk_addr(sock_net(sk), &addr->sin6_addr,
dev, 0)) {
goto out_unlock;
}
}
}
inet->inet_rcv_saddr = inet->inet_saddr = v4addr;
sk->sk_v6_rcv_saddr = addr->sin6_addr;
if (!(addr_type & IPV6_ADDR_MULTICAST))
np->saddr = addr->sin6_addr;
err = 0;
out_unlock:
rcu_read_unlock();
out:
release_sock(sk);
return err;
}
static void rawv6_err(struct sock *sk, struct sk_buff *skb,
struct inet6_skb_parm *opt,
u8 type, u8 code, int offset, __be32 info)
{
struct inet_sock *inet = inet_sk(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
int err;
int harderr;
/* Report error on raw socket, if:
1. User requested recverr.
2. Socket is connected (otherwise the error indication
is useless without recverr and error is hard.
*/
if (!np->recverr && sk->sk_state != TCP_ESTABLISHED)
return;
harderr = icmpv6_err_convert(type, code, &err);
if (type == ICMPV6_PKT_TOOBIG) {
ip6_sk_update_pmtu(skb, sk, info);
harderr = (np->pmtudisc == IPV6_PMTUDISC_DO);
}
if (type == NDISC_REDIRECT) {
ip6_sk_redirect(skb, sk);
return;
}
if (np->recverr) {
u8 *payload = skb->data;
if (!inet->hdrincl)
payload += offset;
ipv6_icmp_error(sk, skb, err, 0, ntohl(info), payload);
}
if (np->recverr || harderr) {
sk->sk_err = err;
sk->sk_error_report(sk);
}
}
void raw6_icmp_error(struct sk_buff *skb, int nexthdr,
u8 type, u8 code, int inner_offset, __be32 info)
{
struct sock *sk;
int hash;
const struct in6_addr *saddr, *daddr;
struct net *net;
hash = nexthdr & (RAW_HTABLE_SIZE - 1);
read_lock(&raw_v6_hashinfo.lock);
sk = sk_head(&raw_v6_hashinfo.ht[hash]);
if (sk) {
/* Note: ipv6_hdr(skb) != skb->data */
const struct ipv6hdr *ip6h = (const struct ipv6hdr *)skb->data;
saddr = &ip6h->saddr;
daddr = &ip6h->daddr;
net = dev_net(skb->dev);
while ((sk = __raw_v6_lookup(net, sk, nexthdr, saddr, daddr,
inet6_iif(skb)))) {
rawv6_err(sk, skb, NULL, type, code,
inner_offset, info);
sk = sk_next(sk);
}
}
read_unlock(&raw_v6_hashinfo.lock);
}
static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
skb_checksum_complete(skb)) {
atomic_inc(&sk->sk_drops);
kfree_skb(skb);
return NET_RX_DROP;
}
/* Charge it to the socket. */
skb_dst_drop(skb);
if (sock_queue_rcv_skb(sk, skb) < 0) {
kfree_skb(skb);
return NET_RX_DROP;
}
return 0;
}
/*
* This is next to useless...
* if we demultiplex in network layer we don't need the extra call
* just to queue the skb...
* maybe we could have the network decide upon a hint if it
* should call raw_rcv for demultiplexing
*/
int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
{
struct inet_sock *inet = inet_sk(sk);
struct raw6_sock *rp = raw6_sk(sk);
if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
atomic_inc(&sk->sk_drops);
kfree_skb(skb);
return NET_RX_DROP;
}
if (!rp->checksum)
skb->ip_summed = CHECKSUM_UNNECESSARY;
if (skb->ip_summed == CHECKSUM_COMPLETE) {
skb_postpull_rcsum(skb, skb_network_header(skb),
skb_network_header_len(skb));
if (!csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr,
skb->len, inet->inet_num, skb->csum))
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
if (!skb_csum_unnecessary(skb))
skb->csum = ~csum_unfold(csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr,
skb->len,
inet->inet_num, 0));
if (inet->hdrincl) {
if (skb_checksum_complete(skb)) {
atomic_inc(&sk->sk_drops);
kfree_skb(skb);
return NET_RX_DROP;
}
}
rawv6_rcv_skb(sk, skb);
return 0;
}
/*
* This should be easy, if there is something there
* we return it, otherwise we block.
*/
static int rawv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
int noblock, int flags, int *addr_len)
{
struct ipv6_pinfo *np = inet6_sk(sk);
DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
struct sk_buff *skb;
size_t copied;
int err;
if (flags & MSG_OOB)
return -EOPNOTSUPP;
if (flags & MSG_ERRQUEUE)
return ipv6_recv_error(sk, msg, len, addr_len);
if (np->rxpmtu && np->rxopt.bits.rxpmtu)
return ipv6_recv_rxpmtu(sk, msg, len, addr_len);
skb = skb_recv_datagram(sk, flags, noblock, &err);
if (!skb)
goto out;
copied = skb->len;
if (copied > len) {
copied = len;
msg->msg_flags |= MSG_TRUNC;
}
if (skb_csum_unnecessary(skb)) {
err = skb_copy_datagram_msg(skb, 0, msg, copied);
} else if (msg->msg_flags&MSG_TRUNC) {
if (__skb_checksum_complete(skb))
goto csum_copy_err;
err = skb_copy_datagram_msg(skb, 0, msg, copied);
} else {
err = skb_copy_and_csum_datagram_msg(skb, 0, msg);
if (err == -EINVAL)
goto csum_copy_err;
}
if (err)
goto out_free;
/* Copy the address. */
if (sin6) {
sin6->sin6_family = AF_INET6;
sin6->sin6_port = 0;
sin6->sin6_addr = ipv6_hdr(skb)->saddr;
sin6->sin6_flowinfo = 0;
sin6->sin6_scope_id = ipv6_iface_scope_id(&sin6->sin6_addr,
inet6_iif(skb));
*addr_len = sizeof(*sin6);
}
sock_recv_ts_and_drops(msg, sk, skb);
if (np->rxopt.all)
ip6_datagram_recv_ctl(sk, msg, skb);
err = copied;
if (flags & MSG_TRUNC)
err = skb->len;
out_free:
skb_free_datagram(sk, skb);
out:
return err;
csum_copy_err:
skb_kill_datagram(sk, skb, flags);
/* Error for blocking case is chosen to masquerade
as some normal condition.
*/
err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
goto out;
}
static int rawv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
struct raw6_sock *rp)
{
struct sk_buff *skb;
int err = 0;
int offset;
int len;
int total_len;
__wsum tmp_csum;
__sum16 csum;
if (!rp->checksum)
goto send;
skb = skb_peek(&sk->sk_write_queue);
if (!skb)
goto out;
offset = rp->offset;
total_len = inet_sk(sk)->cork.base.length;
if (offset >= total_len - 1) {
err = -EINVAL;
ip6_flush_pending_frames(sk);
goto out;
}
/* FIXME: should also check the hardware checksum here. --miyazawa */
if (skb_queue_len(&sk->sk_write_queue) == 1) {
/*
* Only one fragment on the socket.
*/
tmp_csum = skb->csum;
} else {
struct sk_buff *csum_skb = NULL;
tmp_csum = 0;
skb_queue_walk(&sk->sk_write_queue, skb) {
tmp_csum = csum_add(tmp_csum, skb->csum);
if (csum_skb)
continue;
len = skb->len - skb_transport_offset(skb);
if (offset >= len) {
offset -= len;
continue;
}
csum_skb = skb;
}
skb = csum_skb;
}
offset += skb_transport_offset(skb);
BUG_ON(skb_copy_bits(skb, offset, &csum, 2));
/* in case cksum was not initialized */
if (unlikely(csum))
tmp_csum = csum_sub(tmp_csum, csum_unfold(csum));
csum = csum_ipv6_magic(&fl6->saddr, &fl6->daddr,
total_len, fl6->flowi6_proto, tmp_csum);
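/* Per RFC 768, a transmitted UDP checksum of zero means "no checksum",
* so a computed value of zero is sent as all ones instead. */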
if (csum == 0 && fl6->flowi6_proto == IPPROTO_UDP)
csum = CSUM_MANGLED_0;
BUG_ON(skb_store_bits(skb, offset, &csum, 2));
send:
err = ip6_push_pending_frames(sk);
out:
return err;
}
static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length,
struct flowi6 *fl6, struct dst_entry **dstp,
unsigned int flags)
{
struct ipv6_pinfo *np = inet6_sk(sk);
struct net *net = sock_net(sk);
struct ipv6hdr *iph;
struct sk_buff *skb;
int err;
struct rt6_info *rt = (struct rt6_info *)*dstp;
int hlen = LL_RESERVED_SPACE(rt->dst.dev);
int tlen = rt->dst.dev->needed_tailroom;
if (length > rt->dst.dev->mtu) {
ipv6_local_error(sk, EMSGSIZE, fl6, rt->dst.dev->mtu);
return -EMSGSIZE;
}
if (flags&MSG_PROBE)
goto out;
skb = sock_alloc_send_skb(sk,
length + hlen + tlen + 15,
flags & MSG_DONTWAIT, &err);
if (!skb)
goto error;
skb_reserve(skb, hlen);
skb->protocol = htons(ETH_P_IPV6);
skb->priority = sk->sk_priority;
skb->mark = sk->sk_mark;
skb_dst_set(skb, &rt->dst);
*dstp = NULL;
skb_put(skb, length);
skb_reset_network_header(skb);
iph = ipv6_hdr(skb);
skb->ip_summed = CHECKSUM_NONE;
skb->transport_header = skb->network_header;
err = memcpy_from_msg(iph, msg, length);
if (err)
goto error_fault;
IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk, skb,
NULL, rt->dst.dev, dst_output);
if (err > 0)
err = net_xmit_errno(err);
if (err)
goto error;
out:
return 0;
error_fault:
err = -EFAULT;
kfree_skb(skb);
error:
IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
if (err == -ENOBUFS && !np->recverr)
err = 0;
return err;
}
struct raw6_frag_vec {
struct msghdr *msg;
int hlen;
char c[4];
};
static int rawv6_probe_proto_opt(struct raw6_frag_vec *rfv, struct flowi6 *fl6)
{
int err = 0;
switch (fl6->flowi6_proto) {
case IPPROTO_ICMPV6:
rfv->hlen = 2;
err = memcpy_from_msg(rfv->c, rfv->msg, rfv->hlen);
if (!err) {
fl6->fl6_icmp_type = rfv->c[0];
fl6->fl6_icmp_code = rfv->c[1];
}
break;
case IPPROTO_MH:
rfv->hlen = 4;
err = memcpy_from_msg(rfv->c, rfv->msg, rfv->hlen);
if (!err)
fl6->fl6_mh_type = rfv->c[2];
}
return err;
}
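/* The first rfv->hlen bytes were already pulled out of the msghdr by
* rawv6_probe_proto_opt() above (to peek at the ICMPv6 type/code or the
* MH type), so raw6_getfrag() replays them from the rfv->c copy and hands
* the remainder to ip_generic_getfrag(). */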
static int raw6_getfrag(void *from, char *to, int offset, int len, int odd,
struct sk_buff *skb)
{
struct raw6_frag_vec *rfv = from;
if (offset < rfv->hlen) {
int copy = min(rfv->hlen - offset, len);
if (skb->ip_summed == CHECKSUM_PARTIAL)
memcpy(to, rfv->c + offset, copy);
else
skb->csum = csum_block_add(
skb->csum,
csum_partial_copy_nocheck(rfv->c + offset,
to, copy, 0),
odd);
odd = 0;
offset += copy;
to += copy;
len -= copy;
if (!len)
return 0;
}
offset -= rfv->hlen;
return ip_generic_getfrag(rfv->msg, to, offset, len, odd, skb);
}
static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
struct ipv6_txoptions *opt_to_free = NULL;
struct ipv6_txoptions opt_space;
DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
struct in6_addr *daddr, *final_p, final;
struct inet_sock *inet = inet_sk(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
struct raw6_sock *rp = raw6_sk(sk);
struct ipv6_txoptions *opt = NULL;
struct ip6_flowlabel *flowlabel = NULL;
struct dst_entry *dst = NULL;
struct raw6_frag_vec rfv;
struct flowi6 fl6;
int addr_len = msg->msg_namelen;
int hlimit = -1;
int tclass = -1;
int dontfrag = -1;
u16 proto;
int err;
/* Rough check on arithmetic overflow,
better check is made in ip6_append_data().
*/
if (len > INT_MAX)
return -EMSGSIZE;
/* Mirror BSD error message compatibility */
if (msg->msg_flags & MSG_OOB)
return -EOPNOTSUPP;
/*
* Get and verify the address.
*/
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_mark = sk->sk_mark;
if (sin6) {
if (addr_len < SIN6_LEN_RFC2133)
return -EINVAL;
if (sin6->sin6_family && sin6->sin6_family != AF_INET6)
return -EAFNOSUPPORT;
/* port is the proto value [0..255] carried in nexthdr */
proto = ntohs(sin6->sin6_port);
if (!proto)
proto = inet->inet_num;
else if (proto != inet->inet_num)
return -EINVAL;
if (proto > 255)
return -EINVAL;
daddr = &sin6->sin6_addr;
if (np->sndflow) {
fl6.flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK;
if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
if (!flowlabel)
return -EINVAL;
}
}
/*
* Otherwise it will be difficult to maintain
* sk->sk_dst_cache.
*/
if (sk->sk_state == TCP_ESTABLISHED &&
ipv6_addr_equal(daddr, &sk->sk_v6_daddr))
daddr = &sk->sk_v6_daddr;
if (addr_len >= sizeof(struct sockaddr_in6) &&
sin6->sin6_scope_id &&
__ipv6_addr_needs_scope_id(__ipv6_addr_type(daddr)))
fl6.flowi6_oif = sin6->sin6_scope_id;
} else {
if (sk->sk_state != TCP_ESTABLISHED)
return -EDESTADDRREQ;
proto = inet->inet_num;
daddr = &sk->sk_v6_daddr;
fl6.flowlabel = np->flow_label;
}
if (fl6.flowi6_oif == 0)
fl6.flowi6_oif = sk->sk_bound_dev_if;
if (msg->msg_controllen) {
opt = &opt_space;
memset(opt, 0, sizeof(struct ipv6_txoptions));
opt->tot_len = sizeof(struct ipv6_txoptions);
err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
&hlimit, &tclass, &dontfrag);
if (err < 0) {
fl6_sock_release(flowlabel);
return err;
}
if ((fl6.flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) {
flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
if (!flowlabel)
return -EINVAL;
}
if (!(opt->opt_nflen|opt->opt_flen))
opt = NULL;
}
if (!opt) {
opt = txopt_get(np);
opt_to_free = opt;
}
if (flowlabel)
opt = fl6_merge_options(&opt_space, flowlabel, opt);
opt = ipv6_fixup_options(&opt_space, opt);
fl6.flowi6_proto = proto;
rfv.msg = msg;
rfv.hlen = 0;
err = rawv6_probe_proto_opt(&rfv, &fl6);
if (err)
goto out;
if (!ipv6_addr_any(daddr))
fl6.daddr = *daddr;
else
fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr))
fl6.saddr = np->saddr;
final_p = fl6_update_dst(&fl6, opt, &final);
if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
fl6.flowi6_oif = np->mcast_oif;
else if (!fl6.flowi6_oif)
fl6.flowi6_oif = np->ucast_oif;
security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
if (inet->hdrincl)
fl6.flowi6_flags |= FLOWI_FLAG_KNOWN_NH;
dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
if (IS_ERR(dst)) {
err = PTR_ERR(dst);
goto out;
}
if (hlimit < 0)
hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
if (tclass < 0)
tclass = np->tclass;
if (dontfrag < 0)
dontfrag = np->dontfrag;
if (msg->msg_flags&MSG_CONFIRM)
goto do_confirm;
back_from_confirm:
if (inet->hdrincl)
err = rawv6_send_hdrinc(sk, msg, len, &fl6, &dst, msg->msg_flags);
else {
lock_sock(sk);
err = ip6_append_data(sk, raw6_getfrag, &rfv,
len, 0, hlimit, tclass, opt, &fl6, (struct rt6_info *)dst,
msg->msg_flags, dontfrag);
if (err)
ip6_flush_pending_frames(sk);
else if (!(msg->msg_flags & MSG_MORE))
err = rawv6_push_pending_frames(sk, &fl6, rp);
release_sock(sk);
}
done:
dst_release(dst);
out:
fl6_sock_release(flowlabel);
txopt_put(opt_to_free);
return err < 0 ? err : len;
do_confirm:
dst_confirm(dst);
if (!(msg->msg_flags & MSG_PROBE) || len)
goto back_from_confirm;
err = 0;
goto done;
}
static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
char __user *optval, int optlen)
{
switch (optname) {
case ICMPV6_FILTER:
if (optlen > sizeof(struct icmp6_filter))
optlen = sizeof(struct icmp6_filter);
if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
return -EFAULT;
return 0;
default:
return -ENOPROTOOPT;
}
return 0;
}
static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
int len;
switch (optname) {
case ICMPV6_FILTER:
if (get_user(len, optlen))
return -EFAULT;
if (len < 0)
return -EINVAL;
if (len > sizeof(struct icmp6_filter))
len = sizeof(struct icmp6_filter);
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
return -EFAULT;
return 0;
default:
return -ENOPROTOOPT;
}
return 0;
}
static int do_rawv6_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, unsigned int optlen)
{
struct raw6_sock *rp = raw6_sk(sk);
int val;
if (get_user(val, (int __user *)optval))
return -EFAULT;
switch (optname) {
case IPV6_CHECKSUM:
if (inet_sk(sk)->inet_num == IPPROTO_ICMPV6 &&
level == IPPROTO_IPV6) {
/*
* RFC3542 tells that IPV6_CHECKSUM socket
* option in the IPPROTO_IPV6 level is not
* allowed on ICMPv6 sockets.
* If you want to set it, use IPPROTO_RAW
* level IPV6_CHECKSUM socket option
* (Linux extension).
*/
return -EINVAL;
}
/* You may get strange results with a positive odd offset;
RFC2292bis agrees with me. */
if (val > 0 && (val&1))
return -EINVAL;
if (val < 0) {
rp->checksum = 0;
} else {
rp->checksum = 1;
rp->offset = val;
}
return 0;
default:
return -ENOPROTOOPT;
}
}
static int rawv6_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, unsigned int optlen)
{
switch (level) {
case SOL_RAW:
break;
case SOL_ICMPV6:
if (inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
return -EOPNOTSUPP;
return rawv6_seticmpfilter(sk, level, optname, optval, optlen);
case SOL_IPV6:
if (optname == IPV6_CHECKSUM)
break;
default:
return ipv6_setsockopt(sk, level, optname, optval, optlen);
}
return do_rawv6_setsockopt(sk, level, optname, optval, optlen);
}
#ifdef CONFIG_COMPAT
static int compat_rawv6_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, unsigned int optlen)
{
switch (level) {
case SOL_RAW:
break;
case SOL_ICMPV6:
if (inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
return -EOPNOTSUPP;
return rawv6_seticmpfilter(sk, level, optname, optval, optlen);
case SOL_IPV6:
if (optname == IPV6_CHECKSUM)
break;
default:
return compat_ipv6_setsockopt(sk, level, optname,
optval, optlen);
}
return do_rawv6_setsockopt(sk, level, optname, optval, optlen);
}
#endif
static int do_rawv6_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
struct raw6_sock *rp = raw6_sk(sk);
int val, len;
if (get_user(len, optlen))
return -EFAULT;
switch (optname) {
case IPV6_CHECKSUM:
/*
* We allow getsockopt() for IPPROTO_IPV6-level
* IPV6_CHECKSUM socket option on ICMPv6 sockets
* since RFC3542 is silent about it.
*/
if (rp->checksum == 0)
val = -1;
else
val = rp->offset;
break;
default:
return -ENOPROTOOPT;
}
len = min_t(unsigned int, sizeof(int), len);
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval, &val, len))
return -EFAULT;
return 0;
}
static int rawv6_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
switch (level) {
case SOL_RAW:
break;
case SOL_ICMPV6:
if (inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
return -EOPNOTSUPP;
return rawv6_geticmpfilter(sk, level, optname, optval, optlen);
case SOL_IPV6:
if (optname == IPV6_CHECKSUM)
break;
default:
return ipv6_getsockopt(sk, level, optname, optval, optlen);
}
return do_rawv6_getsockopt(sk, level, optname, optval, optlen);
}
#ifdef CONFIG_COMPAT
static int compat_rawv6_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
switch (level) {
case SOL_RAW:
break;
case SOL_ICMPV6:
if (inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
return -EOPNOTSUPP;
return rawv6_geticmpfilter(sk, level, optname, optval, optlen);
case SOL_IPV6:
if (optname == IPV6_CHECKSUM)
break;
default:
return compat_ipv6_getsockopt(sk, level, optname,
optval, optlen);
}
return do_rawv6_getsockopt(sk, level, optname, optval, optlen);
}
#endif
static int rawv6_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
switch (cmd) {
case SIOCOUTQ: {
int amount = sk_wmem_alloc_get(sk);
return put_user(amount, (int __user *)arg);
}
case SIOCINQ: {
struct sk_buff *skb;
int amount = 0;
spin_lock_bh(&sk->sk_receive_queue.lock);
skb = skb_peek(&sk->sk_receive_queue);
if (skb)
amount = skb_tail_pointer(skb) -
skb_transport_header(skb);
spin_unlock_bh(&sk->sk_receive_queue.lock);
return put_user(amount, (int __user *)arg);
}
default:
#ifdef CONFIG_IPV6_MROUTE
return ip6mr_ioctl(sk, cmd, (void __user *)arg);
#else
return -ENOIOCTLCMD;
#endif
}
}
#ifdef CONFIG_COMPAT
static int compat_rawv6_ioctl(struct sock *sk, unsigned int cmd, unsigned long arg)
{
switch (cmd) {
case SIOCOUTQ:
case SIOCINQ:
return -ENOIOCTLCMD;
default:
#ifdef CONFIG_IPV6_MROUTE
return ip6mr_compat_ioctl(sk, cmd, compat_ptr(arg));
#else
return -ENOIOCTLCMD;
#endif
}
}
#endif
static void rawv6_close(struct sock *sk, long timeout)
{
if (inet_sk(sk)->inet_num == IPPROTO_RAW)
ip6_ra_control(sk, -1);
ip6mr_sk_done(sk);
sk_common_release(sk);
}
static void raw6_destroy(struct sock *sk)
{
lock_sock(sk);
ip6_flush_pending_frames(sk);
release_sock(sk);
inet6_destroy_sock(sk);
}
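/* The kernel checksums ICMPv6 and MH packets by default: the ICMPv6
* checksum field sits at offset 2 of its header (RFC 4443) and the
* Mobility Header checksum at offset 4 (RFC 6275), hence the rp->offset
* defaults below. */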
static int rawv6_init_sk(struct sock *sk)
{
struct raw6_sock *rp = raw6_sk(sk);
switch (inet_sk(sk)->inet_num) {
case IPPROTO_ICMPV6:
rp->checksum = 1;
rp->offset = 2;
break;
case IPPROTO_MH:
rp->checksum = 1;
rp->offset = 4;
break;
default:
break;
}
return 0;
}
struct proto rawv6_prot = {
.name = "RAWv6",
.owner = THIS_MODULE,
.close = rawv6_close,
.destroy = raw6_destroy,
.connect = ip6_datagram_connect_v6_only,
.disconnect = udp_disconnect,
.ioctl = rawv6_ioctl,
.init = rawv6_init_sk,
.setsockopt = rawv6_setsockopt,
.getsockopt = rawv6_getsockopt,
.sendmsg = rawv6_sendmsg,
.recvmsg = rawv6_recvmsg,
.bind = rawv6_bind,
.backlog_rcv = rawv6_rcv_skb,
.hash = raw_hash_sk,
.unhash = raw_unhash_sk,
.obj_size = sizeof(struct raw6_sock),
.h.raw_hash = &raw_v6_hashinfo,
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_rawv6_setsockopt,
.compat_getsockopt = compat_rawv6_getsockopt,
.compat_ioctl = compat_rawv6_ioctl,
#endif
};
#ifdef CONFIG_PROC_FS
static int raw6_seq_show(struct seq_file *seq, void *v)
{
if (v == SEQ_START_TOKEN) {
seq_puts(seq, IPV6_SEQ_DGRAM_HEADER);
} else {
struct sock *sp = v;
__u16 srcp = inet_sk(sp)->inet_num;
ip6_dgram_sock_seq_show(seq, v, srcp, 0,
raw_seq_private(seq)->bucket);
}
return 0;
}
static const struct seq_operations raw6_seq_ops = {
.start = raw_seq_start,
.next = raw_seq_next,
.stop = raw_seq_stop,
.show = raw6_seq_show,
};
static int raw6_seq_open(struct inode *inode, struct file *file)
{
return raw_seq_open(inode, file, &raw_v6_hashinfo, &raw6_seq_ops);
}
static const struct file_operations raw6_seq_fops = {
.owner = THIS_MODULE,
.open = raw6_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_net,
};
static int __net_init raw6_init_net(struct net *net)
{
if (!proc_create("raw6", S_IRUGO, net->proc_net, &raw6_seq_fops))
return -ENOMEM;
return 0;
}
static void __net_exit raw6_exit_net(struct net *net)
{
remove_proc_entry("raw6", net->proc_net);
}
static struct pernet_operations raw6_net_ops = {
.init = raw6_init_net,
.exit = raw6_exit_net,
};
int __init raw6_proc_init(void)
{
return register_pernet_subsys(&raw6_net_ops);
}
void raw6_proc_exit(void)
{
unregister_pernet_subsys(&raw6_net_ops);
}
#endif /* CONFIG_PROC_FS */
/* Same as inet6_dgram_ops, sans udp_poll. */
static const struct proto_ops inet6_sockraw_ops = {
.family = PF_INET6,
.owner = THIS_MODULE,
.release = inet6_release,
.bind = inet6_bind,
.connect = inet_dgram_connect, /* ok */
.socketpair = sock_no_socketpair, /* a do nothing */
.accept = sock_no_accept, /* a do nothing */
.getname = inet6_getname,
.poll = datagram_poll, /* ok */
.ioctl = inet6_ioctl, /* must change */
.listen = sock_no_listen, /* ok */
.shutdown = inet_shutdown, /* ok */
.setsockopt = sock_common_setsockopt, /* ok */
.getsockopt = sock_common_getsockopt, /* ok */
.sendmsg = inet_sendmsg, /* ok */
.recvmsg = sock_common_recvmsg, /* ok */
.mmap = sock_no_mmap,
.sendpage = sock_no_sendpage,
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_sock_common_setsockopt,
.compat_getsockopt = compat_sock_common_getsockopt,
#endif
};
static struct inet_protosw rawv6_protosw = {
.type = SOCK_RAW,
.protocol = IPPROTO_IP, /* wild card */
.prot = &rawv6_prot,
.ops = &inet6_sockraw_ops,
.flags = INET_PROTOSW_REUSE,
};
int __init rawv6_init(void)
{
return inet6_register_protosw(&rawv6_protosw);
}
void rawv6_exit(void)
{
inet6_unregister_protosw(&rawv6_protosw);
}
| ./CrossVul/dataset_final_sorted/CWE-416/c/good_5021_8 |
crossvul-cpp_data_bad_1660_0 | /*
* Copyright (c) 2001-2002 Michael David Adams.
* All rights reserved.
*/
/* __START_OF_JASPER_LICENSE__
*
* JasPer License Version 2.0
*
* Copyright (c) 2001-2006 Michael David Adams
* Copyright (c) 1999-2000 Image Power, Inc.
* Copyright (c) 1999-2000 The University of British Columbia
*
* All rights reserved.
*
* Permission is hereby granted, free of charge, to any person (the
* "User") obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without restriction,
* including without limitation the rights to use, copy, modify, merge,
* publish, distribute, and/or sell copies of the Software, and to permit
* persons to whom the Software is furnished to do so, subject to the
* following conditions:
*
* 1. The above copyright notices and this permission notice (which
* includes the disclaimer below) shall be included in all copies or
* substantial portions of the Software.
*
* 2. The name of a copyright holder shall not be used to endorse or
* promote products derived from the Software without specific prior
* written permission.
*
* THIS DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS
* LICENSE. NO USE OF THE SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER
* THIS DISCLAIMER. THE SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS
* "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
* BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
* PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO
* EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL
* INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING
* FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
* NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
* WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. NO ASSURANCES ARE
* PROVIDED BY THE COPYRIGHT HOLDERS THAT THE SOFTWARE DOES NOT INFRINGE
* THE PATENT OR OTHER INTELLECTUAL PROPERTY RIGHTS OF ANY OTHER ENTITY.
* EACH COPYRIGHT HOLDER DISCLAIMS ANY LIABILITY TO THE USER FOR CLAIMS
* BROUGHT BY ANY OTHER ENTITY BASED ON INFRINGEMENT OF INTELLECTUAL
* PROPERTY RIGHTS OR OTHERWISE. AS A CONDITION TO EXERCISING THE RIGHTS
* GRANTED HEREUNDER, EACH USER HEREBY ASSUMES SOLE RESPONSIBILITY TO SECURE
* ANY OTHER INTELLECTUAL PROPERTY RIGHTS NEEDED, IF ANY. THE SOFTWARE
* IS NOT FAULT-TOLERANT AND IS NOT INTENDED FOR USE IN MISSION-CRITICAL
* SYSTEMS, SUCH AS THOSE USED IN THE OPERATION OF NUCLEAR FACILITIES,
* AIRCRAFT NAVIGATION OR COMMUNICATION SYSTEMS, AIR TRAFFIC CONTROL
* SYSTEMS, DIRECT LIFE SUPPORT MACHINES, OR WEAPONS SYSTEMS, IN WHICH
* THE FAILURE OF THE SOFTWARE OR SYSTEM COULD LEAD DIRECTLY TO DEATH,
* PERSONAL INJURY, OR SEVERE PHYSICAL OR ENVIRONMENTAL DAMAGE ("HIGH
* RISK ACTIVITIES"). THE COPYRIGHT HOLDERS SPECIFICALLY DISCLAIM ANY
* EXPRESS OR IMPLIED WARRANTY OF FITNESS FOR HIGH RISK ACTIVITIES.
*
* __END_OF_JASPER_LICENSE__
*/
/******************************************************************************\
* Includes.
\******************************************************************************/
#include <assert.h>
#include "jasper/jas_tvp.h"
#include "jasper/jas_stream.h"
#include "jasper/jas_image.h"
#include "jasper/jas_string.h"
#include "jasper/jas_malloc.h"
#include "mif_cod.h"
/******************************************************************************\
* Local types.
\******************************************************************************/
typedef enum {
MIF_END = 0,
MIF_CMPT
} mif_tagid2_t;
typedef enum {
MIF_TLX = 0,
MIF_TLY,
MIF_WIDTH,
MIF_HEIGHT,
MIF_HSAMP,
MIF_VSAMP,
MIF_PREC,
MIF_SGND,
MIF_DATA
} mif_tagid_t;
/******************************************************************************\
* Local functions.
\******************************************************************************/
static mif_hdr_t *mif_hdr_create(int maxcmpts);
static void mif_hdr_destroy(mif_hdr_t *hdr);
static int mif_hdr_growcmpts(mif_hdr_t *hdr, int maxcmpts);
static mif_hdr_t *mif_hdr_get(jas_stream_t *in);
static int mif_process_cmpt(mif_hdr_t *hdr, char *buf);
static int mif_hdr_put(mif_hdr_t *hdr, jas_stream_t *out);
static int mif_hdr_addcmpt(mif_hdr_t *hdr, int cmptno, mif_cmpt_t *cmpt);
static mif_cmpt_t *mif_cmpt_create(void);
static void mif_cmpt_destroy(mif_cmpt_t *cmpt);
static char *mif_getline(jas_stream_t *jas_stream, char *buf, int bufsize);
static int mif_getc(jas_stream_t *in);
static mif_hdr_t *mif_makehdrfromimage(jas_image_t *image);
/******************************************************************************\
* Local data.
\******************************************************************************/
jas_taginfo_t mif_tags2[] = {
{MIF_CMPT, "component"},
{MIF_END, "end"},
{-1, 0}
};
jas_taginfo_t mif_tags[] = {
{MIF_TLX, "tlx"},
{MIF_TLY, "tly"},
{MIF_WIDTH, "width"},
{MIF_HEIGHT, "height"},
{MIF_HSAMP, "sampperx"},
{MIF_VSAMP, "samppery"},
{MIF_PREC, "prec"},
{MIF_SGND, "sgnd"},
{MIF_DATA, "data"},
{-1, 0}
};
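/*
 * For reference, a MIF header is plain text that follows the 4-byte magic
 * number, using the tag vocabulary declared above.  A representative
 * (hypothetical) header:
 *
 *	component tlx=0 tly=0 sampperx=1 samppery=1 width=64 height=48 prec=8 sgnd=0 data=cmpt0.pnm
 *	end
 *
 * mif_hdr_get() parses this form and mif_hdr_put() emits it.
 */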
/******************************************************************************\
* Code for load operation.
\******************************************************************************/
/* Load an image from a stream in the MIF format. */
jas_image_t *mif_decode(jas_stream_t *in, char *optstr)
{
mif_hdr_t *hdr;
jas_image_t *image;
jas_image_t *tmpimage;
jas_stream_t *tmpstream;
int cmptno;
mif_cmpt_t *cmpt;
jas_image_cmptparm_t cmptparm;
jas_seq2d_t *data;
int_fast32_t x;
int_fast32_t y;
int bias;
/* Avoid warnings about unused parameters. */
optstr = 0;
hdr = 0;
image = 0;
tmpimage = 0;
tmpstream = 0;
data = 0;
if (!(hdr = mif_hdr_get(in))) {
goto error;
}
if (!(image = jas_image_create0())) {
goto error;
}
for (cmptno = 0; cmptno < hdr->numcmpts; ++cmptno) {
cmpt = hdr->cmpts[cmptno];
tmpstream = cmpt->data ? jas_stream_fopen(cmpt->data, "rb") : in;
if (!tmpstream) {
goto error;
}
if (!(tmpimage = jas_image_decode(tmpstream, -1, 0))) {
goto error;
}
if (tmpstream != in) {
jas_stream_close(tmpstream);
tmpstream = 0;
}
if (!cmpt->width) {
cmpt->width = jas_image_cmptwidth(tmpimage, 0);
}
if (!cmpt->height) {
			cmpt->height = jas_image_cmptheight(tmpimage, 0);
}
if (!cmpt->prec) {
cmpt->prec = jas_image_cmptprec(tmpimage, 0);
}
if (cmpt->sgnd < 0) {
cmpt->sgnd = jas_image_cmptsgnd(tmpimage, 0);
}
cmptparm.tlx = cmpt->tlx;
cmptparm.tly = cmpt->tly;
cmptparm.hstep = cmpt->sampperx;
cmptparm.vstep = cmpt->samppery;
cmptparm.width = cmpt->width;
cmptparm.height = cmpt->height;
cmptparm.prec = cmpt->prec;
cmptparm.sgnd = cmpt->sgnd;
if (jas_image_addcmpt(image, jas_image_numcmpts(image), &cmptparm)) {
goto error;
}
if (!(data = jas_seq2d_create(0, 0, cmpt->width, cmpt->height))) {
goto error;
}
if (jas_image_readcmpt(tmpimage, 0, 0, 0, cmpt->width, cmpt->height,
data)) {
goto error;
}
if (cmpt->sgnd) {
bias = 1 << (cmpt->prec - 1);
for (y = 0; y < cmpt->height; ++y) {
for (x = 0; x < cmpt->width; ++x) {
*jas_seq2d_getref(data, x, y) -= bias;
}
}
}
if (jas_image_writecmpt(image, jas_image_numcmpts(image) - 1, 0, 0,
cmpt->width, cmpt->height, data)) {
goto error;
}
jas_seq2d_destroy(data);
data = 0;
jas_image_destroy(tmpimage);
tmpimage = 0;
}
mif_hdr_destroy(hdr);
hdr = 0;
return image;
error:
if (image) {
jas_image_destroy(image);
}
if (hdr) {
mif_hdr_destroy(hdr);
}
if (tmpstream && tmpstream != in) {
jas_stream_close(tmpstream);
}
if (tmpimage) {
jas_image_destroy(tmpimage);
}
if (data) {
jas_seq2d_destroy(data);
}
return 0;
}
/******************************************************************************\
* Code for save operation.
\******************************************************************************/
/* Save an image to a stream in the MIF format. */
int mif_encode(jas_image_t *image, jas_stream_t *out, char *optstr)
{
mif_hdr_t *hdr;
jas_image_t *tmpimage;
int fmt;
int cmptno;
mif_cmpt_t *cmpt;
jas_image_cmptparm_t cmptparm;
jas_seq2d_t *data;
int_fast32_t x;
int_fast32_t y;
int bias;
hdr = 0;
tmpimage = 0;
data = 0;
if (optstr && *optstr != '\0') {
jas_eprintf("warning: ignoring unsupported options\n");
}
if ((fmt = jas_image_strtofmt("pnm")) < 0) {
jas_eprintf("error: PNM support required\n");
goto error;
}
if (!(hdr = mif_makehdrfromimage(image))) {
goto error;
}
if (mif_hdr_put(hdr, out)) {
goto error;
}
/* Output component data. */
for (cmptno = 0; cmptno < hdr->numcmpts; ++cmptno) {
cmpt = hdr->cmpts[cmptno];
if (!cmpt->data) {
if (!(tmpimage = jas_image_create0())) {
goto error;
}
cmptparm.tlx = 0;
cmptparm.tly = 0;
cmptparm.hstep = cmpt->sampperx;
cmptparm.vstep = cmpt->samppery;
cmptparm.width = cmpt->width;
cmptparm.height = cmpt->height;
cmptparm.prec = cmpt->prec;
cmptparm.sgnd = false;
if (jas_image_addcmpt(tmpimage, jas_image_numcmpts(tmpimage), &cmptparm)) {
goto error;
}
if (!(data = jas_seq2d_create(0, 0, cmpt->width, cmpt->height))) {
goto error;
}
if (jas_image_readcmpt(image, cmptno, 0, 0, cmpt->width, cmpt->height,
data)) {
goto error;
}
if (cmpt->sgnd) {
bias = 1 << (cmpt->prec - 1);
for (y = 0; y < cmpt->height; ++y) {
for (x = 0; x < cmpt->width; ++x) {
*jas_seq2d_getref(data, x, y) += bias;
}
}
}
if (jas_image_writecmpt(tmpimage, 0, 0, 0, cmpt->width, cmpt->height,
data)) {
goto error;
}
jas_seq2d_destroy(data);
data = 0;
if (jas_image_encode(tmpimage, out, fmt, 0)) {
goto error;
}
jas_image_destroy(tmpimage);
tmpimage = 0;
}
}
mif_hdr_destroy(hdr);
return 0;
error:
if (hdr) {
mif_hdr_destroy(hdr);
}
if (tmpimage) {
jas_image_destroy(tmpimage);
}
if (data) {
jas_seq2d_destroy(data);
}
return -1;
}
/******************************************************************************\
* Code for validate operation.
\******************************************************************************/
int mif_validate(jas_stream_t *in)
{
uchar buf[MIF_MAGICLEN];
uint_fast32_t magic;
int i;
int n;
assert(JAS_STREAM_MAXPUTBACK >= MIF_MAGICLEN);
/* Read the validation data (i.e., the data used for detecting
the format). */
if ((n = jas_stream_read(in, buf, MIF_MAGICLEN)) < 0) {
return -1;
}
/* Put the validation data back onto the stream, so that the
stream position will not be changed. */
for (i = n - 1; i >= 0; --i) {
if (jas_stream_ungetc(in, buf[i]) == EOF) {
return -1;
}
}
/* Was enough data read? */
if (n < MIF_MAGICLEN) {
return -1;
}
/* Compute the signature value. */
magic = (buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3];
/* Ensure that the signature is correct for this format. */
if (magic != MIF_MAGIC) {
return -1;
}
return 0;
}
/******************************************************************************\
* Code for MIF header class.
\******************************************************************************/
static mif_hdr_t *mif_hdr_create(int maxcmpts)
{
mif_hdr_t *hdr;
if (!(hdr = jas_malloc(sizeof(mif_hdr_t)))) {
return 0;
}
hdr->numcmpts = 0;
hdr->maxcmpts = 0;
hdr->cmpts = 0;
if (mif_hdr_growcmpts(hdr, maxcmpts)) {
mif_hdr_destroy(hdr);
return 0;
}
return hdr;
}
static void mif_hdr_destroy(mif_hdr_t *hdr)
{
int cmptno;
if (hdr->cmpts) {
for (cmptno = 0; cmptno < hdr->numcmpts; ++cmptno) {
mif_cmpt_destroy(hdr->cmpts[cmptno]);
}
jas_free(hdr->cmpts);
}
jas_free(hdr);
}
static int mif_hdr_growcmpts(mif_hdr_t *hdr, int maxcmpts)
{
int cmptno;
mif_cmpt_t **newcmpts;
assert(maxcmpts >= hdr->numcmpts);
	/* Use the overflow-checked allocators to size the component array. */
	newcmpts = (!hdr->cmpts) ? jas_alloc2(maxcmpts, sizeof(mif_cmpt_t *)) :
	  jas_realloc2(hdr->cmpts, maxcmpts, sizeof(mif_cmpt_t *));
if (!newcmpts) {
return -1;
}
hdr->maxcmpts = maxcmpts;
hdr->cmpts = newcmpts;
for (cmptno = hdr->numcmpts; cmptno < hdr->maxcmpts; ++cmptno) {
hdr->cmpts[cmptno] = 0;
}
return 0;
}
static mif_hdr_t *mif_hdr_get(jas_stream_t *in)
{
uchar magicbuf[MIF_MAGICLEN];
char buf[4096];
mif_hdr_t *hdr;
bool done;
jas_tvparser_t *tvp;
int id;
hdr = 0;
if (jas_stream_read(in, magicbuf, MIF_MAGICLEN) != MIF_MAGICLEN) {
goto error;
}
if (magicbuf[0] != (MIF_MAGIC >> 24) || magicbuf[1] != ((MIF_MAGIC >> 16) &
0xff) || magicbuf[2] != ((MIF_MAGIC >> 8) & 0xff) || magicbuf[3] !=
(MIF_MAGIC & 0xff)) {
jas_eprintf("error: bad signature\n");
goto error;
}
if (!(hdr = mif_hdr_create(0))) {
goto error;
}
done = false;
do {
if (!mif_getline(in, buf, sizeof(buf))) {
goto error;
}
if (buf[0] == '\0') {
continue;
}
if (!(tvp = jas_tvparser_create(buf))) {
goto error;
}
		if (jas_tvparser_next(tvp)) {
			jas_tvparser_destroy(tvp);
			goto error;
		}
id = jas_taginfo_nonull(jas_taginfos_lookup(mif_tags2, jas_tvparser_gettag(tvp)))->id;
jas_tvparser_destroy(tvp);
switch (id) {
case MIF_CMPT:
			if (mif_process_cmpt(hdr, buf)) {
				goto error;
			}
break;
case MIF_END:
done = 1;
break;
}
} while (!done);
return hdr;
error:
if (hdr) {
mif_hdr_destroy(hdr);
}
return 0;
}
static int mif_process_cmpt(mif_hdr_t *hdr, char *buf)
{
jas_tvparser_t *tvp;
mif_cmpt_t *cmpt;
int id;
cmpt = 0;
tvp = 0;
if (!(cmpt = mif_cmpt_create())) {
goto error;
}
cmpt->tlx = 0;
cmpt->tly = 0;
cmpt->sampperx = 0;
cmpt->samppery = 0;
cmpt->width = 0;
cmpt->height = 0;
cmpt->prec = 0;
cmpt->sgnd = -1;
cmpt->data = 0;
if (!(tvp = jas_tvparser_create(buf))) {
goto error;
}
while (!(id = jas_tvparser_next(tvp))) {
switch (jas_taginfo_nonull(jas_taginfos_lookup(mif_tags,
jas_tvparser_gettag(tvp)))->id) {
case MIF_TLX:
cmpt->tlx = atoi(jas_tvparser_getval(tvp));
break;
case MIF_TLY:
cmpt->tly = atoi(jas_tvparser_getval(tvp));
break;
case MIF_WIDTH:
cmpt->width = atoi(jas_tvparser_getval(tvp));
break;
case MIF_HEIGHT:
cmpt->height = atoi(jas_tvparser_getval(tvp));
break;
case MIF_HSAMP:
cmpt->sampperx = atoi(jas_tvparser_getval(tvp));
break;
case MIF_VSAMP:
cmpt->samppery = atoi(jas_tvparser_getval(tvp));
break;
case MIF_PREC:
cmpt->prec = atoi(jas_tvparser_getval(tvp));
break;
case MIF_SGND:
cmpt->sgnd = atoi(jas_tvparser_getval(tvp));
break;
case MIF_DATA:
if (!(cmpt->data = jas_strdup(jas_tvparser_getval(tvp)))) {
				goto error;
}
break;
}
}
	jas_tvparser_destroy(tvp);
	/* Clear the pointer so the error path below cannot free it again. */
	tvp = 0;
if (!cmpt->sampperx || !cmpt->samppery) {
goto error;
}
if (mif_hdr_addcmpt(hdr, hdr->numcmpts, cmpt)) {
goto error;
}
return 0;
error:
if (cmpt) {
mif_cmpt_destroy(cmpt);
}
if (tvp) {
jas_tvparser_destroy(tvp);
}
return -1;
}
static int mif_hdr_put(mif_hdr_t *hdr, jas_stream_t *out)
{
int cmptno;
mif_cmpt_t *cmpt;
/* Output signature. */
jas_stream_putc(out, (MIF_MAGIC >> 24) & 0xff);
jas_stream_putc(out, (MIF_MAGIC >> 16) & 0xff);
jas_stream_putc(out, (MIF_MAGIC >> 8) & 0xff);
jas_stream_putc(out, MIF_MAGIC & 0xff);
/* Output component information. */
for (cmptno = 0; cmptno < hdr->numcmpts; ++cmptno) {
cmpt = hdr->cmpts[cmptno];
jas_stream_printf(out, "component tlx=%ld tly=%ld "
"sampperx=%ld samppery=%ld width=%ld height=%ld prec=%d sgnd=%d",
cmpt->tlx, cmpt->tly, cmpt->sampperx, cmpt->samppery, cmpt->width,
cmpt->height, cmpt->prec, cmpt->sgnd);
if (cmpt->data) {
jas_stream_printf(out, " data=%s", cmpt->data);
}
jas_stream_printf(out, "\n");
}
/* Output end of header indicator. */
jas_stream_printf(out, "end\n");
return 0;
}
static int mif_hdr_addcmpt(mif_hdr_t *hdr, int cmptno, mif_cmpt_t *cmpt)
{
assert(cmptno >= hdr->numcmpts);
if (hdr->numcmpts >= hdr->maxcmpts) {
if (mif_hdr_growcmpts(hdr, hdr->numcmpts + 128)) {
return -1;
}
}
hdr->cmpts[hdr->numcmpts] = cmpt;
++hdr->numcmpts;
return 0;
}
/******************************************************************************\
* Code for MIF component class.
\******************************************************************************/
static mif_cmpt_t *mif_cmpt_create()
{
mif_cmpt_t *cmpt;
if (!(cmpt = jas_malloc(sizeof(mif_cmpt_t)))) {
return 0;
}
memset(cmpt, 0, sizeof(mif_cmpt_t));
return cmpt;
}
static void mif_cmpt_destroy(mif_cmpt_t *cmpt)
{
if (cmpt->data) {
jas_free(cmpt->data);
}
jas_free(cmpt);
}
/******************************************************************************\
* MIF parsing code.
\******************************************************************************/
static char *mif_getline(jas_stream_t *stream, char *buf, int bufsize)
{
int c;
char *bufptr;
assert(bufsize > 0);
bufptr = buf;
while (bufsize > 1) {
if ((c = mif_getc(stream)) == EOF) {
break;
}
*bufptr++ = c;
--bufsize;
if (c == '\n') {
break;
}
}
*bufptr = '\0';
if (!(bufptr = strchr(buf, '\n'))) {
return 0;
}
*bufptr = '\0';
return buf;
}
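/*
 * mif_getline() returns a NUL-terminated logical line with its trailing
 * newline stripped, after mif_getc() below has discarded '#' comments and
 * backslash-newline continuations.  A line that does not fit within
 * bufsize - 1 characters (so that no newline was buffered) is treated as an
 * error and a null pointer is returned.
 */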
static int mif_getc(jas_stream_t *in)
{
int c;
bool done;
done = false;
do {
switch (c = jas_stream_getc(in)) {
case EOF:
done = 1;
break;
case '#':
for (;;) {
if ((c = jas_stream_getc(in)) == EOF) {
done = 1;
break;
}
if (c == '\n') {
break;
}
}
break;
case '\\':
if (jas_stream_peekc(in) == '\n') {
jas_stream_getc(in);
}
break;
default:
done = 1;
break;
}
} while (!done);
return c;
}
/******************************************************************************\
* Miscellaneous functions.
\******************************************************************************/
static mif_hdr_t *mif_makehdrfromimage(jas_image_t *image)
{
mif_hdr_t *hdr;
int cmptno;
mif_cmpt_t *cmpt;
if (!(hdr = mif_hdr_create(jas_image_numcmpts(image)))) {
return 0;
}
hdr->magic = MIF_MAGIC;
hdr->numcmpts = jas_image_numcmpts(image);
for (cmptno = 0; cmptno < hdr->numcmpts; ++cmptno) {
		if (!(hdr->cmpts[cmptno] = jas_malloc(sizeof(mif_cmpt_t)))) {
			/* Only the components filled in so far may be destroyed. */
			hdr->numcmpts = cmptno;
			mif_hdr_destroy(hdr);
			return 0;
		}
cmpt = hdr->cmpts[cmptno];
cmpt->tlx = jas_image_cmpttlx(image, cmptno);
cmpt->tly = jas_image_cmpttly(image, cmptno);
cmpt->width = jas_image_cmptwidth(image, cmptno);
cmpt->height = jas_image_cmptheight(image, cmptno);
cmpt->sampperx = jas_image_cmpthstep(image, cmptno);
cmpt->samppery = jas_image_cmptvstep(image, cmptno);
cmpt->prec = jas_image_cmptprec(image, cmptno);
cmpt->sgnd = jas_image_cmptsgnd(image, cmptno);
cmpt->data = 0;
}
return hdr;
}
| ./CrossVul/dataset_final_sorted/CWE-416/c/bad_1660_0 |
crossvul-cpp_data_bad_4776_1 | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% PPPP W W PPPP %
% P P W W P P %
% PPPP W W PPPP %
% P W W W P %
% P W W P %
% %
% %
% Read Seattle Film Works Image Format %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2016 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/constitute.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/pixel-accessor.h"
#include "magick/quantum-private.h"
#include "magick/resource_.h"
#include "magick/static.h"
#include "magick/string_.h"
#include "magick/module.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s P W P %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsPWP() returns MagickTrue if the image format type, identified by the
% magick string, is PWP.
%
% The format of the IsPWP method is:
%
% MagickBooleanType IsPWP(const unsigned char *magick,const size_t length)
%
% A description of each parameter follows:
%
% o magick: compare image format pattern against these bytes.
%
% o length: Specifies the length of the magick string.
%
%
*/
static MagickBooleanType IsPWP(const unsigned char *magick,const size_t length)
{
if (length < 5)
return(MagickFalse);
if (LocaleNCompare((char *) magick,"SFW95",5) == 0)
return(MagickTrue);
return(MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d P W P I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPWPImage() reads a Seattle Film Works multi-image file and returns
% it. It allocates the memory necessary for the new Image structure and
% returns a pointer to the new image.
%
% The format of the ReadPWPImage method is:
%
% Image *ReadPWPImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Image *ReadPWPImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
FILE
*file;
Image
*image,
*next_image,
*pwp_image;
ImageInfo
*read_info;
int
c,
unique_file;
MagickBooleanType
status;
register Image
*p;
register ssize_t
i;
size_t
filesize,
length;
ssize_t
count;
unsigned char
magick[MaxTextExtent];
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
pwp_image=AcquireImage(image_info);
image=pwp_image;
status=OpenBlob(image_info,pwp_image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
return((Image *) NULL);
  (void) ResetMagickMemory(magick,0,sizeof(magick));
  count=ReadBlob(pwp_image,5,magick);
if ((count != 5) || (LocaleNCompare((char *) magick,"SFW95",5) != 0))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
read_info=CloneImageInfo(image_info);
(void) SetImageInfoProgressMonitor(read_info,(MagickProgressMonitor) NULL,
(void *) NULL);
SetImageInfoBlob(read_info,(void *) NULL,0);
unique_file=AcquireUniqueFileResource(read_info->filename);
for ( ; ; )
{
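    /*
      Scan the stream through an 18-byte sliding window until the "SFW94A"
      frame marker lands at offset 12 of the window; the bytes at the front
      of the window then supply the length of the embedded frame that
      follows.
    */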
for (c=ReadBlobByte(pwp_image); c != EOF; c=ReadBlobByte(pwp_image))
{
for (i=0; i < 17; i++)
magick[i]=magick[i+1];
magick[17]=(unsigned char) c;
if (LocaleNCompare((char *) (magick+12),"SFW94A",6) == 0)
break;
}
if (c == EOF)
break;
if (LocaleNCompare((char *) (magick+12),"SFW94A",6) != 0)
{
(void) RelinquishUniqueFileResource(read_info->filename);
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
}
/*
Dump SFW image to a temporary file.
*/
file=(FILE *) NULL;
if (unique_file != -1)
file=fdopen(unique_file,"wb");
if ((unique_file == -1) || (file == (FILE *) NULL))
{
(void) RelinquishUniqueFileResource(read_info->filename);
ThrowFileException(exception,FileOpenError,"UnableToWriteFile",
image->filename);
image=DestroyImageList(image);
return((Image *) NULL);
}
length=fwrite("SFW94A",1,6,file);
(void) length;
filesize=65535UL*magick[2]+256L*magick[1]+magick[0];
for (i=0; i < (ssize_t) filesize; i++)
{
c=ReadBlobByte(pwp_image);
(void) fputc(c,file);
}
(void) fclose(file);
next_image=ReadImage(read_info,exception);
if (next_image == (Image *) NULL)
break;
(void) FormatLocaleString(next_image->filename,MaxTextExtent,
"slide_%02ld.sfw",(long) next_image->scene);
if (image == (Image *) NULL)
image=next_image;
else
{
/*
Link image into image list.
*/
for (p=image; p->next != (Image *) NULL; p=GetNextImageInList(p)) ;
next_image->previous=p;
next_image->scene=p->scene+1;
p->next=next_image;
}
if (image_info->number_scenes != 0)
if (next_image->scene >= (image_info->scene+image_info->number_scenes-1))
break;
status=SetImageProgress(image,LoadImagesTag,TellBlob(pwp_image),
GetBlobSize(pwp_image));
if (status == MagickFalse)
break;
}
  if (unique_file != -1)
    (void) close(unique_file);
  (void) RelinquishUniqueFileResource(read_info->filename);
  read_info=DestroyImageInfo(read_info);
  if (EOFBlob(pwp_image) != MagickFalse)
    {
      char
        *message;
      message=GetExceptionMessage(errno);
      (void) ThrowMagickException(exception,GetMagickModule(),CorruptImageError,
        "UnexpectedEndOfFile","`%s': %s",pwp_image->filename,message);
      message=DestroyString(message);
    }
  (void) CloseBlob(pwp_image);
  /*
    pwp_image is a pixel-less placeholder that also heads the image list
    built above; detach it from the list before destroying it so the list
    returned to the caller does not begin with freed memory.
  */
  image=GetNextImageInList(pwp_image);
  if (image != (Image *) NULL)
    image->previous=(Image *) NULL;
  pwp_image->next=(Image *) NULL;
  pwp_image=DestroyImage(pwp_image);
  return(GetFirstImageInList(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e g i s t e r P W P I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RegisterPWPImage() adds attributes for the PWP image format to
% the list of supported formats. The attributes include the image format
% tag, a method to read and/or write the format, whether the format
% supports the saving of more than one frame to the same file or blob,
% whether the format supports native in-memory I/O, and a brief
% description of the format.
%
% The format of the RegisterPWPImage method is:
%
% size_t RegisterPWPImage(void)
%
*/
ModuleExport size_t RegisterPWPImage(void)
{
MagickInfo
*entry;
entry=SetMagickInfo("PWP");
entry->decoder=(DecodeImageHandler *) ReadPWPImage;
entry->magick=(IsImageFormatHandler *) IsPWP;
entry->description=ConstantString("Seattle Film Works");
entry->module=ConstantString("PWP");
(void) RegisterMagickInfo(entry);
return(MagickImageCoderSignature);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n r e g i s t e r P W P I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnregisterPWPImage() removes format registrations made by the
% PWP module from the list of supported formats.
%
% The format of the UnregisterPWPImage method is:
%
% UnregisterPWPImage(void)
%
*/
ModuleExport void UnregisterPWPImage(void)
{
(void) UnregisterMagickInfo("PWP");
}
| ./CrossVul/dataset_final_sorted/CWE-416/c/bad_4776_1 |
crossvul-cpp_data_bad_822_1 | /*
* Generic hugetlb support.
* (C) Nadia Yvette Chambers, April 2004
*/
#include <linux/list.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/mmu_notifier.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/compiler.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>
#include <linux/memblock.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/mmdebug.h>
#include <linux/sched/signal.h>
#include <linux/rmap.h>
#include <linux/string_helpers.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/jhash.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <linux/io.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/node.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_owner.h>
#include "internal.h"
int hugetlb_max_hstate __read_mostly;
unsigned int default_hstate_idx;
struct hstate hstates[HUGE_MAX_HSTATE];
/*
* Minimum page order among possible hugepage sizes, set to a proper value
* at boot time.
*/
static unsigned int minimum_order __read_mostly = UINT_MAX;
__initdata LIST_HEAD(huge_boot_pages);
/* for command line parsing */
static struct hstate * __initdata parsed_hstate;
static unsigned long __initdata default_hstate_max_huge_pages;
static unsigned long __initdata default_hstate_size;
static bool __initdata parsed_valid_hugepagesz = true;
/*
* Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
* free_huge_pages, and surplus_huge_pages.
*/
DEFINE_SPINLOCK(hugetlb_lock);
/*
* Serializes faults on the same logical page. This is used to
* prevent spurious OOMs when the hugepage pool is fully utilized.
*/
static int num_fault_mutexes;
struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;
/* Forward declaration */
static int hugetlb_acct_memory(struct hstate *h, long delta);
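/*
 * unlock_or_release_subpool() must be called with spool->lock held; it
 * always drops the lock, and frees the subpool (returning any minimum-size
 * reservation to the global pool) once the last reference is gone and no
 * pages remain in use.
 */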
static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
{
bool free = (spool->count == 0) && (spool->used_hpages == 0);
spin_unlock(&spool->lock);
	/* If no pages are used, and no other handles to the subpool
	 * remain, give up any reservations based on minimum size and
	 * free the subpool */
if (free) {
if (spool->min_hpages != -1)
hugetlb_acct_memory(spool->hstate,
-spool->min_hpages);
kfree(spool);
}
}
struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
long min_hpages)
{
struct hugepage_subpool *spool;
spool = kzalloc(sizeof(*spool), GFP_KERNEL);
if (!spool)
return NULL;
spin_lock_init(&spool->lock);
spool->count = 1;
spool->max_hpages = max_hpages;
spool->hstate = h;
spool->min_hpages = min_hpages;
if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
kfree(spool);
return NULL;
}
spool->rsv_hpages = min_hpages;
return spool;
}
void hugepage_put_subpool(struct hugepage_subpool *spool)
{
spin_lock(&spool->lock);
BUG_ON(!spool->count);
spool->count--;
unlock_or_release_subpool(spool);
}
/*
* Subpool accounting for allocating and reserving pages.
 * Return -ENOMEM if there are not enough resources to satisfy
 * the request. Otherwise, return the number of pages by which the
 * global pools must be adjusted (upward). The returned value may
 * only be different than the passed value (delta) in the case where
 * a subpool minimum size must be maintained.
*/
static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
long delta)
{
long ret = delta;
if (!spool)
return ret;
spin_lock(&spool->lock);
if (spool->max_hpages != -1) { /* maximum size accounting */
if ((spool->used_hpages + delta) <= spool->max_hpages)
spool->used_hpages += delta;
else {
ret = -ENOMEM;
goto unlock_ret;
}
}
/* minimum size accounting */
if (spool->min_hpages != -1 && spool->rsv_hpages) {
if (delta > spool->rsv_hpages) {
/*
* Asking for more reserves than those already taken on
* behalf of subpool. Return difference.
*/
ret = delta - spool->rsv_hpages;
spool->rsv_hpages = 0;
} else {
ret = 0; /* reserves already accounted for */
spool->rsv_hpages -= delta;
}
}
unlock_ret:
spin_unlock(&spool->lock);
return ret;
}
/*
* Subpool accounting for freeing and unreserving pages.
* Return the number of global page reservations that must be dropped.
* The return value may only be different than the passed value (delta)
* in the case where a subpool minimum size must be maintained.
*/
static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
long delta)
{
long ret = delta;
if (!spool)
return delta;
spin_lock(&spool->lock);
if (spool->max_hpages != -1) /* maximum size accounting */
spool->used_hpages -= delta;
/* minimum size accounting */
if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) {
if (spool->rsv_hpages + delta <= spool->min_hpages)
ret = 0;
else
ret = spool->rsv_hpages + delta - spool->min_hpages;
spool->rsv_hpages += delta;
if (spool->rsv_hpages > spool->min_hpages)
spool->rsv_hpages = spool->min_hpages;
}
/*
* If hugetlbfs_put_super couldn't free spool due to an outstanding
* quota reference, free it now.
*/
unlock_or_release_subpool(spool);
return ret;
}
static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
{
return HUGETLBFS_SB(inode->i_sb)->spool;
}
static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
{
return subpool_inode(file_inode(vma->vm_file));
}
/*
* Region tracking -- allows tracking of reservations and instantiated pages
* across the pages in a mapping.
*
* The region data structures are embedded into a resv_map and protected
* by a resv_map's lock. The set of regions within the resv_map represent
* reservations for huge pages, or huge pages that have already been
* instantiated within the map. The from and to elements are huge page
 * indices into the associated mapping. from indicates the starting index
* of the region. to represents the first index past the end of the region.
*
* For example, a file region structure with from == 0 and to == 4 represents
* four huge pages in a mapping. It is important to note that the to element
* represents the first element past the end of the region. This is used in
* arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
*
* Interval notation of the form [from, to) will be used to indicate that
* the endpoint from is inclusive and to is exclusive.
*/
struct file_region {
struct list_head link;
long from;
long to;
};
/*
* Add the huge page range represented by [f, t) to the reserve
* map. In the normal case, existing regions will be expanded
* to accommodate the specified range. Sufficient regions should
* exist for expansion due to the previous call to region_chg
* with the same range. However, it is possible that region_del
 * could have been called after region_chg and modified the map
* in such a way that no region exists to be expanded. In this
* case, pull a region descriptor from the cache associated with
* the map and use that for the new range.
*
* Return the number of new huge pages added to the map. This
* number is greater than or equal to zero.
*/
static long region_add(struct resv_map *resv, long f, long t)
{
struct list_head *head = &resv->regions;
struct file_region *rg, *nrg, *trg;
long add = 0;
spin_lock(&resv->lock);
/* Locate the region we are either in or before. */
list_for_each_entry(rg, head, link)
if (f <= rg->to)
break;
/*
* If no region exists which can be expanded to include the
* specified range, the list must have been modified by an
	 * interleaving call to region_del(). Pull a region descriptor
* from the cache and use it for this range.
*/
if (&rg->link == head || t < rg->from) {
VM_BUG_ON(resv->region_cache_count <= 0);
resv->region_cache_count--;
nrg = list_first_entry(&resv->region_cache, struct file_region,
link);
list_del(&nrg->link);
nrg->from = f;
nrg->to = t;
list_add(&nrg->link, rg->link.prev);
add += t - f;
goto out_locked;
}
/* Round our left edge to the current segment if it encloses us. */
if (f > rg->from)
f = rg->from;
/* Check for and consume any regions we now overlap with. */
nrg = rg;
list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
if (&rg->link == head)
break;
if (rg->from > t)
break;
		/* If this area reaches higher, then extend our area to
* include it completely. If this is not the first area
* which we intend to reuse, free it. */
if (rg->to > t)
t = rg->to;
if (rg != nrg) {
/* Decrement return value by the deleted range.
* Another range will span this area so that by
* end of routine add will be >= zero
*/
add -= (rg->to - rg->from);
list_del(&rg->link);
kfree(rg);
}
}
add += (nrg->from - f); /* Added to beginning of region */
nrg->from = f;
add += t - nrg->to; /* Added to end of region */
nrg->to = t;
out_locked:
resv->adds_in_progress--;
spin_unlock(&resv->lock);
VM_BUG_ON(add < 0);
return add;
}
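/*
 * Worked example: with an existing region [0, 2), region_add(resv, 1, 4)
 * expands that region to [0, 4) and returns 2, the number of newly covered
 * huge pages.  If a racing region_del() emptied the map, the descriptor
 * pulled from the region cache covers all of [f, t) instead.
 */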
/*
* Examine the existing reserve map and determine how many
* huge pages in the specified range [f, t) are NOT currently
* represented. This routine is called before a subsequent
* call to region_add that will actually modify the reserve
* map to add the specified range [f, t). region_chg does
* not change the number of huge pages represented by the
* map. However, if the existing regions in the map can not
* be expanded to represent the new range, a new file_region
* structure is added to the map as a placeholder. This is
* so that the subsequent region_add call will have all the
* regions it needs and will not fail.
*
* Upon entry, region_chg will also examine the cache of region descriptors
* associated with the map. If there are not enough descriptors cached, one
* will be allocated for the in progress add operation.
*
* Returns the number of huge pages that need to be added to the existing
* reservation map for the range [f, t). This number is greater or equal to
* zero. -ENOMEM is returned if a new file_region structure or cache entry
* is needed and can not be allocated.
*/
static long region_chg(struct resv_map *resv, long f, long t)
{
struct list_head *head = &resv->regions;
struct file_region *rg, *nrg = NULL;
long chg = 0;
retry:
spin_lock(&resv->lock);
retry_locked:
resv->adds_in_progress++;
/*
* Check for sufficient descriptors in the cache to accommodate
* the number of in progress add operations.
*/
if (resv->adds_in_progress > resv->region_cache_count) {
struct file_region *trg;
VM_BUG_ON(resv->adds_in_progress - resv->region_cache_count > 1);
/* Must drop lock to allocate a new descriptor. */
resv->adds_in_progress--;
spin_unlock(&resv->lock);
trg = kmalloc(sizeof(*trg), GFP_KERNEL);
if (!trg) {
kfree(nrg);
return -ENOMEM;
}
spin_lock(&resv->lock);
list_add(&trg->link, &resv->region_cache);
resv->region_cache_count++;
goto retry_locked;
}
/* Locate the region we are before or in. */
list_for_each_entry(rg, head, link)
if (f <= rg->to)
break;
/* If we are below the current region then a new region is required.
* Subtle, allocate a new region at the position but make it zero
* size such that we can guarantee to record the reservation. */
if (&rg->link == head || t < rg->from) {
if (!nrg) {
resv->adds_in_progress--;
spin_unlock(&resv->lock);
nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
if (!nrg)
return -ENOMEM;
nrg->from = f;
nrg->to = f;
INIT_LIST_HEAD(&nrg->link);
goto retry;
}
list_add(&nrg->link, rg->link.prev);
chg = t - f;
goto out_nrg;
}
/* Round our left edge to the current segment if it encloses us. */
if (f > rg->from)
f = rg->from;
chg = t - f;
/* Check for and consume any regions we now overlap with. */
list_for_each_entry(rg, rg->link.prev, link) {
if (&rg->link == head)
break;
if (rg->from > t)
goto out;
		/* We overlap with this area; if it extends further than
		 * us then we must extend ourselves. Account for its
		 * existing reservation. */
if (rg->to > t) {
chg += rg->to - t;
t = rg->to;
}
chg -= rg->to - rg->from;
}
out:
spin_unlock(&resv->lock);
/* We already know we raced and no longer need the new region */
kfree(nrg);
return chg;
out_nrg:
spin_unlock(&resv->lock);
return chg;
}
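/*
 * Worked example: with an existing region [0, 2), region_chg(resv, 1, 4)
 * returns 2, since pages 2 and 3 are not yet represented.  The map itself
 * is not expanded here; at most a zero-size placeholder is inserted so the
 * later region_add() call cannot fail.
 */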
/*
* Abort the in progress add operation. The adds_in_progress field
* of the resv_map keeps track of the operations in progress between
* calls to region_chg and region_add. Operations are sometimes
* aborted after the call to region_chg. In such cases, region_abort
* is called to decrement the adds_in_progress counter.
*
* NOTE: The range arguments [f, t) are not needed or used in this
* routine. They are kept to make reading the calling code easier as
* arguments will match the associated region_chg call.
*/
static void region_abort(struct resv_map *resv, long f, long t)
{
spin_lock(&resv->lock);
VM_BUG_ON(!resv->region_cache_count);
resv->adds_in_progress--;
spin_unlock(&resv->lock);
}
/*
* Delete the specified range [f, t) from the reserve map. If the
* t parameter is LONG_MAX, this indicates that ALL regions after f
* should be deleted. Locate the regions which intersect [f, t)
* and either trim, delete or split the existing regions.
*
* Returns the number of huge pages deleted from the reserve map.
* In the normal case, the return value is zero or more. In the
* case where a region must be split, a new region descriptor must
* be allocated. If the allocation fails, -ENOMEM will be returned.
* NOTE: If the parameter t == LONG_MAX, then we will never split
* a region and possibly return -ENOMEM. Callers specifying
* t == LONG_MAX do not need to check for -ENOMEM error.
*/
static long region_del(struct resv_map *resv, long f, long t)
{
struct list_head *head = &resv->regions;
struct file_region *rg, *trg;
struct file_region *nrg = NULL;
long del = 0;
retry:
spin_lock(&resv->lock);
list_for_each_entry_safe(rg, trg, head, link) {
/*
* Skip regions before the range to be deleted. file_region
* ranges are normally of the form [from, to). However, there
* may be a "placeholder" entry in the map which is of the form
* (from, to) with from == to. Check for placeholder entries
* at the beginning of the range to be deleted.
*/
if (rg->to <= f && (rg->to != rg->from || rg->to != f))
continue;
if (rg->from >= t)
break;
if (f > rg->from && t < rg->to) { /* Must split region */
/*
* Check for an entry in the cache before dropping
* lock and attempting allocation.
*/
if (!nrg &&
resv->region_cache_count > resv->adds_in_progress) {
nrg = list_first_entry(&resv->region_cache,
struct file_region,
link);
list_del(&nrg->link);
resv->region_cache_count--;
}
if (!nrg) {
spin_unlock(&resv->lock);
nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
if (!nrg)
return -ENOMEM;
goto retry;
}
del += t - f;
/* New entry for end of split region */
nrg->from = t;
nrg->to = rg->to;
INIT_LIST_HEAD(&nrg->link);
/* Original entry is trimmed */
rg->to = f;
list_add(&nrg->link, &rg->link);
nrg = NULL;
break;
}
if (f <= rg->from && t >= rg->to) { /* Remove entire region */
del += rg->to - rg->from;
list_del(&rg->link);
kfree(rg);
continue;
}
if (f <= rg->from) { /* Trim beginning of region */
del += t - rg->from;
rg->from = t;
} else { /* Trim end of region */
del += rg->to - f;
rg->to = f;
}
}
spin_unlock(&resv->lock);
kfree(nrg);
return del;
}
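/*
 * Worked example: region_del(resv, 1, 3) applied to a single region [0, 4)
 * trims it to [0, 1), inserts a new region [3, 4) taken from the cache (or
 * freshly allocated), and returns 2, the number of huge pages removed from
 * the reserve map.
 */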
/*
* A rare out of memory error was encountered which prevented removal of
 * the reserve map region for a page. The huge page itself was freed
* and removed from the page cache. This routine will adjust the subpool
* usage count, and the global reserve count if needed. By incrementing
* these counts, the reserve map entry which could not be deleted will
* appear as a "reserved" entry instead of simply dangling with incorrect
* counts.
*/
void hugetlb_fix_reserve_counts(struct inode *inode)
{
struct hugepage_subpool *spool = subpool_inode(inode);
long rsv_adjust;
rsv_adjust = hugepage_subpool_get_pages(spool, 1);
if (rsv_adjust) {
struct hstate *h = hstate_inode(inode);
hugetlb_acct_memory(h, 1);
}
}
/*
* Count and return the number of huge pages in the reserve map
* that intersect with the range [f, t).
*/
static long region_count(struct resv_map *resv, long f, long t)
{
struct list_head *head = &resv->regions;
struct file_region *rg;
long chg = 0;
spin_lock(&resv->lock);
/* Locate each segment we overlap with, and count that overlap. */
list_for_each_entry(rg, head, link) {
long seg_from;
long seg_to;
if (rg->to <= f)
continue;
if (rg->from >= t)
break;
seg_from = max(rg->from, f);
seg_to = min(rg->to, t);
chg += seg_to - seg_from;
}
spin_unlock(&resv->lock);
return chg;
}
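/*
 * Worked example: for regions [0, 2) and [5, 7), region_count(resv, 1, 6)
 * counts one page of overlap from each region and returns 2.
 */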
/*
* Convert the address within this vma to the page offset within
* the mapping, in pagecache page units; huge pages here.
*/
static pgoff_t vma_hugecache_offset(struct hstate *h,
struct vm_area_struct *vma, unsigned long address)
{
return ((address - vma->vm_start) >> huge_page_shift(h)) +
(vma->vm_pgoff >> huge_page_order(h));
}
pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
unsigned long address)
{
return vma_hugecache_offset(hstate_vma(vma), vma, address);
}
EXPORT_SYMBOL_GPL(linear_hugepage_index);
/*
 * Return the size of the pages allocated when backing a VMA. In the majority
 * of cases this will be the same size as used by the page table entries.
*/
unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
if (vma->vm_ops && vma->vm_ops->pagesize)
return vma->vm_ops->pagesize(vma);
return PAGE_SIZE;
}
EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
/*
* Return the page size being used by the MMU to back a VMA. In the majority
* of cases, the page size used by the kernel matches the MMU size. On
* architectures where it differs, an architecture-specific 'strong'
* version of this symbol is required.
*/
__weak unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
return vma_kernel_pagesize(vma);
}
/*
* Flags for MAP_PRIVATE reservations. These are stored in the bottom
* bits of the reservation map pointer, which are always clear due to
* alignment.
*/
#define HPAGE_RESV_OWNER (1UL << 0)
#define HPAGE_RESV_UNMAPPED (1UL << 1)
#define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
/*
* These helpers are used to track how many pages are reserved for
* faults in a MAP_PRIVATE mapping. Only the process that called mmap()
* is guaranteed to have their future faults succeed.
*
* With the exception of reset_vma_resv_huge_pages() which is called at fork(),
* the reserve counters are updated with the hugetlb_lock held. It is safe
* to reset the VMA at fork() time as it is not in use yet and there is no
* chance of the global counters getting corrupted as a result of the values.
*
* The private mapping reservation is represented in a subtly different
* manner to a shared mapping. A shared mapping has a region map associated
* with the underlying file, this region map represents the backing file
* pages which have ever had a reservation assigned which this persists even
* after the page is instantiated. A private mapping has a region map
* associated with the original mmap which is attached to all VMAs which
* reference it, this region map represents those offsets which have consumed
* reservation ie. where pages have been instantiated.
*/
static unsigned long get_vma_private_data(struct vm_area_struct *vma)
{
return (unsigned long)vma->vm_private_data;
}
static void set_vma_private_data(struct vm_area_struct *vma,
unsigned long value)
{
vma->vm_private_data = (void *)value;
}
struct resv_map *resv_map_alloc(void)
{
struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL);
if (!resv_map || !rg) {
kfree(resv_map);
kfree(rg);
return NULL;
}
kref_init(&resv_map->refs);
spin_lock_init(&resv_map->lock);
INIT_LIST_HEAD(&resv_map->regions);
resv_map->adds_in_progress = 0;
INIT_LIST_HEAD(&resv_map->region_cache);
list_add(&rg->link, &resv_map->region_cache);
resv_map->region_cache_count = 1;
return resv_map;
}
void resv_map_release(struct kref *ref)
{
struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
struct list_head *head = &resv_map->region_cache;
struct file_region *rg, *trg;
/* Clear out any active regions before we release the map. */
region_del(resv_map, 0, LONG_MAX);
/* ... and any entries left in the cache */
list_for_each_entry_safe(rg, trg, head, link) {
list_del(&rg->link);
kfree(rg);
}
VM_BUG_ON(resv_map->adds_in_progress);
kfree(resv_map);
}
static inline struct resv_map *inode_resv_map(struct inode *inode)
{
return inode->i_mapping->private_data;
}
static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
{
VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
if (vma->vm_flags & VM_MAYSHARE) {
struct address_space *mapping = vma->vm_file->f_mapping;
struct inode *inode = mapping->host;
return inode_resv_map(inode);
} else {
return (struct resv_map *)(get_vma_private_data(vma) &
~HPAGE_RESV_MASK);
}
}
static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
{
VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
set_vma_private_data(vma, (get_vma_private_data(vma) &
HPAGE_RESV_MASK) | (unsigned long)map);
}
static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
{
VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
set_vma_private_data(vma, get_vma_private_data(vma) | flags);
}
static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
{
VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
return (get_vma_private_data(vma) & flag) != 0;
}
/* Reset counters to 0 and clear all HPAGE_RESV_* flags */
void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
if (!(vma->vm_flags & VM_MAYSHARE))
vma->vm_private_data = (void *)0;
}
/* Returns true if the VMA has associated reserve pages */
static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
{
if (vma->vm_flags & VM_NORESERVE) {
/*
* This address is already reserved by other process(chg == 0),
* so, we should decrement reserved count. Without decrementing,
* reserve count remains after releasing inode, because this
* allocated page will go into page cache and is regarded as
* coming from reserved pool in releasing step. Currently, we
* don't have any other solution to deal with this situation
* properly, so add work-around here.
*/
if (vma->vm_flags & VM_MAYSHARE && chg == 0)
return true;
else
return false;
}
/* Shared mappings always use reserves */
if (vma->vm_flags & VM_MAYSHARE) {
/*
* We know VM_NORESERVE is not set. Therefore, there SHOULD
* be a region map for all pages. The only situation where
* there is no region map is if a hole was punched via
		 * fallocate. In this case, there really are no reserves to
* use. This situation is indicated if chg != 0.
*/
if (chg)
return false;
else
return true;
}
/*
* Only the process that called mmap() has reserves for
* private mappings.
*/
if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
/*
* Like the shared case above, a hole punch or truncate
* could have been performed on the private mapping.
* Examine the value of chg to determine if reserves
* actually exist or were previously consumed.
* Very Subtle - The value of chg comes from a previous
* call to vma_needs_reserves(). The reserve map for
* private mappings has different (opposite) semantics
* than that of shared mappings. vma_needs_reserves()
* has already taken this difference in semantics into
* account. Therefore, the meaning of chg is the same
* as in the shared case above. Code could easily be
* combined, but keeping it separate draws attention to
* subtle differences.
*/
if (chg)
return false;
else
return true;
}
return false;
}
static void enqueue_huge_page(struct hstate *h, struct page *page)
{
int nid = page_to_nid(page);
list_move(&page->lru, &h->hugepage_freelists[nid]);
h->free_huge_pages++;
h->free_huge_pages_node[nid]++;
}
static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
{
struct page *page;
list_for_each_entry(page, &h->hugepage_freelists[nid], lru)
if (!PageHWPoison(page))
break;
/*
* if 'non-isolated free hugepage' not found on the list,
* the allocation fails.
*/
if (&h->hugepage_freelists[nid] == &page->lru)
return NULL;
list_move(&page->lru, &h->hugepage_activelist);
set_page_refcounted(page);
h->free_huge_pages--;
h->free_huge_pages_node[nid]--;
return page;
}
static struct page *dequeue_huge_page_nodemask(struct hstate *h, gfp_t gfp_mask, int nid,
nodemask_t *nmask)
{
unsigned int cpuset_mems_cookie;
struct zonelist *zonelist;
struct zone *zone;
struct zoneref *z;
int node = -1;
zonelist = node_zonelist(nid, gfp_mask);
retry_cpuset:
cpuset_mems_cookie = read_mems_allowed_begin();
for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) {
struct page *page;
if (!cpuset_zone_allowed(zone, gfp_mask))
continue;
/*
* no need to ask again on the same node. Pool is node rather than
* zone aware
*/
if (zone_to_nid(zone) == node)
continue;
node = zone_to_nid(zone);
page = dequeue_huge_page_node_exact(h, node);
if (page)
return page;
}
if (unlikely(read_mems_allowed_retry(cpuset_mems_cookie)))
goto retry_cpuset;
return NULL;
}
/* Movability of hugepages depends on migration support. */
static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
if (hugepage_migration_supported(h))
return GFP_HIGHUSER_MOVABLE;
else
return GFP_HIGHUSER;
}
static struct page *dequeue_huge_page_vma(struct hstate *h,
struct vm_area_struct *vma,
unsigned long address, int avoid_reserve,
long chg)
{
struct page *page;
struct mempolicy *mpol;
gfp_t gfp_mask;
nodemask_t *nodemask;
int nid;
/*
* A child process with MAP_PRIVATE mappings created by their parent
* have no page reserves. This check ensures that reservations are
* not "stolen". The child may still get SIGKILLed
*/
if (!vma_has_reserves(vma, chg) &&
h->free_huge_pages - h->resv_huge_pages == 0)
goto err;
/* If reserves cannot be used, ensure enough pages are in the pool */
if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
goto err;
gfp_mask = htlb_alloc_mask(h);
nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);
if (page && !avoid_reserve && vma_has_reserves(vma, chg)) {
SetPagePrivate(page);
h->resv_huge_pages--;
}
mpol_cond_put(mpol);
return page;
err:
return NULL;
}
/*
* common helper functions for hstate_next_node_to_{alloc|free}.
* We may have allocated or freed a huge page based on a different
* nodes_allowed previously, so h->next_node_to_{alloc|free} might
* be outside of *nodes_allowed. Ensure that we use an allowed
* node for alloc or free.
*/
static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
{
nid = next_node_in(nid, *nodes_allowed);
VM_BUG_ON(nid >= MAX_NUMNODES);
return nid;
}
static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
{
if (!node_isset(nid, *nodes_allowed))
nid = next_node_allowed(nid, nodes_allowed);
return nid;
}
/*
* returns the previously saved node ["this node"] from which to
* allocate a persistent huge page for the pool and advance the
* next node from which to allocate, handling wrap at end of node
* mask.
*/
static int hstate_next_node_to_alloc(struct hstate *h,
nodemask_t *nodes_allowed)
{
int nid;
VM_BUG_ON(!nodes_allowed);
nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);
return nid;
}
/*
* helper for free_pool_huge_page() - return the previously saved
* node ["this node"] from which to free a huge page. Advance the
* next node id whether or not we find a free huge page to free so
* that the next attempt to free addresses the next node.
*/
static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
{
int nid;
VM_BUG_ON(!nodes_allowed);
nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);
return nid;
}
#define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask) \
for (nr_nodes = nodes_weight(*mask); \
nr_nodes > 0 && \
((node = hstate_next_node_to_alloc(hs, mask)) || 1); \
nr_nodes--)
#define for_each_node_mask_to_free(hs, nr_nodes, node, mask) \
for (nr_nodes = nodes_weight(*mask); \
nr_nodes > 0 && \
((node = hstate_next_node_to_free(hs, mask)) || 1); \
nr_nodes--)
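/*
 * These iterators visit each node in the allowed mask at most once, starting
 * from the persistent next_nid_to_{alloc,free} cursor, so repeated pool grow
 * and shrink operations interleave their pages across NUMA nodes instead of
 * exhausting one node first.
 */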
#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
static void destroy_compound_gigantic_page(struct page *page,
unsigned int order)
{
int i;
int nr_pages = 1 << order;
struct page *p = page + 1;
atomic_set(compound_mapcount_ptr(page), 0);
for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
clear_compound_head(p);
set_page_refcounted(p);
}
set_compound_order(page, 0);
__ClearPageHead(page);
}
static void free_gigantic_page(struct page *page, unsigned int order)
{
free_contig_range(page_to_pfn(page), 1 << order);
}
static int __alloc_gigantic_page(unsigned long start_pfn,
unsigned long nr_pages, gfp_t gfp_mask)
{
unsigned long end_pfn = start_pfn + nr_pages;
return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
gfp_mask);
}
static bool pfn_range_valid_gigantic(struct zone *z,
unsigned long start_pfn, unsigned long nr_pages)
{
unsigned long i, end_pfn = start_pfn + nr_pages;
struct page *page;
for (i = start_pfn; i < end_pfn; i++) {
if (!pfn_valid(i))
return false;
page = pfn_to_page(i);
if (page_zone(page) != z)
return false;
if (PageReserved(page))
return false;
if (page_count(page) > 0)
return false;
if (PageHuge(page))
return false;
}
return true;
}
static bool zone_spans_last_pfn(const struct zone *zone,
unsigned long start_pfn, unsigned long nr_pages)
{
unsigned long last_pfn = start_pfn + nr_pages - 1;
return zone_spans_pfn(zone, last_pfn);
}
static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
int nid, nodemask_t *nodemask)
{
unsigned int order = huge_page_order(h);
unsigned long nr_pages = 1 << order;
unsigned long ret, pfn, flags;
struct zonelist *zonelist;
struct zone *zone;
struct zoneref *z;
zonelist = node_zonelist(nid, gfp_mask);
for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nodemask) {
spin_lock_irqsave(&zone->lock, flags);
pfn = ALIGN(zone->zone_start_pfn, nr_pages);
while (zone_spans_last_pfn(zone, pfn, nr_pages)) {
if (pfn_range_valid_gigantic(zone, pfn, nr_pages)) {
/*
* We release the zone lock here because
* alloc_contig_range() will also lock the zone
* at some point. If there's an allocation
* spinning on this lock, it may win the race
* and cause alloc_contig_range() to fail...
*/
spin_unlock_irqrestore(&zone->lock, flags);
ret = __alloc_gigantic_page(pfn, nr_pages, gfp_mask);
if (!ret)
return pfn_to_page(pfn);
spin_lock_irqsave(&zone->lock, flags);
}
pfn += nr_pages;
}
spin_unlock_irqrestore(&zone->lock, flags);
}
return NULL;
}
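/*
 * Sizing example (illustrative, assuming x86-64 with 4 KiB base pages):
 * a 1 GiB gigantic page has huge_page_order(h) == 18, so nr_pages ==
 * 1 << 18 == 262144. The scan above therefore starts at the first pfn in
 * the zone aligned to a 262144-page boundary and advances one
 * gigantic-page-sized stride per iteration.
 */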
static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
static void prep_compound_gigantic_page(struct page *page, unsigned int order);
#else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE */
static inline bool gigantic_page_supported(void) { return false; }
static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
int nid, nodemask_t *nodemask) { return NULL; }
static inline void free_gigantic_page(struct page *page, unsigned int order) { }
static inline void destroy_compound_gigantic_page(struct page *page,
unsigned int order) { }
#endif
static void update_and_free_page(struct hstate *h, struct page *page)
{
int i;
if (hstate_is_gigantic(h) && !gigantic_page_supported())
return;
h->nr_huge_pages--;
h->nr_huge_pages_node[page_to_nid(page)]--;
for (i = 0; i < pages_per_huge_page(h); i++) {
page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
1 << PG_referenced | 1 << PG_dirty |
1 << PG_active | 1 << PG_private |
1 << PG_writeback);
}
VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
set_compound_page_dtor(page, NULL_COMPOUND_DTOR);
set_page_refcounted(page);
if (hstate_is_gigantic(h)) {
destroy_compound_gigantic_page(page, huge_page_order(h));
free_gigantic_page(page, huge_page_order(h));
} else {
__free_pages(page, huge_page_order(h));
}
}
struct hstate *size_to_hstate(unsigned long size)
{
struct hstate *h;
for_each_hstate(h) {
if (huge_page_size(h) == size)
return h;
}
return NULL;
}
/*
 * Test to determine whether the hugepage is "active/in-use" (i.e. linked
 * to hstate->hugepage_activelist).
*
* This function can be called for tail pages, but never returns true for them.
*/
bool page_huge_active(struct page *page)
{
VM_BUG_ON_PAGE(!PageHuge(page), page);
return PageHead(page) && PagePrivate(&page[1]);
}
/* never called for tail page */
static void set_page_huge_active(struct page *page)
{
VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
SetPagePrivate(&page[1]);
}
static void clear_page_huge_active(struct page *page)
{
VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
ClearPagePrivate(&page[1]);
}
/*
* Internal hugetlb specific page flag. Do not use outside of the hugetlb
* code
*/
static inline bool PageHugeTemporary(struct page *page)
{
if (!PageHuge(page))
return false;
return (unsigned long)page[2].mapping == -1U;
}
static inline void SetPageHugeTemporary(struct page *page)
{
page[2].mapping = (void *)-1U;
}
static inline void ClearPageHugeTemporary(struct page *page)
{
page[2].mapping = NULL;
}
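/*
 * For illustration, the flag is encoded by overloading the ->mapping
 * field of the second tail page (page[2]), so it consumes no bit in
 * page->flags:
 *
 *	SetPageHugeTemporary(page);	->	page[2].mapping == (void *)-1U
 *	ClearPageHugeTemporary(page);	->	page[2].mapping == NULL
 *
 * PageHugeTemporary() checks PageHuge() first, so a stale ->mapping value
 * on a non-hugetlb page is never misread as the flag.
 */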
void free_huge_page(struct page *page)
{
/*
* Can't pass hstate in here because it is called from the
* compound page destructor.
*/
struct hstate *h = page_hstate(page);
int nid = page_to_nid(page);
struct hugepage_subpool *spool =
(struct hugepage_subpool *)page_private(page);
bool restore_reserve;
VM_BUG_ON_PAGE(page_count(page), page);
VM_BUG_ON_PAGE(page_mapcount(page), page);
set_page_private(page, 0);
page->mapping = NULL;
restore_reserve = PagePrivate(page);
ClearPagePrivate(page);
/*
	 * A return code of zero implies that the subpool will be under its
	 * minimum size if the reservation is not restored after the page is
	 * freed. Therefore, force the restore_reserve operation.
*/
if (hugepage_subpool_put_pages(spool, 1) == 0)
restore_reserve = true;
spin_lock(&hugetlb_lock);
clear_page_huge_active(page);
hugetlb_cgroup_uncharge_page(hstate_index(h),
pages_per_huge_page(h), page);
if (restore_reserve)
h->resv_huge_pages++;
if (PageHugeTemporary(page)) {
list_del(&page->lru);
ClearPageHugeTemporary(page);
update_and_free_page(h, page);
} else if (h->surplus_huge_pages_node[nid]) {
/* remove the page from active list */
list_del(&page->lru);
update_and_free_page(h, page);
h->surplus_huge_pages--;
h->surplus_huge_pages_node[nid]--;
} else {
arch_clear_hugepage_flags(page);
enqueue_huge_page(h, page);
}
spin_unlock(&hugetlb_lock);
}
static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
{
INIT_LIST_HEAD(&page->lru);
set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
spin_lock(&hugetlb_lock);
set_hugetlb_cgroup(page, NULL);
h->nr_huge_pages++;
h->nr_huge_pages_node[nid]++;
spin_unlock(&hugetlb_lock);
}
static void prep_compound_gigantic_page(struct page *page, unsigned int order)
{
int i;
int nr_pages = 1 << order;
struct page *p = page + 1;
/* we rely on prep_new_huge_page to set the destructor */
set_compound_order(page, order);
__ClearPageReserved(page);
__SetPageHead(page);
for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
/*
* For gigantic hugepages allocated through bootmem at
* boot, it's safer to be consistent with the not-gigantic
* hugepages and clear the PG_reserved bit from all tail pages
	 * too. Otherwise drivers using get_user_pages() to access tail
* pages may get the reference counting wrong if they see
* PG_reserved set on a tail page (despite the head page not
* having PG_reserved set). Enforcing this consistency between
* head and tail pages allows drivers to optimize away a check
	 * on the head page when they need to know if put_page() is needed
* after get_user_pages().
*/
__ClearPageReserved(p);
set_page_count(p, 0);
set_compound_head(p, page);
}
atomic_set(compound_mapcount_ptr(page), -1);
}
/*
* PageHuge() only returns true for hugetlbfs pages, but not for normal or
* transparent huge pages. See the PageTransHuge() documentation for more
* details.
*/
int PageHuge(struct page *page)
{
if (!PageCompound(page))
return 0;
page = compound_head(page);
return page[1].compound_dtor == HUGETLB_PAGE_DTOR;
}
EXPORT_SYMBOL_GPL(PageHuge);
/*
* PageHeadHuge() only returns true for hugetlbfs head page, but not for
* normal or transparent huge pages.
*/
int PageHeadHuge(struct page *page_head)
{
if (!PageHead(page_head))
return 0;
return get_compound_page_dtor(page_head) == free_huge_page;
}
pgoff_t __basepage_index(struct page *page)
{
struct page *page_head = compound_head(page);
pgoff_t index = page_index(page_head);
unsigned long compound_idx;
if (!PageHuge(page_head))
return page_index(page);
if (compound_order(page_head) >= MAX_ORDER)
compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
else
compound_idx = page - page_head;
return (index << compound_order(page_head)) + compound_idx;
}
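/*
 * Worked example (illustrative, assuming a 2 MiB hugepage on a 4 KiB
 * PAGE_SIZE system): compound_order(page_head) == 9, so for a tail page
 * five base pages into the hugepage,
 *
 *	compound_idx = page - page_head = 5;
 *	return (index << 9) + 5;
 *
 * i.e. the hugepage's index scaled to base-page units plus the offset of
 * the tail page within the hugepage.
 */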
static struct page *alloc_buddy_huge_page(struct hstate *h,
gfp_t gfp_mask, int nid, nodemask_t *nmask)
{
int order = huge_page_order(h);
struct page *page;
gfp_mask |= __GFP_COMP|__GFP_RETRY_MAYFAIL|__GFP_NOWARN;
if (nid == NUMA_NO_NODE)
nid = numa_mem_id();
page = __alloc_pages_nodemask(gfp_mask, order, nid, nmask);
if (page)
__count_vm_event(HTLB_BUDDY_PGALLOC);
else
__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
return page;
}
/*
* Common helper to allocate a fresh hugetlb page. All specific allocators
* should use this function to get new hugetlb pages
*/
static struct page *alloc_fresh_huge_page(struct hstate *h,
gfp_t gfp_mask, int nid, nodemask_t *nmask)
{
struct page *page;
if (hstate_is_gigantic(h))
page = alloc_gigantic_page(h, gfp_mask, nid, nmask);
else
page = alloc_buddy_huge_page(h, gfp_mask,
nid, nmask);
if (!page)
return NULL;
if (hstate_is_gigantic(h))
prep_compound_gigantic_page(page, huge_page_order(h));
prep_new_huge_page(h, page, page_to_nid(page));
return page;
}
/*
 * Allocates a fresh page to the hugetlb allocator pool in a
 * node-interleaved manner.
*/
static int alloc_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
{
	struct page *page = NULL; /* stays NULL if nodes_allowed is empty */
int nr_nodes, node;
gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
page = alloc_fresh_huge_page(h, gfp_mask, node, nodes_allowed);
if (page)
break;
}
if (!page)
return 0;
put_page(page); /* free it into the hugepage allocator */
return 1;
}
/*
 * Free a huge page from the pool, taking it from the next node to free.
 * Attempt to keep persistent huge pages more or less balanced over the
 * allowed nodes.
* Called with hugetlb_lock locked.
*/
static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
bool acct_surplus)
{
int nr_nodes, node;
int ret = 0;
for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
/*
* If we're returning unused surplus pages, only examine
* nodes with surplus pages.
*/
if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
!list_empty(&h->hugepage_freelists[node])) {
struct page *page =
list_entry(h->hugepage_freelists[node].next,
struct page, lru);
list_del(&page->lru);
h->free_huge_pages--;
h->free_huge_pages_node[node]--;
if (acct_surplus) {
h->surplus_huge_pages--;
h->surplus_huge_pages_node[node]--;
}
update_and_free_page(h, page);
ret = 1;
break;
}
}
return ret;
}
/*
* Dissolve a given free hugepage into free buddy pages. This function does
* nothing for in-use (including surplus) hugepages. Returns -EBUSY if the
 * dissolution fails because the given page is not a free hugepage, or because
* free hugepages are fully reserved.
*/
int dissolve_free_huge_page(struct page *page)
{
int rc = -EBUSY;
spin_lock(&hugetlb_lock);
if (PageHuge(page) && !page_count(page)) {
struct page *head = compound_head(page);
struct hstate *h = page_hstate(head);
int nid = page_to_nid(head);
if (h->free_huge_pages - h->resv_huge_pages == 0)
goto out;
/*
* Move PageHWPoison flag from head page to the raw error page,
* which makes any subpages rather than the error page reusable.
*/
if (PageHWPoison(head) && page != head) {
SetPageHWPoison(page);
ClearPageHWPoison(head);
}
list_del(&head->lru);
h->free_huge_pages--;
h->free_huge_pages_node[nid]--;
h->max_huge_pages--;
update_and_free_page(h, head);
rc = 0;
}
out:
spin_unlock(&hugetlb_lock);
return rc;
}
/*
* Dissolve free hugepages in a given pfn range. Used by memory hotplug to
* make specified memory blocks removable from the system.
* Note that this will dissolve a free gigantic hugepage completely, if any
* part of it lies within the given range.
* Also note that if dissolve_free_huge_page() returns with an error, all
* free hugepages that were dissolved before that error are lost.
*/
int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
{
unsigned long pfn;
struct page *page;
int rc = 0;
if (!hugepages_supported())
return rc;
for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order) {
page = pfn_to_page(pfn);
if (PageHuge(page) && !page_count(page)) {
rc = dissolve_free_huge_page(page);
if (rc)
break;
}
}
return rc;
}
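/*
 * Stride example (illustrative): stepping by 1 << minimum_order scans at
 * the granularity of the smallest configured hugepage size, e.g. 512 pfns
 * per step for 2 MiB pages on a 4 KiB PAGE_SIZE system. Because hugepages
 * are naturally aligned to their own size, every free hugepage in the
 * range is still visited at least once.
 */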
/*
* Allocates a fresh surplus page from the page allocator.
*/
static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,
int nid, nodemask_t *nmask)
{
struct page *page = NULL;
if (hstate_is_gigantic(h))
return NULL;
spin_lock(&hugetlb_lock);
if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages)
goto out_unlock;
spin_unlock(&hugetlb_lock);
page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask);
if (!page)
return NULL;
spin_lock(&hugetlb_lock);
/*
* We could have raced with the pool size change.
* Double check that and simply deallocate the new page
	 * if we would end up overcommitting the surpluses. Abuse
	 * the temporary page to work around the nasty free_huge_page
	 * code flow.
*/
if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
SetPageHugeTemporary(page);
put_page(page);
page = NULL;
} else {
h->surplus_huge_pages++;
h->surplus_huge_pages_node[page_to_nid(page)]++;
}
out_unlock:
spin_unlock(&hugetlb_lock);
return page;
}
static struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
int nid, nodemask_t *nmask)
{
struct page *page;
if (hstate_is_gigantic(h))
return NULL;
page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask);
if (!page)
return NULL;
/*
* We do not account these pages as surplus because they are only
* temporary and will be released properly on the last reference
*/
SetPageHugeTemporary(page);
return page;
}
/*
* Use the VMA's mpolicy to allocate a huge page from the buddy.
*/
static
struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h,
struct vm_area_struct *vma, unsigned long addr)
{
struct page *page;
struct mempolicy *mpol;
gfp_t gfp_mask = htlb_alloc_mask(h);
int nid;
nodemask_t *nodemask;
nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
page = alloc_surplus_huge_page(h, gfp_mask, nid, nodemask);
mpol_cond_put(mpol);
return page;
}
/* page migration callback function */
struct page *alloc_huge_page_node(struct hstate *h, int nid)
{
gfp_t gfp_mask = htlb_alloc_mask(h);
struct page *page = NULL;
if (nid != NUMA_NO_NODE)
gfp_mask |= __GFP_THISNODE;
spin_lock(&hugetlb_lock);
if (h->free_huge_pages - h->resv_huge_pages > 0)
page = dequeue_huge_page_nodemask(h, gfp_mask, nid, NULL);
spin_unlock(&hugetlb_lock);
if (!page)
page = alloc_migrate_huge_page(h, gfp_mask, nid, NULL);
return page;
}
/* page migration callback function */
struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
nodemask_t *nmask)
{
gfp_t gfp_mask = htlb_alloc_mask(h);
spin_lock(&hugetlb_lock);
if (h->free_huge_pages - h->resv_huge_pages > 0) {
struct page *page;
page = dequeue_huge_page_nodemask(h, gfp_mask, preferred_nid, nmask);
if (page) {
spin_unlock(&hugetlb_lock);
return page;
}
}
spin_unlock(&hugetlb_lock);
return alloc_migrate_huge_page(h, gfp_mask, preferred_nid, nmask);
}
/* mempolicy aware migration callback */
struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
unsigned long address)
{
struct mempolicy *mpol;
nodemask_t *nodemask;
struct page *page;
gfp_t gfp_mask;
int node;
gfp_mask = htlb_alloc_mask(h);
node = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
page = alloc_huge_page_nodemask(h, node, nodemask);
mpol_cond_put(mpol);
return page;
}
/*
* Increase the hugetlb pool such that it can accommodate a reservation
* of size 'delta'.
*/
static int gather_surplus_pages(struct hstate *h, int delta)
{
struct list_head surplus_list;
struct page *page, *tmp;
int ret, i;
int needed, allocated;
bool alloc_ok = true;
needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
if (needed <= 0) {
h->resv_huge_pages += delta;
return 0;
}
allocated = 0;
INIT_LIST_HEAD(&surplus_list);
ret = -ENOMEM;
retry:
spin_unlock(&hugetlb_lock);
for (i = 0; i < needed; i++) {
page = alloc_surplus_huge_page(h, htlb_alloc_mask(h),
NUMA_NO_NODE, NULL);
if (!page) {
alloc_ok = false;
break;
}
list_add(&page->lru, &surplus_list);
cond_resched();
}
allocated += i;
/*
* After retaking hugetlb_lock, we need to recalculate 'needed'
* because either resv_huge_pages or free_huge_pages may have changed.
*/
spin_lock(&hugetlb_lock);
needed = (h->resv_huge_pages + delta) -
(h->free_huge_pages + allocated);
if (needed > 0) {
if (alloc_ok)
goto retry;
/*
* We were not able to allocate enough pages to
* satisfy the entire reservation so we free what
* we've allocated so far.
*/
goto free;
}
/*
* The surplus_list now contains _at_least_ the number of extra pages
* needed to accommodate the reservation. Add the appropriate number
* of pages to the hugetlb pool and free the extras back to the buddy
* allocator. Commit the entire reservation here to prevent another
* process from stealing the pages as they are added to the pool but
* before they are reserved.
*/
needed += allocated;
h->resv_huge_pages += delta;
ret = 0;
/* Free the needed pages to the hugetlb pool */
list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
if ((--needed) < 0)
break;
/*
* This page is now managed by the hugetlb allocator and has
* no users -- drop the buddy allocator's reference.
*/
put_page_testzero(page);
VM_BUG_ON_PAGE(page_count(page), page);
enqueue_huge_page(h, page);
}
free:
spin_unlock(&hugetlb_lock);
/* Free unnecessary surplus pages to the buddy allocator */
list_for_each_entry_safe(page, tmp, &surplus_list, lru)
put_page(page);
spin_lock(&hugetlb_lock);
return ret;
}
/*
* This routine has two main purposes:
* 1) Decrement the reservation count (resv_huge_pages) by the value passed
* in unused_resv_pages. This corresponds to the prior adjustments made
* to the associated reservation map.
* 2) Free any unused surplus pages that may have been allocated to satisfy
* the reservation. As many as unused_resv_pages may be freed.
*
* Called with hugetlb_lock held. However, the lock could be dropped (and
* reacquired) during calls to cond_resched_lock. Whenever dropping the lock,
* we must make sure nobody else can claim pages we are in the process of
 * freeing. Do this by ensuring resv_huge_pages is always greater than the
* number of huge pages we plan to free when dropping the lock.
*/
static void return_unused_surplus_pages(struct hstate *h,
unsigned long unused_resv_pages)
{
unsigned long nr_pages;
/* Cannot return gigantic pages currently */
if (hstate_is_gigantic(h))
goto out;
/*
* Part (or even all) of the reservation could have been backed
* by pre-allocated pages. Only free surplus pages.
*/
nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
/*
* We want to release as many surplus pages as possible, spread
* evenly across all nodes with memory. Iterate across these nodes
* until we can no longer free unreserved surplus pages. This occurs
* when the nodes with surplus pages have no free pages.
 * free_pool_huge_page() will balance the freed pages across the
* on-line nodes with memory and will handle the hstate accounting.
*
* Note that we decrement resv_huge_pages as we free the pages. If
* we drop the lock, resv_huge_pages will still be sufficiently large
* to cover subsequent pages we may free.
*/
while (nr_pages--) {
h->resv_huge_pages--;
unused_resv_pages--;
if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
goto out;
cond_resched_lock(&hugetlb_lock);
}
out:
/* Fully uncommit the reservation */
h->resv_huge_pages -= unused_resv_pages;
}
/*
* vma_needs_reservation, vma_commit_reservation and vma_end_reservation
* are used by the huge page allocation routines to manage reservations.
*
* vma_needs_reservation is called to determine if the huge page at addr
* within the vma has an associated reservation. If a reservation is
* needed, the value 1 is returned. The caller is then responsible for
* managing the global reservation and subpool usage counts. After
* the huge page has been allocated, vma_commit_reservation is called
* to add the page to the reservation map. If the page allocation fails,
* the reservation must be ended instead of committed. vma_end_reservation
* is called in such cases.
*
* In the normal case, vma_commit_reservation returns the same value
* as the preceding vma_needs_reservation call. The only time this
* is not the case is if a reserve map was changed between calls. It
* is the responsibility of the caller to notice the difference and
* take appropriate action.
*
* vma_add_reservation is used in error paths where a reservation must
* be restored when a newly allocated huge page must be freed. It is
* to be called after calling vma_needs_reservation to determine if a
* reservation exists.
*/
enum vma_resv_mode {
VMA_NEEDS_RESV,
VMA_COMMIT_RESV,
VMA_END_RESV,
VMA_ADD_RESV,
};
static long __vma_reservation_common(struct hstate *h,
struct vm_area_struct *vma, unsigned long addr,
enum vma_resv_mode mode)
{
struct resv_map *resv;
pgoff_t idx;
long ret;
resv = vma_resv_map(vma);
if (!resv)
return 1;
idx = vma_hugecache_offset(h, vma, addr);
switch (mode) {
case VMA_NEEDS_RESV:
ret = region_chg(resv, idx, idx + 1);
break;
case VMA_COMMIT_RESV:
ret = region_add(resv, idx, idx + 1);
break;
case VMA_END_RESV:
region_abort(resv, idx, idx + 1);
ret = 0;
break;
case VMA_ADD_RESV:
if (vma->vm_flags & VM_MAYSHARE)
ret = region_add(resv, idx, idx + 1);
else {
region_abort(resv, idx, idx + 1);
ret = region_del(resv, idx, idx + 1);
}
break;
default:
BUG();
}
if (vma->vm_flags & VM_MAYSHARE)
return ret;
else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && ret >= 0) {
/*
* In most cases, reserves always exist for private mappings.
		 * However, the file associated with the mapping could have
		 * been hole punched or truncated after reserves were
		 * consumed. A subsequent fault on such a range will not use
		 * reserves.
* Subtle - The reserve map for private mappings has the
* opposite meaning than that of shared mappings. If NO
* entry is in the reserve map, it means a reservation exists.
* If an entry exists in the reserve map, it means the
* reservation has already been consumed. As a result, the
* return value of this routine is the opposite of the
* value returned from reserve map manipulation routines above.
*/
if (ret)
return 0;
else
return 1;
}
else
return ret < 0 ? ret : 0;
}
static long vma_needs_reservation(struct hstate *h,
struct vm_area_struct *vma, unsigned long addr)
{
return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV);
}
static long vma_commit_reservation(struct hstate *h,
struct vm_area_struct *vma, unsigned long addr)
{
return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV);
}
static void vma_end_reservation(struct hstate *h,
struct vm_area_struct *vma, unsigned long addr)
{
(void)__vma_reservation_common(h, vma, addr, VMA_END_RESV);
}
static long vma_add_reservation(struct hstate *h,
struct vm_area_struct *vma, unsigned long addr)
{
return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV);
}
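/*
 * Typical call sequence (an illustrative sketch of the pattern used by
 * alloc_huge_page() below, not a new API):
 *
 *	chg = vma_needs_reservation(h, vma, addr);
 *	if (chg < 0)
 *		return ERR_PTR(-ENOMEM);
 *	page = ...allocate a huge page...;
 *	if (!page) {
 *		vma_end_reservation(h, vma, addr);
 *		return ERR_PTR(-ENOSPC);
 *	}
 *	(void)vma_commit_reservation(h, vma, addr);
 */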
/*
* This routine is called to restore a reservation on error paths. In the
* specific error paths, a huge page was allocated (via alloc_huge_page)
* and is about to be freed. If a reservation for the page existed,
* alloc_huge_page would have consumed the reservation and set PagePrivate
* in the newly allocated page. When the page is freed via free_huge_page,
* the global reservation count will be incremented if PagePrivate is set.
 * However, free_huge_page cannot adjust the reserve map. Adjust the
* reserve map here to be consistent with global reserve count adjustments
* to be made by free_huge_page.
*/
static void restore_reserve_on_error(struct hstate *h,
struct vm_area_struct *vma, unsigned long address,
struct page *page)
{
if (unlikely(PagePrivate(page))) {
long rc = vma_needs_reservation(h, vma, address);
if (unlikely(rc < 0)) {
/*
* Rare out of memory condition in reserve map
* manipulation. Clear PagePrivate so that
* global reserve count will not be incremented
* by free_huge_page. This will make it appear
* as though the reservation for this page was
* consumed. This may prevent the task from
* faulting in the page at a later time. This
* is better than inconsistent global huge page
* accounting of reserve counts.
*/
ClearPagePrivate(page);
} else if (rc) {
rc = vma_add_reservation(h, vma, address);
if (unlikely(rc < 0))
/*
* See above comment about rare out of
* memory condition.
*/
ClearPagePrivate(page);
} else
vma_end_reservation(h, vma, address);
}
}
struct page *alloc_huge_page(struct vm_area_struct *vma,
unsigned long addr, int avoid_reserve)
{
struct hugepage_subpool *spool = subpool_vma(vma);
struct hstate *h = hstate_vma(vma);
struct page *page;
long map_chg, map_commit;
long gbl_chg;
int ret, idx;
struct hugetlb_cgroup *h_cg;
idx = hstate_index(h);
/*
* Examine the region/reserve map to determine if the process
* has a reservation for the page to be allocated. A return
* code of zero indicates a reservation exists (no change).
*/
map_chg = gbl_chg = vma_needs_reservation(h, vma, addr);
if (map_chg < 0)
return ERR_PTR(-ENOMEM);
/*
* Processes that did not create the mapping will have no
* reserves as indicated by the region/reserve map. Check
* that the allocation will not exceed the subpool limit.
* Allocations for MAP_NORESERVE mappings also need to be
* checked against any subpool limit.
*/
if (map_chg || avoid_reserve) {
gbl_chg = hugepage_subpool_get_pages(spool, 1);
if (gbl_chg < 0) {
vma_end_reservation(h, vma, addr);
return ERR_PTR(-ENOSPC);
}
/*
* Even though there was no reservation in the region/reserve
* map, there could be reservations associated with the
* subpool that can be used. This would be indicated if the
* return value of hugepage_subpool_get_pages() is zero.
* However, if avoid_reserve is specified we still avoid even
* the subpool reservations.
*/
if (avoid_reserve)
gbl_chg = 1;
}
ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
if (ret)
goto out_subpool_put;
spin_lock(&hugetlb_lock);
/*
	 * gbl_chg is passed to indicate whether or not a page must be taken
* from the global free pool (global change). gbl_chg == 0 indicates
* a reservation exists for the allocation.
*/
page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg);
if (!page) {
spin_unlock(&hugetlb_lock);
page = alloc_buddy_huge_page_with_mpol(h, vma, addr);
if (!page)
goto out_uncharge_cgroup;
if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
SetPagePrivate(page);
h->resv_huge_pages--;
}
spin_lock(&hugetlb_lock);
list_move(&page->lru, &h->hugepage_activelist);
/* Fall through */
}
hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
spin_unlock(&hugetlb_lock);
set_page_private(page, (unsigned long)spool);
map_commit = vma_commit_reservation(h, vma, addr);
if (unlikely(map_chg > map_commit)) {
/*
* The page was added to the reservation map between
* vma_needs_reservation and vma_commit_reservation.
* This indicates a race with hugetlb_reserve_pages.
* Adjust for the subpool count incremented above AND
* in hugetlb_reserve_pages for the same page. Also,
* the reservation count added in hugetlb_reserve_pages
* no longer applies.
*/
long rsv_adjust;
rsv_adjust = hugepage_subpool_put_pages(spool, 1);
hugetlb_acct_memory(h, -rsv_adjust);
}
return page;
out_uncharge_cgroup:
hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
out_subpool_put:
if (map_chg || avoid_reserve)
hugepage_subpool_put_pages(spool, 1);
vma_end_reservation(h, vma, addr);
return ERR_PTR(-ENOSPC);
}
int alloc_bootmem_huge_page(struct hstate *h)
__attribute__ ((weak, alias("__alloc_bootmem_huge_page")));
int __alloc_bootmem_huge_page(struct hstate *h)
{
struct huge_bootmem_page *m;
int nr_nodes, node;
for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
void *addr;
addr = memblock_alloc_try_nid_raw(
huge_page_size(h), huge_page_size(h),
0, MEMBLOCK_ALLOC_ACCESSIBLE, node);
if (addr) {
/*
* Use the beginning of the huge page to store the
* huge_bootmem_page struct (until gather_bootmem
* puts them into the mem_map).
*/
m = addr;
goto found;
}
}
return 0;
found:
BUG_ON(!IS_ALIGNED(virt_to_phys(m), huge_page_size(h)));
/* Put them into a private list first because mem_map is not up yet */
INIT_LIST_HEAD(&m->list);
list_add(&m->list, &huge_boot_pages);
m->hstate = h;
return 1;
}
static void __init prep_compound_huge_page(struct page *page,
unsigned int order)
{
if (unlikely(order > (MAX_ORDER - 1)))
prep_compound_gigantic_page(page, order);
else
prep_compound_page(page, order);
}
/* Put bootmem huge pages into the standard lists after mem_map is up */
static void __init gather_bootmem_prealloc(void)
{
struct huge_bootmem_page *m;
list_for_each_entry(m, &huge_boot_pages, list) {
struct page *page = virt_to_page(m);
struct hstate *h = m->hstate;
WARN_ON(page_count(page) != 1);
prep_compound_huge_page(page, h->order);
WARN_ON(PageReserved(page));
prep_new_huge_page(h, page, page_to_nid(page));
put_page(page); /* free it into the hugepage allocator */
/*
* If we had gigantic hugepages allocated at boot time, we need
* to restore the 'stolen' pages to totalram_pages in order to
		 * fix confusing memory reports from free(1) and other
		 * side effects, like CommitLimit going negative.
*/
if (hstate_is_gigantic(h))
adjust_managed_page_count(page, 1 << h->order);
cond_resched();
}
}
static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
{
unsigned long i;
for (i = 0; i < h->max_huge_pages; ++i) {
if (hstate_is_gigantic(h)) {
if (!alloc_bootmem_huge_page(h))
break;
} else if (!alloc_pool_huge_page(h,
&node_states[N_MEMORY]))
break;
cond_resched();
}
if (i < h->max_huge_pages) {
char buf[32];
string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
pr_warn("HugeTLB: allocating %lu of page size %s failed. Only allocated %lu hugepages.\n",
h->max_huge_pages, buf, i);
h->max_huge_pages = i;
}
}
static void __init hugetlb_init_hstates(void)
{
struct hstate *h;
for_each_hstate(h) {
if (minimum_order > huge_page_order(h))
minimum_order = huge_page_order(h);
/* oversize hugepages were init'ed in early boot */
if (!hstate_is_gigantic(h))
hugetlb_hstate_alloc_pages(h);
}
VM_BUG_ON(minimum_order == UINT_MAX);
}
static void __init report_hugepages(void)
{
struct hstate *h;
for_each_hstate(h) {
char buf[32];
string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
pr_info("HugeTLB registered %s page size, pre-allocated %ld pages\n",
buf, h->free_huge_pages);
}
}
#ifdef CONFIG_HIGHMEM
static void try_to_free_low(struct hstate *h, unsigned long count,
nodemask_t *nodes_allowed)
{
int i;
if (hstate_is_gigantic(h))
return;
for_each_node_mask(i, *nodes_allowed) {
struct page *page, *next;
struct list_head *freel = &h->hugepage_freelists[i];
list_for_each_entry_safe(page, next, freel, lru) {
if (count >= h->nr_huge_pages)
return;
if (PageHighMem(page))
continue;
list_del(&page->lru);
update_and_free_page(h, page);
h->free_huge_pages--;
h->free_huge_pages_node[page_to_nid(page)]--;
}
}
}
#else
static inline void try_to_free_low(struct hstate *h, unsigned long count,
nodemask_t *nodes_allowed)
{
}
#endif
/*
* Increment or decrement surplus_huge_pages. Keep node-specific counters
* balanced by operating on them in a round-robin fashion.
* Returns 1 if an adjustment was made.
*/
static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
int delta)
{
int nr_nodes, node;
VM_BUG_ON(delta != -1 && delta != 1);
if (delta < 0) {
for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
if (h->surplus_huge_pages_node[node])
goto found;
}
} else {
for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
if (h->surplus_huge_pages_node[node] <
h->nr_huge_pages_node[node])
goto found;
}
}
return 0;
found:
h->surplus_huge_pages += delta;
h->surplus_huge_pages_node[node] += delta;
return 1;
}
#define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
nodemask_t *nodes_allowed)
{
unsigned long min_count, ret;
if (hstate_is_gigantic(h) && !gigantic_page_supported())
return h->max_huge_pages;
/*
* Increase the pool size
* First take pages out of surplus state. Then make up the
* remaining difference by allocating fresh huge pages.
*
* We might race with alloc_surplus_huge_page() here and be unable
* to convert a surplus huge page to a normal huge page. That is
* not critical, though, it just means the overall size of the
* pool might be one hugepage larger than it needs to be, but
* within all the constraints specified by the sysctls.
*/
spin_lock(&hugetlb_lock);
while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
if (!adjust_pool_surplus(h, nodes_allowed, -1))
break;
}
while (count > persistent_huge_pages(h)) {
/*
* If this allocation races such that we no longer need the
* page, free_huge_page will handle it by freeing the page
* and reducing the surplus.
*/
spin_unlock(&hugetlb_lock);
/* yield cpu to avoid soft lockup */
cond_resched();
ret = alloc_pool_huge_page(h, nodes_allowed);
spin_lock(&hugetlb_lock);
if (!ret)
goto out;
/* Bail for signals. Probably ctrl-c from user */
if (signal_pending(current))
goto out;
}
/*
* Decrease the pool size
* First return free pages to the buddy allocator (being careful
* to keep enough around to satisfy reservations). Then place
* pages into surplus state as needed so the pool will shrink
* to the desired size as pages become free.
*
* By placing pages into the surplus state independent of the
* overcommit value, we are allowing the surplus pool size to
* exceed overcommit. There are few sane options here. Since
* alloc_surplus_huge_page() is checking the global counter,
* though, we'll note that we're not allowed to exceed surplus
* and won't grow the pool anywhere else. Not until one of the
* sysctls are changed, or the surplus pages go out of use.
*/
min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
min_count = max(count, min_count);
try_to_free_low(h, min_count, nodes_allowed);
while (min_count < persistent_huge_pages(h)) {
if (!free_pool_huge_page(h, nodes_allowed, 0))
break;
cond_resched_lock(&hugetlb_lock);
}
while (count < persistent_huge_pages(h)) {
if (!adjust_pool_surplus(h, nodes_allowed, 1))
break;
}
out:
ret = persistent_huge_pages(h);
spin_unlock(&hugetlb_lock);
return ret;
}
#define HSTATE_ATTR_RO(_name) \
static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
#define HSTATE_ATTR(_name) \
static struct kobj_attribute _name##_attr = \
__ATTR(_name, 0644, _name##_show, _name##_store)
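/*
 * For illustration, HSTATE_ATTR(nr_hugepages) expands to
 *
 *	static struct kobj_attribute nr_hugepages_attr =
 *		__ATTR(nr_hugepages, 0644, nr_hugepages_show,
 *		       nr_hugepages_store);
 *
 * wiring the sysfs file to the _show/_store pair defined below.
 */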
static struct kobject *hugepages_kobj;
static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
{
int i;
for (i = 0; i < HUGE_MAX_HSTATE; i++)
if (hstate_kobjs[i] == kobj) {
if (nidp)
*nidp = NUMA_NO_NODE;
return &hstates[i];
}
return kobj_to_node_hstate(kobj, nidp);
}
static ssize_t nr_hugepages_show_common(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
struct hstate *h;
unsigned long nr_huge_pages;
int nid;
h = kobj_to_hstate(kobj, &nid);
if (nid == NUMA_NO_NODE)
nr_huge_pages = h->nr_huge_pages;
else
nr_huge_pages = h->nr_huge_pages_node[nid];
return sprintf(buf, "%lu\n", nr_huge_pages);
}
static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
struct hstate *h, int nid,
unsigned long count, size_t len)
{
int err;
NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
if (hstate_is_gigantic(h) && !gigantic_page_supported()) {
err = -EINVAL;
goto out;
}
if (nid == NUMA_NO_NODE) {
/*
* global hstate attribute
*/
if (!(obey_mempolicy &&
init_nodemask_of_mempolicy(nodes_allowed))) {
NODEMASK_FREE(nodes_allowed);
nodes_allowed = &node_states[N_MEMORY];
}
} else if (nodes_allowed) {
/*
* per node hstate attribute: adjust count to global,
* but restrict alloc/free to the specified node.
*/
count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
init_nodemask_of_node(nodes_allowed, nid);
} else
nodes_allowed = &node_states[N_MEMORY];
h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);
if (nodes_allowed != &node_states[N_MEMORY])
NODEMASK_FREE(nodes_allowed);
return len;
out:
NODEMASK_FREE(nodes_allowed);
return err;
}
static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
struct kobject *kobj, const char *buf,
size_t len)
{
struct hstate *h;
unsigned long count;
int nid;
int err;
err = kstrtoul(buf, 10, &count);
if (err)
return err;
h = kobj_to_hstate(kobj, &nid);
return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len);
}
static ssize_t nr_hugepages_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return nr_hugepages_show_common(kobj, attr, buf);
}
static ssize_t nr_hugepages_store(struct kobject *kobj,
struct kobj_attribute *attr, const char *buf, size_t len)
{
return nr_hugepages_store_common(false, kobj, buf, len);
}
HSTATE_ATTR(nr_hugepages);
#ifdef CONFIG_NUMA
/*
* hstate attribute for optionally mempolicy-based constraint on persistent
* huge page alloc/free.
*/
static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return nr_hugepages_show_common(kobj, attr, buf);
}
static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
struct kobj_attribute *attr, const char *buf, size_t len)
{
return nr_hugepages_store_common(true, kobj, buf, len);
}
HSTATE_ATTR(nr_hugepages_mempolicy);
#endif
static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
struct hstate *h = kobj_to_hstate(kobj, NULL);
return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
}
static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
struct kobj_attribute *attr, const char *buf, size_t count)
{
int err;
unsigned long input;
struct hstate *h = kobj_to_hstate(kobj, NULL);
if (hstate_is_gigantic(h))
return -EINVAL;
err = kstrtoul(buf, 10, &input);
if (err)
return err;
spin_lock(&hugetlb_lock);
h->nr_overcommit_huge_pages = input;
spin_unlock(&hugetlb_lock);
return count;
}
HSTATE_ATTR(nr_overcommit_hugepages);
static ssize_t free_hugepages_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
struct hstate *h;
unsigned long free_huge_pages;
int nid;
h = kobj_to_hstate(kobj, &nid);
if (nid == NUMA_NO_NODE)
free_huge_pages = h->free_huge_pages;
else
free_huge_pages = h->free_huge_pages_node[nid];
return sprintf(buf, "%lu\n", free_huge_pages);
}
HSTATE_ATTR_RO(free_hugepages);
static ssize_t resv_hugepages_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
struct hstate *h = kobj_to_hstate(kobj, NULL);
return sprintf(buf, "%lu\n", h->resv_huge_pages);
}
HSTATE_ATTR_RO(resv_hugepages);
static ssize_t surplus_hugepages_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
struct hstate *h;
unsigned long surplus_huge_pages;
int nid;
h = kobj_to_hstate(kobj, &nid);
if (nid == NUMA_NO_NODE)
surplus_huge_pages = h->surplus_huge_pages;
else
surplus_huge_pages = h->surplus_huge_pages_node[nid];
return sprintf(buf, "%lu\n", surplus_huge_pages);
}
HSTATE_ATTR_RO(surplus_hugepages);
static struct attribute *hstate_attrs[] = {
&nr_hugepages_attr.attr,
&nr_overcommit_hugepages_attr.attr,
&free_hugepages_attr.attr,
&resv_hugepages_attr.attr,
&surplus_hugepages_attr.attr,
#ifdef CONFIG_NUMA
&nr_hugepages_mempolicy_attr.attr,
#endif
NULL,
};
static const struct attribute_group hstate_attr_group = {
.attrs = hstate_attrs,
};
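/*
 * These attributes surface under sysfs as, e.g. for the 2 MiB hstate
 * (illustrative paths):
 *
 *	/sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
 *	/sys/kernel/mm/hugepages/hugepages-2048kB/free_hugepages
 *	/sys/kernel/mm/hugepages/hugepages-2048kB/surplus_hugepages
 */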
static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
struct kobject **hstate_kobjs,
const struct attribute_group *hstate_attr_group)
{
int retval;
int hi = hstate_index(h);
hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
if (!hstate_kobjs[hi])
return -ENOMEM;
retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
if (retval)
kobject_put(hstate_kobjs[hi]);
return retval;
}
static void __init hugetlb_sysfs_init(void)
{
struct hstate *h;
int err;
hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
if (!hugepages_kobj)
return;
for_each_hstate(h) {
err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
hstate_kobjs, &hstate_attr_group);
if (err)
pr_err("Hugetlb: Unable to add hstate %s", h->name);
}
}
#ifdef CONFIG_NUMA
/*
* node_hstate/s - associate per node hstate attributes, via their kobjects,
 * with node devices in node_devices[] using a parallel array. The array
 * index of a node device or node_hstate equals the node id.
* This is here to avoid any static dependency of the node device driver, in
* the base kernel, on the hugetlb module.
*/
struct node_hstate {
struct kobject *hugepages_kobj;
struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
};
static struct node_hstate node_hstates[MAX_NUMNODES];
/*
* A subset of global hstate attributes for node devices
*/
static struct attribute *per_node_hstate_attrs[] = {
&nr_hugepages_attr.attr,
&free_hugepages_attr.attr,
&surplus_hugepages_attr.attr,
NULL,
};
static const struct attribute_group per_node_hstate_attr_group = {
.attrs = per_node_hstate_attrs,
};
/*
* kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
* Returns node id via non-NULL nidp.
*/
static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
{
int nid;
for (nid = 0; nid < nr_node_ids; nid++) {
struct node_hstate *nhs = &node_hstates[nid];
int i;
for (i = 0; i < HUGE_MAX_HSTATE; i++)
if (nhs->hstate_kobjs[i] == kobj) {
if (nidp)
*nidp = nid;
return &hstates[i];
}
}
BUG();
return NULL;
}
/*
* Unregister hstate attributes from a single node device.
* No-op if no hstate attributes attached.
*/
static void hugetlb_unregister_node(struct node *node)
{
struct hstate *h;
struct node_hstate *nhs = &node_hstates[node->dev.id];
if (!nhs->hugepages_kobj)
return; /* no hstate attributes */
for_each_hstate(h) {
int idx = hstate_index(h);
if (nhs->hstate_kobjs[idx]) {
kobject_put(nhs->hstate_kobjs[idx]);
nhs->hstate_kobjs[idx] = NULL;
}
}
kobject_put(nhs->hugepages_kobj);
nhs->hugepages_kobj = NULL;
}
/*
* Register hstate attributes for a single node device.
* No-op if attributes already registered.
*/
static void hugetlb_register_node(struct node *node)
{
struct hstate *h;
struct node_hstate *nhs = &node_hstates[node->dev.id];
int err;
if (nhs->hugepages_kobj)
return; /* already allocated */
nhs->hugepages_kobj = kobject_create_and_add("hugepages",
&node->dev.kobj);
if (!nhs->hugepages_kobj)
return;
for_each_hstate(h) {
err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
nhs->hstate_kobjs,
&per_node_hstate_attr_group);
if (err) {
pr_err("Hugetlb: Unable to add hstate %s for node %d\n",
h->name, node->dev.id);
hugetlb_unregister_node(node);
break;
}
}
}
/*
* hugetlb init time: register hstate attributes for all registered node
* devices of nodes that have memory. All on-line nodes should have
* registered their associated device by this time.
*/
static void __init hugetlb_register_all_nodes(void)
{
int nid;
for_each_node_state(nid, N_MEMORY) {
struct node *node = node_devices[nid];
if (node->dev.id == nid)
hugetlb_register_node(node);
}
/*
* Let the node device driver know we're here so it can
* [un]register hstate attributes on node hotplug.
*/
register_hugetlbfs_with_node(hugetlb_register_node,
hugetlb_unregister_node);
}
#else /* !CONFIG_NUMA */
static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
{
BUG();
if (nidp)
*nidp = -1;
return NULL;
}
static void hugetlb_register_all_nodes(void) { }
#endif
static int __init hugetlb_init(void)
{
int i;
if (!hugepages_supported())
return 0;
if (!size_to_hstate(default_hstate_size)) {
if (default_hstate_size != 0) {
pr_err("HugeTLB: unsupported default_hugepagesz %lu. Reverting to %lu\n",
default_hstate_size, HPAGE_SIZE);
}
default_hstate_size = HPAGE_SIZE;
if (!size_to_hstate(default_hstate_size))
hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
}
default_hstate_idx = hstate_index(size_to_hstate(default_hstate_size));
if (default_hstate_max_huge_pages) {
if (!default_hstate.max_huge_pages)
default_hstate.max_huge_pages = default_hstate_max_huge_pages;
}
hugetlb_init_hstates();
gather_bootmem_prealloc();
report_hugepages();
hugetlb_sysfs_init();
hugetlb_register_all_nodes();
hugetlb_cgroup_file_init();
#ifdef CONFIG_SMP
num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
#else
num_fault_mutexes = 1;
#endif
hugetlb_fault_mutex_table =
kmalloc_array(num_fault_mutexes, sizeof(struct mutex),
GFP_KERNEL);
BUG_ON(!hugetlb_fault_mutex_table);
for (i = 0; i < num_fault_mutexes; i++)
mutex_init(&hugetlb_fault_mutex_table[i]);
return 0;
}
subsys_initcall(hugetlb_init);
/* Should be called on processing a hugepagesz=... option */
void __init hugetlb_bad_size(void)
{
parsed_valid_hugepagesz = false;
}
void __init hugetlb_add_hstate(unsigned int order)
{
struct hstate *h;
unsigned long i;
if (size_to_hstate(PAGE_SIZE << order)) {
pr_warn("hugepagesz= specified twice, ignoring\n");
return;
}
BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
BUG_ON(order == 0);
h = &hstates[hugetlb_max_hstate++];
h->order = order;
h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
h->nr_huge_pages = 0;
h->free_huge_pages = 0;
for (i = 0; i < MAX_NUMNODES; ++i)
INIT_LIST_HEAD(&h->hugepage_freelists[i]);
INIT_LIST_HEAD(&h->hugepage_activelist);
h->next_nid_to_alloc = first_memory_node;
h->next_nid_to_free = first_memory_node;
snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
huge_page_size(h)/1024);
parsed_hstate = h;
}
static int __init hugetlb_nrpages_setup(char *s)
{
unsigned long *mhp;
static unsigned long *last_mhp;
if (!parsed_valid_hugepagesz) {
pr_warn("hugepages = %s preceded by "
"an unsupported hugepagesz, ignoring\n", s);
parsed_valid_hugepagesz = true;
return 1;
}
/*
* !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter yet,
* so this hugepages= parameter goes to the "default hstate".
*/
else if (!hugetlb_max_hstate)
mhp = &default_hstate_max_huge_pages;
else
mhp = &parsed_hstate->max_huge_pages;
if (mhp == last_mhp) {
pr_warn("hugepages= specified twice without interleaving hugepagesz=, ignoring\n");
return 1;
}
if (sscanf(s, "%lu", mhp) <= 0)
*mhp = 0;
/*
* Global state is always initialized later in hugetlb_init.
* But we need to allocate >= MAX_ORDER hstates here early to still
* use the bootmem allocator.
*/
if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER)
hugetlb_hstate_alloc_pages(parsed_hstate);
last_mhp = mhp;
return 1;
}
__setup("hugepages=", hugetlb_nrpages_setup);
static int __init hugetlb_default_setup(char *s)
{
default_hstate_size = memparse(s, &s);
return 1;
}
__setup("default_hugepagesz=", hugetlb_default_setup);
static unsigned int cpuset_mems_nr(unsigned int *array)
{
int node;
unsigned int nr = 0;
for_each_node_mask(node, cpuset_current_mems_allowed)
nr += array[node];
return nr;
}
#ifdef CONFIG_SYSCTL
static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
struct ctl_table *table, int write,
void __user *buffer, size_t *length, loff_t *ppos)
{
struct hstate *h = &default_hstate;
unsigned long tmp = h->max_huge_pages;
int ret;
if (!hugepages_supported())
return -EOPNOTSUPP;
table->data = &tmp;
table->maxlen = sizeof(unsigned long);
ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
if (ret)
goto out;
if (write)
ret = __nr_hugepages_store_common(obey_mempolicy, h,
NUMA_NO_NODE, tmp, *length);
out:
return ret;
}
int hugetlb_sysctl_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *length, loff_t *ppos)
{
return hugetlb_sysctl_handler_common(false, table, write,
buffer, length, ppos);
}
#ifdef CONFIG_NUMA
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *length, loff_t *ppos)
{
return hugetlb_sysctl_handler_common(true, table, write,
buffer, length, ppos);
}
#endif /* CONFIG_NUMA */
int hugetlb_overcommit_handler(struct ctl_table *table, int write,
void __user *buffer,
size_t *length, loff_t *ppos)
{
struct hstate *h = &default_hstate;
unsigned long tmp;
int ret;
if (!hugepages_supported())
return -EOPNOTSUPP;
tmp = h->nr_overcommit_huge_pages;
if (write && hstate_is_gigantic(h))
return -EINVAL;
table->data = &tmp;
table->maxlen = sizeof(unsigned long);
ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
if (ret)
goto out;
if (write) {
spin_lock(&hugetlb_lock);
h->nr_overcommit_huge_pages = tmp;
spin_unlock(&hugetlb_lock);
}
out:
return ret;
}
#endif /* CONFIG_SYSCTL */
void hugetlb_report_meminfo(struct seq_file *m)
{
struct hstate *h;
unsigned long total = 0;
if (!hugepages_supported())
return;
for_each_hstate(h) {
unsigned long count = h->nr_huge_pages;
total += (PAGE_SIZE << huge_page_order(h)) * count;
if (h == &default_hstate)
seq_printf(m,
"HugePages_Total: %5lu\n"
"HugePages_Free: %5lu\n"
"HugePages_Rsvd: %5lu\n"
"HugePages_Surp: %5lu\n"
"Hugepagesize: %8lu kB\n",
count,
h->free_huge_pages,
h->resv_huge_pages,
h->surplus_huge_pages,
(PAGE_SIZE << huge_page_order(h)) / 1024);
}
seq_printf(m, "Hugetlb: %8lu kB\n", total / 1024);
}
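/*
 * Sample /proc/meminfo output for the fields printed above (illustrative
 * values only):
 *
 *	HugePages_Total:      64
 *	HugePages_Free:       60
 *	HugePages_Rsvd:        4
 *	HugePages_Surp:        0
 *	Hugepagesize:       2048 kB
 *	Hugetlb:          131072 kB
 */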
int hugetlb_report_node_meminfo(int nid, char *buf)
{
struct hstate *h = &default_hstate;
if (!hugepages_supported())
return 0;
return sprintf(buf,
"Node %d HugePages_Total: %5u\n"
"Node %d HugePages_Free: %5u\n"
"Node %d HugePages_Surp: %5u\n",
nid, h->nr_huge_pages_node[nid],
nid, h->free_huge_pages_node[nid],
nid, h->surplus_huge_pages_node[nid]);
}
void hugetlb_show_meminfo(void)
{
struct hstate *h;
int nid;
if (!hugepages_supported())
return;
for_each_node_state(nid, N_MEMORY)
for_each_hstate(h)
pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
nid,
h->nr_huge_pages_node[nid],
h->free_huge_pages_node[nid],
h->surplus_huge_pages_node[nid],
1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
}
void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
{
seq_printf(m, "HugetlbPages:\t%8lu kB\n",
atomic_long_read(&mm->hugetlb_usage) << (PAGE_SHIFT - 10));
}
/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
unsigned long hugetlb_total_pages(void)
{
struct hstate *h;
unsigned long nr_total_pages = 0;
for_each_hstate(h)
nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
return nr_total_pages;
}
static int hugetlb_acct_memory(struct hstate *h, long delta)
{
int ret = -ENOMEM;
spin_lock(&hugetlb_lock);
/*
* When cpuset is configured, it breaks the strict hugetlb page
* reservation as the accounting is done on a global variable. Such
* reservation is completely rubbish in the presence of cpuset because
* the reservation is not checked against page availability for the
	 * current cpuset. An application can still potentially be OOM-killed
	 * by the kernel for lack of free hugetlb pages in the cpuset that
	 * the task is in. Attempting to enforce strict accounting with
	 * cpuset is almost impossible (or too ugly) because cpusets are so
	 * fluid that tasks or memory nodes can be dynamically moved between
	 * cpusets.
*
* The change of semantics for shared hugetlb mapping with cpuset is
* undesirable. However, in order to preserve some of the semantics,
* we fall back to check against current free page availability as
* a best attempt and hopefully to minimize the impact of changing
* semantics that cpuset has.
*/
if (delta > 0) {
if (gather_surplus_pages(h, delta) < 0)
goto out;
if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
return_unused_surplus_pages(h, delta);
goto out;
}
}
ret = 0;
if (delta < 0)
return_unused_surplus_pages(h, (unsigned long) -delta);
out:
spin_unlock(&hugetlb_lock);
return ret;
}
static void hugetlb_vm_op_open(struct vm_area_struct *vma)
{
struct resv_map *resv = vma_resv_map(vma);
/*
	 * This new VMA should share its sibling's reservation map if present.
* The VMA will only ever have a valid reservation map pointer where
* it is being copied for another still existing VMA. As that VMA
* has a reference to the reservation map it cannot disappear until
* after this open call completes. It is therefore safe to take a
* new reference here without additional locking.
*/
if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
kref_get(&resv->refs);
}
static void hugetlb_vm_op_close(struct vm_area_struct *vma)
{
struct hstate *h = hstate_vma(vma);
struct resv_map *resv = vma_resv_map(vma);
struct hugepage_subpool *spool = subpool_vma(vma);
unsigned long reserve, start, end;
long gbl_reserve;
if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
return;
start = vma_hugecache_offset(h, vma, vma->vm_start);
end = vma_hugecache_offset(h, vma, vma->vm_end);
reserve = (end - start) - region_count(resv, start, end);
kref_put(&resv->refs, resv_map_release);
if (reserve) {
/*
* Decrement reserve counts. The global reserve count may be
* adjusted if the subpool has a minimum size.
*/
gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
hugetlb_acct_memory(h, -gbl_reserve);
}
}
static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
{
if (addr & ~(huge_page_mask(hstate_vma(vma))))
return -EINVAL;
return 0;
}
static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma)
{
struct hstate *hstate = hstate_vma(vma);
return 1UL << huge_page_shift(hstate);
}
/*
* We cannot handle pagefaults against hugetlb pages at all. They cause
* handle_mm_fault() to try to instantiate regular-sized pages in the
 * hugepage VMA. do_page_fault() is supposed to trap this, so BUG if we get
* this far.
*/
static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf)
{
BUG();
return 0;
}
/*
* When a new function is introduced to vm_operations_struct and added
* to hugetlb_vm_ops, please consider adding the function to shm_vm_ops.
 * This is because under the System V memory model, mappings created via
 * shmget/shmat with "huge page" specified are backed by hugetlbfs files,
 * but their original vm_ops are overwritten with shm_vm_ops.
*/
const struct vm_operations_struct hugetlb_vm_ops = {
.fault = hugetlb_vm_op_fault,
.open = hugetlb_vm_op_open,
.close = hugetlb_vm_op_close,
.split = hugetlb_vm_op_split,
.pagesize = hugetlb_vm_op_pagesize,
};
static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
int writable)
{
pte_t entry;
if (writable) {
entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
vma->vm_page_prot)));
} else {
entry = huge_pte_wrprotect(mk_huge_pte(page,
vma->vm_page_prot));
}
entry = pte_mkyoung(entry);
entry = pte_mkhuge(entry);
entry = arch_make_huge_pte(entry, vma, page, writable);
return entry;
}
static void set_huge_ptep_writable(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep)
{
pte_t entry;
entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
update_mmu_cache(vma, address, ptep);
}
bool is_hugetlb_entry_migration(pte_t pte)
{
swp_entry_t swp;
if (huge_pte_none(pte) || pte_present(pte))
return false;
swp = pte_to_swp_entry(pte);
if (non_swap_entry(swp) && is_migration_entry(swp))
return true;
else
return false;
}
static int is_hugetlb_entry_hwpoisoned(pte_t pte)
{
swp_entry_t swp;
if (huge_pte_none(pte) || pte_present(pte))
return 0;
swp = pte_to_swp_entry(pte);
if (non_swap_entry(swp) && is_hwpoison_entry(swp))
return 1;
else
return 0;
}
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
struct vm_area_struct *vma)
{
pte_t *src_pte, *dst_pte, entry, dst_entry;
struct page *ptepage;
unsigned long addr;
int cow;
struct hstate *h = hstate_vma(vma);
unsigned long sz = huge_page_size(h);
struct mmu_notifier_range range;
int ret = 0;
cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
if (cow) {
mmu_notifier_range_init(&range, src, vma->vm_start,
vma->vm_end);
mmu_notifier_invalidate_range_start(&range);
}
for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
spinlock_t *src_ptl, *dst_ptl;
src_pte = huge_pte_offset(src, addr, sz);
if (!src_pte)
continue;
dst_pte = huge_pte_alloc(dst, addr, sz);
if (!dst_pte) {
ret = -ENOMEM;
break;
}
/*
* If the pagetables are shared don't copy or take references.
* dst_pte == src_pte is the common case of src/dest sharing.
*
* However, src could have 'unshared' and dst shares with
* another vma. If dst_pte !none, this implies sharing.
* Check here before taking page table lock, and once again
* after taking the lock below.
*/
dst_entry = huge_ptep_get(dst_pte);
if ((dst_pte == src_pte) || !huge_pte_none(dst_entry))
continue;
dst_ptl = huge_pte_lock(h, dst, dst_pte);
src_ptl = huge_pte_lockptr(h, src, src_pte);
spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
entry = huge_ptep_get(src_pte);
dst_entry = huge_ptep_get(dst_pte);
if (huge_pte_none(entry) || !huge_pte_none(dst_entry)) {
/*
* Skip if src entry none. Also, skip in the
* unlikely case dst entry !none as this implies
* sharing with another vma.
*/
;
} else if (unlikely(is_hugetlb_entry_migration(entry) ||
is_hugetlb_entry_hwpoisoned(entry))) {
swp_entry_t swp_entry = pte_to_swp_entry(entry);
if (is_write_migration_entry(swp_entry) && cow) {
/*
* COW mappings require pages in both
* parent and child to be set to read.
*/
make_migration_entry_read(&swp_entry);
entry = swp_entry_to_pte(swp_entry);
set_huge_swap_pte_at(src, addr, src_pte,
entry, sz);
}
set_huge_swap_pte_at(dst, addr, dst_pte, entry, sz);
} else {
if (cow) {
/*
* No need to notify as we are downgrading page
* table protection not changing it to point
* to a new page.
*
* See Documentation/vm/mmu_notifier.rst
*/
huge_ptep_set_wrprotect(src, addr, src_pte);
}
entry = huge_ptep_get(src_pte);
ptepage = pte_page(entry);
get_page(ptepage);
page_dup_rmap(ptepage, true);
set_huge_pte_at(dst, addr, dst_pte, entry);
hugetlb_count_add(pages_per_huge_page(h), dst);
}
spin_unlock(src_ptl);
spin_unlock(dst_ptl);
}
if (cow)
mmu_notifier_invalidate_range_end(&range);
return ret;
}
void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
unsigned long start, unsigned long end,
struct page *ref_page)
{
struct mm_struct *mm = vma->vm_mm;
unsigned long address;
pte_t *ptep;
pte_t pte;
spinlock_t *ptl;
struct page *page;
struct hstate *h = hstate_vma(vma);
unsigned long sz = huge_page_size(h);
struct mmu_notifier_range range;
WARN_ON(!is_vm_hugetlb_page(vma));
BUG_ON(start & ~huge_page_mask(h));
BUG_ON(end & ~huge_page_mask(h));
/*
* This is a hugetlb vma, all the pte entries should point
	 * to a huge page.
*/
tlb_remove_check_page_size_change(tlb, sz);
tlb_start_vma(tlb, vma);
/*
* If sharing possible, alert mmu notifiers of worst case.
*/
mmu_notifier_range_init(&range, mm, start, end);
adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
mmu_notifier_invalidate_range_start(&range);
address = start;
for (; address < end; address += sz) {
ptep = huge_pte_offset(mm, address, sz);
if (!ptep)
continue;
ptl = huge_pte_lock(h, mm, ptep);
if (huge_pmd_unshare(mm, &address, ptep)) {
spin_unlock(ptl);
/*
* We just unmapped a page of PMDs by clearing a PUD.
* The caller's TLB flush range should cover this area.
*/
continue;
}
pte = huge_ptep_get(ptep);
if (huge_pte_none(pte)) {
spin_unlock(ptl);
continue;
}
/*
* Migrating hugepage or HWPoisoned hugepage is already
* unmapped and its refcount is dropped, so just clear pte here.
*/
if (unlikely(!pte_present(pte))) {
huge_pte_clear(mm, address, ptep, sz);
spin_unlock(ptl);
continue;
}
page = pte_page(pte);
/*
* If a reference page is supplied, it is because a specific
* page is being unmapped, not a range. Ensure the page we
* are about to unmap is the actual page of interest.
*/
if (ref_page) {
if (page != ref_page) {
spin_unlock(ptl);
continue;
}
/*
* Mark the VMA as having unmapped its page so that
* future faults in this VMA will fail rather than
* looking like data was lost
*/
set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
}
pte = huge_ptep_get_and_clear(mm, address, ptep);
tlb_remove_huge_tlb_entry(h, tlb, ptep, address);
if (huge_pte_dirty(pte))
set_page_dirty(page);
hugetlb_count_sub(pages_per_huge_page(h), mm);
page_remove_rmap(page, true);
spin_unlock(ptl);
tlb_remove_page_size(tlb, page, huge_page_size(h));
/*
* Bail out after unmapping reference page if supplied
*/
if (ref_page)
break;
}
mmu_notifier_invalidate_range_end(&range);
tlb_end_vma(tlb, vma);
}
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
struct vm_area_struct *vma, unsigned long start,
unsigned long end, struct page *ref_page)
{
__unmap_hugepage_range(tlb, vma, start, end, ref_page);
/*
* Clear this flag so that x86's huge_pmd_share page_table_shareable
* test will fail on a vma being torn down, and not grab a page table
* on its way out. We're lucky that the flag has such an appropriate
* name, and can in fact be safely cleared here. We could clear it
* before the __unmap_hugepage_range above, but all that's necessary
* is to clear it before releasing the i_mmap_rwsem. This works
* because in the context this is called, the VMA is about to be
* destroyed and the i_mmap_rwsem is held.
*/
vma->vm_flags &= ~VM_MAYSHARE;
}
void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end, struct page *ref_page)
{
struct mm_struct *mm;
struct mmu_gather tlb;
unsigned long tlb_start = start;
unsigned long tlb_end = end;
/*
* If shared PMDs were possibly used within this vma range, adjust
* start/end for worst case tlb flushing.
* Note that we can not be sure if PMDs are shared until we try to
* unmap pages. However, we want to make sure TLB flushing covers
* the largest possible range.
*/
adjust_range_if_pmd_sharing_possible(vma, &tlb_start, &tlb_end);
mm = vma->vm_mm;
tlb_gather_mmu(&tlb, mm, tlb_start, tlb_end);
__unmap_hugepage_range(&tlb, vma, start, end, ref_page);
tlb_finish_mmu(&tlb, tlb_start, tlb_end);
}
/*
* This is called when the original mapper is failing to COW a MAP_PRIVATE
* mapping it owns the reserve page for. The intention is to unmap the page
* from other VMAs and let the children be SIGKILLed if they are faulting the
* same region.
*/
static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
struct page *page, unsigned long address)
{
struct hstate *h = hstate_vma(vma);
struct vm_area_struct *iter_vma;
struct address_space *mapping;
pgoff_t pgoff;
/*
* vm_pgoff is in PAGE_SIZE units, hence the different calculation
* from page cache lookup which is in HPAGE_SIZE units.
*/
address = address & huge_page_mask(h);
pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
vma->vm_pgoff;
mapping = vma->vm_file->f_mapping;
/*
* Take the mapping lock for the duration of the table walk. As
* this mapping should be shared between all the VMAs,
* __unmap_hugepage_range() is called as the lock is already held
*/
i_mmap_lock_write(mapping);
vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
/* Do not unmap the current VMA */
if (iter_vma == vma)
continue;
/*
* Shared VMAs have their own reserves and do not affect
* MAP_PRIVATE accounting but it is possible that a shared
* VMA is using the same page so check and skip such VMAs.
*/
if (iter_vma->vm_flags & VM_MAYSHARE)
continue;
/*
* Unmap the page from other VMAs without their own reserves.
* They get marked to be SIGKILLed if they fault in these
* areas. This is because a future no-page fault on this VMA
* could insert a zeroed page instead of the data existing
* from the time of fork. This would look like data corruption
*/
if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
unmap_hugepage_range(iter_vma, address,
address + huge_page_size(h), page);
}
i_mmap_unlock_write(mapping);
}
/*
* Hugetlb_cow() should be called with page lock of the original hugepage held.
* Called with hugetlb_instantiation_mutex held and pte_page locked so we
* cannot race with other handlers or page migration.
* Keep the pte_same checks anyway to make transition from the mutex easier.
*/
static vm_fault_t hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pte_t *ptep,
struct page *pagecache_page, spinlock_t *ptl)
{
pte_t pte;
struct hstate *h = hstate_vma(vma);
struct page *old_page, *new_page;
int outside_reserve = 0;
vm_fault_t ret = 0;
unsigned long haddr = address & huge_page_mask(h);
struct mmu_notifier_range range;
pte = huge_ptep_get(ptep);
old_page = pte_page(pte);
retry_avoidcopy:
/* If no-one else is actually using this page, avoid the copy
* and just make the page writable */
if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
page_move_anon_rmap(old_page, vma);
set_huge_ptep_writable(vma, haddr, ptep);
return 0;
}
/*
* If the process that created a MAP_PRIVATE mapping is about to
* perform a COW due to a shared page count, attempt to satisfy
* the allocation without using the existing reserves. The pagecache
* page is used to determine if the reserve at this address was
* consumed or not. If reserves were used, a partial faulted mapping
* at the time of fork() could consume its reserves on COW instead
* of the full address range.
*/
if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
old_page != pagecache_page)
outside_reserve = 1;
get_page(old_page);
/*
* Drop page table lock as buddy allocator may be called. It will
* be acquired again before returning to the caller, as expected.
*/
spin_unlock(ptl);
new_page = alloc_huge_page(vma, haddr, outside_reserve);
if (IS_ERR(new_page)) {
/*
* If a process owning a MAP_PRIVATE mapping fails to COW,
* it is due to references held by a child and an insufficient
* huge page pool. To guarantee the original mapper's
* reliability, unmap the page from child processes. The child
* may get SIGKILLed if it later faults.
*/
if (outside_reserve) {
put_page(old_page);
BUG_ON(huge_pte_none(pte));
unmap_ref_private(mm, vma, old_page, haddr);
BUG_ON(huge_pte_none(pte));
spin_lock(ptl);
ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
if (likely(ptep &&
pte_same(huge_ptep_get(ptep), pte)))
goto retry_avoidcopy;
/*
* race occurs while re-acquiring page table
* lock, and our job is done.
*/
return 0;
}
ret = vmf_error(PTR_ERR(new_page));
goto out_release_old;
}
/*
* When the original hugepage is a shared one, it does not have
* anon_vma prepared.
*/
if (unlikely(anon_vma_prepare(vma))) {
ret = VM_FAULT_OOM;
goto out_release_all;
}
copy_user_huge_page(new_page, old_page, address, vma,
pages_per_huge_page(h));
__SetPageUptodate(new_page);
mmu_notifier_range_init(&range, mm, haddr, haddr + huge_page_size(h));
mmu_notifier_invalidate_range_start(&range);
/*
* Retake the page table lock to check for racing updates
* before the page tables are altered
*/
spin_lock(ptl);
ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
ClearPagePrivate(new_page);
/* Break COW */
huge_ptep_clear_flush(vma, haddr, ptep);
mmu_notifier_invalidate_range(mm, range.start, range.end);
set_huge_pte_at(mm, haddr, ptep,
make_huge_pte(vma, new_page, 1));
page_remove_rmap(old_page, true);
hugepage_add_new_anon_rmap(new_page, vma, haddr);
set_page_huge_active(new_page);
/* Make the old page be freed below */
new_page = old_page;
}
spin_unlock(ptl);
mmu_notifier_invalidate_range_end(&range);
out_release_all:
restore_reserve_on_error(h, vma, haddr, new_page);
put_page(new_page);
out_release_old:
put_page(old_page);
spin_lock(ptl); /* Caller expects lock to be held */
return ret;
}
/* Return the pagecache page at a given address within a VMA */
static struct page *hugetlbfs_pagecache_page(struct hstate *h,
struct vm_area_struct *vma, unsigned long address)
{
struct address_space *mapping;
pgoff_t idx;
mapping = vma->vm_file->f_mapping;
idx = vma_hugecache_offset(h, vma, address);
return find_lock_page(mapping, idx);
}
/*
* Return whether there is a pagecache page to back given address within VMA.
* Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
*/
static bool hugetlbfs_pagecache_present(struct hstate *h,
struct vm_area_struct *vma, unsigned long address)
{
struct address_space *mapping;
pgoff_t idx;
struct page *page;
mapping = vma->vm_file->f_mapping;
idx = vma_hugecache_offset(h, vma, address);
page = find_get_page(mapping, idx);
if (page)
put_page(page);
return page != NULL;
}
int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
pgoff_t idx)
{
struct inode *inode = mapping->host;
struct hstate *h = hstate_inode(inode);
int err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
if (err)
return err;
ClearPagePrivate(page);
/*
* set page dirty so that it will not be removed from cache/file
* by non-hugetlbfs specific code paths.
*/
set_page_dirty(page);
spin_lock(&inode->i_lock);
inode->i_blocks += blocks_per_huge_page(h);
spin_unlock(&inode->i_lock);
return 0;
}
static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
struct vm_area_struct *vma,
struct address_space *mapping, pgoff_t idx,
unsigned long address, pte_t *ptep, unsigned int flags)
{
struct hstate *h = hstate_vma(vma);
vm_fault_t ret = VM_FAULT_SIGBUS;
int anon_rmap = 0;
unsigned long size;
struct page *page;
pte_t new_pte;
spinlock_t *ptl;
unsigned long haddr = address & huge_page_mask(h);
bool new_page = false;
/*
* Currently, we are forced to kill the process in the event the
* original mapper has unmapped pages from the child due to a failed
* COW. Warn that such a situation has occurred as it may not be obvious
*/
if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n",
current->pid);
return ret;
}
/*
* Use page lock to guard against racing truncation
* before we get page_table_lock.
*/
retry:
page = find_lock_page(mapping, idx);
if (!page) {
size = i_size_read(mapping->host) >> huge_page_shift(h);
if (idx >= size)
goto out;
/*
* Check for page in userfault range
*/
if (userfaultfd_missing(vma)) {
u32 hash;
struct vm_fault vmf = {
.vma = vma,
.address = haddr,
.flags = flags,
/*
* Hard to debug if it ends up being
* used by a callee that assumes
* something about the other
* uninitialized fields... same as in
* memory.c
*/
};
/*
* hugetlb_fault_mutex must be dropped before
* handling userfault. Reacquire after handling
* fault to make calling code simpler.
*/
hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping,
idx, haddr);
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
ret = handle_userfault(&vmf, VM_UFFD_MISSING);
mutex_lock(&hugetlb_fault_mutex_table[hash]);
goto out;
}
page = alloc_huge_page(vma, haddr, 0);
if (IS_ERR(page)) {
ret = vmf_error(PTR_ERR(page));
goto out;
}
clear_huge_page(page, address, pages_per_huge_page(h));
__SetPageUptodate(page);
new_page = true;
if (vma->vm_flags & VM_MAYSHARE) {
int err = huge_add_to_page_cache(page, mapping, idx);
if (err) {
put_page(page);
if (err == -EEXIST)
goto retry;
goto out;
}
} else {
lock_page(page);
if (unlikely(anon_vma_prepare(vma))) {
ret = VM_FAULT_OOM;
goto backout_unlocked;
}
anon_rmap = 1;
}
} else {
/*
* If a memory error occurs between mmap() and fault, some processes
* don't have a hwpoisoned swap entry for the errored virtual address.
* So we need to block hugepage fault by PG_hwpoison bit check.
*/
if (unlikely(PageHWPoison(page))) {
ret = VM_FAULT_HWPOISON |
VM_FAULT_SET_HINDEX(hstate_index(h));
goto backout_unlocked;
}
}
/*
* If we are going to COW a private mapping later, we examine the
* pending reservations for this page now. This will ensure that
* any allocations necessary to record that reservation occur outside
* the spinlock.
*/
if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
if (vma_needs_reservation(h, vma, haddr) < 0) {
ret = VM_FAULT_OOM;
goto backout_unlocked;
}
/* Just decrements count, does not deallocate */
vma_end_reservation(h, vma, haddr);
}
ptl = huge_pte_lock(h, mm, ptep);
size = i_size_read(mapping->host) >> huge_page_shift(h);
if (idx >= size)
goto backout;
ret = 0;
if (!huge_pte_none(huge_ptep_get(ptep)))
goto backout;
if (anon_rmap) {
ClearPagePrivate(page);
hugepage_add_new_anon_rmap(page, vma, haddr);
} else
page_dup_rmap(page, true);
new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
&& (vma->vm_flags & VM_SHARED)));
set_huge_pte_at(mm, haddr, ptep, new_pte);
hugetlb_count_add(pages_per_huge_page(h), mm);
if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
/* Optimization, do the COW without a second fault */
ret = hugetlb_cow(mm, vma, address, ptep, page, ptl);
}
spin_unlock(ptl);
/*
* Only make newly allocated pages active. Existing pages found
* in the pagecache could be !page_huge_active() if they have been
* isolated for migration.
*/
if (new_page)
set_page_huge_active(page);
unlock_page(page);
out:
return ret;
backout:
spin_unlock(ptl);
backout_unlocked:
unlock_page(page);
restore_reserve_on_error(h, vma, haddr, page);
put_page(page);
goto out;
}
#ifdef CONFIG_SMP
u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
struct vm_area_struct *vma,
struct address_space *mapping,
pgoff_t idx, unsigned long address)
{
unsigned long key[2];
u32 hash;
if (vma->vm_flags & VM_SHARED) {
key[0] = (unsigned long) mapping;
key[1] = idx;
} else {
key[0] = (unsigned long) mm;
key[1] = address >> huge_page_shift(h);
}
hash = jhash2((u32 *)&key, sizeof(key)/sizeof(u32), 0);
return hash & (num_fault_mutexes - 1);
}
#else
/*
* For uniprocessor systems we always use a single mutex, so just
* return 0 and avoid the hashing overhead.
*/
u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
struct vm_area_struct *vma,
struct address_space *mapping,
pgoff_t idx, unsigned long address)
{
return 0;
}
#endif
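/*
 * Illustrative usage sketch (not part of the original file): the hash
 * picks one of the fault mutexes, exactly as hugetlb_fault() below does:
 *
 *	hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, haddr);
 *	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *	... fault handling, serialized per (mapping, idx) or (mm, addr) ...
 *	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 *
 * Note that masking with (num_fault_mutexes - 1) assumes
 * num_fault_mutexes is a power of two.
 */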
vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, unsigned int flags)
{
pte_t *ptep, entry;
spinlock_t *ptl;
vm_fault_t ret;
u32 hash;
pgoff_t idx;
struct page *page = NULL;
struct page *pagecache_page = NULL;
struct hstate *h = hstate_vma(vma);
struct address_space *mapping;
int need_wait_lock = 0;
unsigned long haddr = address & huge_page_mask(h);
ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
if (ptep) {
entry = huge_ptep_get(ptep);
if (unlikely(is_hugetlb_entry_migration(entry))) {
migration_entry_wait_huge(vma, mm, ptep);
return 0;
} else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
return VM_FAULT_HWPOISON_LARGE |
VM_FAULT_SET_HINDEX(hstate_index(h));
} else {
ptep = huge_pte_alloc(mm, haddr, huge_page_size(h));
if (!ptep)
return VM_FAULT_OOM;
}
mapping = vma->vm_file->f_mapping;
idx = vma_hugecache_offset(h, vma, haddr);
/*
* Serialize hugepage allocation and instantiation, so that we don't
* get spurious allocation failures if two CPUs race to instantiate
* the same page in the page cache.
*/
hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, haddr);
mutex_lock(&hugetlb_fault_mutex_table[hash]);
entry = huge_ptep_get(ptep);
if (huge_pte_none(entry)) {
ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags);
goto out_mutex;
}
ret = 0;
/*
* entry could be a migration/hwpoison entry at this point, so this
* check prevents the kernel from going below assuming that we have
* an active hugepage in the pagecache. This goto expects the 2nd page fault,
* and the is_hugetlb_entry_(migration|hwpoisoned) check will properly
* handle it.
*/
if (!pte_present(entry))
goto out_mutex;
/*
* If we are going to COW the mapping later, we examine the pending
* reservations for this page now. This will ensure that any
* allocations necessary to record that reservation occur outside the
* spinlock. For private mappings, we also lookup the pagecache
* page now as it is used to determine if a reservation has been
* consumed.
*/
if ((flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
if (vma_needs_reservation(h, vma, haddr) < 0) {
ret = VM_FAULT_OOM;
goto out_mutex;
}
/* Just decrements count, does not deallocate */
vma_end_reservation(h, vma, haddr);
if (!(vma->vm_flags & VM_MAYSHARE))
pagecache_page = hugetlbfs_pagecache_page(h,
vma, haddr);
}
ptl = huge_pte_lock(h, mm, ptep);
/* Check for a racing update before calling hugetlb_cow */
if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
goto out_ptl;
/*
* hugetlb_cow() requires page locks of pte_page(entry) and
* pagecache_page, so here we need to take the former one
* when page != pagecache_page or !pagecache_page.
*/
page = pte_page(entry);
if (page != pagecache_page)
if (!trylock_page(page)) {
need_wait_lock = 1;
goto out_ptl;
}
get_page(page);
if (flags & FAULT_FLAG_WRITE) {
if (!huge_pte_write(entry)) {
ret = hugetlb_cow(mm, vma, address, ptep,
pagecache_page, ptl);
goto out_put_page;
}
entry = huge_pte_mkdirty(entry);
}
entry = pte_mkyoung(entry);
if (huge_ptep_set_access_flags(vma, haddr, ptep, entry,
flags & FAULT_FLAG_WRITE))
update_mmu_cache(vma, haddr, ptep);
out_put_page:
if (page != pagecache_page)
unlock_page(page);
put_page(page);
out_ptl:
spin_unlock(ptl);
if (pagecache_page) {
unlock_page(pagecache_page);
put_page(pagecache_page);
}
out_mutex:
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
/*
* Generally it's safe to hold a refcount while waiting for the page lock.
* But here we just wait to defer the next page fault to avoid a busy loop,
* and the page is not used after being unlocked before returning from the
* current page fault. So we are safe from accessing a freed page, even if
* we wait here without taking a refcount.
*/
if (need_wait_lock)
wait_on_page_locked(page);
return ret;
}
/*
* Used by userfaultfd UFFDIO_COPY. Based on mcopy_atomic_pte with
* modifications for huge pages.
*/
int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
pte_t *dst_pte,
struct vm_area_struct *dst_vma,
unsigned long dst_addr,
unsigned long src_addr,
struct page **pagep)
{
struct address_space *mapping;
pgoff_t idx;
unsigned long size;
int vm_shared = dst_vma->vm_flags & VM_SHARED;
struct hstate *h = hstate_vma(dst_vma);
pte_t _dst_pte;
spinlock_t *ptl;
int ret;
struct page *page;
if (!*pagep) {
ret = -ENOMEM;
page = alloc_huge_page(dst_vma, dst_addr, 0);
if (IS_ERR(page))
goto out;
ret = copy_huge_page_from_user(page,
(const void __user *) src_addr,
pages_per_huge_page(h), false);
/* fallback to copy_from_user outside mmap_sem */
if (unlikely(ret)) {
ret = -ENOENT;
*pagep = page;
/* don't free the page */
goto out;
}
} else {
page = *pagep;
*pagep = NULL;
}
/*
* The memory barrier inside __SetPageUptodate makes sure that
* preceding stores to the page contents become visible before
* the set_pte_at() write.
*/
__SetPageUptodate(page);
mapping = dst_vma->vm_file->f_mapping;
idx = vma_hugecache_offset(h, dst_vma, dst_addr);
/*
* If shared, add to page cache
*/
if (vm_shared) {
size = i_size_read(mapping->host) >> huge_page_shift(h);
ret = -EFAULT;
if (idx >= size)
goto out_release_nounlock;
/*
* Serialization between remove_inode_hugepages() and
* huge_add_to_page_cache() below happens through the
* hugetlb_fault_mutex_table, which must be held by
* the caller.
*/
ret = huge_add_to_page_cache(page, mapping, idx);
if (ret)
goto out_release_nounlock;
}
ptl = huge_pte_lockptr(h, dst_mm, dst_pte);
spin_lock(ptl);
/*
* Recheck the i_size after holding PT lock to make sure not
* to leave any page mapped (as page_mapped()) beyond the end
* of the i_size (remove_inode_hugepages() is strict about
* enforcing that). If we bail out here, we'll also leave a
* page in the radix tree in the vm_shared case beyond the end
* of the i_size, but remove_inode_hugepages() will take care
* of it as soon as we drop the hugetlb_fault_mutex_table.
*/
size = i_size_read(mapping->host) >> huge_page_shift(h);
ret = -EFAULT;
if (idx >= size)
goto out_release_unlock;
ret = -EEXIST;
if (!huge_pte_none(huge_ptep_get(dst_pte)))
goto out_release_unlock;
if (vm_shared) {
page_dup_rmap(page, true);
} else {
ClearPagePrivate(page);
hugepage_add_new_anon_rmap(page, dst_vma, dst_addr);
}
_dst_pte = make_huge_pte(dst_vma, page, dst_vma->vm_flags & VM_WRITE);
if (dst_vma->vm_flags & VM_WRITE)
_dst_pte = huge_pte_mkdirty(_dst_pte);
_dst_pte = pte_mkyoung(_dst_pte);
set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
(void)huge_ptep_set_access_flags(dst_vma, dst_addr, dst_pte, _dst_pte,
dst_vma->vm_flags & VM_WRITE);
hugetlb_count_add(pages_per_huge_page(h), dst_mm);
/* No need to invalidate - it was non-present before */
update_mmu_cache(dst_vma, dst_addr, dst_pte);
spin_unlock(ptl);
set_page_huge_active(page);
if (vm_shared)
unlock_page(page);
ret = 0;
out:
return ret;
out_release_unlock:
spin_unlock(ptl);
if (vm_shared)
unlock_page(page);
out_release_nounlock:
put_page(page);
goto out;
}
long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
struct page **pages, struct vm_area_struct **vmas,
unsigned long *position, unsigned long *nr_pages,
long i, unsigned int flags, int *nonblocking)
{
unsigned long pfn_offset;
unsigned long vaddr = *position;
unsigned long remainder = *nr_pages;
struct hstate *h = hstate_vma(vma);
int err = -EFAULT;
while (vaddr < vma->vm_end && remainder) {
pte_t *pte;
spinlock_t *ptl = NULL;
int absent;
struct page *page;
/*
* If we have a pending SIGKILL, don't keep faulting pages and
* potentially allocating memory.
*/
if (fatal_signal_pending(current)) {
remainder = 0;
break;
}
/*
* Some archs (sparc64, sh*) have multiple pte_ts per
* hugepage. We have to make sure we get the
* first, for the page indexing below to work.
*
* Note that page table lock is not held when pte is null.
*/
pte = huge_pte_offset(mm, vaddr & huge_page_mask(h),
huge_page_size(h));
if (pte)
ptl = huge_pte_lock(h, mm, pte);
absent = !pte || huge_pte_none(huge_ptep_get(pte));
/*
* When coredumping, it suits get_dump_page if we just return
* an error where there's an empty slot with no huge pagecache
* to back it. This way, we avoid allocating a hugepage, and
* the sparse dumpfile avoids allocating disk blocks, but its
* huge holes still show up with zeroes where they need to be.
*/
if (absent && (flags & FOLL_DUMP) &&
!hugetlbfs_pagecache_present(h, vma, vaddr)) {
if (pte)
spin_unlock(ptl);
remainder = 0;
break;
}
/*
* We need to call hugetlb_fault for both hugepages under migration
* (in which case hugetlb_fault waits for the migration) and
* hwpoisoned hugepages (in which case we need to prevent the
* caller from accessing them). In order to do this, we use
* is_swap_pte here instead of is_hugetlb_entry_migration and
* is_hugetlb_entry_hwpoisoned. This is because it simply covers
* both cases, and because we can't follow correct pages
* directly from any kind of swap entry.
*/
if (absent || is_swap_pte(huge_ptep_get(pte)) ||
((flags & FOLL_WRITE) &&
!huge_pte_write(huge_ptep_get(pte)))) {
vm_fault_t ret;
unsigned int fault_flags = 0;
if (pte)
spin_unlock(ptl);
if (flags & FOLL_WRITE)
fault_flags |= FAULT_FLAG_WRITE;
if (nonblocking)
fault_flags |= FAULT_FLAG_ALLOW_RETRY;
if (flags & FOLL_NOWAIT)
fault_flags |= FAULT_FLAG_ALLOW_RETRY |
FAULT_FLAG_RETRY_NOWAIT;
if (flags & FOLL_TRIED) {
VM_WARN_ON_ONCE(fault_flags &
FAULT_FLAG_ALLOW_RETRY);
fault_flags |= FAULT_FLAG_TRIED;
}
ret = hugetlb_fault(mm, vma, vaddr, fault_flags);
if (ret & VM_FAULT_ERROR) {
err = vm_fault_to_errno(ret, flags);
remainder = 0;
break;
}
if (ret & VM_FAULT_RETRY) {
if (nonblocking &&
!(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
*nonblocking = 0;
*nr_pages = 0;
/*
* VM_FAULT_RETRY must not return an
* error, it will return zero
* instead.
*
* No need to update "position" as the
* caller will not check it after
* *nr_pages is set to 0.
*/
return i;
}
continue;
}
pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
page = pte_page(huge_ptep_get(pte));
same_page:
if (pages) {
pages[i] = mem_map_offset(page, pfn_offset);
get_page(pages[i]);
}
if (vmas)
vmas[i] = vma;
vaddr += PAGE_SIZE;
++pfn_offset;
--remainder;
++i;
if (vaddr < vma->vm_end && remainder &&
pfn_offset < pages_per_huge_page(h)) {
/*
* We use pfn_offset to avoid touching the pageframes
* of this compound page.
*/
goto same_page;
}
spin_unlock(ptl);
}
*nr_pages = remainder;
/*
* setting position is actually required only if remainder is
* not zero, but it's faster not to add an "if (remainder)"
* branch.
*/
*position = vaddr;
return i ? i : err;
}
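/*
 * Return-value sketch for follow_hugetlb_page() (illustrative, derived
 * from the code above): if at least one page was processed before an
 * error, the page count is returned and the error is dropped; only when
 * nothing was processed does err (initially -EFAULT, possibly updated
 * via vm_fault_to_errno()) propagate:
 *
 *	i == 3, err == -EFAULT  ->  returns 3
 *	i == 0, err == -EFAULT  ->  returns -EFAULT
 */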
#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
/*
* ARCHes with special requirements for evicting HUGETLB backing TLB entries can
* implement this.
*/
#define flush_hugetlb_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
#endif
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
unsigned long address, unsigned long end, pgprot_t newprot)
{
struct mm_struct *mm = vma->vm_mm;
unsigned long start = address;
pte_t *ptep;
pte_t pte;
struct hstate *h = hstate_vma(vma);
unsigned long pages = 0;
bool shared_pmd = false;
struct mmu_notifier_range range;
/*
* In the case of shared PMDs, the area to flush could be beyond
* start/end. Set range.start/range.end to cover the maximum possible
* range if PMD sharing is possible.
*/
mmu_notifier_range_init(&range, mm, start, end);
adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
BUG_ON(address >= end);
flush_cache_range(vma, range.start, range.end);
mmu_notifier_invalidate_range_start(&range);
i_mmap_lock_write(vma->vm_file->f_mapping);
for (; address < end; address += huge_page_size(h)) {
spinlock_t *ptl;
ptep = huge_pte_offset(mm, address, huge_page_size(h));
if (!ptep)
continue;
ptl = huge_pte_lock(h, mm, ptep);
if (huge_pmd_unshare(mm, &address, ptep)) {
pages++;
spin_unlock(ptl);
shared_pmd = true;
continue;
}
pte = huge_ptep_get(ptep);
if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
spin_unlock(ptl);
continue;
}
if (unlikely(is_hugetlb_entry_migration(pte))) {
swp_entry_t entry = pte_to_swp_entry(pte);
if (is_write_migration_entry(entry)) {
pte_t newpte;
make_migration_entry_read(&entry);
newpte = swp_entry_to_pte(entry);
set_huge_swap_pte_at(mm, address, ptep,
newpte, huge_page_size(h));
pages++;
}
spin_unlock(ptl);
continue;
}
if (!huge_pte_none(pte)) {
pte = huge_ptep_get_and_clear(mm, address, ptep);
pte = pte_mkhuge(huge_pte_modify(pte, newprot));
pte = arch_make_huge_pte(pte, vma, NULL, 0);
set_huge_pte_at(mm, address, ptep, pte);
pages++;
}
spin_unlock(ptl);
}
/*
* Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
* may have cleared our pud entry and done put_page on the page table:
* once we release i_mmap_rwsem, another task can do the final put_page
* and that page table be reused and filled with junk. If we actually
* did unshare a page of pmds, flush the range corresponding to the pud.
*/
if (shared_pmd)
flush_hugetlb_tlb_range(vma, range.start, range.end);
else
flush_hugetlb_tlb_range(vma, start, end);
/*
* No need to call mmu_notifier_invalidate_range() as we are downgrading
* page table protection not changing it to point to a new page.
*
* See Documentation/vm/mmu_notifier.rst
*/
i_mmap_unlock_write(vma->vm_file->f_mapping);
mmu_notifier_invalidate_range_end(&range);
return pages << h->order;
}
int hugetlb_reserve_pages(struct inode *inode,
long from, long to,
struct vm_area_struct *vma,
vm_flags_t vm_flags)
{
long ret, chg;
struct hstate *h = hstate_inode(inode);
struct hugepage_subpool *spool = subpool_inode(inode);
struct resv_map *resv_map;
long gbl_reserve;
/* This should never happen */
if (from > to) {
VM_WARN(1, "%s called with a negative range\n", __func__);
return -EINVAL;
}
/*
* Only apply hugepage reservation if asked. At fault time, an
* attempt will be made for VM_NORESERVE to allocate a page
* without using reserves
*/
if (vm_flags & VM_NORESERVE)
return 0;
/*
* Shared mappings base their reservation on the number of pages that
* are already allocated on behalf of the file. Private mappings need
* to reserve the full area even if read-only as mprotect() may be
* called to make the mapping read-write. Assume !vma is a shm mapping
*/
if (!vma || vma->vm_flags & VM_MAYSHARE) {
resv_map = inode_resv_map(inode);
chg = region_chg(resv_map, from, to);
} else {
resv_map = resv_map_alloc();
if (!resv_map)
return -ENOMEM;
chg = to - from;
set_vma_resv_map(vma, resv_map);
set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
}
if (chg < 0) {
ret = chg;
goto out_err;
}
/*
* There must be enough pages in the subpool for the mapping. If
* the subpool has a minimum size, there may be some global
* reservations already in place (gbl_reserve).
*/
gbl_reserve = hugepage_subpool_get_pages(spool, chg);
if (gbl_reserve < 0) {
ret = -ENOSPC;
goto out_err;
}
/*
* Check enough hugepages are available for the reservation.
* Hand the pages back to the subpool if there are not
*/
ret = hugetlb_acct_memory(h, gbl_reserve);
if (ret < 0) {
/* put back original number of pages, chg */
(void)hugepage_subpool_put_pages(spool, chg);
goto out_err;
}
/*
* Account for the reservations made. Shared mappings record regions
* that have reservations as they are shared by multiple VMAs.
* When the last VMA disappears, the region map says how much
* the reservation was and the page cache tells how much of
* the reservation was consumed. Private mappings are per-VMA and
* only the consumed reservations are tracked. When the VMA
* disappears, the original reservation is the VMA size and the
* consumed reservations are stored in the map. Hence, nothing
* else has to be done for private mappings here
*/
if (!vma || vma->vm_flags & VM_MAYSHARE) {
long add = region_add(resv_map, from, to);
if (unlikely(chg > add)) {
/*
* pages in this range were added to the reserve
* map between region_chg and region_add. This
* indicates a race with alloc_huge_page. Adjust
* the subpool and reserve counts modified above
* based on the difference.
*/
long rsv_adjust;
rsv_adjust = hugepage_subpool_put_pages(spool,
chg - add);
hugetlb_acct_memory(h, -rsv_adjust);
}
}
return 0;
out_err:
if (!vma || vma->vm_flags & VM_MAYSHARE)
/* Don't call region_abort if region_chg failed */
if (chg >= 0)
region_abort(resv_map, from, to);
if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
kref_put(&resv_map->refs, resv_map_release);
return ret;
}
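/*
 * Worked example of the chg/add race adjustment above (illustrative
 * numbers): region_chg() computed chg == 10, so 10 pages were taken
 * from the subpool, but a racing alloc_huge_page() added entries to the
 * reserve map, so region_add() returns add == 8. The excess
 * chg - add == 2 pages are handed back to the subpool;
 * hugepage_subpool_put_pages() reports how many of those are globally
 * releasable (rsv_adjust), and hugetlb_acct_memory() drops the global
 * reservation by that amount.
 */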
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
long freed)
{
struct hstate *h = hstate_inode(inode);
struct resv_map *resv_map = inode_resv_map(inode);
long chg = 0;
struct hugepage_subpool *spool = subpool_inode(inode);
long gbl_reserve;
if (resv_map) {
chg = region_del(resv_map, start, end);
/*
* region_del() can fail in the rare case where a region
* must be split and another region descriptor can not be
* allocated. If end == LONG_MAX, it will not fail.
*/
if (chg < 0)
return chg;
}
spin_lock(&inode->i_lock);
inode->i_blocks -= (blocks_per_huge_page(h) * freed);
spin_unlock(&inode->i_lock);
/*
* If the subpool has a minimum size, the number of global
* reservations to be released may be adjusted.
*/
gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed));
hugetlb_acct_memory(h, -gbl_reserve);
return 0;
}
#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
static unsigned long page_table_shareable(struct vm_area_struct *svma,
struct vm_area_struct *vma,
unsigned long addr, pgoff_t idx)
{
unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
svma->vm_start;
unsigned long sbase = saddr & PUD_MASK;
unsigned long s_end = sbase + PUD_SIZE;
/* Allow segments to share if only one is marked locked */
unsigned long vm_flags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
unsigned long svm_flags = svma->vm_flags & VM_LOCKED_CLEAR_MASK;
/*
* match the virtual addresses, permissions and the alignment of the
* page table page.
*/
if (pmd_index(addr) != pmd_index(saddr) ||
vm_flags != svm_flags ||
sbase < svma->vm_start || svma->vm_end < s_end)
return 0;
return saddr;
}
static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
{
unsigned long base = addr & PUD_MASK;
unsigned long end = base + PUD_SIZE;
/*
* check on proper vm_flags and page table alignment
*/
if (vma->vm_flags & VM_MAYSHARE && range_in_vma(vma, base, end))
return true;
return false;
}
/*
* Determine if start,end range within vma could be mapped by shared pmd.
* If yes, adjust start and end to cover range associated with possible
* shared pmd mappings.
*/
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
unsigned long *start, unsigned long *end)
{
unsigned long check_addr = *start;
if (!(vma->vm_flags & VM_MAYSHARE))
return;
for (check_addr = *start; check_addr < *end; check_addr += PUD_SIZE) {
unsigned long a_start = check_addr & PUD_MASK;
unsigned long a_end = a_start + PUD_SIZE;
/*
* If sharing is possible, adjust start/end if necessary.
*/
if (range_in_vma(vma, a_start, a_end)) {
if (a_start < *start)
*start = a_start;
if (a_end > *end)
*end = a_end;
}
}
}
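/*
 * Worked example (illustrative, assuming 2MB huge pages and a 1GB
 * PUD_SIZE): for a vma spanning [0x40000000, 0x80000000) and an input
 * range *start == 0x40200000, *end == 0x40400000, the containing
 * PUD-sized unit [0x40000000, 0x80000000) lies fully inside the vma,
 * so the range is widened to *start == 0x40000000 and
 * *end == 0x80000000, the worst case a shared PMD unmap could touch.
 */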
/*
* Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
* and returns the corresponding pte. While this is not necessary for the
* !shared pmd case because we can allocate the pmd later as well, it makes the
* code much cleaner. pmd allocation is essential for the shared case because
* pud has to be populated inside the same i_mmap_rwsem section - otherwise
* racing tasks could either miss the sharing (see huge_pte_offset) or select a
* bad pmd for sharing.
*/
pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
{
struct vm_area_struct *vma = find_vma(mm, addr);
struct address_space *mapping = vma->vm_file->f_mapping;
pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
vma->vm_pgoff;
struct vm_area_struct *svma;
unsigned long saddr;
pte_t *spte = NULL;
pte_t *pte;
spinlock_t *ptl;
if (!vma_shareable(vma, addr))
return (pte_t *)pmd_alloc(mm, pud, addr);
i_mmap_lock_write(mapping);
vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
if (svma == vma)
continue;
saddr = page_table_shareable(svma, vma, addr, idx);
if (saddr) {
spte = huge_pte_offset(svma->vm_mm, saddr,
vma_mmu_pagesize(svma));
if (spte) {
get_page(virt_to_page(spte));
break;
}
}
}
if (!spte)
goto out;
ptl = huge_pte_lock(hstate_vma(vma), mm, spte);
if (pud_none(*pud)) {
pud_populate(mm, pud,
(pmd_t *)((unsigned long)spte & PAGE_MASK));
mm_inc_nr_pmds(mm);
} else {
put_page(virt_to_page(spte));
}
spin_unlock(ptl);
out:
pte = (pte_t *)pmd_alloc(mm, pud, addr);
i_mmap_unlock_write(mapping);
return pte;
}
/*
* unmap huge page backed by shared pte.
*
* The hugetlb pte page is ref counted at the time of mapping. If the pte is
* shared, indicated by page_count > 1, unmap is achieved by clearing the pud
* and decrementing the ref count. If count == 1, the pte page is not shared.
*
* called with page table lock held.
*
* returns: 1 successfully unmapped a shared pte page
* 0 the underlying pte page is not shared, or it is the last user
*/
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
pgd_t *pgd = pgd_offset(mm, *addr);
p4d_t *p4d = p4d_offset(pgd, *addr);
pud_t *pud = pud_offset(p4d, *addr);
BUG_ON(page_count(virt_to_page(ptep)) == 0);
if (page_count(virt_to_page(ptep)) == 1)
return 0;
pud_clear(pud);
put_page(virt_to_page(ptep));
mm_dec_nr_pmds(mm);
*addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
return 1;
}
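/*
 * Sketch of the *addr rewind above (illustrative, assuming 2MB huge
 * pages and PTRS_PER_PTE == 512, so one PMD page maps 1GB): for
 * *addr == 0x40200000, ALIGN(*addr, 1GB) == 0x80000000, and subtracting
 * HPAGE_SIZE gives 0x7fe00000. The caller's loop then advances by
 * huge_page_size() and resumes exactly at the 0x80000000 boundary,
 * past the range that the cleared PUD just unmapped.
 */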
#define want_pmd_share() (1)
#else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
{
return NULL;
}
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
return 0;
}
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
unsigned long *start, unsigned long *end)
{
}
#define want_pmd_share() (0)
#endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
pte_t *huge_pte_alloc(struct mm_struct *mm,
unsigned long addr, unsigned long sz)
{
pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
pte_t *pte = NULL;
pgd = pgd_offset(mm, addr);
p4d = p4d_alloc(mm, pgd, addr);
if (!p4d)
return NULL;
pud = pud_alloc(mm, p4d, addr);
if (pud) {
if (sz == PUD_SIZE) {
pte = (pte_t *)pud;
} else {
BUG_ON(sz != PMD_SIZE);
if (want_pmd_share() && pud_none(*pud))
pte = huge_pmd_share(mm, addr, pud);
else
pte = (pte_t *)pmd_alloc(mm, pud, addr);
}
}
BUG_ON(pte && pte_present(*pte) && !pte_huge(*pte));
return pte;
}
/*
* huge_pte_offset() - Walk the page table to resolve the hugepage
* entry at address @addr
*
* Return: Pointer to page table or swap entry (PUD or PMD) for
* address @addr, or NULL if a p*d_none() entry is encountered and the
* size @sz doesn't match the hugepage size at this level of the page
* table.
*/
pte_t *huge_pte_offset(struct mm_struct *mm,
unsigned long addr, unsigned long sz)
{
pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pgd = pgd_offset(mm, addr);
if (!pgd_present(*pgd))
return NULL;
p4d = p4d_offset(pgd, addr);
if (!p4d_present(*p4d))
return NULL;
pud = pud_offset(p4d, addr);
if (sz != PUD_SIZE && pud_none(*pud))
return NULL;
/* hugepage or swap? */
if (pud_huge(*pud) || !pud_present(*pud))
return (pte_t *)pud;
pmd = pmd_offset(pud, addr);
if (sz != PMD_SIZE && pmd_none(*pmd))
return NULL;
/* hugepage or swap? */
if (pmd_huge(*pmd) || !pmd_present(*pmd))
return (pte_t *)pmd;
return NULL;
}
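/*
 * Illustrative caller pattern (a sketch mirroring hugetlb_fault()
 * above): the returned pointer may reference a swap-type entry, so
 * callers must not assume a present mapping:
 *
 *	ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
 *	if (ptep) {
 *		entry = huge_ptep_get(ptep);
 *		if (is_hugetlb_entry_migration(entry))
 *			... wait for the migration to finish ...
 *		else if (is_hugetlb_entry_hwpoisoned(entry))
 *			... fail with VM_FAULT_HWPOISON_LARGE ...
 *	}
 */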
#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
/*
* These functions are overridable if your architecture needs its own
* behavior.
*/
struct page * __weak
follow_huge_addr(struct mm_struct *mm, unsigned long address,
int write)
{
return ERR_PTR(-EINVAL);
}
struct page * __weak
follow_huge_pd(struct vm_area_struct *vma,
unsigned long address, hugepd_t hpd, int flags, int pdshift)
{
WARN(1, "hugepd follow called with no support for hugepage directory format\n");
return NULL;
}
struct page * __weak
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
pmd_t *pmd, int flags)
{
struct page *page = NULL;
spinlock_t *ptl;
pte_t pte;
retry:
ptl = pmd_lockptr(mm, pmd);
spin_lock(ptl);
/*
* make sure that the address range covered by this pmd is not
* unmapped by other threads.
*/
if (!pmd_huge(*pmd))
goto out;
pte = huge_ptep_get((pte_t *)pmd);
if (pte_present(pte)) {
page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
if (flags & FOLL_GET)
get_page(page);
} else {
if (is_hugetlb_entry_migration(pte)) {
spin_unlock(ptl);
__migration_entry_wait(mm, (pte_t *)pmd, ptl);
goto retry;
}
/*
* hwpoisoned entry is treated as no_page_table in
* follow_page_mask().
*/
}
out:
spin_unlock(ptl);
return page;
}
struct page * __weak
follow_huge_pud(struct mm_struct *mm, unsigned long address,
pud_t *pud, int flags)
{
if (flags & FOLL_GET)
return NULL;
return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
}
struct page * __weak
follow_huge_pgd(struct mm_struct *mm, unsigned long address, pgd_t *pgd, int flags)
{
if (flags & FOLL_GET)
return NULL;
return pte_page(*(pte_t *)pgd) + ((address & ~PGDIR_MASK) >> PAGE_SHIFT);
}
bool isolate_huge_page(struct page *page, struct list_head *list)
{
bool ret = true;
VM_BUG_ON_PAGE(!PageHead(page), page);
spin_lock(&hugetlb_lock);
if (!page_huge_active(page) || !get_page_unless_zero(page)) {
ret = false;
goto unlock;
}
clear_page_huge_active(page);
list_move_tail(&page->lru, list);
unlock:
spin_unlock(&hugetlb_lock);
return ret;
}
void putback_active_hugepage(struct page *page)
{
VM_BUG_ON_PAGE(!PageHead(page), page);
spin_lock(&hugetlb_lock);
set_page_huge_active(page);
list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
spin_unlock(&hugetlb_lock);
put_page(page);
}
void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason)
{
struct hstate *h = page_hstate(oldpage);
hugetlb_cgroup_migrate(oldpage, newpage);
set_page_owner_migrate_reason(newpage, reason);
/*
* Transfer the temporary state of the new huge page. This is the
* reverse of other transitions because the newpage is going to be
* final while the old one will be freed, so it takes over the
* temporary status.
*
* Also note that we have to transfer the per-node surplus state
* here as well, otherwise the global surplus count will not match
* the per-node counts.
*/
if (PageHugeTemporary(newpage)) {
int old_nid = page_to_nid(oldpage);
int new_nid = page_to_nid(newpage);
SetPageHugeTemporary(oldpage);
ClearPageHugeTemporary(newpage);
spin_lock(&hugetlb_lock);
if (h->surplus_huge_pages_node[old_nid]) {
h->surplus_huge_pages_node[old_nid]--;
h->surplus_huge_pages_node[new_nid]++;
}
spin_unlock(&hugetlb_lock);
}
}
| ./CrossVul/dataset_final_sorted/CWE-416/c/bad_822_1 |
crossvul-cpp_data_bad_4020_1 | /* exif-mnote-data-fuji.c
*
* Copyright (c) 2002 Lutz Mueller <lutz@users.sourceforge.net>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
* Boston, MA 02110-1301 USA.
*/
#include <stdlib.h>
#include <string.h>
#include <config.h>
#include <libexif/exif-byte-order.h>
#include <libexif/exif-utils.h>
#include "exif-mnote-data-fuji.h"
#define CHECKOVERFLOW(offset,datasize,structsize) (( offset >= datasize) || (structsize > datasize) || (offset > datasize - structsize ))
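/*
 * Note on the macro above (illustrative): the naive bounds check
 * "offset + structsize > datasize" can wrap around, e.g. with a 32-bit
 * size_t, offset == 0xFFFFFFF8 and structsize == 12 give
 * offset + structsize == 4, which would pass. CHECKOVERFLOW first
 * rejects offset >= datasize and structsize > datasize; after that,
 * "offset > datasize - structsize" cannot underflow and catches the
 * same out-of-bounds case without any wraparound.
 */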
struct _MNoteFujiDataPrivate {
ExifByteOrder order;
};
static void
exif_mnote_data_fuji_clear (ExifMnoteDataFuji *n)
{
ExifMnoteData *d = (ExifMnoteData *) n;
unsigned int i;
if (!n) return;
if (n->entries) {
for (i = 0; i < n->count; i++)
if (n->entries[i].data) {
exif_mem_free (d->mem, n->entries[i].data);
n->entries[i].data = NULL;
}
exif_mem_free (d->mem, n->entries);
n->entries = NULL;
n->count = 0;
}
}
static void
exif_mnote_data_fuji_free (ExifMnoteData *n)
{
if (!n) return;
exif_mnote_data_fuji_clear ((ExifMnoteDataFuji *) n);
}
static char *
exif_mnote_data_fuji_get_value (ExifMnoteData *d, unsigned int i, char *val, unsigned int maxlen)
{
ExifMnoteDataFuji *n = (ExifMnoteDataFuji *) d;
if (!d || !val) return NULL;
if (i >= n->count) return NULL;
/*
exif_log (d->log, EXIF_LOG_CODE_DEBUG, "ExifMnoteDataFuji",
"Querying value for tag '%s'...",
mnote_fuji_tag_get_name (n->entries[i].tag));
*/
return mnote_fuji_entry_get_value (&n->entries[i], val, maxlen);
}
static void
exif_mnote_data_fuji_save (ExifMnoteData *ne, unsigned char **buf,
unsigned int *buf_size)
{
ExifMnoteDataFuji *n = (ExifMnoteDataFuji *) ne;
size_t i, o, s, doff;
unsigned char *t;
size_t ts;
if (!n || !buf || !buf_size) return;
/*
* Allocate enough memory for all entries and the number
* of entries.
*/
*buf_size = 8 + 4 + 2 + n->count * 12 + 4;
*buf = exif_mem_alloc (ne->mem, *buf_size);
if (!*buf) {
*buf_size = 0;
return;
}
/*
* Header: "FUJIFILM" and 4 bytes offset to the first entry.
* As the first entry will start right thereafter, the offset is 12.
*/
memcpy (*buf, "FUJIFILM", 8);
exif_set_long (*buf + 8, n->order, 12);
/* Save the number of entries */
exif_set_short (*buf + 8 + 4, n->order, (ExifShort) n->count);
/* Save each entry */
for (i = 0; i < n->count; i++) {
o = 8 + 4 + 2 + i * 12;
exif_set_short (*buf + o + 0, n->order, (ExifShort) n->entries[i].tag);
exif_set_short (*buf + o + 2, n->order, (ExifShort) n->entries[i].format);
exif_set_long (*buf + o + 4, n->order, n->entries[i].components);
o += 8;
s = exif_format_get_size (n->entries[i].format) *
n->entries[i].components;
if (s > 65536) {
/* Corrupt data: EXIF data size is limited to the
* maximum size of a JPEG segment (64 kb).
*/
continue;
}
if (s > 4) {
ts = *buf_size + s;
/* Ensure even offsets. Set padding bytes to 0. */
if (s & 1) ts += 1;
t = exif_mem_realloc (ne->mem, *buf, ts);
if (!t) {
return;
}
*buf = t;
*buf_size = ts;
doff = *buf_size - s;
if (s & 1) { doff--; *(*buf + *buf_size - 1) = '\0'; }
exif_set_long (*buf + o, n->order, doff);
} else
doff = o;
/*
* Write the data. Fill unneeded bytes with 0. Do not
* crash if data is NULL.
*/
if (!n->entries[i].data) memset (*buf + doff, 0, s);
else memcpy (*buf + doff, n->entries[i].data, s);
}
}
static void
exif_mnote_data_fuji_load (ExifMnoteData *en,
const unsigned char *buf, unsigned int buf_size)
{
ExifMnoteDataFuji *n = (ExifMnoteDataFuji*) en;
ExifLong c;
size_t i, tcount, o, datao;
if (!n || !buf || !buf_size) {
exif_log (en->log, EXIF_LOG_CODE_CORRUPT_DATA,
"ExifMnoteDataFuji", "Short MakerNote");
return;
}
datao = 6 + n->offset;
if (CHECKOVERFLOW(datao, buf_size, 12)) {
exif_log (en->log, EXIF_LOG_CODE_CORRUPT_DATA,
"ExifMnoteDataFuji", "Short MakerNote");
return;
}
n->order = EXIF_BYTE_ORDER_INTEL;
datao += exif_get_long (buf + datao + 8, EXIF_BYTE_ORDER_INTEL);
if (CHECKOVERFLOW(datao, buf_size, 2)) {
exif_log (en->log, EXIF_LOG_CODE_CORRUPT_DATA,
"ExifMnoteDataFuji", "Short MakerNote");
return;
}
/* Read the number of tags */
c = exif_get_short (buf + datao, EXIF_BYTE_ORDER_INTEL);
datao += 2;
/* Remove any old entries */
exif_mnote_data_fuji_clear (n);
/* Reserve enough space for all the possible MakerNote tags */
n->entries = exif_mem_alloc (en->mem, sizeof (MnoteFujiEntry) * c);
if (!n->entries) {
EXIF_LOG_NO_MEMORY(en->log, "ExifMnoteDataFuji", sizeof (MnoteFujiEntry) * c);
return;
}
/* Parse all c entries, storing ones that are successfully parsed */
tcount = 0;
for (i = c, o = datao; i; --i, o += 12) {
size_t s;
if (CHECKOVERFLOW(o, buf_size, 12)) {
exif_log (en->log, EXIF_LOG_CODE_CORRUPT_DATA,
"ExifMnoteDataFuji", "Short MakerNote");
break;
}
n->entries[tcount].tag = exif_get_short (buf + o, n->order);
n->entries[tcount].format = exif_get_short (buf + o + 2, n->order);
n->entries[tcount].components = exif_get_long (buf + o + 4, n->order);
n->entries[tcount].order = n->order;
exif_log (en->log, EXIF_LOG_CODE_DEBUG, "ExifMnoteDataFuji",
"Loading entry 0x%x ('%s')...", n->entries[tcount].tag,
mnote_fuji_tag_get_name (n->entries[tcount].tag));
/* Check if the multiplication overflows. Use buf_size as the max size for integer overflow detection;
* we will check the buffer sizes more closely later. */
if ( exif_format_get_size (n->entries[tcount].format) &&
buf_size / exif_format_get_size (n->entries[tcount].format) < n->entries[tcount].components
) {
exif_log (en->log, EXIF_LOG_CODE_CORRUPT_DATA,
"ExifMnoteDataFuji", "Tag size overflow detected (%u * %lu)", exif_format_get_size (n->entries[tcount].format), n->entries[tcount].components);
continue;
}
/*
* Size? If bigger than 4 bytes, the actual data is not
* in the entry but somewhere else (offset).
*/
s = exif_format_get_size (n->entries[tcount].format) * n->entries[tcount].components;
n->entries[tcount].size = s;
if (s) {
size_t dataofs = o + 8;
if (s > 4)
/* The data in this case is merely a pointer */
dataofs = exif_get_long (buf + dataofs, n->order) + 6 + n->offset;
if (CHECKOVERFLOW(dataofs, buf_size, s)) {
exif_log (en->log, EXIF_LOG_CODE_CORRUPT_DATA,
"ExifMnoteDataFuji", "Tag data past end of "
"buffer (%u >= %u)", (unsigned)(dataofs + s), buf_size);
continue;
}
n->entries[tcount].data = exif_mem_alloc (en->mem, s);
if (!n->entries[tcount].data) {
EXIF_LOG_NO_MEMORY(en->log, "ExifMnoteDataFuji", s);
continue;
}
memcpy (n->entries[tcount].data, buf + dataofs, s);
}
/* Tag was successfully parsed */
++tcount;
}
/* Store the count of successfully parsed tags */
n->count = tcount;
}
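/*
 * Worked example of the multiplication-overflow guard above
 * (illustrative numbers): with a format size of 8, components ==
 * 0x20000001 and buf_size == 65536, the product 8 * 0x20000001 wraps
 * to 8 on a 32-bit size_t. The division form avoids this: buf_size / 8
 * == 8192 is smaller than 0x20000001, so the entry is rejected before
 * the bogus size is ever used.
 */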
static unsigned int
exif_mnote_data_fuji_count (ExifMnoteData *n)
{
return n ? ((ExifMnoteDataFuji *) n)->count : 0;
}
static unsigned int
exif_mnote_data_fuji_get_id (ExifMnoteData *d, unsigned int n)
{
ExifMnoteDataFuji *note = (ExifMnoteDataFuji *) d;
if (!note) return 0;
if (note->count <= n) return 0;
return note->entries[n].tag;
}
static const char *
exif_mnote_data_fuji_get_name (ExifMnoteData *d, unsigned int i)
{
ExifMnoteDataFuji *n = (ExifMnoteDataFuji *) d;
if (!n) return NULL;
if (i >= n->count) return NULL;
return mnote_fuji_tag_get_name (n->entries[i].tag);
}
static const char *
exif_mnote_data_fuji_get_title (ExifMnoteData *d, unsigned int i)
{
ExifMnoteDataFuji *n = (ExifMnoteDataFuji *) d;
if (!n) return NULL;
if (i >= n->count) return NULL;
return mnote_fuji_tag_get_title (n->entries[i].tag);
}
static const char *
exif_mnote_data_fuji_get_description (ExifMnoteData *d, unsigned int i)
{
ExifMnoteDataFuji *n = (ExifMnoteDataFuji *) d;
if (!n) return NULL;
if (i >= n->count) return NULL;
return mnote_fuji_tag_get_description (n->entries[i].tag);
}
static void
exif_mnote_data_fuji_set_byte_order (ExifMnoteData *d, ExifByteOrder o)
{
ExifByteOrder o_orig;
ExifMnoteDataFuji *n = (ExifMnoteDataFuji *) d;
unsigned int i;
if (!n) return;
o_orig = n->order;
n->order = o;
for (i = 0; i < n->count; i++) {
if (n->entries[i].components && (n->entries[i].size/n->entries[i].components < exif_format_get_size (n->entries[i].format)))
continue;
n->entries[i].order = o;
exif_array_set_byte_order (n->entries[i].format, n->entries[i].data,
n->entries[i].components, o_orig, o);
}
}
static void
exif_mnote_data_fuji_set_offset (ExifMnoteData *n, unsigned int o)
{
if (n) ((ExifMnoteDataFuji *) n)->offset = o;
}
int
exif_mnote_data_fuji_identify (const ExifData *ed, const ExifEntry *e)
{
(void) ed; /* unused */
return ((e->size >= 12) && !memcmp (e->data, "FUJIFILM", 8));
}
ExifMnoteData *
exif_mnote_data_fuji_new (ExifMem *mem)
{
ExifMnoteData *d;
if (!mem) return NULL;
d = exif_mem_alloc (mem, sizeof (ExifMnoteDataFuji));
if (!d) return NULL;
exif_mnote_data_construct (d, mem);
/* Set up function pointers */
d->methods.free = exif_mnote_data_fuji_free;
d->methods.set_byte_order = exif_mnote_data_fuji_set_byte_order;
d->methods.set_offset = exif_mnote_data_fuji_set_offset;
d->methods.load = exif_mnote_data_fuji_load;
d->methods.save = exif_mnote_data_fuji_save;
d->methods.count = exif_mnote_data_fuji_count;
d->methods.get_id = exif_mnote_data_fuji_get_id;
d->methods.get_name = exif_mnote_data_fuji_get_name;
d->methods.get_title = exif_mnote_data_fuji_get_title;
d->methods.get_description = exif_mnote_data_fuji_get_description;
d->methods.get_value = exif_mnote_data_fuji_get_value;
return d;
}
| ./CrossVul/dataset_final_sorted/CWE-416/c/bad_4020_1 |
crossvul-cpp_data_good_1830_0 | /*
* The copyright in this software is being made available under the 2-clauses
* BSD License, included below. This software may be subject to other third
* party and contributor rights, including patent rights, and no such rights
* are granted under this license.
*
* Copyright (c) 2002-2014, Universite catholique de Louvain (UCL), Belgium
* Copyright (c) 2002-2014, Professor Benoit Macq
* Copyright (c) 2001-2003, David Janssens
* Copyright (c) 2002-2003, Yannick Verschueren
* Copyright (c) 2003-2007, Francois-Olivier Devaux
* Copyright (c) 2003-2014, Antonin Descampe
* Copyright (c) 2005, Herve Drolon, FreeImage Team
* Copyright (c) 2008, Jerome Fimes, Communications & Systemes <jerome.fimes@c-s.fr>
* Copyright (c) 2006-2007, Parvatha Elangovan
* Copyright (c) 2010-2011, Kaori Hagihara
* Copyright (c) 2011-2012, Centre National d'Etudes Spatiales (CNES), France
* Copyright (c) 2012, CS Systemes d'Information, France
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS `AS IS'
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "opj_includes.h"
/** @defgroup J2K J2K - JPEG-2000 codestream reader/writer */
/*@{*/
/** @name Local static functions */
/*@{*/
/**
* Sets up the procedures to do on reading the header. Developers wanting to extend the library can add their own reading procedures.
*/
static OPJ_BOOL opj_j2k_setup_header_reading (opj_j2k_t *p_j2k, opj_event_mgr_t * p_manager);
/**
* The read header procedure.
*/
static OPJ_BOOL opj_j2k_read_header_procedure( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager);
/**
* The default encoding validation procedure without any extension.
*
* @param p_j2k the jpeg2000 codec to validate.
* @param p_stream the input stream to validate.
* @param p_manager the user event manager.
*
* @return true if the parameters are correct.
*/
static OPJ_BOOL opj_j2k_encoding_validation ( opj_j2k_t * p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager );
/**
* The default decoding validation procedure without any extension.
*
* @param p_j2k the jpeg2000 codec to validate.
* @param p_stream the input stream to validate.
* @param p_manager the user event manager.
*
* @return true if the parameters are correct.
*/
static OPJ_BOOL opj_j2k_decoding_validation ( opj_j2k_t * p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager );
/**
* Sets up the validation, i.e. adds the procedures to launch to make sure the codec parameters
* are valid. Developers wanting to extend the library can add their own validation procedures.
*/
static OPJ_BOOL opj_j2k_setup_encoding_validation (opj_j2k_t *p_j2k, opj_event_mgr_t * p_manager);
/**
* Sets up the validation, i.e. adds the procedures to launch to make sure the codec parameters
* are valid. Developers wanting to extend the library can add their own validation procedures.
*/
static OPJ_BOOL opj_j2k_setup_decoding_validation (opj_j2k_t *p_j2k, opj_event_mgr_t * p_manager);
/**
* Sets up the procedures to do at the end of a compression, i.e. adds the procedures to launch
* at the end of the encoding process. Developers wanting to extend the library can add their own procedures.
*/
static OPJ_BOOL opj_j2k_setup_end_compress (opj_j2k_t *p_j2k, opj_event_mgr_t * p_manager);
/**
* The mct encoding validation procedure.
*
* @param p_j2k the jpeg2000 codec to validate.
* @param p_stream the input stream to validate.
* @param p_manager the user event manager.
*
* @return true if the parameters are correct.
*/
static OPJ_BOOL opj_j2k_mct_validation (opj_j2k_t * p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager );
/**
* Builds the tcd decoder to use to decode tile.
*/
static OPJ_BOOL opj_j2k_build_decoder ( opj_j2k_t * p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager );
/**
* Builds the tcd encoder to use to encode tile.
*/
static OPJ_BOOL opj_j2k_build_encoder ( opj_j2k_t * p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager );
/**
* Creates a tile-coder decoder.
*
* @param p_stream the stream to write data to.
* @param p_j2k J2K codec.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_create_tcd( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager );
/**
* Executes the given procedures on the given codec.
*
* @param p_procedure_list the list of procedures to execute
* @param p_j2k the jpeg2000 codec to execute the procedures on.
* @param p_stream the stream to execute the procedures on.
* @param p_manager the user manager.
*
* @return true if all the procedures were successfully executed.
*/
static OPJ_BOOL opj_j2k_exec ( opj_j2k_t * p_j2k,
opj_procedure_list_t * p_procedure_list,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager);
/**
* Updates the rates of the tcp.
*
* @param p_stream the stream to write data to.
* @param p_j2k J2K codec.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_update_rates( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager );
/**
* Copies the decoding tile parameters onto all the tile parameters.
* Also creates the tile decoder.
*/
static OPJ_BOOL opj_j2k_copy_default_tcp_and_create_tcd ( opj_j2k_t * p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager );
/**
* Destroys the memory associated with the decoding of headers.
*/
static OPJ_BOOL opj_j2k_destroy_header_memory ( opj_j2k_t * p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager );
/**
* Searches the lookup table containing each marker, its status and its action, and returns
* the handler associated with the marker value.
* @param p_id Marker value to look up
*
* @return the handler associated with the id.
*/
static const struct opj_dec_memory_marker_handler * opj_j2k_get_marker_handler (OPJ_UINT32 p_id);
/**
* Destroys a tile coding parameter structure.
*
* @param p_tcp the tile coding parameter to destroy.
*/
static void opj_j2k_tcp_destroy (opj_tcp_t *p_tcp);
/**
* Destroys the data inside a tile coding parameter structure.
*
* @param p_tcp the tile coding parameter which contain data to destroy.
*/
static void opj_j2k_tcp_data_destroy (opj_tcp_t *p_tcp);
/**
* Destroys a coding parameter structure.
*
* @param p_cp the coding parameter to destroy.
*/
static void opj_j2k_cp_destroy (opj_cp_t *p_cp);
/**
* Writes a SPCod or SPCoc element, i.e. the coding style of a given component of a tile.
*
* @param p_j2k J2K codec.
* @param p_tile_no FIXME DOC
* @param p_comp_no the component number to output.
* @param p_data FIXME DOC
* @param p_header_size FIXME DOC
* @param p_manager the user event manager.
*
* @return FIXME DOC
*/
static OPJ_BOOL opj_j2k_write_SPCod_SPCoc( opj_j2k_t *p_j2k,
OPJ_UINT32 p_tile_no,
OPJ_UINT32 p_comp_no,
OPJ_BYTE * p_data,
OPJ_UINT32 * p_header_size,
opj_event_mgr_t * p_manager );
/**
* Gets the size taken by writing a SPCod or SPCoc for the given tile and component.
*
* @param p_j2k the J2K codec.
* @param p_tile_no the tile index.
* @param p_comp_no the component being outputted.
*
* @return the number of bytes taken by the SPCod element.
*/
static OPJ_UINT32 opj_j2k_get_SPCod_SPCoc_size (opj_j2k_t *p_j2k,
OPJ_UINT32 p_tile_no,
OPJ_UINT32 p_comp_no );
/**
* Reads a SPCod or SPCoc element, i.e. the coding style of a given component of a tile.
* @param p_j2k the jpeg2000 codec.
* @param compno FIXME DOC
* @param p_header_data the data contained in the COD/COC box.
* @param p_header_size the size of the data contained in the COD/COC marker.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_SPCod_SPCoc( opj_j2k_t *p_j2k,
OPJ_UINT32 compno,
OPJ_BYTE * p_header_data,
OPJ_UINT32 * p_header_size,
opj_event_mgr_t * p_manager );
/**
* Gets the size taken by writing a SQcd or SQcc element, i.e. the quantization values of a band in the QCD or QCC.
*
* @param p_tile_no the tile index.
* @param p_comp_no the component being outputted.
* @param p_j2k the J2K codec.
*
* @return the number of bytes taken by the SQcd element.
*/
static OPJ_UINT32 opj_j2k_get_SQcd_SQcc_size ( opj_j2k_t *p_j2k,
OPJ_UINT32 p_tile_no,
OPJ_UINT32 p_comp_no );
/**
* Writes a SQcd or SQcc element, i.e. the quantization values of a band in the QCD or QCC.
*
* @param p_tile_no the tile to output.
* @param p_comp_no the component number to output.
* @param p_data the data buffer.
* @param p_header_size pointer to the size of the data buffer, it is changed by the function.
* @param p_j2k J2K codec.
* @param p_manager the user event manager.
*
*/
static OPJ_BOOL opj_j2k_write_SQcd_SQcc(opj_j2k_t *p_j2k,
OPJ_UINT32 p_tile_no,
OPJ_UINT32 p_comp_no,
OPJ_BYTE * p_data,
OPJ_UINT32 * p_header_size,
opj_event_mgr_t * p_manager);
/**
* Updates the Tile Length Marker.
*/
static void opj_j2k_update_tlm ( opj_j2k_t * p_j2k, OPJ_UINT32 p_tile_part_size);
/**
* Reads a SQcd or SQcc element, i.e. the quantization values of a band in the QCD or QCC.
*
* @param p_j2k J2K codec.
* @param compno the component number to output.
* @param p_header_data the data buffer.
* @param p_header_size pointer to the size of the data buffer, it is changed by the function.
* @param p_manager the user event manager.
*
*/
static OPJ_BOOL opj_j2k_read_SQcd_SQcc( opj_j2k_t *p_j2k,
OPJ_UINT32 compno,
OPJ_BYTE * p_header_data,
OPJ_UINT32 * p_header_size,
opj_event_mgr_t * p_manager );
/**
* Copies the tile component parameters of all the components from the first tile component.
*
* @param p_j2k the J2k codec.
*/
static void opj_j2k_copy_tile_component_parameters( opj_j2k_t *p_j2k );
/**
* Copies the tile quantization parameters of all the components from the first tile component.
*
* @param p_j2k the J2k codec.
*/
static void opj_j2k_copy_tile_quantization_parameters( opj_j2k_t *p_j2k );
/**
* Reads the tiles.
*/
static OPJ_BOOL opj_j2k_decode_tiles ( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager);
static OPJ_BOOL opj_j2k_pre_write_tile ( opj_j2k_t * p_j2k,
OPJ_UINT32 p_tile_index,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager );
static OPJ_BOOL opj_j2k_update_image_data (opj_tcd_t * p_tcd, OPJ_BYTE * p_data, opj_image_t* p_output_image);
static void opj_get_tile_dimensions(opj_image_t * l_image,
opj_tcd_tilecomp_t * l_tilec,
opj_image_comp_t * l_img_comp,
OPJ_UINT32* l_size_comp,
OPJ_UINT32* l_width,
OPJ_UINT32* l_height,
OPJ_UINT32* l_offset_x,
OPJ_UINT32* l_offset_y,
OPJ_UINT32* l_image_width,
OPJ_UINT32* l_stride,
OPJ_UINT32* l_tile_offset);
static void opj_j2k_get_tile_data (opj_tcd_t * p_tcd, OPJ_BYTE * p_data);
static OPJ_BOOL opj_j2k_post_write_tile (opj_j2k_t * p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager );
/**
* Sets up the procedures to do on writing header.
* Developers wanting to extend the library can add their own writing procedures.
*/
static OPJ_BOOL opj_j2k_setup_header_writing (opj_j2k_t *p_j2k, opj_event_mgr_t * p_manager);
static OPJ_BOOL opj_j2k_write_first_tile_part( opj_j2k_t *p_j2k,
OPJ_BYTE * p_data,
OPJ_UINT32 * p_data_written,
OPJ_UINT32 p_total_data_size,
opj_stream_private_t *p_stream,
struct opj_event_mgr * p_manager );
static OPJ_BOOL opj_j2k_write_all_tile_parts( opj_j2k_t *p_j2k,
OPJ_BYTE * p_data,
OPJ_UINT32 * p_data_written,
OPJ_UINT32 p_total_data_size,
opj_stream_private_t *p_stream,
struct opj_event_mgr * p_manager );
/**
* Gets the offset of the header.
*
* @param p_stream the stream to write data to.
* @param p_j2k J2K codec.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_get_end_header( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager );
static OPJ_BOOL opj_j2k_allocate_tile_element_cstr_index(opj_j2k_t *p_j2k);
/*
* -----------------------------------------------------------------------
* -----------------------------------------------------------------------
* -----------------------------------------------------------------------
*/
/**
* Writes the SOC marker (Start Of Codestream)
*
* @param p_stream the stream to write data to.
* @param p_j2k J2K codec.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_write_soc( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager );
/**
* Reads a SOC marker (Start of Codestream)
* @param p_j2k the jpeg2000 file codec.
* @param p_stream the stream to read data from.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_soc( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager );
/**
* Writes the SIZ marker (image and tile size)
*
* @param p_j2k J2K codec.
* @param p_stream the stream to write data to.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_write_siz( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager );
/**
* Reads a SIZ marker (image and tile size)
* @param p_j2k the jpeg2000 file codec.
* @param p_header_data the data contained in the SIZ box.
* @param p_header_size the size of the data contained in the SIZ marker.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_siz(opj_j2k_t *p_j2k,
OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
opj_event_mgr_t * p_manager);
/**
* Writes the COM marker (comment)
*
* @param p_stream the stream to write data to.
* @param p_j2k J2K codec.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_write_com( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager );
/**
* Reads a COM marker (comments)
* @param p_j2k the jpeg2000 file codec.
* @param p_header_data the data contained in the COM box.
* @param p_header_size the size of the data contained in the COM marker.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_com ( opj_j2k_t *p_j2k,
OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
opj_event_mgr_t * p_manager );
/**
* Writes the COD marker (Coding style default)
*
* @param p_stream the stream to write data to.
* @param p_j2k J2K codec.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_write_cod( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager );
/**
* Reads a COD marker (Coding style defaults)
* @param p_header_data the data contained in the COD box.
* @param p_j2k the jpeg2000 codec.
* @param p_header_size the size of the data contained in the COD marker.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_cod ( opj_j2k_t *p_j2k,
OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
opj_event_mgr_t * p_manager);
#if 0
/**
* Writes the COC marker (Coding style component)
*
* @param p_j2k J2K codec.
* @param p_comp_no the index of the component to output.
* @param p_stream the stream to write data to.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_write_coc( opj_j2k_t *p_j2k,
OPJ_UINT32 p_comp_no,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager );
#endif
#if 0
/**
* Writes the COC marker (Coding style component)
*
* @param p_j2k J2K codec.
* @param p_comp_no the index of the component to output.
* @param p_data FIXME DOC
* @param p_data_written FIXME DOC
* @param p_manager the user event manager.
*/
static void opj_j2k_write_coc_in_memory(opj_j2k_t *p_j2k,
OPJ_UINT32 p_comp_no,
OPJ_BYTE * p_data,
OPJ_UINT32 * p_data_written,
opj_event_mgr_t * p_manager );
#endif
/**
* Gets the maximum size taken by a coc.
*
* @param p_j2k the jpeg2000 codec to use.
*/
static OPJ_UINT32 opj_j2k_get_max_coc_size(opj_j2k_t *p_j2k);
/**
* Reads a COC marker (Coding Style Component)
* @param p_header_data the data contained in the COC box.
* @param p_j2k the jpeg2000 codec.
* @param p_header_size the size of the data contained in the COC marker.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_coc ( opj_j2k_t *p_j2k,
OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
opj_event_mgr_t * p_manager );
/**
* Writes the QCD marker (quantization default)
*
* @param p_j2k J2K codec.
* @param p_stream the stream to write data to.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_write_qcd( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager );
/**
* Reads a QCD marker (Quantization defaults)
* @param p_header_data the data contained in the QCD box.
* @param p_j2k the jpeg2000 codec.
* @param p_header_size the size of the data contained in the QCD marker.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_qcd ( opj_j2k_t *p_j2k,
OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
opj_event_mgr_t * p_manager );
#if 0
/**
* Writes the QCC marker (quantization component)
*
* @param p_comp_no the index of the component to output.
* @param p_stream the stream to write data to.
* @param p_j2k J2K codec.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_write_qcc( opj_j2k_t *p_j2k,
OPJ_UINT32 p_comp_no,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager );
#endif
#if 0
/**
* Writes the QCC marker (quantization component)
*
* @param p_j2k J2K codec.
* @param p_comp_no the index of the component to output.
* @param p_data FIXME DOC
* @param p_data_written the stream to write data to.
* @param p_manager the user event manager.
*/
static void opj_j2k_write_qcc_in_memory(opj_j2k_t *p_j2k,
OPJ_UINT32 p_comp_no,
OPJ_BYTE * p_data,
OPJ_UINT32 * p_data_written,
opj_event_mgr_t * p_manager );
#endif
/**
* Gets the maximum size taken by a qcc.
*/
static OPJ_UINT32 opj_j2k_get_max_qcc_size (opj_j2k_t *p_j2k);
/**
* Reads a QCC marker (Quantization component)
* @param p_header_data the data contained in the QCC box.
* @param p_j2k the jpeg2000 codec.
* @param p_header_size the size of the data contained in the QCC marker.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_qcc( opj_j2k_t *p_j2k,
OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
opj_event_mgr_t * p_manager);
/**
* Writes the POC marker (Progression Order Change)
*
* @param p_stream the stream to write data to.
* @param p_j2k J2K codec.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_write_poc( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager );
/**
* Writes the POC marker (Progression Order Change)
*
* @param p_j2k J2K codec.
* @param p_data FIXME DOC
* @param p_data_written the stream to write data to.
* @param p_manager the user event manager.
*/
static void opj_j2k_write_poc_in_memory(opj_j2k_t *p_j2k,
OPJ_BYTE * p_data,
OPJ_UINT32 * p_data_written,
opj_event_mgr_t * p_manager );
/**
* Gets the maximum size taken by the writing of a POC.
*/
static OPJ_UINT32 opj_j2k_get_max_poc_size(opj_j2k_t *p_j2k);
/**
* Reads a POC marker (Progression Order Change)
*
* @param p_header_data the data contained in the POC box.
* @param p_j2k the jpeg2000 codec.
* @param p_header_size the size of the data contained in the POC marker.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_poc ( opj_j2k_t *p_j2k,
OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
opj_event_mgr_t * p_manager );
/**
* Gets the maximum size taken by the toc headers of all the tile parts of any given tile.
*/
static OPJ_UINT32 opj_j2k_get_max_toc_size (opj_j2k_t *p_j2k);
/**
* Gets the maximum size taken by the headers of the SOT.
*
* @param p_j2k the jpeg2000 codec to use.
*/
static OPJ_UINT32 opj_j2k_get_specific_header_sizes(opj_j2k_t *p_j2k);
/**
* Reads a CRG marker (Component registration)
*
* @param p_header_data the data contained in the CRG box.
* @param p_j2k the jpeg2000 codec.
* @param p_header_size the size of the data contained in the CRG marker.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_crg ( opj_j2k_t *p_j2k,
OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
opj_event_mgr_t * p_manager );
/**
* Reads a TLM marker (Tile Length Marker)
*
* @param p_header_data the data contained in the TLM box.
* @param p_j2k the jpeg2000 codec.
* @param p_header_size the size of the data contained in the TLM marker.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_tlm ( opj_j2k_t *p_j2k,
OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
opj_event_mgr_t * p_manager);
/**
* Writes the updated tlm.
*
* @param p_stream the stream to write data to.
* @param p_j2k J2K codec.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_write_updated_tlm( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager );
/**
* Reads a PLM marker (Packet length, main header marker)
*
* @param p_header_data the data contained in the PLM box.
* @param p_j2k the jpeg2000 codec.
* @param p_header_size the size of the data contained in the PLM marker.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_plm ( opj_j2k_t *p_j2k,
OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
opj_event_mgr_t * p_manager);
/**
* Reads a PLT marker (Packet length, tile-part header)
*
* @param p_header_data the data contained in the PLT box.
* @param p_j2k the jpeg2000 codec.
* @param p_header_size the size of the data contained in the PLT marker.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_plt ( opj_j2k_t *p_j2k,
OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
opj_event_mgr_t * p_manager );
/**
* Reads a PPM marker (Packed headers, main header)
*
* @param p_header_data the data contained in the PPM box.
* @param p_j2k the jpeg2000 codec.
* @param p_header_size the size of the data contained in the PPM marker.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_ppm (
opj_j2k_t *p_j2k,
OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
opj_event_mgr_t * p_manager );
/**
* Merges all PPM markers read (Packed headers, main header)
*
* @param p_cp main coding parameters.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_merge_ppm ( opj_cp_t *p_cp, opj_event_mgr_t * p_manager );
/**
* Reads a PPT marker (Packed packet headers, tile-part header)
*
* @param p_header_data the data contained in the PPT box.
* @param p_j2k the jpeg2000 codec.
* @param p_header_size the size of the data contained in the PPT marker.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_ppt ( opj_j2k_t *p_j2k,
OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
opj_event_mgr_t * p_manager );
/**
* Merges all PPT markers read (Packed headers, tile-part header)
*
* @param p_tcp the tile.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_merge_ppt ( opj_tcp_t *p_tcp,
opj_event_mgr_t * p_manager );
/**
* Writes the TLM marker (Tile Length Marker)
*
* @param p_stream the stream to write data to.
* @param p_j2k J2K codec.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_write_tlm( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager );
/**
* Writes the SOT marker (Start of tile-part)
*
* @param p_j2k J2K codec.
* @param p_data FIXME DOC
* @param p_data_written FIXME DOC
* @param p_stream the stream to write data to.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_write_sot( opj_j2k_t *p_j2k,
OPJ_BYTE * p_data,
OPJ_UINT32 * p_data_written,
const opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager );
/**
* Reads values from a SOT marker (Start of tile-part)
*
* the j2k decoder state is not affected. No side effects, no checks except for p_header_size.
*
* @param p_header_data the data contained in the SOT marker.
* @param p_header_size the size of the data contained in the SOT marker.
* @param p_tile_no Isot.
* @param p_tot_len Psot.
* @param p_current_part TPsot.
* @param p_num_parts TNsot.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_get_sot_values(OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
OPJ_UINT32* p_tile_no,
OPJ_UINT32* p_tot_len,
OPJ_UINT32* p_current_part,
OPJ_UINT32* p_num_parts,
opj_event_mgr_t * p_manager );
/**
* Reads a SOT marker (Start of tile-part)
*
* @param p_header_data the data contained in the SOT marker.
* @param p_j2k the jpeg2000 codec.
* @param p_header_size the size of the data contained in the SOT marker.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_sot ( opj_j2k_t *p_j2k,
OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
opj_event_mgr_t * p_manager );
/**
* Writes the SOD marker (Start of data)
*
* @param p_j2k J2K codec.
* @param p_tile_coder FIXME DOC
* @param p_data FIXME DOC
* @param p_data_written FIXME DOC
* @param p_total_data_size FIXME DOC
* @param p_stream the stream to write data to.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_write_sod( opj_j2k_t *p_j2k,
opj_tcd_t * p_tile_coder,
OPJ_BYTE * p_data,
OPJ_UINT32 * p_data_written,
OPJ_UINT32 p_total_data_size,
const opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager );
/**
* Reads a SOD marker (Start Of Data)
*
* @param p_j2k the jpeg2000 codec.
* @param p_stream FIXME DOC
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_sod( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager );
static void opj_j2k_update_tlm (opj_j2k_t * p_j2k, OPJ_UINT32 p_tile_part_size )
{
opj_write_bytes(p_j2k->m_specific_param.m_encoder.m_tlm_sot_offsets_current,p_j2k->m_current_tile_number,1); /* Ttlm_i */
++p_j2k->m_specific_param.m_encoder.m_tlm_sot_offsets_current;
opj_write_bytes(p_j2k->m_specific_param.m_encoder.m_tlm_sot_offsets_current,p_tile_part_size,4); /* Ptlm_i */
p_j2k->m_specific_param.m_encoder.m_tlm_sot_offsets_current += 4;
}
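/*
 * Layout sketch of the record opj_j2k_update_tlm() emits (matching the
 * standard's TLM form with ST=1, SP=1):
 *
 *   offset 0: Ttlm_i (1 byte)  - index of the tile the tile-part belongs to
 *   offset 1: Ptlm_i (4 bytes) - length in bytes of the tile-part, SOT included
 *
 * so m_tlm_sot_offsets_current advances by 5 bytes per recorded tile-part.
 */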
/**
* Writes the RGN marker (Region Of Interest)
*
* @param p_tile_no the tile to output
* @param p_comp_no the component to output
* @param nb_comps the number of components
* @param p_stream the stream to write data to.
* @param p_j2k J2K codec.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_write_rgn( opj_j2k_t *p_j2k,
OPJ_UINT32 p_tile_no,
OPJ_UINT32 p_comp_no,
OPJ_UINT32 nb_comps,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager );
/**
* Reads a RGN marker (Region Of Interest)
*
* @param p_header_data the data contained in the RGN box.
* @param p_j2k the jpeg2000 codec.
* @param p_header_size the size of the data contained in the RGN marker.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_rgn (opj_j2k_t *p_j2k,
OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
opj_event_mgr_t * p_manager );
/**
* Writes the EOC marker (End of Codestream)
*
* @param p_stream the stream to write data to.
* @param p_j2k J2K codec.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_write_eoc( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager );
#if 0
/**
* Reads a EOC marker (End Of Codestream)
*
* @param p_j2k the jpeg2000 codec.
* @param p_stream FIXME DOC
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_eoc ( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager );
#endif
/**
* Writes the CBD-MCT-MCC-MCO markers (Multi components transform)
*
* @param p_stream the stream to write data to.
* @param p_j2k J2K codec.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_write_mct_data_group( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager );
/**
* Initializes the info structure.
*
* @param p_stream the stream to write data to.
* @param p_j2k J2K codec.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_init_info( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager );
/**
Add main header marker information
@param cstr_index Codestream information structure
@param type marker type
@param pos byte offset of marker segment
@param len length of marker segment
*/
static OPJ_BOOL opj_j2k_add_mhmarker(opj_codestream_index_t *cstr_index, OPJ_UINT32 type, OPJ_OFF_T pos, OPJ_UINT32 len);
/**
Add tile header marker information
@param tileno tile index number
@param cstr_index Codestream information structure
@param type marker type
@param pos byte offset of marker segment
@param len length of marker segment
*/
static OPJ_BOOL opj_j2k_add_tlmarker(OPJ_UINT32 tileno, opj_codestream_index_t *cstr_index, OPJ_UINT32 type, OPJ_OFF_T pos, OPJ_UINT32 len);
/**
* Reads an unknown marker
*
* @param p_j2k the jpeg2000 codec.
* @param p_stream the stream object to read from.
* @param output_marker FIXME DOC
* @param p_manager the user event manager.
*
* @return true if the marker could be deduced.
*/
static OPJ_BOOL opj_j2k_read_unk( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
OPJ_UINT32 *output_marker,
opj_event_mgr_t * p_manager );
/**
* Writes the MCT marker (Multiple Component Transform)
*
* @param p_j2k J2K codec.
* @param p_mct_record FIXME DOC
* @param p_stream the stream to write data to.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_write_mct_record( opj_j2k_t *p_j2k,
opj_mct_data_t * p_mct_record,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager );
/**
* Reads a MCT marker (Multiple Component Transform)
*
* @param p_header_data the data contained in the MCT box.
* @param p_j2k the jpeg2000 codec.
* @param p_header_size the size of the data contained in the MCT marker.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_mct ( opj_j2k_t *p_j2k,
OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
opj_event_mgr_t * p_manager );
/**
* Writes the MCC marker (Multiple Component Collection)
*
* @param p_j2k J2K codec.
* @param p_mcc_record FIXME DOC
* @param p_stream the stream to write data to.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_write_mcc_record( opj_j2k_t *p_j2k,
opj_simple_mcc_decorrelation_data_t * p_mcc_record,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager );
/**
* Reads a MCC marker (Multiple Component Collection)
*
* @param p_header_data the data contained in the MCC box.
* @param p_j2k the jpeg2000 codec.
* @param p_header_size the size of the data contained in the MCC marker.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_mcc ( opj_j2k_t *p_j2k,
OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
opj_event_mgr_t * p_manager );
/**
* Writes the MCO marker (Multiple component transformation ordering)
*
* @param p_stream the stream to write data to.
* @param p_j2k J2K codec.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_write_mco( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager );
/**
* Reads a MCO marker (Multiple Component Transform Ordering)
*
* @param p_header_data the data contained in the MCO box.
* @param p_j2k the jpeg2000 codec.
* @param p_header_size the size of the data contained in the MCO marker.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_mco ( opj_j2k_t *p_j2k,
OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
opj_event_mgr_t * p_manager );
static OPJ_BOOL opj_j2k_add_mct(opj_tcp_t * p_tcp, opj_image_t * p_image, OPJ_UINT32 p_index);
static void opj_j2k_read_int16_to_float (const void * p_src_data, void * p_dest_data, OPJ_UINT32 p_nb_elem);
static void opj_j2k_read_int32_to_float (const void * p_src_data, void * p_dest_data, OPJ_UINT32 p_nb_elem);
static void opj_j2k_read_float32_to_float (const void * p_src_data, void * p_dest_data, OPJ_UINT32 p_nb_elem);
static void opj_j2k_read_float64_to_float (const void * p_src_data, void * p_dest_data, OPJ_UINT32 p_nb_elem);
static void opj_j2k_read_int16_to_int32 (const void * p_src_data, void * p_dest_data, OPJ_UINT32 p_nb_elem);
static void opj_j2k_read_int32_to_int32 (const void * p_src_data, void * p_dest_data, OPJ_UINT32 p_nb_elem);
static void opj_j2k_read_float32_to_int32 (const void * p_src_data, void * p_dest_data, OPJ_UINT32 p_nb_elem);
static void opj_j2k_read_float64_to_int32 (const void * p_src_data, void * p_dest_data, OPJ_UINT32 p_nb_elem);
static void opj_j2k_write_float_to_int16 (const void * p_src_data, void * p_dest_data, OPJ_UINT32 p_nb_elem);
static void opj_j2k_write_float_to_int32 (const void * p_src_data, void * p_dest_data, OPJ_UINT32 p_nb_elem);
static void opj_j2k_write_float_to_float (const void * p_src_data, void * p_dest_data, OPJ_UINT32 p_nb_elem);
static void opj_j2k_write_float_to_float64 (const void * p_src_data, void * p_dest_data, OPJ_UINT32 p_nb_elem);
/**
* Ends the encoding, i.e. frees memory.
*
* @param p_stream the stream to write data to.
* @param p_j2k J2K codec.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_end_encoding( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager );
/**
* Writes the CBD marker (Component bit depth definition)
*
* @param p_stream the stream to write data to.
* @param p_j2k J2K codec.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_write_cbd( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager );
/**
* Reads a CBD marker (Component bit depth definition)
* @param p_header_data the data contained in the CBD box.
* @param p_j2k the jpeg2000 codec.
* @param p_header_size the size of the data contained in the CBD marker.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_cbd ( opj_j2k_t *p_j2k,
OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
opj_event_mgr_t * p_manager);
#if 0
/**
* Writes COC marker for each component.
*
* @param p_stream the stream to write data to.
* @param p_j2k J2K codec.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_write_all_coc( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager );
#endif
#if 0
/**
* Writes QCC marker for each component.
*
* @param p_stream the stream to write data to.
* @param p_j2k J2K codec.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_write_all_qcc( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager );
#endif
/**
* Writes regions of interests.
*
* @param p_stream the stream to write data to.
* @param p_j2k J2K codec.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_write_regions( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager );
/**
* Writes EPC ????
*
* @param p_stream the stream to write data to.
* @param p_j2k J2K codec.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_write_epc( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager );
/**
* Checks the progression order change values. Tells if the pocs given as input are valid.
* A nice message is output on error.
*
* @param p_pocs the progression order changes.
* @param p_nb_pocs the number of progression order changes.
* @param p_nb_resolutions the number of resolutions.
* @param numcomps the number of components
* @param numlayers the number of layers.
* @param p_manager the user event manager.
*
* @return true if the pocs are valid.
*/
static OPJ_BOOL opj_j2k_check_poc_val( const opj_poc_t *p_pocs,
OPJ_UINT32 p_nb_pocs,
OPJ_UINT32 p_nb_resolutions,
OPJ_UINT32 numcomps,
OPJ_UINT32 numlayers,
opj_event_mgr_t * p_manager);
/**
* Gets the number of tile parts used for the given change of progression (if any) and the given tile.
*
* @param cp the coding parameters.
* @param pino the offset of the given poc (i.e. its position in the coding parameter).
* @param tileno the given tile.
*
* @return the number of tile parts.
*/
static OPJ_UINT32 opj_j2k_get_num_tp( opj_cp_t *cp, OPJ_UINT32 pino, OPJ_UINT32 tileno);
/**
* Calculates the total number of tile parts needed by the encoder to
* encode such an image. If not enough memory is available, then the function returns false.
*
* @param p_nb_tiles pointer that will hold the number of tile parts.
* @param cp the coding parameters for the image.
* @param image the image to encode.
* @param p_j2k the p_j2k encoder.
* @param p_manager the user event manager.
*
* @return true if the function was successful, false otherwise.
*/
static OPJ_BOOL opj_j2k_calculate_tp( opj_j2k_t *p_j2k,
opj_cp_t *cp,
OPJ_UINT32 * p_nb_tiles,
opj_image_t *image,
opj_event_mgr_t * p_manager);
static void opj_j2k_dump_MH_info(opj_j2k_t* p_j2k, FILE* out_stream);
static void opj_j2k_dump_MH_index(opj_j2k_t* p_j2k, FILE* out_stream);
static opj_codestream_index_t* opj_j2k_create_cstr_index(void);
static OPJ_FLOAT32 opj_j2k_get_tp_stride (opj_tcp_t * p_tcp);
static OPJ_FLOAT32 opj_j2k_get_default_stride (opj_tcp_t * p_tcp);
static int opj_j2k_initialise_4K_poc(opj_poc_t *POC, int numres);
static void opj_j2k_set_cinema_parameters(opj_cparameters_t *parameters, opj_image_t *image, opj_event_mgr_t *p_manager);
static OPJ_BOOL opj_j2k_is_cinema_compliant(opj_image_t *image, OPJ_UINT16 rsiz, opj_event_mgr_t *p_manager);
/**
* Checks for invalid number of tile-parts in SOT marker (TPsot==TNsot). See issue 254.
*
* @param p_stream the stream to read data from.
* @param tile_no tile number we're looking for.
* @param p_correction_needed output value. If true, the non-conformant codestream needs TNsot correction.
* @param p_manager the user event manager.
*
* @return true if the function was successful, false otherwise.
*/
static OPJ_BOOL opj_j2k_need_nb_tile_parts_correction(opj_stream_private_t *p_stream, OPJ_UINT32 tile_no, OPJ_BOOL* p_correction_needed, opj_event_mgr_t * p_manager );
/*@}*/
/*@}*/
/* ----------------------------------------------------------------------- */
typedef struct j2k_prog_order{
OPJ_PROG_ORDER enum_prog;
char str_prog[5];
}j2k_prog_order_t;
static j2k_prog_order_t j2k_prog_order_list[] = {
{OPJ_CPRL, "CPRL"},
{OPJ_LRCP, "LRCP"},
{OPJ_PCRL, "PCRL"},
{OPJ_RLCP, "RLCP"},
{OPJ_RPCL, "RPCL"},
{(OPJ_PROG_ORDER)-1, ""}
};
/**
* Size in bytes of each MCT element type (INT16, INT32, FLOAT32, FLOAT64), matching the conversion tables below.
*/
static const OPJ_UINT32 MCT_ELEMENT_SIZE [] =
{
2,
4,
4,
8
};
typedef void (* opj_j2k_mct_function) (const void * p_src_data, void * p_dest_data, OPJ_UINT32 p_nb_elem);
static const opj_j2k_mct_function j2k_mct_read_functions_to_float [] =
{
opj_j2k_read_int16_to_float,
opj_j2k_read_int32_to_float,
opj_j2k_read_float32_to_float,
opj_j2k_read_float64_to_float
};
static const opj_j2k_mct_function j2k_mct_read_functions_to_int32 [] =
{
opj_j2k_read_int16_to_int32,
opj_j2k_read_int32_to_int32,
opj_j2k_read_float32_to_int32,
opj_j2k_read_float64_to_int32
};
static const opj_j2k_mct_function j2k_mct_write_functions_from_float [] =
{
opj_j2k_write_float_to_int16,
opj_j2k_write_float_to_int32,
opj_j2k_write_float_to_float,
opj_j2k_write_float_to_float64
};
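/*
 * The tables above are all indexed by the MCT element type
 * (0 = INT16, 1 = INT32, 2 = FLOAT32, 3 = FLOAT64), so the element size and
 * the matching conversion routine can be picked in one step. A minimal
 * sketch with hypothetical local names:
 *
 *   OPJ_UINT32 l_elem_type = 2;                                   - FLOAT32
 *   OPJ_UINT32 l_elem_size = MCT_ELEMENT_SIZE[l_elem_type];       - 4 bytes
 *   opj_j2k_mct_function l_fn = j2k_mct_read_functions_to_float[l_elem_type];
 *   l_fn(l_src, l_dst, l_nb_elem);   - consumes l_nb_elem * l_elem_size bytes
 */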
typedef struct opj_dec_memory_marker_handler
{
/** marker value */
OPJ_UINT32 id;
/** value of the state when the marker can appear */
OPJ_UINT32 states;
/** action linked to the marker */
OPJ_BOOL (*handler) ( opj_j2k_t *p_j2k,
OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
opj_event_mgr_t * p_manager );
}
opj_dec_memory_marker_handler_t;
static const opj_dec_memory_marker_handler_t j2k_memory_marker_handler_tab [] =
{
{J2K_MS_SOT, J2K_STATE_MH | J2K_STATE_TPHSOT, opj_j2k_read_sot},
{J2K_MS_COD, J2K_STATE_MH | J2K_STATE_TPH, opj_j2k_read_cod},
{J2K_MS_COC, J2K_STATE_MH | J2K_STATE_TPH, opj_j2k_read_coc},
{J2K_MS_RGN, J2K_STATE_MH | J2K_STATE_TPH, opj_j2k_read_rgn},
{J2K_MS_QCD, J2K_STATE_MH | J2K_STATE_TPH, opj_j2k_read_qcd},
{J2K_MS_QCC, J2K_STATE_MH | J2K_STATE_TPH, opj_j2k_read_qcc},
{J2K_MS_POC, J2K_STATE_MH | J2K_STATE_TPH, opj_j2k_read_poc},
{J2K_MS_SIZ, J2K_STATE_MHSIZ, opj_j2k_read_siz},
{J2K_MS_TLM, J2K_STATE_MH, opj_j2k_read_tlm},
{J2K_MS_PLM, J2K_STATE_MH, opj_j2k_read_plm},
{J2K_MS_PLT, J2K_STATE_TPH, opj_j2k_read_plt},
{J2K_MS_PPM, J2K_STATE_MH, opj_j2k_read_ppm},
{J2K_MS_PPT, J2K_STATE_TPH, opj_j2k_read_ppt},
{J2K_MS_SOP, 0, 0},
{J2K_MS_CRG, J2K_STATE_MH, opj_j2k_read_crg},
{J2K_MS_COM, J2K_STATE_MH | J2K_STATE_TPH, opj_j2k_read_com},
{J2K_MS_MCT, J2K_STATE_MH | J2K_STATE_TPH, opj_j2k_read_mct},
{J2K_MS_CBD, J2K_STATE_MH , opj_j2k_read_cbd},
{J2K_MS_MCC, J2K_STATE_MH | J2K_STATE_TPH, opj_j2k_read_mcc},
{J2K_MS_MCO, J2K_STATE_MH | J2K_STATE_TPH, opj_j2k_read_mco},
#ifdef USE_JPWL
#ifdef TODO_MS /* remove these functions which are not compatible with the v2 API */
{J2K_MS_EPC, J2K_STATE_MH | J2K_STATE_TPH, j2k_read_epc},
{J2K_MS_EPB, J2K_STATE_MH | J2K_STATE_TPH, j2k_read_epb},
{J2K_MS_ESD, J2K_STATE_MH | J2K_STATE_TPH, j2k_read_esd},
{J2K_MS_RED, J2K_STATE_MH | J2K_STATE_TPH, j2k_read_red},
#endif
#endif /* USE_JPWL */
#ifdef USE_JPSEC
{J2K_MS_SEC, J2K_DEC_STATE_MH, j2k_read_sec},
{J2K_MS_INSEC, 0, j2k_read_insec},
#endif /* USE_JPSEC */
{J2K_MS_UNK, J2K_STATE_MH | J2K_STATE_TPH, 0}/*opj_j2k_read_unk is directly used*/
};
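/*
 * Dispatch sketch: opj_j2k_get_marker_handler() scans the table above up to
 * the J2K_MS_UNK sentinel, so an unknown marker always resolves to the last
 * entry. A hypothetical caller (the real reading procedure also validates
 * e->states against the current decoder state before invoking the handler):
 *
 *   const opj_dec_memory_marker_handler_t * e = opj_j2k_get_marker_handler(l_marker);
 *   if (e->id == J2K_MS_UNK || !(e->states & p_j2k->m_specific_param.m_decoder.m_state)) {
 *       - unexpected marker: report the error or fall back to opj_j2k_read_unk()
 *   } else if (! e->handler(p_j2k, l_header_data, l_header_size, p_manager)) {
 *       return OPJ_FALSE;
 *   }
 */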
static void opj_j2k_read_int16_to_float (const void * p_src_data, void * p_dest_data, OPJ_UINT32 p_nb_elem)
{
OPJ_BYTE * l_src_data = (OPJ_BYTE *) p_src_data;
OPJ_FLOAT32 * l_dest_data = (OPJ_FLOAT32 *) p_dest_data;
OPJ_UINT32 i;
OPJ_UINT32 l_temp;
for (i=0;i<p_nb_elem;++i) {
opj_read_bytes(l_src_data,&l_temp,2);
l_src_data+=sizeof(OPJ_INT16);
*(l_dest_data++) = (OPJ_FLOAT32) l_temp;
}
}
static void opj_j2k_read_int32_to_float (const void * p_src_data, void * p_dest_data, OPJ_UINT32 p_nb_elem)
{
OPJ_BYTE * l_src_data = (OPJ_BYTE *) p_src_data;
OPJ_FLOAT32 * l_dest_data = (OPJ_FLOAT32 *) p_dest_data;
OPJ_UINT32 i;
OPJ_UINT32 l_temp;
for (i=0;i<p_nb_elem;++i) {
opj_read_bytes(l_src_data,&l_temp,4);
l_src_data+=sizeof(OPJ_INT32);
*(l_dest_data++) = (OPJ_FLOAT32) l_temp;
}
}
static void opj_j2k_read_float32_to_float (const void * p_src_data, void * p_dest_data, OPJ_UINT32 p_nb_elem)
{
OPJ_BYTE * l_src_data = (OPJ_BYTE *) p_src_data;
OPJ_FLOAT32 * l_dest_data = (OPJ_FLOAT32 *) p_dest_data;
OPJ_UINT32 i;
OPJ_FLOAT32 l_temp;
for (i=0;i<p_nb_elem;++i) {
opj_read_float(l_src_data,&l_temp);
l_src_data+=sizeof(OPJ_FLOAT32);
*(l_dest_data++) = l_temp;
}
}
static void opj_j2k_read_float64_to_float (const void * p_src_data, void * p_dest_data, OPJ_UINT32 p_nb_elem)
{
OPJ_BYTE * l_src_data = (OPJ_BYTE *) p_src_data;
OPJ_FLOAT32 * l_dest_data = (OPJ_FLOAT32 *) p_dest_data;
OPJ_UINT32 i;
OPJ_FLOAT64 l_temp;
for (i=0;i<p_nb_elem;++i) {
opj_read_double(l_src_data,&l_temp);
l_src_data+=sizeof(OPJ_FLOAT64);
*(l_dest_data++) = (OPJ_FLOAT32) l_temp;
}
}
static void opj_j2k_read_int16_to_int32 (const void * p_src_data, void * p_dest_data, OPJ_UINT32 p_nb_elem)
{
OPJ_BYTE * l_src_data = (OPJ_BYTE *) p_src_data;
OPJ_INT32 * l_dest_data = (OPJ_INT32 *) p_dest_data;
OPJ_UINT32 i;
OPJ_UINT32 l_temp;
for (i=0;i<p_nb_elem;++i) {
opj_read_bytes(l_src_data,&l_temp,2);
l_src_data+=sizeof(OPJ_INT16);
*(l_dest_data++) = (OPJ_INT32) l_temp;
}
}
static void opj_j2k_read_int32_to_int32 (const void * p_src_data, void * p_dest_data, OPJ_UINT32 p_nb_elem)
{
OPJ_BYTE * l_src_data = (OPJ_BYTE *) p_src_data;
OPJ_INT32 * l_dest_data = (OPJ_INT32 *) p_dest_data;
OPJ_UINT32 i;
OPJ_UINT32 l_temp;
for (i=0;i<p_nb_elem;++i) {
opj_read_bytes(l_src_data,&l_temp,4);
l_src_data+=sizeof(OPJ_INT32);
*(l_dest_data++) = (OPJ_INT32) l_temp;
}
}
static void opj_j2k_read_float32_to_int32 (const void * p_src_data, void * p_dest_data, OPJ_UINT32 p_nb_elem)
{
OPJ_BYTE * l_src_data = (OPJ_BYTE *) p_src_data;
OPJ_INT32 * l_dest_data = (OPJ_INT32 *) p_dest_data;
OPJ_UINT32 i;
OPJ_FLOAT32 l_temp;
for (i=0;i<p_nb_elem;++i) {
opj_read_float(l_src_data,&l_temp);
l_src_data+=sizeof(OPJ_FLOAT32);
*(l_dest_data++) = (OPJ_INT32) l_temp;
}
}
static void opj_j2k_read_float64_to_int32 (const void * p_src_data, void * p_dest_data, OPJ_UINT32 p_nb_elem)
{
OPJ_BYTE * l_src_data = (OPJ_BYTE *) p_src_data;
OPJ_INT32 * l_dest_data = (OPJ_INT32 *) p_dest_data;
OPJ_UINT32 i;
OPJ_FLOAT64 l_temp;
for (i=0;i<p_nb_elem;++i) {
opj_read_double(l_src_data,&l_temp);
l_src_data+=sizeof(OPJ_FLOAT64);
*(l_dest_data++) = (OPJ_INT32) l_temp;
}
}
static void opj_j2k_write_float_to_int16 (const void * p_src_data, void * p_dest_data, OPJ_UINT32 p_nb_elem)
{
OPJ_BYTE * l_dest_data = (OPJ_BYTE *) p_dest_data;
OPJ_FLOAT32 * l_src_data = (OPJ_FLOAT32 *) p_src_data;
OPJ_UINT32 i;
OPJ_UINT32 l_temp;
for (i=0;i<p_nb_elem;++i) {
l_temp = (OPJ_UINT32) *(l_src_data++);
opj_write_bytes(l_dest_data,l_temp,sizeof(OPJ_INT16));
l_dest_data+=sizeof(OPJ_INT16);
}
}
static void opj_j2k_write_float_to_int32 (const void * p_src_data, void * p_dest_data, OPJ_UINT32 p_nb_elem)
{
OPJ_BYTE * l_dest_data = (OPJ_BYTE *) p_dest_data;
OPJ_FLOAT32 * l_src_data = (OPJ_FLOAT32 *) p_src_data;
OPJ_UINT32 i;
OPJ_UINT32 l_temp;
for (i=0;i<p_nb_elem;++i) {
l_temp = (OPJ_UINT32) *(l_src_data++);
opj_write_bytes(l_dest_data,l_temp,sizeof(OPJ_INT32));
l_dest_data+=sizeof(OPJ_INT32);
}
}
static void opj_j2k_write_float_to_float (const void * p_src_data, void * p_dest_data, OPJ_UINT32 p_nb_elem)
{
OPJ_BYTE * l_dest_data = (OPJ_BYTE *) p_dest_data;
OPJ_FLOAT32 * l_src_data = (OPJ_FLOAT32 *) p_src_data;
OPJ_UINT32 i;
OPJ_FLOAT32 l_temp;
for (i=0;i<p_nb_elem;++i) {
l_temp = (OPJ_FLOAT32) *(l_src_data++);
opj_write_float(l_dest_data,l_temp);
l_dest_data+=sizeof(OPJ_FLOAT32);
}
}
static void opj_j2k_write_float_to_float64 (const void * p_src_data, void * p_dest_data, OPJ_UINT32 p_nb_elem)
{
OPJ_BYTE * l_dest_data = (OPJ_BYTE *) p_dest_data;
OPJ_FLOAT32 * l_src_data = (OPJ_FLOAT32 *) p_src_data;
OPJ_UINT32 i;
OPJ_FLOAT64 l_temp;
for (i=0;i<p_nb_elem;++i) {
l_temp = (OPJ_FLOAT64) *(l_src_data++);
opj_write_double(l_dest_data,l_temp);
l_dest_data+=sizeof(OPJ_FLOAT64);
}
}
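/*
 * All the conversion helpers above share one pattern: the raw side is walked
 * through opj_read_bytes()/opj_write_bytes() (or the float/double variants),
 * which keeps the serialized form in codestream byte order independently of
 * the host, while the in-memory side is a plain typed array. Note that the
 * int16 readers load into an OPJ_UINT32 without sign extension, so a
 * negative 16-bit element arrives as a large positive value.
 */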
char *opj_j2k_convert_progression_order(OPJ_PROG_ORDER prg_order){
j2k_prog_order_t *po;
for(po = j2k_prog_order_list; po->enum_prog != -1; po++ ){
if(po->enum_prog == prg_order){
return po->str_prog;
}
}
return po->str_prog;
}
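/*
 * Usage sketch: opj_j2k_convert_progression_order(OPJ_LRCP) returns "LRCP".
 * An enum value absent from j2k_prog_order_list falls through to the
 * sentinel entry, so callers can test str_prog[0] == '\0' for unknown orders.
 */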
static OPJ_BOOL opj_j2k_check_poc_val( const opj_poc_t *p_pocs,
OPJ_UINT32 p_nb_pocs,
OPJ_UINT32 p_nb_resolutions,
OPJ_UINT32 p_num_comps,
OPJ_UINT32 p_num_layers,
opj_event_mgr_t * p_manager)
{
OPJ_UINT32* packet_array;
OPJ_UINT32 index , resno, compno, layno;
OPJ_UINT32 i;
OPJ_UINT32 step_c = 1;
OPJ_UINT32 step_r = p_num_comps * step_c;
OPJ_UINT32 step_l = p_nb_resolutions * step_r;
OPJ_BOOL loss = OPJ_FALSE;
OPJ_UINT32 layno0 = 0;
packet_array = (OPJ_UINT32*) opj_calloc(step_l * p_num_layers, sizeof(OPJ_UINT32));
if (packet_array == 00) {
opj_event_msg(p_manager , EVT_ERROR, "Not enough memory for checking the poc values.\n");
return OPJ_FALSE;
}
if (p_nb_pocs == 0) {
opj_free(packet_array);
return OPJ_TRUE;
}
index = step_r * p_pocs->resno0;
/* take each resolution for each poc */
for (resno = p_pocs->resno0 ; resno < p_pocs->resno1 ; ++resno)
{
OPJ_UINT32 res_index = index + p_pocs->compno0 * step_c;
/* take each comp of each resolution for each poc */
for (compno = p_pocs->compno0 ; compno < p_pocs->compno1 ; ++compno) {
OPJ_UINT32 comp_index = res_index + layno0 * step_l;
/* and finally take each layer of each res of ... */
for (layno = layno0; layno < p_pocs->layno1 ; ++layno) {
/*index = step_r * resno + step_c * compno + step_l * layno;*/
packet_array[comp_index] = 1;
comp_index += step_l;
}
res_index += step_c;
}
index += step_r;
}
++p_pocs;
/* iterate through all the pocs */
for (i = 1; i < p_nb_pocs ; ++i) {
OPJ_UINT32 l_last_layno1 = (p_pocs-1)->layno1 ;
layno0 = (p_pocs->layno1 > l_last_layno1)? l_last_layno1 : 0;
index = step_r * p_pocs->resno0;
/* take each resolution for each poc */
for (resno = p_pocs->resno0 ; resno < p_pocs->resno1 ; ++resno) {
OPJ_UINT32 res_index = index + p_pocs->compno0 * step_c;
/* take each comp of each resolution for each poc */
for (compno = p_pocs->compno0 ; compno < p_pocs->compno1 ; ++compno) {
OPJ_UINT32 comp_index = res_index + layno0 * step_l;
/* and finally take each layer of each res of ... */
for (layno = layno0; layno < p_pocs->layno1 ; ++layno) {
/*index = step_r * resno + step_c * compno + step_l * layno;*/
packet_array[comp_index] = 1;
comp_index += step_l;
}
res_index += step_c;
}
index += step_r;
}
++p_pocs;
}
index = 0;
for (layno = 0; layno < p_num_layers ; ++layno) {
for (resno = 0; resno < p_nb_resolutions; ++resno) {
for (compno = 0; compno < p_num_comps; ++compno) {
loss |= (packet_array[index]!=1);
/*index = step_r * resno + step_c * compno + step_l * layno;*/
index += step_c;
}
}
}
if (loss) {
opj_event_msg(p_manager , EVT_ERROR, "Missing packets possible loss of data\n");
}
opj_free(packet_array);
return !loss;
}
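/*
 * Worked example for the occupancy check above (illustrative numbers): with
 * 2 components, 2 resolutions and 2 layers, step_c = 1, step_r = 2 and
 * step_l = 4, so packet (layno, resno, compno) maps to the flat index
 * layno * 4 + resno * 2 + compno in packet_array. The final scan walks the
 * 8 cells linearly (step_c == 1) and flags a loss if any cell was never
 * covered by a poc.
 */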
/* ----------------------------------------------------------------------- */
static OPJ_UINT32 opj_j2k_get_num_tp(opj_cp_t *cp, OPJ_UINT32 pino, OPJ_UINT32 tileno)
{
const OPJ_CHAR *prog = 00;
OPJ_INT32 i;
OPJ_UINT32 tpnum = 1;
opj_tcp_t *tcp = 00;
opj_poc_t * l_current_poc = 00;
/* preconditions */
assert(tileno < (cp->tw * cp->th));
assert(pino < (cp->tcps[tileno].numpocs + 1));
/* get the given tile coding parameter */
tcp = &cp->tcps[tileno];
assert(tcp != 00);
l_current_poc = &(tcp->pocs[pino]);
assert(l_current_poc != 0);
/* get the progression order as a character string */
prog = opj_j2k_convert_progression_order(tcp->prg);
assert(strlen(prog) > 0);
if (cp->m_specific_param.m_enc.m_tp_on == 1) {
for (i=0;i<4;++i) {
switch (prog[i])
{
/* component wise */
case 'C':
tpnum *= l_current_poc->compE;
break;
/* resolution wise */
case 'R':
tpnum *= l_current_poc->resE;
break;
/* precinct wise */
case 'P':
tpnum *= l_current_poc->prcE;
break;
/* layer wise */
case 'L':
tpnum *= l_current_poc->layE;
break;
}
/* should we split here? */
if ( cp->m_specific_param.m_enc.m_tp_flag == prog[i] ) {
cp->m_specific_param.m_enc.m_tp_pos=i;
break;
}
}
}
else {
tpnum=1;
}
return tpnum;
}
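/*
 * Worked example (illustrative only): for tcp->prg == OPJ_RPCL the string is
 * "RPCL". With m_tp_on == 1 and m_tp_flag == 'R', the first iteration
 * multiplies tpnum by resE and breaks, giving one tile-part per resolution.
 * With m_tp_flag == 'C', the loop accumulates resE * prcE * compE before
 * breaking after the component step.
 */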
static OPJ_BOOL opj_j2k_calculate_tp( opj_j2k_t *p_j2k,
opj_cp_t *cp,
OPJ_UINT32 * p_nb_tiles,
opj_image_t *image,
opj_event_mgr_t * p_manager
)
{
OPJ_UINT32 pino,tileno;
OPJ_UINT32 l_nb_tiles;
opj_tcp_t *tcp;
/* preconditions */
assert(p_nb_tiles != 00);
assert(cp != 00);
assert(image != 00);
assert(p_j2k != 00);
assert(p_manager != 00);
l_nb_tiles = cp->tw * cp->th;
* p_nb_tiles = 0;
tcp = cp->tcps;
/* INDEX >> */
/* TODO mergeV2: check this part which use cstr_info */
/*if (p_j2k->cstr_info) {
opj_tile_info_t * l_info_tile_ptr = p_j2k->cstr_info->tile;
for (tileno = 0; tileno < l_nb_tiles; ++tileno) {
OPJ_UINT32 cur_totnum_tp = 0;
opj_pi_update_encoding_parameters(image,cp,tileno);
for (pino = 0; pino <= tcp->numpocs; ++pino)
{
OPJ_UINT32 tp_num = opj_j2k_get_num_tp(cp,pino,tileno);
*p_nb_tiles = *p_nb_tiles + tp_num;
cur_totnum_tp += tp_num;
}
tcp->m_nb_tile_parts = cur_totnum_tp;
l_info_tile_ptr->tp = (opj_tp_info_t *) opj_malloc(cur_totnum_tp * sizeof(opj_tp_info_t));
if (l_info_tile_ptr->tp == 00) {
return OPJ_FALSE;
}
memset(l_info_tile_ptr->tp,0,cur_totnum_tp * sizeof(opj_tp_info_t));
l_info_tile_ptr->num_tps = cur_totnum_tp;
++l_info_tile_ptr;
++tcp;
}
}
else */{
for (tileno = 0; tileno < l_nb_tiles; ++tileno) {
OPJ_UINT32 cur_totnum_tp = 0;
opj_pi_update_encoding_parameters(image,cp,tileno);
for (pino = 0; pino <= tcp->numpocs; ++pino) {
OPJ_UINT32 tp_num = opj_j2k_get_num_tp(cp,pino,tileno);
*p_nb_tiles = *p_nb_tiles + tp_num;
cur_totnum_tp += tp_num;
}
tcp->m_nb_tile_parts = cur_totnum_tp;
++tcp;
}
}
return OPJ_TRUE;
}
static OPJ_BOOL opj_j2k_write_soc( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager )
{
/* 2 bytes will be written */
OPJ_BYTE * l_start_stream = 00;
/* preconditions */
assert(p_stream != 00);
assert(p_j2k != 00);
assert(p_manager != 00);
l_start_stream = p_j2k->m_specific_param.m_encoder.m_header_tile_data;
/* write SOC identifier */
opj_write_bytes(l_start_stream,J2K_MS_SOC,2);
if (opj_stream_write_data(p_stream,l_start_stream,2,p_manager) != 2) {
return OPJ_FALSE;
}
/* UniPG>> */
#ifdef USE_JPWL
/* update markers struct */
/*
OPJ_BOOL res = j2k_add_marker(p_j2k->cstr_info, J2K_MS_SOC, p_stream_tell(p_stream) - 2, 2);
*/
assert( 0 && "TODO" );
#endif /* USE_JPWL */
/* <<UniPG */
return OPJ_TRUE;
}
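/*
 * On the wire the SOC segment is just the two marker bytes: J2K_MS_SOC is
 * 0xFF4F, so the opj_write_bytes() call above emits 0xFF 0x4F and nothing
 * else, which is why exactly 2 bytes of m_header_tile_data are used.
 */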
/**
* Reads a SOC marker (Start of Codestream)
* @param p_j2k the jpeg2000 file codec.
* @param p_stream FIXME DOC
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_soc( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager
)
{
OPJ_BYTE l_data [2];
OPJ_UINT32 l_marker;
/* preconditions */
assert(p_j2k != 00);
assert(p_manager != 00);
assert(p_stream != 00);
if (opj_stream_read_data(p_stream,l_data,2,p_manager) != 2) {
return OPJ_FALSE;
}
opj_read_bytes(l_data,&l_marker,2);
if (l_marker != J2K_MS_SOC) {
return OPJ_FALSE;
}
/* Next marker should be a SIZ marker in the main header */
p_j2k->m_specific_param.m_decoder.m_state = J2K_STATE_MHSIZ;
/* FIXME move it in a index structure included in p_j2k*/
p_j2k->cstr_index->main_head_start = opj_stream_tell(p_stream) - 2;
opj_event_msg(p_manager, EVT_INFO, "Start to read j2k main header (%d).\n", p_j2k->cstr_index->main_head_start);
/* Add the marker to the codestream index*/
if (OPJ_FALSE == opj_j2k_add_mhmarker(p_j2k->cstr_index, J2K_MS_SOC, p_j2k->cstr_index->main_head_start, 2)) {
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to add mh marker\n");
return OPJ_FALSE;
}
return OPJ_TRUE;
}
static OPJ_BOOL opj_j2k_write_siz( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager )
{
OPJ_UINT32 i;
OPJ_UINT32 l_size_len;
OPJ_BYTE * l_current_ptr;
opj_image_t * l_image = 00;
opj_cp_t *cp = 00;
opj_image_comp_t * l_img_comp = 00;
/* preconditions */
assert(p_stream != 00);
assert(p_j2k != 00);
assert(p_manager != 00);
l_image = p_j2k->m_private_image;
cp = &(p_j2k->m_cp);
l_size_len = 40 + 3 * l_image->numcomps;
l_img_comp = l_image->comps;
if (l_size_len > p_j2k->m_specific_param.m_encoder.m_header_tile_data_size) {
OPJ_BYTE *new_header_tile_data = (OPJ_BYTE *) opj_realloc(p_j2k->m_specific_param.m_encoder.m_header_tile_data, l_size_len);
if (! new_header_tile_data) {
opj_free(p_j2k->m_specific_param.m_encoder.m_header_tile_data);
p_j2k->m_specific_param.m_encoder.m_header_tile_data = NULL;
p_j2k->m_specific_param.m_encoder.m_header_tile_data_size = 0;
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory for the SIZ marker\n");
return OPJ_FALSE;
}
p_j2k->m_specific_param.m_encoder.m_header_tile_data = new_header_tile_data;
p_j2k->m_specific_param.m_encoder.m_header_tile_data_size = l_size_len;
}
l_current_ptr = p_j2k->m_specific_param.m_encoder.m_header_tile_data;
/* write SIZ identifier */
opj_write_bytes(l_current_ptr,J2K_MS_SIZ,2); /* SIZ */
l_current_ptr+=2;
opj_write_bytes(l_current_ptr,l_size_len-2,2); /* L_SIZ */
l_current_ptr+=2;
opj_write_bytes(l_current_ptr, cp->rsiz, 2); /* Rsiz (capabilities) */
l_current_ptr+=2;
opj_write_bytes(l_current_ptr, l_image->x1, 4); /* Xsiz */
l_current_ptr+=4;
opj_write_bytes(l_current_ptr, l_image->y1, 4); /* Ysiz */
l_current_ptr+=4;
opj_write_bytes(l_current_ptr, l_image->x0, 4); /* X0siz */
l_current_ptr+=4;
opj_write_bytes(l_current_ptr, l_image->y0, 4); /* Y0siz */
l_current_ptr+=4;
opj_write_bytes(l_current_ptr, cp->tdx, 4); /* XTsiz */
l_current_ptr+=4;
opj_write_bytes(l_current_ptr, cp->tdy, 4); /* YTsiz */
l_current_ptr+=4;
opj_write_bytes(l_current_ptr, cp->tx0, 4); /* XT0siz */
l_current_ptr+=4;
opj_write_bytes(l_current_ptr, cp->ty0, 4); /* YT0siz */
l_current_ptr+=4;
opj_write_bytes(l_current_ptr, l_image->numcomps, 2); /* Csiz */
l_current_ptr+=2;
for (i = 0; i < l_image->numcomps; ++i) {
/* TODO here with MCT ? */
opj_write_bytes(l_current_ptr, l_img_comp->prec - 1 + (l_img_comp->sgnd << 7), 1); /* Ssiz_i */
++l_current_ptr;
opj_write_bytes(l_current_ptr, l_img_comp->dx, 1); /* XRsiz_i */
++l_current_ptr;
opj_write_bytes(l_current_ptr, l_img_comp->dy, 1); /* YRsiz_i */
++l_current_ptr;
++l_img_comp;
}
if (opj_stream_write_data(p_stream,p_j2k->m_specific_param.m_encoder.m_header_tile_data,l_size_len,p_manager) != l_size_len) {
return OPJ_FALSE;
}
return OPJ_TRUE;
}
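/*
 * Size arithmetic sketch for the SIZ segment written above: l_size_len
 * counts the 2 marker bytes plus a 38-byte fixed part plus 3 bytes per
 * component (40 + 3 * numcomps), while the Lsiz field excludes the marker
 * itself (l_size_len - 2). A 3-component image therefore writes 49 bytes
 * with Lsiz == 47.
 */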
/**
* Reads a SIZ marker (image and tile size)
* @param p_j2k the jpeg2000 file codec.
* @param p_header_data the data contained in the SIZ box.
* @param p_header_size the size of the data contained in the SIZ marker.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_siz(opj_j2k_t *p_j2k,
OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
opj_event_mgr_t * p_manager
)
{
OPJ_UINT32 i;
OPJ_UINT32 l_nb_comp;
OPJ_UINT32 l_nb_comp_remain;
OPJ_UINT32 l_remaining_size;
OPJ_UINT32 l_nb_tiles;
OPJ_UINT32 l_tmp, l_tx1, l_ty1;
opj_image_t *l_image = 00;
opj_cp_t *l_cp = 00;
opj_image_comp_t * l_img_comp = 00;
opj_tcp_t * l_current_tile_param = 00;
/* preconditions */
assert(p_j2k != 00);
assert(p_manager != 00);
assert(p_header_data != 00);
l_image = p_j2k->m_private_image;
l_cp = &(p_j2k->m_cp);
/* minimum size == 39 - 3 (= minimum component parameter) */
if (p_header_size < 36) {
opj_event_msg(p_manager, EVT_ERROR, "Error with SIZ marker size\n");
return OPJ_FALSE;
}
l_remaining_size = p_header_size - 36;
l_nb_comp = l_remaining_size / 3;
l_nb_comp_remain = l_remaining_size % 3;
if (l_nb_comp_remain != 0){
opj_event_msg(p_manager, EVT_ERROR, "Error with SIZ marker size\n");
return OPJ_FALSE;
}
opj_read_bytes(p_header_data,&l_tmp ,2); /* Rsiz (capabilities) */
p_header_data+=2;
l_cp->rsiz = (OPJ_UINT16) l_tmp;
opj_read_bytes(p_header_data, (OPJ_UINT32*) &l_image->x1, 4); /* Xsiz */
p_header_data+=4;
opj_read_bytes(p_header_data, (OPJ_UINT32*) &l_image->y1, 4); /* Ysiz */
p_header_data+=4;
opj_read_bytes(p_header_data, (OPJ_UINT32*) &l_image->x0, 4); /* X0siz */
p_header_data+=4;
opj_read_bytes(p_header_data, (OPJ_UINT32*) &l_image->y0, 4); /* Y0siz */
p_header_data+=4;
opj_read_bytes(p_header_data, (OPJ_UINT32*) &l_cp->tdx, 4); /* XTsiz */
p_header_data+=4;
opj_read_bytes(p_header_data, (OPJ_UINT32*) &l_cp->tdy, 4); /* YTsiz */
p_header_data+=4;
opj_read_bytes(p_header_data, (OPJ_UINT32*) &l_cp->tx0, 4); /* XT0siz */
p_header_data+=4;
opj_read_bytes(p_header_data, (OPJ_UINT32*) &l_cp->ty0, 4); /* YT0siz */
p_header_data+=4;
opj_read_bytes(p_header_data, (OPJ_UINT32*) &l_tmp, 2); /* Csiz */
p_header_data+=2;
if (l_tmp < 16385)
l_image->numcomps = (OPJ_UINT16) l_tmp;
else {
opj_event_msg(p_manager, EVT_ERROR, "Error with SIZ marker: number of component is illegal -> %d\n", l_tmp);
return OPJ_FALSE;
}
if (l_image->numcomps != l_nb_comp) {
opj_event_msg(p_manager, EVT_ERROR, "Error with SIZ marker: number of component is not compatible with the remaining number of parameters ( %d vs %d)\n", l_image->numcomps, l_nb_comp);
return OPJ_FALSE;
}
/* testcase 4035.pdf.SIGSEGV.d8b.3375 */
/* testcase issue427-null-image-size.jp2 */
if ((l_image->x0 >= l_image->x1) || (l_image->y0 >= l_image->y1)) {
opj_event_msg(p_manager, EVT_ERROR, "Error with SIZ marker: negative or zero image size (%d x %d)\n", l_image->x1 - l_image->x0, l_image->y1 - l_image->y0);
return OPJ_FALSE;
}
/* testcase 2539.pdf.SIGFPE.706.1712 (also 3622.pdf.SIGFPE.706.2916 and 4008.pdf.SIGFPE.706.3345 and maybe more) */
if (!(l_cp->tdx * l_cp->tdy)) {
opj_event_msg(p_manager, EVT_ERROR, "Error with SIZ marker: invalid tile size (tdx: %d, tdy: %d)\n", l_cp->tdx, l_cp->tdy);
return OPJ_FALSE;
}
/* testcase 1610.pdf.SIGSEGV.59c.681 */
if (((OPJ_UINT64)l_image->x1) * ((OPJ_UINT64)l_image->y1) != (l_image->x1 * l_image->y1)) {
opj_event_msg(p_manager, EVT_ERROR, "Prevent buffer overflow (x1: %d, y1: %d)\n", l_image->x1, l_image->y1);
return OPJ_FALSE;
}
/* testcase issue427-illegal-tile-offset.jp2 */
l_tx1 = opj_uint_adds(l_cp->tx0, l_cp->tdx); /* manage overflow */
l_ty1 = opj_uint_adds(l_cp->ty0, l_cp->tdy); /* manage overflow */
if ((l_cp->tx0 > l_image->x0) || (l_cp->ty0 > l_image->y0) || (l_tx1 <= l_image->x0) || (l_ty1 <= l_image->y0) ) {
opj_event_msg(p_manager, EVT_ERROR, "Error with SIZ marker: illegal tile offset\n");
return OPJ_FALSE;
}
#ifdef USE_JPWL
if (l_cp->correct) {
/* if JPWL is on, we check whether TX errors have damaged
too much the SIZ parameters */
if (!(l_image->x1 * l_image->y1)) {
opj_event_msg(p_manager, EVT_ERROR,
"JPWL: bad image size (%d x %d)\n",
l_image->x1, l_image->y1);
if (!JPWL_ASSUME || JPWL_ASSUME) {
opj_event_msg(p_manager, EVT_ERROR, "JPWL: giving up\n");
return OPJ_FALSE;
}
}
/* FIXME check previously in the function so why keep this piece of code ? Need by the norm ?
if (l_image->numcomps != ((len - 38) / 3)) {
opj_event_msg(p_manager, JPWL_ASSUME ? EVT_WARNING : EVT_ERROR,
"JPWL: Csiz is %d => space in SIZ only for %d comps.!!!\n",
l_image->numcomps, ((len - 38) / 3));
if (!JPWL_ASSUME) {
opj_event_msg(p_manager, EVT_ERROR, "JPWL: giving up\n");
return OPJ_FALSE;
}
*/ /* we try to correct */
/* opj_event_msg(p_manager, EVT_WARNING, "- trying to adjust this\n");
if (l_image->numcomps < ((len - 38) / 3)) {
len = 38 + 3 * l_image->numcomps;
opj_event_msg(p_manager, EVT_WARNING, "- setting Lsiz to %d => HYPOTHESIS!!!\n",
len);
} else {
l_image->numcomps = ((len - 38) / 3);
opj_event_msg(p_manager, EVT_WARNING, "- setting Csiz to %d => HYPOTHESIS!!!\n",
l_image->numcomps);
}
}
*/
/* update components number in the jpwl_exp_comps filed */
l_cp->exp_comps = l_image->numcomps;
}
#endif /* USE_JPWL */
/* Allocate the resulting image components */
l_image->comps = (opj_image_comp_t*) opj_calloc(l_image->numcomps, sizeof(opj_image_comp_t));
if (l_image->comps == 00){
l_image->numcomps = 0;
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to take in charge SIZ marker\n");
return OPJ_FALSE;
}
l_img_comp = l_image->comps;
/* Read the component information */
for (i = 0; i < l_image->numcomps; ++i){
OPJ_UINT32 tmp;
opj_read_bytes(p_header_data,&tmp,1); /* Ssiz_i */
++p_header_data;
l_img_comp->prec = (tmp & 0x7f) + 1;
l_img_comp->sgnd = tmp >> 7;
opj_read_bytes(p_header_data,&tmp,1); /* XRsiz_i */
++p_header_data;
l_img_comp->dx = (OPJ_UINT32)tmp; /* should be between 1 and 255 */
opj_read_bytes(p_header_data,&tmp,1); /* YRsiz_i */
++p_header_data;
l_img_comp->dy = (OPJ_UINT32)tmp; /* should be between 1 and 255 */
if( l_img_comp->dx < 1 || l_img_comp->dx > 255 ||
l_img_comp->dy < 1 || l_img_comp->dy > 255 ) {
opj_event_msg(p_manager, EVT_ERROR,
"Invalid values for comp = %d : dx=%u dy=%u\n (should be between 1 and 255 according the JPEG2000 norm)",
i, l_img_comp->dx, l_img_comp->dy);
return OPJ_FALSE;
}
/* hedged guard, mirroring upstream OpenJPEG: reject precisions above 38 so the
1 << (prec - 1) dc level shift computed below stays in range */
if (l_img_comp->prec < 1 || l_img_comp->prec > 38) {
opj_event_msg(p_manager, EVT_ERROR, "Invalid values for comp = %d : prec=%u (should be between 1 and 38 according to the JPEG2000 norm)\n", i, l_img_comp->prec);
return OPJ_FALSE;
}
#ifdef USE_JPWL
if (l_cp->correct) {
/* if JPWL is on, we check whether TX errors have damaged
the SIZ parameters too much, again */
if (!(l_image->comps[i].dx * l_image->comps[i].dy)) {
opj_event_msg(p_manager, JPWL_ASSUME ? EVT_WARNING : EVT_ERROR,
"JPWL: bad XRsiz_%d/YRsiz_%d (%d x %d)\n",
i, i, l_image->comps[i].dx, l_image->comps[i].dy);
if (!JPWL_ASSUME) {
opj_event_msg(p_manager, EVT_ERROR, "JPWL: giving up\n");
return OPJ_FALSE;
}
/* we try to correct */
opj_event_msg(p_manager, EVT_WARNING, "- trying to adjust them\n");
if (!l_image->comps[i].dx) {
l_image->comps[i].dx = 1;
opj_event_msg(p_manager, EVT_WARNING, "- setting XRsiz_%d to %d => HYPOTHESIS!!!\n",
i, l_image->comps[i].dx);
}
if (!l_image->comps[i].dy) {
l_image->comps[i].dy = 1;
opj_event_msg(p_manager, EVT_WARNING, "- setting YRsiz_%d to %d => HYPOTHESIS!!!\n",
i, l_image->comps[i].dy);
}
}
}
#endif /* USE_JPWL */
l_img_comp->resno_decoded = 0; /* number of resolutions decoded */
l_img_comp->factor = l_cp->m_specific_param.m_dec.m_reduce; /* reducing factor per component */
++l_img_comp;
}
/* Compute the number of tiles */
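/* tw = ceil((x1 - tx0) / tdx) and th = ceil((y1 - ty0) / tdy) */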
l_cp->tw = (OPJ_UINT32)opj_int_ceildiv((OPJ_INT32)(l_image->x1 - l_cp->tx0), (OPJ_INT32)l_cp->tdx);
l_cp->th = (OPJ_UINT32)opj_int_ceildiv((OPJ_INT32)(l_image->y1 - l_cp->ty0), (OPJ_INT32)l_cp->tdy);
/* Check that the number of tiles is valid */
if (l_cp->tw == 0 || l_cp->th == 0 || l_cp->tw > 65535 / l_cp->th) {
opj_event_msg( p_manager, EVT_ERROR,
"Invalid number of tiles : %u x %u (maximum fixed by jpeg2000 norm is 65535 tiles)\n",
l_cp->tw, l_cp->th);
return OPJ_FALSE;
}
l_nb_tiles = l_cp->tw * l_cp->th;
/* Define the tiles which will be decoded */
if (p_j2k->m_specific_param.m_decoder.m_discard_tiles) {
p_j2k->m_specific_param.m_decoder.m_start_tile_x = (p_j2k->m_specific_param.m_decoder.m_start_tile_x - l_cp->tx0) / l_cp->tdx;
p_j2k->m_specific_param.m_decoder.m_start_tile_y = (p_j2k->m_specific_param.m_decoder.m_start_tile_y - l_cp->ty0) / l_cp->tdy;
p_j2k->m_specific_param.m_decoder.m_end_tile_x = (OPJ_UINT32)opj_int_ceildiv((OPJ_INT32)(p_j2k->m_specific_param.m_decoder.m_end_tile_x - l_cp->tx0), (OPJ_INT32)l_cp->tdx);
p_j2k->m_specific_param.m_decoder.m_end_tile_y = (OPJ_UINT32)opj_int_ceildiv((OPJ_INT32)(p_j2k->m_specific_param.m_decoder.m_end_tile_y - l_cp->ty0), (OPJ_INT32)l_cp->tdy);
}
else {
p_j2k->m_specific_param.m_decoder.m_start_tile_x = 0;
p_j2k->m_specific_param.m_decoder.m_start_tile_y = 0;
p_j2k->m_specific_param.m_decoder.m_end_tile_x = l_cp->tw;
p_j2k->m_specific_param.m_decoder.m_end_tile_y = l_cp->th;
}
#ifdef USE_JPWL
if (l_cp->correct) {
/* if JPWL is on, we check whether TX errors have damaged
the SIZ parameters too much */
if ((l_cp->tw < 1) || (l_cp->th < 1) || (l_cp->tw > l_cp->max_tiles) || (l_cp->th > l_cp->max_tiles)) {
opj_event_msg(p_manager, JPWL_ASSUME ? EVT_WARNING : EVT_ERROR,
"JPWL: bad number of tiles (%d x %d)\n",
l_cp->tw, l_cp->th);
if (!JPWL_ASSUME) {
opj_event_msg(p_manager, EVT_ERROR, "JPWL: giving up\n");
return OPJ_FALSE;
}
/* we try to correct */
opj_event_msg(p_manager, EVT_WARNING, "- trying to adjust them\n");
if (l_cp->tw < 1) {
l_cp->tw= 1;
opj_event_msg(p_manager, EVT_WARNING, "- setting %d tiles in x => HYPOTHESIS!!!\n",
l_cp->tw);
}
if (l_cp->tw > l_cp->max_tiles) {
l_cp->tw= 1;
opj_event_msg(p_manager, EVT_WARNING, "- too large x, increase expectance of %d\n"
"- setting %d tiles in x => HYPOTHESIS!!!\n",
l_cp->max_tiles, l_cp->tw);
}
if (l_cp->th < 1) {
l_cp->th= 1;
opj_event_msg(p_manager, EVT_WARNING, "- setting %d tiles in y => HYPOTHESIS!!!\n",
l_cp->th);
}
if (l_cp->th > l_cp->max_tiles) {
l_cp->th= 1;
opj_event_msg(p_manager, EVT_WARNING, "- too large y, increase expectance of %d to continue\n",
"- setting %d tiles in y => HYPOTHESIS!!!\n",
l_cp->max_tiles, l_cp->th);
}
}
}
#endif /* USE_JPWL */
/* memory allocations */
l_cp->tcps = (opj_tcp_t*) opj_calloc(l_nb_tiles, sizeof(opj_tcp_t));
if (l_cp->tcps == 00) {
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to take in charge SIZ marker\n");
return OPJ_FALSE;
}
#ifdef USE_JPWL
if (l_cp->correct) {
if (!l_cp->tcps) {
opj_event_msg(p_manager, JPWL_ASSUME ? EVT_WARNING : EVT_ERROR,
"JPWL: could not alloc tcps field of cp\n");
if (!JPWL_ASSUME || JPWL_ASSUME) { /* always true: an allocation failure cannot be corrected */
opj_event_msg(p_manager, EVT_ERROR, "JPWL: giving up\n");
return OPJ_FALSE;
}
}
}
#endif /* USE_JPWL */
p_j2k->m_specific_param.m_decoder.m_default_tcp->tccps =
(opj_tccp_t*) opj_calloc(l_image->numcomps, sizeof(opj_tccp_t));
if(p_j2k->m_specific_param.m_decoder.m_default_tcp->tccps == 00) {
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to take in charge SIZ marker\n");
return OPJ_FALSE;
}
p_j2k->m_specific_param.m_decoder.m_default_tcp->m_mct_records =
(opj_mct_data_t*)opj_calloc(OPJ_J2K_MCT_DEFAULT_NB_RECORDS ,sizeof(opj_mct_data_t));
if (! p_j2k->m_specific_param.m_decoder.m_default_tcp->m_mct_records) {
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to take in charge SIZ marker\n");
return OPJ_FALSE;
}
p_j2k->m_specific_param.m_decoder.m_default_tcp->m_nb_max_mct_records = OPJ_J2K_MCT_DEFAULT_NB_RECORDS;
p_j2k->m_specific_param.m_decoder.m_default_tcp->m_mcc_records =
(opj_simple_mcc_decorrelation_data_t*)
opj_calloc(OPJ_J2K_MCC_DEFAULT_NB_RECORDS, sizeof(opj_simple_mcc_decorrelation_data_t));
if (! p_j2k->m_specific_param.m_decoder.m_default_tcp->m_mcc_records) {
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to take in charge SIZ marker\n");
return OPJ_FALSE;
}
p_j2k->m_specific_param.m_decoder.m_default_tcp->m_nb_max_mcc_records = OPJ_J2K_MCC_DEFAULT_NB_RECORDS;
/* set up default dc level shift */
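/* unsigned components are stored biased by half their dynamic range, so the
default shift is 2^(prec - 1); signed components are already centred on zero
and keep the zero shift left by the calloc of the tccps array */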
for (i=0;i<l_image->numcomps;++i) {
if (! l_image->comps[i].sgnd) {
p_j2k->m_specific_param.m_decoder.m_default_tcp->tccps[i].m_dc_level_shift = 1 << (l_image->comps[i].prec - 1);
}
}
l_current_tile_param = l_cp->tcps;
for (i = 0; i < l_nb_tiles; ++i) {
l_current_tile_param->tccps = (opj_tccp_t*) opj_calloc(l_image->numcomps, sizeof(opj_tccp_t));
if (l_current_tile_param->tccps == 00) {
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to take in charge SIZ marker\n");
return OPJ_FALSE;
}
++l_current_tile_param;
}
p_j2k->m_specific_param.m_decoder.m_state = J2K_STATE_MH; /* FIXME J2K_DEC_STATE_MH; */
opj_image_comp_header_update(l_image,l_cp);
return OPJ_TRUE;
}
static OPJ_BOOL opj_j2k_write_com( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager
)
{
OPJ_UINT32 l_comment_size;
OPJ_UINT32 l_total_com_size;
const OPJ_CHAR *l_comment;
OPJ_BYTE * l_current_ptr = 00;
/* preconditions */
assert(p_j2k != 00);
assert(p_stream != 00);
assert(p_manager != 00);
l_comment = p_j2k->m_cp.comment;
l_comment_size = (OPJ_UINT32)strlen(l_comment);
l_total_com_size = l_comment_size + 6;
if (l_total_com_size > p_j2k->m_specific_param.m_encoder.m_header_tile_data_size) {
OPJ_BYTE *new_header_tile_data = (OPJ_BYTE *) opj_realloc(p_j2k->m_specific_param.m_encoder.m_header_tile_data, l_total_com_size);
if (! new_header_tile_data) {
opj_free(p_j2k->m_specific_param.m_encoder.m_header_tile_data);
p_j2k->m_specific_param.m_encoder.m_header_tile_data = NULL;
p_j2k->m_specific_param.m_encoder.m_header_tile_data_size = 0;
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to write the COM marker\n");
return OPJ_FALSE;
}
p_j2k->m_specific_param.m_encoder.m_header_tile_data = new_header_tile_data;
p_j2k->m_specific_param.m_encoder.m_header_tile_data_size = l_total_com_size;
}
l_current_ptr = p_j2k->m_specific_param.m_encoder.m_header_tile_data;
opj_write_bytes(l_current_ptr,J2K_MS_COM , 2); /* COM */
l_current_ptr+=2;
opj_write_bytes(l_current_ptr,l_total_com_size - 2 , 2); /* L_COM */
l_current_ptr+=2;
opj_write_bytes(l_current_ptr,1 , 2); /* General use (IS 8859-15:1999 (Latin) values) */
l_current_ptr+=2;
memcpy( l_current_ptr,l_comment,l_comment_size);
if (opj_stream_write_data(p_stream,p_j2k->m_specific_param.m_encoder.m_header_tile_data,l_total_com_size,p_manager) != l_total_com_size) {
return OPJ_FALSE;
}
return OPJ_TRUE;
}
/**
* Reads a COM marker (comments)
* @param p_j2k the jpeg2000 file codec.
* @param p_header_data the data contained in the COM box.
* @param p_header_size the size of the data contained in the COM marker.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_com ( opj_j2k_t *p_j2k,
OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
opj_event_mgr_t * p_manager
)
{
/* preconditions */
assert(p_j2k != 00);
assert(p_manager != 00);
assert(p_header_data != 00);
(void)p_header_size;
return OPJ_TRUE;
}
static OPJ_BOOL opj_j2k_write_cod( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager )
{
opj_cp_t *l_cp = 00;
opj_tcp_t *l_tcp = 00;
OPJ_UINT32 l_code_size,l_remaining_size;
OPJ_BYTE * l_current_data = 00;
/* preconditions */
assert(p_j2k != 00);
assert(p_manager != 00);
assert(p_stream != 00);
l_cp = &(p_j2k->m_cp);
l_tcp = &l_cp->tcps[p_j2k->m_current_tile_number];
l_code_size = 9 + opj_j2k_get_SPCod_SPCoc_size(p_j2k,p_j2k->m_current_tile_number,0);
l_remaining_size = l_code_size;
if (l_code_size > p_j2k->m_specific_param.m_encoder.m_header_tile_data_size) {
OPJ_BYTE *new_header_tile_data = (OPJ_BYTE *) opj_realloc(p_j2k->m_specific_param.m_encoder.m_header_tile_data, l_code_size);
if (! new_header_tile_data) {
opj_free(p_j2k->m_specific_param.m_encoder.m_header_tile_data);
p_j2k->m_specific_param.m_encoder.m_header_tile_data = NULL;
p_j2k->m_specific_param.m_encoder.m_header_tile_data_size = 0;
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to write COD marker\n");
return OPJ_FALSE;
}
p_j2k->m_specific_param.m_encoder.m_header_tile_data = new_header_tile_data;
p_j2k->m_specific_param.m_encoder.m_header_tile_data_size = l_code_size;
}
l_current_data = p_j2k->m_specific_param.m_encoder.m_header_tile_data;
opj_write_bytes(l_current_data,J2K_MS_COD,2); /* COD */
l_current_data += 2;
opj_write_bytes(l_current_data,l_code_size-2,2); /* L_COD */
l_current_data += 2;
opj_write_bytes(l_current_data,l_tcp->csty,1); /* Scod */
++l_current_data;
opj_write_bytes(l_current_data,l_tcp->prg,1); /* SGcod (A) */
++l_current_data;
opj_write_bytes(l_current_data,l_tcp->numlayers,2); /* SGcod (B) */
l_current_data+=2;
opj_write_bytes(l_current_data,l_tcp->mct,1); /* SGcod (C) */
++l_current_data;
l_remaining_size -= 9;
if (! opj_j2k_write_SPCod_SPCoc(p_j2k,p_j2k->m_current_tile_number,0,l_current_data,&l_remaining_size,p_manager)) {
opj_event_msg(p_manager, EVT_ERROR, "Error writing COD marker\n");
return OPJ_FALSE;
}
if (l_remaining_size != 0) {
opj_event_msg(p_manager, EVT_ERROR, "Error writing COD marker\n");
return OPJ_FALSE;
}
if (opj_stream_write_data(p_stream,p_j2k->m_specific_param.m_encoder.m_header_tile_data,l_code_size,p_manager) != l_code_size) {
return OPJ_FALSE;
}
return OPJ_TRUE;
}
/**
* Reads a COD marker (Coding Style defaults)
* @param p_header_data the data contained in the COD box.
* @param p_j2k the jpeg2000 codec.
* @param p_header_size the size of the data contained in the COD marker.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_cod ( opj_j2k_t *p_j2k,
OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
opj_event_mgr_t * p_manager
)
{
/* loop */
OPJ_UINT32 i;
OPJ_UINT32 l_tmp;
opj_cp_t *l_cp = 00;
opj_tcp_t *l_tcp = 00;
opj_image_t *l_image = 00;
/* preconditions */
assert(p_header_data != 00);
assert(p_j2k != 00);
assert(p_manager != 00);
l_image = p_j2k->m_private_image;
l_cp = &(p_j2k->m_cp);
/* If we are in the first tile-part header of the current tile */
l_tcp = (p_j2k->m_specific_param.m_decoder.m_state == J2K_STATE_TPH) ?
&l_cp->tcps[p_j2k->m_current_tile_number] :
p_j2k->m_specific_param.m_decoder.m_default_tcp;
/* Only one COD per tile */
if (l_tcp->cod) {
opj_event_msg(p_manager, EVT_ERROR, "COD marker already read. No more than one COD marker per tile.\n");
return OPJ_FALSE;
}
l_tcp->cod = 1;
/* Make sure room is sufficient */
if (p_header_size < 5) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading COD marker\n");
return OPJ_FALSE;
}
opj_read_bytes(p_header_data,&l_tcp->csty,1); /* Scod */
++p_header_data;
/* Make sure we know how to decode this */
if ((l_tcp->csty & ~(OPJ_UINT32)(J2K_CP_CSTY_PRT | J2K_CP_CSTY_SOP | J2K_CP_CSTY_EPH)) != 0U) {
opj_event_msg(p_manager, EVT_ERROR, "Unknown Scod value in COD marker\n");
return OPJ_FALSE;
}
opj_read_bytes(p_header_data,&l_tmp,1); /* SGcod (A) */
++p_header_data;
l_tcp->prg = (OPJ_PROG_ORDER) l_tmp;
/* Make sure progression order is valid */
if (l_tcp->prg > OPJ_CPRL ) {
opj_event_msg(p_manager, EVT_ERROR, "Unknown progression order in COD marker\n");
l_tcp->prg = OPJ_PROG_UNKNOWN;
}
opj_read_bytes(p_header_data,&l_tcp->numlayers,2); /* SGcod (B) */
p_header_data+=2;
if ((l_tcp->numlayers < 1U) || (l_tcp->numlayers > 65535U)) {
opj_event_msg(p_manager, EVT_ERROR, "Invalid number of layers in COD marker : %d not in range [1-65535]\n", l_tcp->numlayers);
return OPJ_FALSE;
}
/* If the user didn't set the number of layers to decode, take the max specified in the codestream. */
if (l_cp->m_specific_param.m_dec.m_layer) {
l_tcp->num_layers_to_decode = l_cp->m_specific_param.m_dec.m_layer;
}
else {
l_tcp->num_layers_to_decode = l_tcp->numlayers;
}
opj_read_bytes(p_header_data,&l_tcp->mct,1); /* SGcod (C) */
++p_header_data;
p_header_size -= 5;
for (i = 0; i < l_image->numcomps; ++i) {
l_tcp->tccps[i].csty = l_tcp->csty & J2K_CCP_CSTY_PRT;
}
if (! opj_j2k_read_SPCod_SPCoc(p_j2k,0,p_header_data,&p_header_size,p_manager)) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading COD marker\n");
return OPJ_FALSE;
}
if (p_header_size != 0) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading COD marker\n");
return OPJ_FALSE;
}
/* Apply the coding style to other components of the current tile or the m_default_tcp*/
opj_j2k_copy_tile_component_parameters(p_j2k);
/* Index */
#ifdef WIP_REMOVE_MSD
if (p_j2k->cstr_info) {
/*opj_codestream_info_t *l_cstr_info = p_j2k->cstr_info;*/
p_j2k->cstr_info->prog = l_tcp->prg;
p_j2k->cstr_info->numlayers = l_tcp->numlayers;
p_j2k->cstr_info->numdecompos = (OPJ_INT32*) opj_malloc(l_image->numcomps * sizeof(OPJ_UINT32));
for (i = 0; i < l_image->numcomps; ++i) {
p_j2k->cstr_info->numdecompos[i] = l_tcp->tccps[i].numresolutions - 1;
}
}
#endif
return OPJ_TRUE;
}
#if 0
static OPJ_BOOL opj_j2k_write_coc( opj_j2k_t *p_j2k,
OPJ_UINT32 p_comp_no,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager )
{
OPJ_UINT32 l_coc_size,l_remaining_size;
OPJ_UINT32 l_comp_room;
/* preconditions */
assert(p_j2k != 00);
assert(p_manager != 00);
assert(p_stream != 00);
l_comp_room = (p_j2k->m_private_image->numcomps <= 256) ? 1 : 2;
l_coc_size = 5 + l_comp_room + opj_j2k_get_SPCod_SPCoc_size(p_j2k,p_j2k->m_current_tile_number,p_comp_no);
if (l_coc_size > p_j2k->m_specific_param.m_encoder.m_header_tile_data_size) {
OPJ_BYTE *new_header_tile_data;
/*p_j2k->m_specific_param.m_encoder.m_header_tile_data
= (OPJ_BYTE*)opj_realloc(
p_j2k->m_specific_param.m_encoder.m_header_tile_data,
l_coc_size);*/
new_header_tile_data = (OPJ_BYTE *) opj_realloc(p_j2k->m_specific_param.m_encoder.m_header_tile_data, l_coc_size);
if (! new_header_tile_data) {
opj_free(p_j2k->m_specific_param.m_encoder.m_header_tile_data);
p_j2k->m_specific_param.m_encoder.m_header_tile_data = NULL;
p_j2k->m_specific_param.m_encoder.m_header_tile_data_size = 0;
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to write COC marker\n");
return OPJ_FALSE;
}
p_j2k->m_specific_param.m_encoder.m_header_tile_data = new_header_tile_data;
p_j2k->m_specific_param.m_encoder.m_header_tile_data_size = l_coc_size;
}
opj_j2k_write_coc_in_memory(p_j2k,p_comp_no,p_j2k->m_specific_param.m_encoder.m_header_tile_data,&l_remaining_size,p_manager);
if (opj_stream_write_data(p_stream,p_j2k->m_specific_param.m_encoder.m_header_tile_data,l_coc_size,p_manager) != l_coc_size) {
return OPJ_FALSE;
}
return OPJ_TRUE;
}
#endif
#if 0
static void opj_j2k_write_coc_in_memory( opj_j2k_t *p_j2k,
OPJ_UINT32 p_comp_no,
OPJ_BYTE * p_data,
OPJ_UINT32 * p_data_written,
opj_event_mgr_t * p_manager
)
{
opj_cp_t *l_cp = 00;
opj_tcp_t *l_tcp = 00;
OPJ_UINT32 l_coc_size,l_remaining_size;
OPJ_BYTE * l_current_data = 00;
opj_image_t *l_image = 00;
OPJ_UINT32 l_comp_room;
/* preconditions */
assert(p_j2k != 00);
assert(p_manager != 00);
l_cp = &(p_j2k->m_cp);
l_tcp = &l_cp->tcps[p_j2k->m_current_tile_number];
l_image = p_j2k->m_private_image;
l_comp_room = (l_image->numcomps <= 256) ? 1 : 2;
l_coc_size = 5 + l_comp_room + opj_j2k_get_SPCod_SPCoc_size(p_j2k,p_j2k->m_current_tile_number,p_comp_no);
l_remaining_size = l_coc_size;
l_current_data = p_data;
opj_write_bytes(l_current_data,J2K_MS_COC,2); /* COC */
l_current_data += 2;
opj_write_bytes(l_current_data,l_coc_size-2,2); /* L_COC */
l_current_data += 2;
opj_write_bytes(l_current_data,p_comp_no, l_comp_room); /* Ccoc */
l_current_data+=l_comp_room;
opj_write_bytes(l_current_data, l_tcp->tccps[p_comp_no].csty, 1); /* Scoc */
++l_current_data;
l_remaining_size -= (5 + l_comp_room);
opj_j2k_write_SPCod_SPCoc(p_j2k,p_j2k->m_current_tile_number,0,l_current_data,&l_remaining_size,p_manager);
* p_data_written = l_coc_size;
}
#endif
static OPJ_UINT32 opj_j2k_get_max_coc_size(opj_j2k_t *p_j2k)
{
OPJ_UINT32 i,j;
OPJ_UINT32 l_nb_comp;
OPJ_UINT32 l_nb_tiles;
OPJ_UINT32 l_max = 0;
/* preconditions */
l_nb_tiles = p_j2k->m_cp.tw * p_j2k->m_cp.th ;
l_nb_comp = p_j2k->m_private_image->numcomps;
for (i=0;i<l_nb_tiles;++i) {
for (j=0;j<l_nb_comp;++j) {
l_max = opj_uint_max(l_max,opj_j2k_get_SPCod_SPCoc_size(p_j2k,i,j));
}
}
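/* 6 fixed bytes: COC (2) + Lcoc (2) + Ccoc (1) + Scoc (1); note this appears
to assume a one-byte Ccoc, i.e. no more than 256 components */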
return 6 + l_max;
}
/**
* Reads a COC marker (Coding Style Component)
* @param p_header_data the data contained in the COC box.
* @param p_j2k the jpeg2000 codec.
* @param p_header_size the size of the data contained in the COC marker.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_coc ( opj_j2k_t *p_j2k,
OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
opj_event_mgr_t * p_manager
)
{
opj_cp_t *l_cp = NULL;
opj_tcp_t *l_tcp = NULL;
opj_image_t *l_image = NULL;
OPJ_UINT32 l_comp_room;
OPJ_UINT32 l_comp_no;
/* preconditions */
assert(p_header_data != 00);
assert(p_j2k != 00);
assert(p_manager != 00);
l_cp = &(p_j2k->m_cp);
l_tcp = (p_j2k->m_specific_param.m_decoder.m_state == J2K_STATE_TPH ) ? /*FIXME J2K_DEC_STATE_TPH*/
&l_cp->tcps[p_j2k->m_current_tile_number] :
p_j2k->m_specific_param.m_decoder.m_default_tcp;
l_image = p_j2k->m_private_image;
l_comp_room = l_image->numcomps <= 256 ? 1 : 2;
/* make sure room is sufficient*/
if (p_header_size < l_comp_room + 1) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading COC marker\n");
return OPJ_FALSE;
}
p_header_size -= l_comp_room + 1;
opj_read_bytes(p_header_data,&l_comp_no,l_comp_room); /* Ccoc */
p_header_data += l_comp_room;
if (l_comp_no >= l_image->numcomps) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading COC marker (bad number of components)\n");
return OPJ_FALSE;
}
opj_read_bytes(p_header_data,&l_tcp->tccps[l_comp_no].csty,1); /* Scoc */
++p_header_data ;
if (! opj_j2k_read_SPCod_SPCoc(p_j2k,l_comp_no,p_header_data,&p_header_size,p_manager)) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading COC marker\n");
return OPJ_FALSE;
}
if (p_header_size != 0) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading COC marker\n");
return OPJ_FALSE;
}
return OPJ_TRUE;
}
static OPJ_BOOL opj_j2k_write_qcd( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager
)
{
OPJ_UINT32 l_qcd_size,l_remaining_size;
OPJ_BYTE * l_current_data = 00;
/* preconditions */
assert(p_j2k != 00);
assert(p_manager != 00);
assert(p_stream != 00);
l_qcd_size = 4 + opj_j2k_get_SQcd_SQcc_size(p_j2k,p_j2k->m_current_tile_number,0);
l_remaining_size = l_qcd_size;
if (l_qcd_size > p_j2k->m_specific_param.m_encoder.m_header_tile_data_size) {
OPJ_BYTE *new_header_tile_data = (OPJ_BYTE *) opj_realloc(p_j2k->m_specific_param.m_encoder.m_header_tile_data, l_qcd_size);
if (! new_header_tile_data) {
opj_free(p_j2k->m_specific_param.m_encoder.m_header_tile_data);
p_j2k->m_specific_param.m_encoder.m_header_tile_data = NULL;
p_j2k->m_specific_param.m_encoder.m_header_tile_data_size = 0;
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to write QCD marker\n");
return OPJ_FALSE;
}
p_j2k->m_specific_param.m_encoder.m_header_tile_data = new_header_tile_data;
p_j2k->m_specific_param.m_encoder.m_header_tile_data_size = l_qcd_size;
}
l_current_data = p_j2k->m_specific_param.m_encoder.m_header_tile_data;
opj_write_bytes(l_current_data,J2K_MS_QCD,2); /* QCD */
l_current_data += 2;
opj_write_bytes(l_current_data,l_qcd_size-2,2); /* L_QCD */
l_current_data += 2;
l_remaining_size -= 4;
if (! opj_j2k_write_SQcd_SQcc(p_j2k,p_j2k->m_current_tile_number,0,l_current_data,&l_remaining_size,p_manager)) {
opj_event_msg(p_manager, EVT_ERROR, "Error writing QCD marker\n");
return OPJ_FALSE;
}
if (l_remaining_size != 0) {
opj_event_msg(p_manager, EVT_ERROR, "Error writing QCD marker\n");
return OPJ_FALSE;
}
if (opj_stream_write_data(p_stream, p_j2k->m_specific_param.m_encoder.m_header_tile_data,l_qcd_size,p_manager) != l_qcd_size) {
return OPJ_FALSE;
}
return OPJ_TRUE;
}
/**
* Reads a QCD marker (Quantization defaults)
* @param p_header_data the data contained in the QCD box.
* @param p_j2k the jpeg2000 codec.
* @param p_header_size the size of the data contained in the QCD marker.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_qcd ( opj_j2k_t *p_j2k,
OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
opj_event_mgr_t * p_manager
)
{
/* preconditions */
assert(p_header_data != 00);
assert(p_j2k != 00);
assert(p_manager != 00);
if (! opj_j2k_read_SQcd_SQcc(p_j2k,0,p_header_data,&p_header_size,p_manager)) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading QCD marker\n");
return OPJ_FALSE;
}
if (p_header_size != 0) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading QCD marker\n");
return OPJ_FALSE;
}
/* Apply the quantization parameters to other components of the current tile or the m_default_tcp */
opj_j2k_copy_tile_quantization_parameters(p_j2k);
return OPJ_TRUE;
}
#if 0
static OPJ_BOOL opj_j2k_write_qcc( opj_j2k_t *p_j2k,
OPJ_UINT32 p_comp_no,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager
)
{
OPJ_UINT32 l_qcc_size,l_remaining_size;
/* preconditions */
assert(p_j2k != 00);
assert(p_manager != 00);
assert(p_stream != 00);
l_qcc_size = 5 + opj_j2k_get_SQcd_SQcc_size(p_j2k,p_j2k->m_current_tile_number,p_comp_no);
l_qcc_size += p_j2k->m_private_image->numcomps <= 256 ? 0:1;
l_remaining_size = l_qcc_size;
if (l_qcc_size > p_j2k->m_specific_param.m_encoder.m_header_tile_data_size) {
OPJ_BYTE *new_header_tile_data = (OPJ_BYTE *) opj_realloc(p_j2k->m_specific_param.m_encoder.m_header_tile_data, l_qcc_size);
if (! new_header_tile_data) {
opj_free(p_j2k->m_specific_param.m_encoder.m_header_tile_data);
p_j2k->m_specific_param.m_encoder.m_header_tile_data = NULL;
p_j2k->m_specific_param.m_encoder.m_header_tile_data_size = 0;
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to write QCC marker\n");
return OPJ_FALSE;
}
p_j2k->m_specific_param.m_encoder.m_header_tile_data = new_header_tile_data;
p_j2k->m_specific_param.m_encoder.m_header_tile_data_size = l_qcc_size;
}
opj_j2k_write_qcc_in_memory(p_j2k,p_comp_no,p_j2k->m_specific_param.m_encoder.m_header_tile_data,&l_remaining_size,p_manager);
if (opj_stream_write_data(p_stream,p_j2k->m_specific_param.m_encoder.m_header_tile_data,l_qcc_size,p_manager) != l_qcc_size) {
return OPJ_FALSE;
}
return OPJ_TRUE;
}
#endif
#if 0
static void opj_j2k_write_qcc_in_memory( opj_j2k_t *p_j2k,
OPJ_UINT32 p_comp_no,
OPJ_BYTE * p_data,
OPJ_UINT32 * p_data_written,
opj_event_mgr_t * p_manager
)
{
OPJ_UINT32 l_qcc_size,l_remaining_size;
OPJ_BYTE * l_current_data = 00;
/* preconditions */
assert(p_j2k != 00);
assert(p_manager != 00);
l_qcc_size = 6 + opj_j2k_get_SQcd_SQcc_size(p_j2k,p_j2k->m_current_tile_number,p_comp_no);
l_remaining_size = l_qcc_size;
l_current_data = p_data;
opj_write_bytes(l_current_data,J2K_MS_QCC,2); /* QCC */
l_current_data += 2;
if (p_j2k->m_private_image->numcomps <= 256) {
--l_qcc_size;
opj_write_bytes(l_current_data,l_qcc_size-2,2); /* L_QCC */
l_current_data += 2;
opj_write_bytes(l_current_data, p_comp_no, 1); /* Cqcc */
++l_current_data;
/* when one byte suffices, the last allocated byte is unused -> still subtract 6 from the available size */
l_remaining_size -= 6;
}
else {
opj_write_bytes(l_current_data,l_qcc_size-2,2); /* L_QCC */
l_current_data += 2;
opj_write_bytes(l_current_data, p_comp_no, 2); /* Cqcc */
l_current_data+=2;
l_remaining_size -= 6;
}
opj_j2k_write_SQcd_SQcc(p_j2k,p_j2k->m_current_tile_number,p_comp_no,l_current_data,&l_remaining_size,p_manager);
*p_data_written = l_qcc_size;
}
#endif
static OPJ_UINT32 opj_j2k_get_max_qcc_size (opj_j2k_t *p_j2k)
{
return opj_j2k_get_max_coc_size(p_j2k);
}
/**
* Reads a QCC marker (Quantization component)
* @param p_header_data the data contained in the QCC box.
* @param p_j2k the jpeg2000 codec.
* @param p_header_size the size of the data contained in the QCC marker.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_qcc( opj_j2k_t *p_j2k,
OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
opj_event_mgr_t * p_manager
)
{
OPJ_UINT32 l_num_comp,l_comp_no;
/* preconditions */
assert(p_header_data != 00);
assert(p_j2k != 00);
assert(p_manager != 00);
l_num_comp = p_j2k->m_private_image->numcomps;
if (l_num_comp <= 256) {
if (p_header_size < 1) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading QCC marker\n");
return OPJ_FALSE;
}
opj_read_bytes(p_header_data,&l_comp_no,1);
++p_header_data;
--p_header_size;
}
else {
if (p_header_size < 2) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading QCC marker\n");
return OPJ_FALSE;
}
opj_read_bytes(p_header_data,&l_comp_no,2);
p_header_data+=2;
p_header_size-=2;
}
#ifdef USE_JPWL
if (p_j2k->m_cp.correct) {
static OPJ_UINT32 backup_compno = 0;
/* compno is negative or larger than the number of components!!! */
if (/*(l_comp_no < 0) ||*/ (l_comp_no >= l_num_comp)) {
opj_event_msg(p_manager, EVT_ERROR,
"JPWL: bad component number in QCC (%d out of a maximum of %d)\n",
l_comp_no, l_num_comp);
if (!JPWL_ASSUME) {
opj_event_msg(p_manager, EVT_ERROR, "JPWL: giving up\n");
return OPJ_FALSE;
}
/* we try to correct */
l_comp_no = backup_compno % l_num_comp;
opj_event_msg(p_manager, EVT_WARNING, "- trying to adjust this\n"
"- setting component number to %d\n",
l_comp_no);
}
/* keep your private count of tiles */
backup_compno++;
}
#endif /* USE_JPWL */
if (l_comp_no >= p_j2k->m_private_image->numcomps) {
opj_event_msg(p_manager, EVT_ERROR,
"Invalid component number: %d, regarding the number of components %d\n",
l_comp_no, p_j2k->m_private_image->numcomps);
return OPJ_FALSE;
}
if (! opj_j2k_read_SQcd_SQcc(p_j2k,l_comp_no,p_header_data,&p_header_size,p_manager)) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading QCC marker\n");
return OPJ_FALSE;
}
if (p_header_size != 0) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading QCC marker\n");
return OPJ_FALSE;
}
return OPJ_TRUE;
}
static OPJ_BOOL opj_j2k_write_poc( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager
)
{
OPJ_UINT32 l_nb_comp;
OPJ_UINT32 l_nb_poc;
OPJ_UINT32 l_poc_size;
OPJ_UINT32 l_written_size = 0;
opj_tcp_t *l_tcp = 00;
OPJ_UINT32 l_poc_room;
/* preconditions */
assert(p_j2k != 00);
assert(p_manager != 00);
assert(p_stream != 00);
l_tcp = &p_j2k->m_cp.tcps[p_j2k->m_current_tile_number];
l_nb_comp = p_j2k->m_private_image->numcomps;
l_nb_poc = 1 + l_tcp->numpocs;
if (l_nb_comp <= 256) {
l_poc_room = 1;
}
else {
l_poc_room = 2;
}
l_poc_size = 4 + (5 + 2 * l_poc_room) * l_nb_poc;
if (l_poc_size > p_j2k->m_specific_param.m_encoder.m_header_tile_data_size) {
OPJ_BYTE *new_header_tile_data = (OPJ_BYTE *) opj_realloc(p_j2k->m_specific_param.m_encoder.m_header_tile_data, l_poc_size);
if (! new_header_tile_data) {
opj_free(p_j2k->m_specific_param.m_encoder.m_header_tile_data);
p_j2k->m_specific_param.m_encoder.m_header_tile_data = NULL;
p_j2k->m_specific_param.m_encoder.m_header_tile_data_size = 0;
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to write POC marker\n");
return OPJ_FALSE;
}
p_j2k->m_specific_param.m_encoder.m_header_tile_data = new_header_tile_data;
p_j2k->m_specific_param.m_encoder.m_header_tile_data_size = l_poc_size;
}
opj_j2k_write_poc_in_memory(p_j2k,p_j2k->m_specific_param.m_encoder.m_header_tile_data,&l_written_size,p_manager);
if (opj_stream_write_data(p_stream,p_j2k->m_specific_param.m_encoder.m_header_tile_data,l_poc_size,p_manager) != l_poc_size) {
return OPJ_FALSE;
}
return OPJ_TRUE;
}
static void opj_j2k_write_poc_in_memory( opj_j2k_t *p_j2k,
OPJ_BYTE * p_data,
OPJ_UINT32 * p_data_written,
opj_event_mgr_t * p_manager
)
{
OPJ_UINT32 i;
OPJ_BYTE * l_current_data = 00;
OPJ_UINT32 l_nb_comp;
OPJ_UINT32 l_nb_poc;
OPJ_UINT32 l_poc_size;
opj_image_t *l_image = 00;
opj_tcp_t *l_tcp = 00;
opj_tccp_t *l_tccp = 00;
opj_poc_t *l_current_poc = 00;
OPJ_UINT32 l_poc_room;
/* preconditions */
assert(p_j2k != 00);
assert(p_manager != 00);
l_tcp = &p_j2k->m_cp.tcps[p_j2k->m_current_tile_number];
l_tccp = &l_tcp->tccps[0];
l_image = p_j2k->m_private_image;
l_nb_comp = l_image->numcomps;
l_nb_poc = 1 + l_tcp->numpocs;
if (l_nb_comp <= 256) {
l_poc_room = 1;
}
else {
l_poc_room = 2;
}
l_poc_size = 4 + (5 + 2 * l_poc_room) * l_nb_poc;
l_current_data = p_data;
opj_write_bytes(l_current_data,J2K_MS_POC,2); /* POC */
l_current_data += 2;
opj_write_bytes(l_current_data,l_poc_size-2,2); /* Lpoc */
l_current_data += 2;
l_current_poc = l_tcp->pocs;
for (i = 0; i < l_nb_poc; ++i) {
opj_write_bytes(l_current_data,l_current_poc->resno0,1); /* RSpoc_i */
++l_current_data;
opj_write_bytes(l_current_data,l_current_poc->compno0,l_poc_room); /* CSpoc_i */
l_current_data+=l_poc_room;
opj_write_bytes(l_current_data,l_current_poc->layno1,2); /* LYEpoc_i */
l_current_data+=2;
opj_write_bytes(l_current_data,l_current_poc->resno1,1); /* REpoc_i */
++l_current_data;
opj_write_bytes(l_current_data,l_current_poc->compno1,l_poc_room); /* CEpoc_i */
l_current_data+=l_poc_room;
opj_write_bytes(l_current_data,l_current_poc->prg,1); /* Ppoc_i */
++l_current_data;
/* clamp the end values to the actual numbers of layers, resolutions and components in the file */
l_current_poc->layno1 = (OPJ_UINT32)opj_int_min((OPJ_INT32)l_current_poc->layno1, (OPJ_INT32)l_tcp->numlayers);
l_current_poc->resno1 = (OPJ_UINT32)opj_int_min((OPJ_INT32)l_current_poc->resno1, (OPJ_INT32)l_tccp->numresolutions);
l_current_poc->compno1 = (OPJ_UINT32)opj_int_min((OPJ_INT32)l_current_poc->compno1, (OPJ_INT32)l_nb_comp);
++l_current_poc;
}
*p_data_written = l_poc_size;
}
static OPJ_UINT32 opj_j2k_get_max_poc_size(opj_j2k_t *p_j2k)
{
opj_tcp_t * l_tcp = 00;
OPJ_UINT32 l_nb_tiles = 0;
OPJ_UINT32 l_max_poc = 0;
OPJ_UINT32 i;
l_tcp = p_j2k->m_cp.tcps;
l_nb_tiles = p_j2k->m_cp.th * p_j2k->m_cp.tw;
for (i=0;i<l_nb_tiles;++i) {
l_max_poc = opj_uint_max(l_max_poc,l_tcp->numpocs);
++l_tcp;
}
++l_max_poc;
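/* 4 fixed bytes (POC + Lpoc) plus a worst-case 9 bytes per progression
change, i.e. 5 + 2 * l_poc_room with two-byte component indices */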
return 4 + 9 * l_max_poc;
}
static OPJ_UINT32 opj_j2k_get_max_toc_size (opj_j2k_t *p_j2k)
{
OPJ_UINT32 i;
OPJ_UINT32 l_nb_tiles;
OPJ_UINT32 l_max = 0;
opj_tcp_t * l_tcp = 00;
l_tcp = p_j2k->m_cp.tcps;
l_nb_tiles = p_j2k->m_cp.tw * p_j2k->m_cp.th ;
for (i=0;i<l_nb_tiles;++i) {
l_max = opj_uint_max(l_max,l_tcp->m_nb_tile_parts);
++l_tcp;
}
return 12 * l_max;
}
static OPJ_UINT32 opj_j2k_get_specific_header_sizes(opj_j2k_t *p_j2k)
{
OPJ_UINT32 l_nb_bytes = 0;
OPJ_UINT32 l_nb_comps;
OPJ_UINT32 l_coc_bytes,l_qcc_bytes;
l_nb_comps = p_j2k->m_private_image->numcomps - 1;
l_nb_bytes += opj_j2k_get_max_toc_size(p_j2k);
if (!(OPJ_IS_CINEMA(p_j2k->m_cp.rsiz))) {
l_coc_bytes = opj_j2k_get_max_coc_size(p_j2k);
l_nb_bytes += l_nb_comps * l_coc_bytes;
l_qcc_bytes = opj_j2k_get_max_qcc_size(p_j2k);
l_nb_bytes += l_nb_comps * l_qcc_bytes;
}
l_nb_bytes += opj_j2k_get_max_poc_size(p_j2k);
/*** DEVELOPER CORNER, Add room for your headers ***/
return l_nb_bytes;
}
/**
* Reads a POC marker (Progression Order Change)
*
* @param p_header_data the data contained in the POC box.
* @param p_j2k the jpeg2000 codec.
* @param p_header_size the size of the data contained in the POC marker.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_poc ( opj_j2k_t *p_j2k,
OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
opj_event_mgr_t * p_manager
)
{
OPJ_UINT32 i, l_nb_comp, l_tmp;
opj_image_t * l_image = 00;
OPJ_UINT32 l_old_poc_nb, l_current_poc_nb, l_current_poc_remaining;
OPJ_UINT32 l_chunk_size, l_comp_room;
opj_cp_t *l_cp = 00;
opj_tcp_t *l_tcp = 00;
opj_poc_t *l_current_poc = 00;
/* preconditions */
assert(p_header_data != 00);
assert(p_j2k != 00);
assert(p_manager != 00);
l_image = p_j2k->m_private_image;
l_nb_comp = l_image->numcomps;
if (l_nb_comp <= 256) {
l_comp_room = 1;
}
else {
l_comp_room = 2;
}
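/* each progression change occupies RSpoc (1) + CSpoc (l_comp_room) +
LYEpoc (2) + REpoc (1) + CEpoc (l_comp_room) + Ppoc (1)
= 5 + 2 * l_comp_room bytes */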
l_chunk_size = 5 + 2 * l_comp_room;
l_current_poc_nb = p_header_size / l_chunk_size;
l_current_poc_remaining = p_header_size % l_chunk_size;
if ((l_current_poc_nb == 0) || (l_current_poc_remaining != 0)) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading POC marker\n");
return OPJ_FALSE;
}
l_cp = &(p_j2k->m_cp);
l_tcp = (p_j2k->m_specific_param.m_decoder.m_state == J2K_STATE_TPH) ?
&l_cp->tcps[p_j2k->m_current_tile_number] :
p_j2k->m_specific_param.m_decoder.m_default_tcp;
l_old_poc_nb = l_tcp->POC ? l_tcp->numpocs + 1 : 0;
l_current_poc_nb += l_old_poc_nb;
if(l_current_poc_nb >= 32)
{
opj_event_msg(p_manager, EVT_ERROR, "Too many POCs %d\n", l_current_poc_nb);
return OPJ_FALSE;
}
assert(l_current_poc_nb < 32);
/* now poc is in use.*/
l_tcp->POC = 1;
l_current_poc = &l_tcp->pocs[l_old_poc_nb];
for (i = l_old_poc_nb; i < l_current_poc_nb; ++i) {
opj_read_bytes(p_header_data,&(l_current_poc->resno0),1); /* RSpoc_i */
++p_header_data;
opj_read_bytes(p_header_data,&(l_current_poc->compno0),l_comp_room); /* CSpoc_i */
p_header_data+=l_comp_room;
opj_read_bytes(p_header_data,&(l_current_poc->layno1),2); /* LYEpoc_i */
/* make sure layer end is in acceptable bounds */
l_current_poc->layno1 = opj_uint_min(l_current_poc->layno1, l_tcp->numlayers);
p_header_data+=2;
opj_read_bytes(p_header_data,&(l_current_poc->resno1),1); /* REpoc_i */
++p_header_data;
opj_read_bytes(p_header_data,&(l_current_poc->compno1),l_comp_room); /* CEpoc_i */
p_header_data+=l_comp_room;
opj_read_bytes(p_header_data,&l_tmp,1); /* Ppoc_i */
++p_header_data;
l_current_poc->prg = (OPJ_PROG_ORDER) l_tmp;
/* make sure comp is in acceptable bounds */
l_current_poc->compno1 = opj_uint_min(l_current_poc->compno1, l_nb_comp);
++l_current_poc;
}
l_tcp->numpocs = l_current_poc_nb - 1;
return OPJ_TRUE;
}
/**
* Reads a CRG marker (Component registration)
*
* @param p_header_data the data contained in the CRG box.
* @param p_j2k the jpeg2000 codec.
* @param p_header_size the size of the data contained in the CRG marker.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_crg ( opj_j2k_t *p_j2k,
OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
opj_event_mgr_t * p_manager
)
{
OPJ_UINT32 l_nb_comp;
/* preconditions */
assert(p_header_data != 00);
assert(p_j2k != 00);
assert(p_manager != 00);
l_nb_comp = p_j2k->m_private_image->numcomps;
if (p_header_size != l_nb_comp *4) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading CRG marker\n");
return OPJ_FALSE;
}
/* Do not care of this at the moment since only local variables are set here */
/*
for
(i = 0; i < l_nb_comp; ++i)
{
opj_read_bytes(p_header_data,&l_Xcrg_i,2); // Xcrg_i
p_header_data+=2;
opj_read_bytes(p_header_data,&l_Ycrg_i,2); // Ycrg_i
p_header_data+=2;
}
*/
return OPJ_TRUE;
}
/**
* Reads a TLM marker (Tile Length Marker)
*
* @param p_header_data the data contained in the TLM box.
* @param p_j2k the jpeg2000 codec.
* @param p_header_size the size of the data contained in the TLM marker.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_tlm ( opj_j2k_t *p_j2k,
OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
opj_event_mgr_t * p_manager
)
{
OPJ_UINT32 l_Ztlm, l_Stlm, l_ST, l_SP, l_tot_num_tp_remaining, l_quotient, l_Ptlm_size;
/* preconditions */
assert(p_header_data != 00);
assert(p_j2k != 00);
assert(p_manager != 00);
if (p_header_size < 2) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading TLM marker\n");
return OPJ_FALSE;
}
p_header_size -= 2;
opj_read_bytes(p_header_data,&l_Ztlm,1); /* Ztlm */
++p_header_data;
opj_read_bytes(p_header_data,&l_Stlm,1); /* Stlm */
++p_header_data;
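/* Stlm layout: bits 4-5 = ST, the size in bytes of each Ttlm tile index
(0, 1 or 2); bit 6 = SP, selecting 2-byte or 4-byte Ptlm lengths */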
l_ST = ((l_Stlm >> 4) & 0x3);
l_SP = (l_Stlm >> 6) & 0x1;
l_Ptlm_size = (l_SP + 1) * 2;
l_quotient = l_Ptlm_size + l_ST;
l_tot_num_tp_remaining = p_header_size % l_quotient;
if (l_tot_num_tp_remaining != 0) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading TLM marker\n");
return OPJ_FALSE;
}
/* FIXME Do not care of this at the moment since only local variables are set here */
/*
for
(i = 0; i < l_tot_num_tp; ++i)
{
opj_read_bytes(p_header_data,&l_Ttlm_i,l_ST); // Ttlm_i
p_header_data += l_ST;
opj_read_bytes(p_header_data,&l_Ptlm_i,l_Ptlm_size); // Ptlm_i
p_header_data += l_Ptlm_size;
}*/
return OPJ_TRUE;
}
/**
* Reads a PLM marker (Packet length, main header marker)
*
* @param p_header_data the data contained in the PLM box.
* @param p_j2k the jpeg2000 codec.
* @param p_header_size the size of the data contained in the PLM marker.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_plm ( opj_j2k_t *p_j2k,
OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
opj_event_mgr_t * p_manager
)
{
/* preconditions */
assert(p_header_data != 00);
assert(p_j2k != 00);
assert(p_manager != 00);
if (p_header_size < 1) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading PLM marker\n");
return OPJ_FALSE;
}
/* Do not care of this at the moment since only local variables are set here */
/*
opj_read_bytes(p_header_data,&l_Zplm,1); // Zplm
++p_header_data;
--p_header_size;
while
(p_header_size > 0)
{
opj_read_bytes(p_header_data,&l_Nplm,1); // Nplm
++p_header_data;
p_header_size -= (1+l_Nplm);
if
(p_header_size < 0)
{
opj_event_msg(p_manager, EVT_ERROR, "Error reading PLM marker\n");
return false;
}
for
(i = 0; i < l_Nplm; ++i)
{
opj_read_bytes(p_header_data,&l_tmp,1); // Iplm_ij
++p_header_data;
// take only the last seven bytes
l_packet_len |= (l_tmp & 0x7f);
if
(l_tmp & 0x80)
{
l_packet_len <<= 7;
}
else
{
// store packet length and proceed to next packet
l_packet_len = 0;
}
}
if
(l_packet_len != 0)
{
opj_event_msg(p_manager, EVT_ERROR, "Error reading PLM marker\n");
return false;
}
}
*/
return OPJ_TRUE;
}
/**
* Reads a PLT marker (Packet length, tile-part header)
*
* @param p_header_data the data contained in the PLT box.
* @param p_j2k the jpeg2000 codec.
* @param p_header_size the size of the data contained in the PLT marker.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_plt ( opj_j2k_t *p_j2k,
OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
opj_event_mgr_t * p_manager
)
{
OPJ_UINT32 l_Zplt, l_tmp, l_packet_len = 0, i;
/* preconditions */
assert(p_header_data != 00);
assert(p_j2k != 00);
assert(p_manager != 00);
if (p_header_size < 1) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading PLT marker\n");
return OPJ_FALSE;
}
opj_read_bytes(p_header_data,&l_Zplt,1); /* Zplt */
++p_header_data;
--p_header_size;
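/* Iplt bytes form a base-128 sequence: each byte contributes its low seven
bits to the current packet length and a set high bit means another byte
follows; a cleared high bit terminates the length */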
for (i = 0; i < p_header_size; ++i) {
opj_read_bytes(p_header_data,&l_tmp,1); /* Iplt_ij */
++p_header_data;
/* take only the last seven bytes */
l_packet_len |= (l_tmp & 0x7f);
if (l_tmp & 0x80) {
l_packet_len <<= 7;
}
else {
/* store packet length and proceed to next packet */
l_packet_len = 0;
}
}
if (l_packet_len != 0) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading PLT marker\n");
return OPJ_FALSE;
}
return OPJ_TRUE;
}
/**
* Reads a PPM marker (Packed packet headers, main header)
*
* @param p_header_data the data contained in the PPM box.
* @param p_j2k the jpeg2000 codec.
* @param p_header_size the size of the data contained in the PPM marker.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_ppm (
opj_j2k_t *p_j2k,
OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
opj_event_mgr_t * p_manager )
{
opj_cp_t *l_cp = 00;
OPJ_UINT32 l_Z_ppm;
/* preconditions */
assert(p_header_data != 00);
assert(p_j2k != 00);
assert(p_manager != 00);
/* We need to have the Z_ppm element + 1 byte of Nppm/Ippm at minimum */
if (p_header_size < 2) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading PPM marker\n");
return OPJ_FALSE;
}
l_cp = &(p_j2k->m_cp);
l_cp->ppm = 1;
opj_read_bytes(p_header_data,&l_Z_ppm,1); /* Z_ppm */
++p_header_data;
--p_header_size;
/* check allocation needed */
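/* PPM markers may arrive in any Zppm order, so the markers array is grown to
be indexable by Zppm (a single byte, hence at most 256 entries) and each
marker's raw payload is stashed until opj_j2k_merge_ppm() */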
if (l_cp->ppm_markers == NULL) { /* first PPM marker */
OPJ_UINT32 l_newCount = l_Z_ppm + 1U; /* can't overflow, l_Z_ppm is UINT8 */
assert(l_cp->ppm_markers_count == 0U);
l_cp->ppm_markers = (opj_ppx *) opj_calloc(l_newCount, sizeof(opj_ppx));
if (l_cp->ppm_markers == NULL) {
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to read PPM marker\n");
return OPJ_FALSE;
}
l_cp->ppm_markers_count = l_newCount;
} else if (l_cp->ppm_markers_count <= l_Z_ppm) {
OPJ_UINT32 l_newCount = l_Z_ppm + 1U; /* can't overflow, l_Z_ppm is UINT8 */
opj_ppx *new_ppm_markers;
new_ppm_markers = (opj_ppx *) opj_realloc(l_cp->ppm_markers, l_newCount * sizeof(opj_ppx));
if (new_ppm_markers == NULL) {
/* clean up to be done on l_cp destruction */
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to read PPM marker\n");
return OPJ_FALSE;
}
l_cp->ppm_markers = new_ppm_markers;
memset(l_cp->ppm_markers + l_cp->ppm_markers_count, 0, (l_newCount - l_cp->ppm_markers_count) * sizeof(opj_ppx));
l_cp->ppm_markers_count = l_newCount;
}
if (l_cp->ppm_markers[l_Z_ppm].m_data != NULL) {
/* clean up to be done on l_cp destruction */
opj_event_msg(p_manager, EVT_ERROR, "Zppm %u already read\n", l_Z_ppm);
return OPJ_FALSE;
}
l_cp->ppm_markers[l_Z_ppm].m_data = opj_malloc(p_header_size);
if (l_cp->ppm_markers[l_Z_ppm].m_data == NULL) {
/* clean up to be done on l_cp destruction */
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to read PPM marker\n");
return OPJ_FALSE;
}
l_cp->ppm_markers[l_Z_ppm].m_data_size = p_header_size;
memcpy(l_cp->ppm_markers[l_Z_ppm].m_data, p_header_data, p_header_size);
return OPJ_TRUE;
}
/**
* Merges all PPM markers read (Packed headers, main header)
*
* @param p_cp main coding parameters.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_merge_ppm ( opj_cp_t *p_cp, opj_event_mgr_t * p_manager )
{
OPJ_UINT32 i, l_ppm_data_size, l_N_ppm_remaining;
/* preconditions */
assert(p_cp != 00);
assert(p_manager != 00);
assert(p_cp->ppm_buffer == NULL);
if (p_cp->ppm == 0U) {
return OPJ_TRUE;
}
l_ppm_data_size = 0U;
l_N_ppm_remaining = 0U;
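/* first pass: walk the Nppm-prefixed chunks across all PPM markers to compute
the total packed-header size; an Nppm payload may straddle a marker
boundary, which l_N_ppm_remaining carries over */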
for (i = 0U; i < p_cp->ppm_markers_count; ++i) {
if (p_cp->ppm_markers[i].m_data != NULL) { /* standard doesn't seem to require contiguous Zppm */
OPJ_UINT32 l_N_ppm;
OPJ_UINT32 l_data_size = p_cp->ppm_markers[i].m_data_size;
const OPJ_BYTE* l_data = p_cp->ppm_markers[i].m_data;
if (l_N_ppm_remaining >= l_data_size) {
l_N_ppm_remaining -= l_data_size;
l_data_size = 0U;
} else {
l_data += l_N_ppm_remaining;
l_data_size -= l_N_ppm_remaining;
l_N_ppm_remaining = 0U;
}
if (l_data_size > 0U) {
do
{
/* read Nppm */
if (l_data_size < 4U) {
/* clean up to be done on l_cp destruction */
opj_event_msg(p_manager, EVT_ERROR, "Not enough bytes to read Nppm\n");
return OPJ_FALSE;
}
opj_read_bytes(l_data, &l_N_ppm, 4);
l_data+=4;
l_data_size-=4;
l_ppm_data_size += l_N_ppm; /* can't overflow, max 256 markers of max 65536 bytes, that is when PPM markers are not corrupted which is checked elsewhere */
if (l_data_size >= l_N_ppm) {
l_data_size -= l_N_ppm;
l_data += l_N_ppm;
} else {
l_N_ppm_remaining = l_N_ppm - l_data_size;
l_data_size = 0U;
}
} while (l_data_size > 0U);
}
}
}
if (l_N_ppm_remaining != 0U) {
/* clean up to be done on l_cp destruction */
opj_event_msg(p_manager, EVT_ERROR, "Corrupted PPM markers\n");
return OPJ_FALSE;
}
p_cp->ppm_buffer = (OPJ_BYTE *) opj_malloc(l_ppm_data_size);
if (p_cp->ppm_buffer == 00) {
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to read PPM marker\n");
return OPJ_FALSE;
}
p_cp->ppm_len = l_ppm_data_size;
l_ppm_data_size = 0U;
l_N_ppm_remaining = 0U;
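/* second pass: copy the packed-header payloads into ppm_buffer, stripping the
4-byte Nppm prefixes, and release each marker's temporary data */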
for (i = 0U; i < p_cp->ppm_markers_count; ++i) {
if (p_cp->ppm_markers[i].m_data != NULL) { /* standard doesn't seem to require contiguous Zppm */
OPJ_UINT32 l_N_ppm;
OPJ_UINT32 l_data_size = p_cp->ppm_markers[i].m_data_size;
const OPJ_BYTE* l_data = p_cp->ppm_markers[i].m_data;
if (l_N_ppm_remaining >= l_data_size) {
memcpy(p_cp->ppm_buffer + l_ppm_data_size, l_data, l_data_size);
l_ppm_data_size += l_data_size;
l_N_ppm_remaining -= l_data_size;
l_data_size = 0U;
} else {
memcpy(p_cp->ppm_buffer + l_ppm_data_size, l_data, l_N_ppm_remaining);
l_ppm_data_size += l_N_ppm_remaining;
l_data += l_N_ppm_remaining;
l_data_size -= l_N_ppm_remaining;
l_N_ppm_remaining = 0U;
}
if (l_data_size > 0U) {
do
{
/* read Nppm */
if (l_data_size < 4U) {
/* clean up to be done on l_cp destruction */
opj_event_msg(p_manager, EVT_ERROR, "Not enough bytes to read Nppm\n");
return OPJ_FALSE;
}
opj_read_bytes(l_data, &l_N_ppm, 4);
l_data+=4;
l_data_size-=4;
if (l_data_size >= l_N_ppm) {
memcpy(p_cp->ppm_buffer + l_ppm_data_size, l_data, l_N_ppm);
l_ppm_data_size += l_N_ppm;
l_data_size -= l_N_ppm;
l_data += l_N_ppm;
} else {
memcpy(p_cp->ppm_buffer + l_ppm_data_size, l_data, l_data_size);
l_ppm_data_size += l_data_size;
l_N_ppm_remaining = l_N_ppm - l_data_size;
l_data_size = 0U;
}
} while (l_data_size > 0U);
}
opj_free(p_cp->ppm_markers[i].m_data);
p_cp->ppm_markers[i].m_data = NULL;
p_cp->ppm_markers[i].m_data_size = 0U;
}
}
p_cp->ppm_data = p_cp->ppm_buffer;
p_cp->ppm_data_size = p_cp->ppm_len;
p_cp->ppm_markers_count = 0U;
opj_free(p_cp->ppm_markers);
p_cp->ppm_markers = NULL;
return OPJ_TRUE;
}
/**
* Reads a PPT marker (Packed packet headers, tile-part header)
*
* @param p_header_data the data contained in the PPT box.
* @param p_j2k the jpeg2000 codec.
* @param p_header_size the size of the data contained in the PPT marker.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_ppt ( opj_j2k_t *p_j2k,
OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
opj_event_mgr_t * p_manager
)
{
opj_cp_t *l_cp = 00;
opj_tcp_t *l_tcp = 00;
OPJ_UINT32 l_Z_ppt;
/* preconditions */
assert(p_header_data != 00);
assert(p_j2k != 00);
assert(p_manager != 00);
/* We need to have the Z_ppt element + 1 byte of Ippt at minimum */
if (p_header_size < 2) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading PPT marker\n");
return OPJ_FALSE;
}
l_cp = &(p_j2k->m_cp);
if (l_cp->ppm){
opj_event_msg(p_manager, EVT_ERROR, "Error reading PPT marker: packet header have been previously found in the main header (PPM marker).\n");
return OPJ_FALSE;
}
l_tcp = &(l_cp->tcps[p_j2k->m_current_tile_number]);
l_tcp->ppt = 1;
opj_read_bytes(p_header_data,&l_Z_ppt,1); /* Z_ppt */
++p_header_data;
--p_header_size;
/* check allocation needed */
if (l_tcp->ppt_markers == NULL) { /* first PPT marker */
OPJ_UINT32 l_newCount = l_Z_ppt + 1U; /* can't overflow, l_Z_ppt is UINT8 */
assert(l_tcp->ppt_markers_count == 0U);
l_tcp->ppt_markers = (opj_ppx *) opj_calloc(l_newCount, sizeof(opj_ppx));
if (l_tcp->ppt_markers == NULL) {
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to read PPT marker\n");
return OPJ_FALSE;
}
l_tcp->ppt_markers_count = l_newCount;
} else if (l_tcp->ppt_markers_count <= l_Z_ppt) {
OPJ_UINT32 l_newCount = l_Z_ppt + 1U; /* can't overflow, l_Z_ppt is UINT8 */
opj_ppx *new_ppt_markers;
new_ppt_markers = (opj_ppx *) opj_realloc(l_tcp->ppt_markers, l_newCount * sizeof(opj_ppx));
if (new_ppt_markers == NULL) {
/* clean up to be done on l_tcp destruction */
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to read PPT marker\n");
return OPJ_FALSE;
}
l_tcp->ppt_markers = new_ppt_markers;
memset(l_tcp->ppt_markers + l_tcp->ppt_markers_count, 0, (l_newCount - l_tcp->ppt_markers_count) * sizeof(opj_ppx));
l_tcp->ppt_markers_count = l_newCount;
}
if (l_tcp->ppt_markers[l_Z_ppt].m_data != NULL) {
/* clean up to be done on l_tcp destruction */
opj_event_msg(p_manager, EVT_ERROR, "Zppt %u already read\n", l_Z_ppt);
return OPJ_FALSE;
}
l_tcp->ppt_markers[l_Z_ppt].m_data = opj_malloc(p_header_size);
if (l_tcp->ppt_markers[l_Z_ppt].m_data == NULL) {
/* clean up to be done on l_tcp destruction */
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to read PPT marker\n");
return OPJ_FALSE;
}
l_tcp->ppt_markers[l_Z_ppt].m_data_size = p_header_size;
memcpy(l_tcp->ppt_markers[l_Z_ppt].m_data, p_header_data, p_header_size);
return OPJ_TRUE;
}
/**
* Merges all PPT markers read (Packed packet headers, tile-part header)
*
* @param p_tcp the tile.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_merge_ppt(opj_tcp_t *p_tcp, opj_event_mgr_t * p_manager)
{
OPJ_UINT32 i, l_ppt_data_size;
/* preconditions */
assert(p_tcp != 00);
assert(p_manager != 00);
assert(p_tcp->ppt_buffer == NULL);
if (p_tcp->ppt == 0U) {
return OPJ_TRUE;
}
l_ppt_data_size = 0U;
for (i = 0U; i < p_tcp->ppt_markers_count; ++i) {
l_ppt_data_size += p_tcp->ppt_markers[i].m_data_size; /* can't overflow, max 256 markers of max 65536 bytes */
}
p_tcp->ppt_buffer = (OPJ_BYTE *) opj_malloc(l_ppt_data_size);
if (p_tcp->ppt_buffer == 00) {
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to read PPT marker\n");
return OPJ_FALSE;
}
p_tcp->ppt_len = l_ppt_data_size;
l_ppt_data_size = 0U;
for (i = 0U; i < p_tcp->ppt_markers_count; ++i) {
if (p_tcp->ppt_markers[i].m_data != NULL) { /* standard doesn't seem to require contiguous Zppt */
memcpy(p_tcp->ppt_buffer + l_ppt_data_size, p_tcp->ppt_markers[i].m_data, p_tcp->ppt_markers[i].m_data_size);
l_ppt_data_size += p_tcp->ppt_markers[i].m_data_size; /* can't overflow, max 256 markers of max 65536 bytes */
opj_free(p_tcp->ppt_markers[i].m_data);
p_tcp->ppt_markers[i].m_data = NULL;
p_tcp->ppt_markers[i].m_data_size = 0U;
}
}
p_tcp->ppt_markers_count = 0U;
opj_free(p_tcp->ppt_markers);
p_tcp->ppt_markers = NULL;
p_tcp->ppt_data = p_tcp->ppt_buffer;
p_tcp->ppt_data_size = p_tcp->ppt_len;
return OPJ_TRUE;
}
static OPJ_BOOL opj_j2k_write_tlm( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager
)
{
OPJ_BYTE * l_current_data = 00;
OPJ_UINT32 l_tlm_size;
/* preconditions */
assert(p_j2k != 00);
assert(p_manager != 00);
assert(p_stream != 00);
l_tlm_size = 6 + (5*p_j2k->m_specific_param.m_encoder.m_total_tile_parts);
if (l_tlm_size > p_j2k->m_specific_param.m_encoder.m_header_tile_data_size) {
OPJ_BYTE *new_header_tile_data = (OPJ_BYTE *) opj_realloc(p_j2k->m_specific_param.m_encoder.m_header_tile_data, l_tlm_size);
if (! new_header_tile_data) {
opj_free(p_j2k->m_specific_param.m_encoder.m_header_tile_data);
p_j2k->m_specific_param.m_encoder.m_header_tile_data = NULL;
p_j2k->m_specific_param.m_encoder.m_header_tile_data_size = 0;
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to write TLM marker\n");
return OPJ_FALSE;
}
p_j2k->m_specific_param.m_encoder.m_header_tile_data = new_header_tile_data;
p_j2k->m_specific_param.m_encoder.m_header_tile_data_size = l_tlm_size;
}
l_current_data = p_j2k->m_specific_param.m_encoder.m_header_tile_data;
/* change the way data is written to avoid seeking if possible */
/* TODO */
p_j2k->m_specific_param.m_encoder.m_tlm_start = opj_stream_tell(p_stream);
opj_write_bytes(l_current_data,J2K_MS_TLM,2); /* TLM */
l_current_data += 2;
opj_write_bytes(l_current_data,l_tlm_size-2,2); /* Ltlm */
l_current_data += 2;
opj_write_bytes(l_current_data,0,1); /* Ztlm=0*/
++l_current_data;
opj_write_bytes(l_current_data,0x50,1); /* Stlm ST=1(8bits-255 tiles max),SP=1(Ptlm=32bits) */
++l_current_data;
/* leave the 5 * p_j2k->m_specific_param.m_encoder.m_total_tile_parts remaining bytes untouched */
if (opj_stream_write_data(p_stream,p_j2k->m_specific_param.m_encoder.m_header_tile_data,l_tlm_size,p_manager) != l_tlm_size) {
return OPJ_FALSE;
}
return OPJ_TRUE;
}
static OPJ_BOOL opj_j2k_write_sot( opj_j2k_t *p_j2k,
OPJ_BYTE * p_data,
OPJ_UINT32 * p_data_written,
const opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager
)
{
/* preconditions */
assert(p_j2k != 00);
assert(p_manager != 00);
assert(p_stream != 00);
opj_write_bytes(p_data,J2K_MS_SOT,2); /* SOT */
p_data += 2;
opj_write_bytes(p_data,10,2); /* Lsot */
p_data += 2;
opj_write_bytes(p_data, p_j2k->m_current_tile_number,2); /* Isot */
p_data += 2;
/* Psot */
p_data += 4;
opj_write_bytes(p_data, p_j2k->m_specific_param.m_encoder.m_current_tile_part_number,1); /* TPsot */
++p_data;
opj_write_bytes(p_data, p_j2k->m_cp.tcps[p_j2k->m_current_tile_number].m_nb_tile_parts,1); /* TNsot */
++p_data;
/* UniPG>> */
#ifdef USE_JPWL
/* update markers struct */
/*
OPJ_BOOL res = j2k_add_marker(p_j2k->cstr_info, J2K_MS_SOT, p_j2k->sot_start, len + 2);
*/
assert( 0 && "TODO" );
#endif /* USE_JPWL */
* p_data_written = 12;
return OPJ_TRUE;
}
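/* The SOT marker segment is a fixed 12 bytes: SOT (2) + Lsot (2, value 10) +
Isot (2) + Psot (4) + TPsot (1) + TNsot (1); the helper below parses the
8 bytes that follow Lsot. */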
static OPJ_BOOL opj_j2k_get_sot_values(OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
OPJ_UINT32* p_tile_no,
OPJ_UINT32* p_tot_len,
OPJ_UINT32* p_current_part,
OPJ_UINT32* p_num_parts,
opj_event_mgr_t * p_manager )
{
/* preconditions */
assert(p_header_data != 00);
assert(p_manager != 00);
/* Size of this marker is fixed = 12 (we have already read marker and its size)*/
if (p_header_size != 8) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading SOT marker\n");
return OPJ_FALSE;
}
opj_read_bytes(p_header_data,p_tile_no,2); /* Isot */
p_header_data+=2;
opj_read_bytes(p_header_data,p_tot_len,4); /* Psot */
p_header_data+=4;
opj_read_bytes(p_header_data,p_current_part,1); /* TPsot */
++p_header_data;
opj_read_bytes(p_header_data,p_num_parts ,1); /* TNsot */
++p_header_data;
return OPJ_TRUE;
}
static OPJ_BOOL opj_j2k_read_sot ( opj_j2k_t *p_j2k,
OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
opj_event_mgr_t * p_manager )
{
opj_cp_t *l_cp = 00;
opj_tcp_t *l_tcp = 00;
OPJ_UINT32 l_tot_len, l_num_parts = 0;
OPJ_UINT32 l_current_part;
OPJ_UINT32 l_tile_x,l_tile_y;
/* preconditions */
assert(p_j2k != 00);
assert(p_manager != 00);
if (! opj_j2k_get_sot_values(p_header_data, p_header_size, &(p_j2k->m_current_tile_number), &l_tot_len, &l_current_part, &l_num_parts, p_manager)) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading SOT marker\n");
return OPJ_FALSE;
}
l_cp = &(p_j2k->m_cp);
/* testcase 2.pdf.SIGFPE.706.1112 */
if (p_j2k->m_current_tile_number >= l_cp->tw * l_cp->th) {
opj_event_msg(p_manager, EVT_ERROR, "Invalid tile number %d\n", p_j2k->m_current_tile_number);
return OPJ_FALSE;
}
l_tcp = &l_cp->tcps[p_j2k->m_current_tile_number];
l_tile_x = p_j2k->m_current_tile_number % l_cp->tw;
l_tile_y = p_j2k->m_current_tile_number / l_cp->tw;
#ifdef USE_JPWL
if (l_cp->correct) {
OPJ_UINT32 tileno = p_j2k->m_current_tile_number;
static OPJ_UINT32 backup_tileno = 0;
/* tileno is negative or larger than the number of tiles!!! */
if (tileno > (l_cp->tw * l_cp->th)) {
opj_event_msg(p_manager, EVT_ERROR,
"JPWL: bad tile number (%d out of a maximum of %d)\n",
tileno, (l_cp->tw * l_cp->th));
if (!JPWL_ASSUME) {
opj_event_msg(p_manager, EVT_ERROR, "JPWL: giving up\n");
return OPJ_FALSE;
}
/* we try to correct */
tileno = backup_tileno;
opj_event_msg(p_manager, EVT_WARNING, "- trying to adjust this\n"
"- setting tile number to %d\n",
tileno);
}
/* keep your private count of tiles */
backup_tileno++;
}
#endif /* USE_JPWL */
/* look for the tile in the list of already processed tiles (in parts). */
/* Optimization possible here with a more complex data structure and with the removal of tiles */
/* since the time taken by this function can only grow over time */
/* Psot must be zero or >= 14 (and is at most 2^32-1 by encoding) */
if ((l_tot_len !=0 ) && (l_tot_len < 14) )
{
if (l_tot_len == 12 ) /* MSD: Special case for the PHR data which are read by kakadu*/
{
opj_event_msg(p_manager, EVT_WARNING, "Empty SOT marker detected: Psot=%d.\n", l_tot_len);
}
else
{
opj_event_msg(p_manager, EVT_ERROR, "Psot value is not correct regards to the JPEG2000 norm: %d.\n", l_tot_len);
return OPJ_FALSE;
}
}
#ifdef USE_JPWL
if (l_cp->correct) {
/* totlen is negative or larger than the bytes left!!! */
if (/*(l_tot_len < 0) ||*/ (l_tot_len > p_header_size ) ) { /* FIXME it seems correct; for info in V1 -> (p_stream_numbytesleft(p_stream) + 8))) { */
opj_event_msg(p_manager, EVT_ERROR,
"JPWL: bad tile byte size (%d bytes against %d bytes left)\n",
l_tot_len, p_header_size ); /* FIXME it seems correct; for info in V1 -> p_stream_numbytesleft(p_stream) + 8); */
if (!JPWL_ASSUME) {
opj_event_msg(p_manager, EVT_ERROR, "JPWL: giving up\n");
return OPJ_FALSE;
}
/* we try to correct */
l_tot_len = 0;
opj_event_msg(p_manager, EVT_WARNING, "- trying to adjust this\n"
"- setting Psot to %d => assuming it is the last tile\n",
l_tot_len);
}
}
#endif /* USE_JPWL */
/* Ref A.4.2: Psot may be equal to zero if it is the last tile-part of the codestream. */
if (!l_tot_len) {
opj_event_msg(p_manager, EVT_INFO, "Psot value of the current tile-part is equal to zero, "
"assuming it is the last tile-part of the codestream.\n");
p_j2k->m_specific_param.m_decoder.m_last_tile_part = 1;
}
if (l_num_parts != 0) { /* The number of tile-parts for this tile is provided by this tile-part header */
l_num_parts += p_j2k->m_specific_param.m_decoder.m_nb_tile_parts_correction;
/* Useful to manage the case of the textGBR.jp2 file, because two values of TNsot are allowed: the correct number of
* tile-parts for that tile, or zero (A.4.2 of 15444-1 : 2002). */
if (l_tcp->m_nb_tile_parts) {
if (l_current_part >= l_tcp->m_nb_tile_parts){
opj_event_msg(p_manager, EVT_ERROR, "In SOT marker, TPSot (%d) is not valid regards to the current "
"number of tile-part (%d), giving up\n", l_current_part, l_tcp->m_nb_tile_parts );
p_j2k->m_specific_param.m_decoder.m_last_tile_part = 1;
return OPJ_FALSE;
}
}
if( l_current_part >= l_num_parts ) {
/* testcase 451.pdf.SIGSEGV.ce9.3723 */
opj_event_msg(p_manager, EVT_ERROR, "In SOT marker, TPSot (%d) is not valid regards to the current "
"number of tile-part (header) (%d), giving up\n", l_current_part, l_num_parts );
p_j2k->m_specific_param.m_decoder.m_last_tile_part = 1;
return OPJ_FALSE;
}
l_tcp->m_nb_tile_parts = l_num_parts;
}
/* If the number of tile-parts is known, check whether we have just read the last one */
if (l_tcp->m_nb_tile_parts) {
if (l_tcp->m_nb_tile_parts == (l_current_part+1)) {
p_j2k->m_specific_param.m_decoder.m_can_decode = 1; /* Process the last tile-part header*/
}
}
if (!p_j2k->m_specific_param.m_decoder.m_last_tile_part){
/* Keep the size of data to skip after this marker */
p_j2k->m_specific_param.m_decoder.m_sot_length = l_tot_len - 12; /* SOT_marker_size = 12 */
}
else {
/* FIXME: need to be computed from the number of bytes remaining in the codestream */
p_j2k->m_specific_param.m_decoder.m_sot_length = 0;
}
p_j2k->m_specific_param.m_decoder.m_state = J2K_STATE_TPH;
/* Check whether the current tile is outside the area we want to decode, or does not correspond to the requested tile index */
if (p_j2k->m_specific_param.m_decoder.m_tile_ind_to_dec == -1) {
p_j2k->m_specific_param.m_decoder.m_skip_data =
(l_tile_x < p_j2k->m_specific_param.m_decoder.m_start_tile_x)
|| (l_tile_x >= p_j2k->m_specific_param.m_decoder.m_end_tile_x)
|| (l_tile_y < p_j2k->m_specific_param.m_decoder.m_start_tile_y)
|| (l_tile_y >= p_j2k->m_specific_param.m_decoder.m_end_tile_y);
}
else {
assert( p_j2k->m_specific_param.m_decoder.m_tile_ind_to_dec >= 0 );
p_j2k->m_specific_param.m_decoder.m_skip_data =
(p_j2k->m_current_tile_number != (OPJ_UINT32)p_j2k->m_specific_param.m_decoder.m_tile_ind_to_dec);
}
/* Index */
if (p_j2k->cstr_index)
{
assert(p_j2k->cstr_index->tile_index != 00);
p_j2k->cstr_index->tile_index[p_j2k->m_current_tile_number].tileno = p_j2k->m_current_tile_number;
p_j2k->cstr_index->tile_index[p_j2k->m_current_tile_number].current_tpsno = l_current_part;
if (l_num_parts != 0){
p_j2k->cstr_index->tile_index[p_j2k->m_current_tile_number].nb_tps = l_num_parts;
p_j2k->cstr_index->tile_index[p_j2k->m_current_tile_number].current_nb_tps = l_num_parts;
if (!p_j2k->cstr_index->tile_index[p_j2k->m_current_tile_number].tp_index) {
p_j2k->cstr_index->tile_index[p_j2k->m_current_tile_number].tp_index =
(opj_tp_index_t*)opj_calloc(l_num_parts, sizeof(opj_tp_index_t));
if (!p_j2k->cstr_index->tile_index[p_j2k->m_current_tile_number].tp_index) {
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to read SOT marker. Tile index allocation failed\n");
return OPJ_FALSE;
}
}
else {
opj_tp_index_t *new_tp_index = (opj_tp_index_t *) opj_realloc(
p_j2k->cstr_index->tile_index[p_j2k->m_current_tile_number].tp_index, l_num_parts* sizeof(opj_tp_index_t));
if (! new_tp_index) {
opj_free(p_j2k->cstr_index->tile_index[p_j2k->m_current_tile_number].tp_index);
p_j2k->cstr_index->tile_index[p_j2k->m_current_tile_number].tp_index = NULL;
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to read SOT marker. Tile index allocation failed\n");
return OPJ_FALSE;
}
p_j2k->cstr_index->tile_index[p_j2k->m_current_tile_number].tp_index = new_tp_index;
}
}
else{
/*if (!p_j2k->cstr_index->tile_index[p_j2k->m_current_tile_number].tp_index)*/ {
if (!p_j2k->cstr_index->tile_index[p_j2k->m_current_tile_number].tp_index) {
p_j2k->cstr_index->tile_index[p_j2k->m_current_tile_number].current_nb_tps = 10;
p_j2k->cstr_index->tile_index[p_j2k->m_current_tile_number].tp_index =
(opj_tp_index_t*)opj_calloc( p_j2k->cstr_index->tile_index[p_j2k->m_current_tile_number].current_nb_tps,
sizeof(opj_tp_index_t));
if (!p_j2k->cstr_index->tile_index[p_j2k->m_current_tile_number].tp_index) {
p_j2k->cstr_index->tile_index[p_j2k->m_current_tile_number].current_nb_tps = 0;
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to read SOT marker. Tile index allocation failed\n");
return OPJ_FALSE;
}
}
if ( l_current_part >= p_j2k->cstr_index->tile_index[p_j2k->m_current_tile_number].current_nb_tps ){
opj_tp_index_t *new_tp_index;
p_j2k->cstr_index->tile_index[p_j2k->m_current_tile_number].current_nb_tps = l_current_part + 1;
new_tp_index = (opj_tp_index_t *) opj_realloc(
p_j2k->cstr_index->tile_index[p_j2k->m_current_tile_number].tp_index,
p_j2k->cstr_index->tile_index[p_j2k->m_current_tile_number].current_nb_tps * sizeof(opj_tp_index_t));
if (! new_tp_index) {
opj_free(p_j2k->cstr_index->tile_index[p_j2k->m_current_tile_number].tp_index);
p_j2k->cstr_index->tile_index[p_j2k->m_current_tile_number].tp_index = NULL;
p_j2k->cstr_index->tile_index[p_j2k->m_current_tile_number].current_nb_tps = 0;
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to read SOT marker. Tile index allocation failed\n");
return OPJ_FALSE;
}
p_j2k->cstr_index->tile_index[p_j2k->m_current_tile_number].tp_index = new_tp_index;
}
}
}
}
/* FIXME move this onto a separate method to call before reading any SOT, remove part about main_end header, use an index struct inside p_j2k */
/* if (p_j2k->cstr_info) {
if (l_tcp->first) {
if (tileno == 0) {
p_j2k->cstr_info->main_head_end = p_stream_tell(p_stream) - 13;
}
p_j2k->cstr_info->tile[tileno].tileno = tileno;
p_j2k->cstr_info->tile[tileno].start_pos = p_stream_tell(p_stream) - 12;
p_j2k->cstr_info->tile[tileno].end_pos = p_j2k->cstr_info->tile[tileno].start_pos + totlen - 1;
p_j2k->cstr_info->tile[tileno].num_tps = numparts;
if (numparts) {
p_j2k->cstr_info->tile[tileno].tp = (opj_tp_info_t *) opj_malloc(numparts * sizeof(opj_tp_info_t));
}
else {
p_j2k->cstr_info->tile[tileno].tp = (opj_tp_info_t *) opj_malloc(10 * sizeof(opj_tp_info_t)); // Fixme (10)
}
}
else {
p_j2k->cstr_info->tile[tileno].end_pos += totlen;
}
p_j2k->cstr_info->tile[tileno].tp[partno].tp_start_pos = p_stream_tell(p_stream) - 12;
p_j2k->cstr_info->tile[tileno].tp[partno].tp_end_pos =
p_j2k->cstr_info->tile[tileno].tp[partno].tp_start_pos + totlen - 1;
}*/
return OPJ_TRUE;
}
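/* Minimal sketch (editor's addition; hypothetical helper, not part of the
OpenJPEG API): the Psot validity rule enforced by opj_j2k_read_sot above,
factored out for clarity. Psot == 0 means "last tile-part of the codestream";
otherwise it must be at least 14 (a 12-byte SOT segment plus the 2-byte SOD
marker), with 12 tolerated as the Kakadu PHR special case. */
static OPJ_BOOL sketch_psot_is_acceptable(OPJ_UINT32 psot)
{
	return (psot == 0) || (psot == 12) || (psot >= 14);
}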
static OPJ_BOOL opj_j2k_write_sod( opj_j2k_t *p_j2k,
opj_tcd_t * p_tile_coder,
OPJ_BYTE * p_data,
OPJ_UINT32 * p_data_written,
OPJ_UINT32 p_total_data_size,
const opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager
)
{
opj_codestream_info_t *l_cstr_info = 00;
OPJ_UINT32 l_remaining_data;
/* preconditions */
assert(p_j2k != 00);
assert(p_manager != 00);
assert(p_stream != 00);
opj_write_bytes(p_data,J2K_MS_SOD,2); /* SOD */
p_data += 2;
/* make room for the SOD marker written above and for the EOC marker */
l_remaining_data = p_total_data_size - 4;
/* update tile coder */
p_tile_coder->tp_num = p_j2k->m_specific_param.m_encoder.m_current_poc_tile_part_number ;
p_tile_coder->cur_tp_num = p_j2k->m_specific_param.m_encoder.m_current_tile_part_number;
/* INDEX >> */
/* TODO mergeV2: check this part which use cstr_info */
/*l_cstr_info = p_j2k->cstr_info;
if (l_cstr_info) {
if (!p_j2k->m_specific_param.m_encoder.m_current_tile_part_number ) {
//TODO cstr_info->tile[p_j2k->m_current_tile_number].end_header = p_stream_tell(p_stream) + p_j2k->pos_correction - 1;
l_cstr_info->tile[p_j2k->m_current_tile_number].tileno = p_j2k->m_current_tile_number;
}
else {*/
/*
TODO
if
(cstr_info->tile[p_j2k->m_current_tile_number].packet[cstr_info->packno - 1].end_pos < p_stream_tell(p_stream))
{
cstr_info->tile[p_j2k->m_current_tile_number].packet[cstr_info->packno].start_pos = p_stream_tell(p_stream);
}*/
/*}*/
/* UniPG>> */
#ifdef USE_JPWL
/* update markers struct */
/*OPJ_BOOL res = j2k_add_marker(p_j2k->cstr_info, J2K_MS_SOD, p_j2k->sod_start, 2);
*/
assert( 0 && "TODO" );
#endif /* USE_JPWL */
/* <<UniPG */
/*}*/
/* << INDEX */
if (p_j2k->m_specific_param.m_encoder.m_current_tile_part_number == 0) {
p_tile_coder->tcd_image->tiles->packno = 0;
if (l_cstr_info) {
l_cstr_info->packno = 0;
}
}
*p_data_written = 0;
if (! opj_tcd_encode_tile(p_tile_coder, p_j2k->m_current_tile_number, p_data, p_data_written, l_remaining_data , l_cstr_info)) {
opj_event_msg(p_manager, EVT_ERROR, "Cannot encode tile\n");
return OPJ_FALSE;
}
*p_data_written += 2;
return OPJ_TRUE;
}
static OPJ_BOOL opj_j2k_read_sod (opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager
)
{
OPJ_SIZE_T l_current_read_size;
opj_codestream_index_t * l_cstr_index = 00;
OPJ_BYTE ** l_current_data = 00;
opj_tcp_t * l_tcp = 00;
OPJ_UINT32 * l_tile_len = 00;
OPJ_BOOL l_sot_length_pb_detected = OPJ_FALSE;
/* preconditions */
assert(p_j2k != 00);
assert(p_manager != 00);
assert(p_stream != 00);
l_tcp = &(p_j2k->m_cp.tcps[p_j2k->m_current_tile_number]);
if (p_j2k->m_specific_param.m_decoder.m_last_tile_part) {
/* opj_stream_get_number_byte_left returns OPJ_OFF_T, but we are in the
last tile-part, so its result will fit in an OPJ_UINT32 unless we find
a file with a single tile-part of more than 4 GB... */
p_j2k->m_specific_param.m_decoder.m_sot_length = (OPJ_UINT32)(opj_stream_get_number_byte_left(p_stream) - 2);
}
else {
/* Guard against unsigned wrap-around of the OPJ_UINT32 when subtracting the 2-byte SOD marker */
if (p_j2k->m_specific_param.m_decoder.m_sot_length >= 2 )
p_j2k->m_specific_param.m_decoder.m_sot_length -= 2;
else {
/* MSD: case commented to support empty SOT marker (PHR data) */
}
}
l_current_data = &(l_tcp->m_data);
l_tile_len = &l_tcp->m_data_size;
/* Patch to support new PHR data */
if (p_j2k->m_specific_param.m_decoder.m_sot_length) {
/* If we get here, we will try to read the data after allocating it */
/* Check that enough bytes are left in the stream before allocating */
if ((OPJ_OFF_T)p_j2k->m_specific_param.m_decoder.m_sot_length > opj_stream_get_number_byte_left(p_stream)) {
opj_event_msg(p_manager, EVT_ERROR, "Tile part length size inconsistent with stream length\n");
return OPJ_FALSE;
}
if (! *l_current_data) {
/* LH: oddly enough, in this path, l_tile_len != 0.
* TODO: if this were consistent, we could simplify the code to only use realloc(), as realloc(NULL, ...) behaves like malloc().
*/
*l_current_data = (OPJ_BYTE*) opj_malloc(p_j2k->m_specific_param.m_decoder.m_sot_length);
}
else {
OPJ_BYTE *l_new_current_data = (OPJ_BYTE *) opj_realloc(*l_current_data, *l_tile_len + p_j2k->m_specific_param.m_decoder.m_sot_length);
if (! l_new_current_data) {
opj_free(*l_current_data);
/* nothing more is done here, as l_current_data will be set to NULL just
below; we then enter the error path, and the actual tile_len is only
updated (committed) at the end of the
function. */
}
*l_current_data = l_new_current_data;
}
if (*l_current_data == 00) {
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to decode tile\n");
return OPJ_FALSE;
}
}
else {
l_sot_length_pb_detected = OPJ_TRUE;
}
/* Index */
l_cstr_index = p_j2k->cstr_index;
if (l_cstr_index) {
OPJ_OFF_T l_current_pos = opj_stream_tell(p_stream) - 2;
OPJ_UINT32 l_current_tile_part = l_cstr_index->tile_index[p_j2k->m_current_tile_number].current_tpsno;
l_cstr_index->tile_index[p_j2k->m_current_tile_number].tp_index[l_current_tile_part].end_header =
l_current_pos;
l_cstr_index->tile_index[p_j2k->m_current_tile_number].tp_index[l_current_tile_part].end_pos =
l_current_pos + p_j2k->m_specific_param.m_decoder.m_sot_length + 2;
if (OPJ_FALSE == opj_j2k_add_tlmarker(p_j2k->m_current_tile_number,
l_cstr_index,
J2K_MS_SOD,
l_current_pos,
p_j2k->m_specific_param.m_decoder.m_sot_length + 2)) {
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to add tl marker\n");
return OPJ_FALSE;
}
/*l_cstr_index->packno = 0;*/
}
/* Patch to support new PHR data */
if (!l_sot_length_pb_detected) {
l_current_read_size = opj_stream_read_data(
p_stream,
*l_current_data + *l_tile_len,
p_j2k->m_specific_param.m_decoder.m_sot_length,
p_manager);
}
else
{
l_current_read_size = 0;
}
if (l_current_read_size != p_j2k->m_specific_param.m_decoder.m_sot_length) {
p_j2k->m_specific_param.m_decoder.m_state = J2K_STATE_NEOC;
}
else {
p_j2k->m_specific_param.m_decoder.m_state = J2K_STATE_TPHSOT;
}
*l_tile_len += (OPJ_UINT32)l_current_read_size;
return OPJ_TRUE;
}
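/* Editor's note (not in the original source): by this point m_sot_length has
been reduced from Psot by 12 (the SOT marker segment, in opj_j2k_read_sot)
and by 2 (the SOD marker, above), so it is exactly the number of bitstream
bytes appended to l_tcp->m_data. */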
static OPJ_BOOL opj_j2k_write_rgn(opj_j2k_t *p_j2k,
OPJ_UINT32 p_tile_no,
OPJ_UINT32 p_comp_no,
OPJ_UINT32 nb_comps,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager
)
{
OPJ_BYTE * l_current_data = 00;
OPJ_UINT32 l_rgn_size;
opj_cp_t *l_cp = 00;
opj_tcp_t *l_tcp = 00;
opj_tccp_t *l_tccp = 00;
OPJ_UINT32 l_comp_room;
/* preconditions */
assert(p_j2k != 00);
assert(p_manager != 00);
assert(p_stream != 00);
l_cp = &(p_j2k->m_cp);
l_tcp = &l_cp->tcps[p_tile_no];
l_tccp = &l_tcp->tccps[p_comp_no];
if (nb_comps <= 256) {
l_comp_room = 1;
}
else {
l_comp_room = 2;
}
l_rgn_size = 6 + l_comp_room;
l_current_data = p_j2k->m_specific_param.m_encoder.m_header_tile_data;
opj_write_bytes(l_current_data,J2K_MS_RGN,2); /* RGN */
l_current_data += 2;
opj_write_bytes(l_current_data,l_rgn_size-2,2); /* Lrgn */
l_current_data += 2;
opj_write_bytes(l_current_data,p_comp_no,l_comp_room); /* Crgn */
l_current_data+=l_comp_room;
opj_write_bytes(l_current_data, 0,1); /* Srgn */
++l_current_data;
opj_write_bytes(l_current_data, (OPJ_UINT32)l_tccp->roishift,1); /* SPrgn */
++l_current_data;
if (opj_stream_write_data(p_stream,p_j2k->m_specific_param.m_encoder.m_header_tile_data,l_rgn_size,p_manager) != l_rgn_size) {
return OPJ_FALSE;
}
return OPJ_TRUE;
}
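/* Editor's note (layout recap, not in the original source): the RGN marker
segment written above is RGN(2) Lrgn(2) Crgn(1 or 2) Srgn(1) SPrgn(1),
hence l_rgn_size = 6 + l_comp_room. */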
static OPJ_BOOL opj_j2k_write_eoc( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager
)
{
/* preconditions */
assert(p_j2k != 00);
assert(p_manager != 00);
assert(p_stream != 00);
opj_write_bytes(p_j2k->m_specific_param.m_encoder.m_header_tile_data,J2K_MS_EOC,2); /* EOC */
/* UniPG>> */
#ifdef USE_JPWL
/* update markers struct */
/*
OPJ_BOOL res = j2k_add_marker(p_j2k->cstr_info, J2K_MS_EOC, p_stream_tell(p_stream) - 2, 2);
*/
#endif /* USE_JPWL */
if ( opj_stream_write_data(p_stream,p_j2k->m_specific_param.m_encoder.m_header_tile_data,2,p_manager) != 2) {
return OPJ_FALSE;
}
if ( ! opj_stream_flush(p_stream,p_manager) ) {
return OPJ_FALSE;
}
return OPJ_TRUE;
}
/**
* Reads a RGN marker (Region Of Interest)
*
* @param p_header_data the data contained in the POC box.
* @param p_j2k the jpeg2000 codec.
* @param p_header_size the size of the data contained in the POC marker.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_rgn (opj_j2k_t *p_j2k,
OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
opj_event_mgr_t * p_manager
)
{
OPJ_UINT32 l_nb_comp;
opj_image_t * l_image = 00;
opj_cp_t *l_cp = 00;
opj_tcp_t *l_tcp = 00;
OPJ_UINT32 l_comp_room, l_comp_no, l_roi_sty;
/* preconditions*/
assert(p_header_data != 00);
assert(p_j2k != 00);
assert(p_manager != 00);
l_image = p_j2k->m_private_image;
l_nb_comp = l_image->numcomps;
if (l_nb_comp <= 256) {
l_comp_room = 1;
}
else {
l_comp_room = 2;
}
if (p_header_size != 2 + l_comp_room) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading RGN marker\n");
return OPJ_FALSE;
}
l_cp = &(p_j2k->m_cp);
l_tcp = (p_j2k->m_specific_param.m_decoder.m_state == J2K_STATE_TPH) ?
&l_cp->tcps[p_j2k->m_current_tile_number] :
p_j2k->m_specific_param.m_decoder.m_default_tcp;
opj_read_bytes(p_header_data,&l_comp_no,l_comp_room); /* Crgn */
p_header_data+=l_comp_room;
opj_read_bytes(p_header_data,&l_roi_sty,1); /* Srgn */
++p_header_data;
#ifdef USE_JPWL
if (l_cp->correct) {
/* component number is out of range!!! */
if (l_comp_no >= l_nb_comp) {
opj_event_msg(p_manager, EVT_ERROR,
"JPWL: bad component number in RGN (%d when there are only %d)\n",
l_comp_no, l_nb_comp);
if (!JPWL_ASSUME || JPWL_ASSUME) {
opj_event_msg(p_manager, EVT_ERROR, "JPWL: giving up\n");
return OPJ_FALSE;
}
}
};
#endif /* USE_JPWL */
/* testcase 3635.pdf.asan.77.2930 */
if (l_comp_no >= l_nb_comp) {
opj_event_msg(p_manager, EVT_ERROR,
"bad component number in RGN (%d when there are only %d)\n",
l_comp_no, l_nb_comp);
return OPJ_FALSE;
}
opj_read_bytes(p_header_data,(OPJ_UINT32 *) (&(l_tcp->tccps[l_comp_no].roishift)),1); /* SPrgn */
++p_header_data;
return OPJ_TRUE;
}
static OPJ_FLOAT32 opj_j2k_get_tp_stride (opj_tcp_t * p_tcp)
{
return (OPJ_FLOAT32) ((p_tcp->m_nb_tile_parts - 1) * 14);
}
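/* Editor's note (not in the original source): the factor 14 above appears to
account for the per-tile-part overhead of one SOT marker segment (12 bytes)
plus the SOD marker (2 bytes); the first tile-part is accounted for
elsewhere, hence (m_nb_tile_parts - 1). */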
static OPJ_FLOAT32 opj_j2k_get_default_stride (opj_tcp_t * p_tcp)
{
(void)p_tcp;
return 0;
}
static OPJ_BOOL opj_j2k_update_rates( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager )
{
opj_cp_t * l_cp = 00;
opj_image_t * l_image = 00;
opj_tcp_t * l_tcp = 00;
opj_image_comp_t * l_img_comp = 00;
OPJ_UINT32 i,j,k;
OPJ_INT32 l_x0,l_y0,l_x1,l_y1;
OPJ_FLOAT32 * l_rates = 0;
OPJ_FLOAT32 l_sot_remove;
OPJ_UINT32 l_bits_empty, l_size_pixel;
OPJ_UINT32 l_tile_size = 0;
OPJ_UINT32 l_last_res;
OPJ_FLOAT32 (* l_tp_stride_func)(opj_tcp_t *) = 00;
/* preconditions */
assert(p_j2k != 00);
assert(p_manager != 00);
assert(p_stream != 00);
l_cp = &(p_j2k->m_cp);
l_image = p_j2k->m_private_image;
l_tcp = l_cp->tcps;
l_bits_empty = 8 * l_image->comps->dx * l_image->comps->dy;
l_size_pixel = l_image->numcomps * l_image->comps->prec;
l_sot_remove = (OPJ_FLOAT32) opj_stream_tell(p_stream) / (OPJ_FLOAT32)(l_cp->th * l_cp->tw);
if (l_cp->m_specific_param.m_enc.m_tp_on) {
l_tp_stride_func = opj_j2k_get_tp_stride;
}
else {
l_tp_stride_func = opj_j2k_get_default_stride;
}
for (i=0;i<l_cp->th;++i) {
for (j=0;j<l_cp->tw;++j) {
OPJ_FLOAT32 l_offset = (OPJ_FLOAT32)(*l_tp_stride_func)(l_tcp) / (OPJ_FLOAT32)l_tcp->numlayers;
/* 4 borders of the tile rescale on the image if necessary */
l_x0 = opj_int_max((OPJ_INT32)(l_cp->tx0 + j * l_cp->tdx), (OPJ_INT32)l_image->x0);
l_y0 = opj_int_max((OPJ_INT32)(l_cp->ty0 + i * l_cp->tdy), (OPJ_INT32)l_image->y0);
l_x1 = opj_int_min((OPJ_INT32)(l_cp->tx0 + (j + 1) * l_cp->tdx), (OPJ_INT32)l_image->x1);
l_y1 = opj_int_min((OPJ_INT32)(l_cp->ty0 + (i + 1) * l_cp->tdy), (OPJ_INT32)l_image->y1);
l_rates = l_tcp->rates;
/* Modification of the RATE >> */
if (*l_rates) {
*l_rates = (( (OPJ_FLOAT32) (l_size_pixel * (OPJ_UINT32)(l_x1 - l_x0) * (OPJ_UINT32)(l_y1 - l_y0)))
/
((*l_rates) * (OPJ_FLOAT32)l_bits_empty)
)
-
l_offset;
}
++l_rates;
for (k = 1; k < l_tcp->numlayers; ++k) {
if (*l_rates) {
*l_rates = (( (OPJ_FLOAT32) (l_size_pixel * (OPJ_UINT32)(l_x1 - l_x0) * (OPJ_UINT32)(l_y1 - l_y0)))
/
((*l_rates) * (OPJ_FLOAT32)l_bits_empty)
)
-
l_offset;
}
++l_rates;
}
++l_tcp;
}
}
l_tcp = l_cp->tcps;
for (i=0;i<l_cp->th;++i) {
for (j=0;j<l_cp->tw;++j) {
l_rates = l_tcp->rates;
if (*l_rates) {
*l_rates -= l_sot_remove;
if (*l_rates < 30) {
*l_rates = 30;
}
}
++l_rates;
l_last_res = l_tcp->numlayers - 1;
for (k = 1; k < l_last_res; ++k) {
if (*l_rates) {
*l_rates -= l_sot_remove;
if (*l_rates < *(l_rates - 1) + 10) {
*l_rates = (*(l_rates - 1)) + 20;
}
}
++l_rates;
}
if (*l_rates) {
*l_rates -= (l_sot_remove + 2.f);
if (*l_rates < *(l_rates - 1) + 10) {
*l_rates = (*(l_rates - 1)) + 20;
}
}
++l_tcp;
}
}
l_img_comp = l_image->comps;
l_tile_size = 0;
for (i=0;i<l_image->numcomps;++i) {
l_tile_size += ( opj_uint_ceildiv(l_cp->tdx,l_img_comp->dx)
*
opj_uint_ceildiv(l_cp->tdy,l_img_comp->dy)
*
l_img_comp->prec
);
++l_img_comp;
}
l_tile_size = (OPJ_UINT32) (l_tile_size * 0.1625); /* 1.3/8 = 0.1625 */
l_tile_size += opj_j2k_get_specific_header_sizes(p_j2k);
p_j2k->m_specific_param.m_encoder.m_encoded_tile_size = l_tile_size;
p_j2k->m_specific_param.m_encoder.m_encoded_tile_data =
(OPJ_BYTE *) opj_malloc(p_j2k->m_specific_param.m_encoder.m_encoded_tile_size);
if (p_j2k->m_specific_param.m_encoder.m_encoded_tile_data == 00) {
return OPJ_FALSE;
}
if (OPJ_IS_CINEMA(l_cp->rsiz)) {
p_j2k->m_specific_param.m_encoder.m_tlm_sot_offsets_buffer =
(OPJ_BYTE *) opj_malloc(5*p_j2k->m_specific_param.m_encoder.m_total_tile_parts);
if (! p_j2k->m_specific_param.m_encoder.m_tlm_sot_offsets_buffer) {
return OPJ_FALSE;
}
p_j2k->m_specific_param.m_encoder.m_tlm_sot_offsets_current =
p_j2k->m_specific_param.m_encoder.m_tlm_sot_offsets_buffer;
}
return OPJ_TRUE;
}
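/* Worked example (editor's note, not in the original source): for a 512x512
tile with 3 components at 8 bits and dx = dy = 1, l_size_pixel = 24 and
l_bits_empty = 8, so a requested compression ratio of 20 becomes a budget of
(24 * 512 * 512) / (20 * 8) = 39321.6 bytes, before the l_offset and
l_sot_remove corrections applied above. */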
#if 0
static OPJ_BOOL opj_j2k_read_eoc ( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager )
{
OPJ_UINT32 i;
opj_tcd_t * l_tcd = 00;
OPJ_UINT32 l_nb_tiles;
opj_tcp_t * l_tcp = 00;
OPJ_BOOL l_success;
/* preconditions */
assert(p_j2k != 00);
assert(p_manager != 00);
assert(p_stream != 00);
l_nb_tiles = p_j2k->m_cp.th * p_j2k->m_cp.tw;
l_tcp = p_j2k->m_cp.tcps;
l_tcd = opj_tcd_create(OPJ_TRUE);
if (l_tcd == 00) {
opj_event_msg(p_manager, EVT_ERROR, "Cannot decode tile, memory error\n");
return OPJ_FALSE;
}
for (i = 0; i < l_nb_tiles; ++i) {
if (l_tcp->m_data) {
if (! opj_tcd_init_decode_tile(l_tcd, i)) {
opj_tcd_destroy(l_tcd);
opj_event_msg(p_manager, EVT_ERROR, "Cannot decode tile, memory error\n");
return OPJ_FALSE;
}
l_success = opj_tcd_decode_tile(l_tcd, l_tcp->m_data, l_tcp->m_data_size, i, p_j2k->cstr_index);
/* cleanup */
if (! l_success) {
p_j2k->m_specific_param.m_decoder.m_state |= J2K_STATE_ERR;
break;
}
}
opj_j2k_tcp_destroy(l_tcp);
++l_tcp;
}
opj_tcd_destroy(l_tcd);
return OPJ_TRUE;
}
#endif
static OPJ_BOOL opj_j2k_get_end_header(opj_j2k_t *p_j2k,
struct opj_stream_private *p_stream,
struct opj_event_mgr * p_manager )
{
/* preconditions */
assert(p_j2k != 00);
assert(p_manager != 00);
assert(p_stream != 00);
p_j2k->cstr_index->main_head_end = opj_stream_tell(p_stream);
return OPJ_TRUE;
}
static OPJ_BOOL opj_j2k_write_mct_data_group( opj_j2k_t *p_j2k,
struct opj_stream_private *p_stream,
struct opj_event_mgr * p_manager )
{
OPJ_UINT32 i;
opj_simple_mcc_decorrelation_data_t * l_mcc_record;
opj_mct_data_t * l_mct_record;
opj_tcp_t * l_tcp;
/* preconditions */
assert(p_j2k != 00);
assert(p_stream != 00);
assert(p_manager != 00);
if (! opj_j2k_write_cbd(p_j2k,p_stream,p_manager)) {
return OPJ_FALSE;
}
l_tcp = &(p_j2k->m_cp.tcps[p_j2k->m_current_tile_number]);
l_mct_record = l_tcp->m_mct_records;
for (i=0;i<l_tcp->m_nb_mct_records;++i) {
if (! opj_j2k_write_mct_record(p_j2k,l_mct_record,p_stream,p_manager)) {
return OPJ_FALSE;
}
++l_mct_record;
}
l_mcc_record = l_tcp->m_mcc_records;
for (i=0;i<l_tcp->m_nb_mcc_records;++i) {
if (! opj_j2k_write_mcc_record(p_j2k,l_mcc_record,p_stream,p_manager)) {
return OPJ_FALSE;
}
++l_mcc_record;
}
if (! opj_j2k_write_mco(p_j2k,p_stream,p_manager)) {
return OPJ_FALSE;
}
return OPJ_TRUE;
}
#if 0
static OPJ_BOOL opj_j2k_write_all_coc(opj_j2k_t *p_j2k,
struct opj_stream_private *p_stream,
struct opj_event_mgr * p_manager )
{
OPJ_UINT32 compno;
/* preconditions */
assert(p_j2k != 00);
assert(p_manager != 00);
assert(p_stream != 00);
for (compno = 0; compno < p_j2k->m_private_image->numcomps; ++compno)
{
if (! opj_j2k_write_coc(p_j2k,compno,p_stream, p_manager)) {
return OPJ_FALSE;
}
}
return OPJ_TRUE;
}
#endif
#if 0
static OPJ_BOOL opj_j2k_write_all_qcc(opj_j2k_t *p_j2k,
struct opj_stream_private *p_stream,
struct opj_event_mgr * p_manager )
{
OPJ_UINT32 compno;
/* preconditions */
assert(p_j2k != 00);
assert(p_manager != 00);
assert(p_stream != 00);
for (compno = 0; compno < p_j2k->m_private_image->numcomps; ++compno)
{
if (! opj_j2k_write_qcc(p_j2k,compno,p_stream, p_manager)) {
return OPJ_FALSE;
}
}
return OPJ_TRUE;
}
#endif
static OPJ_BOOL opj_j2k_write_regions( opj_j2k_t *p_j2k,
struct opj_stream_private *p_stream,
struct opj_event_mgr * p_manager )
{
OPJ_UINT32 compno;
const opj_tccp_t *l_tccp = 00;
/* preconditions */
assert(p_j2k != 00);
assert(p_manager != 00);
assert(p_stream != 00);
l_tccp = p_j2k->m_cp.tcps->tccps;
for (compno = 0; compno < p_j2k->m_private_image->numcomps; ++compno) {
if (l_tccp->roishift) {
if (! opj_j2k_write_rgn(p_j2k,0,compno,p_j2k->m_private_image->numcomps,p_stream,p_manager)) {
return OPJ_FALSE;
}
}
++l_tccp;
}
return OPJ_TRUE;
}
static OPJ_BOOL opj_j2k_write_epc( opj_j2k_t *p_j2k,
struct opj_stream_private *p_stream,
struct opj_event_mgr * p_manager )
{
opj_codestream_index_t * l_cstr_index = 00;
/* preconditions */
assert(p_j2k != 00);
assert(p_manager != 00);
assert(p_stream != 00);
l_cstr_index = p_j2k->cstr_index;
if (l_cstr_index) {
l_cstr_index->codestream_size = (OPJ_UINT64)opj_stream_tell(p_stream);
/* UniPG>> */
/* The following adjustment is done to adjust the codestream size */
/* if SOD is not at 0 in the buffer. Useful in case of JP2, where */
/* the first bunch of bytes is not in the codestream */
l_cstr_index->codestream_size -= (OPJ_UINT64)l_cstr_index->main_head_start;
/* <<UniPG */
}
#ifdef USE_JPWL
/* preparation of JPWL marker segments */
#if 0
if(cp->epc_on) {
/* encode according to JPWL */
jpwl_encode(p_j2k, p_stream, image);
}
#endif
assert( 0 && "TODO" );
#endif /* USE_JPWL */
return OPJ_TRUE;
}
static OPJ_BOOL opj_j2k_read_unk ( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
OPJ_UINT32 *output_marker,
opj_event_mgr_t * p_manager
)
{
OPJ_UINT32 l_unknown_marker;
const opj_dec_memory_marker_handler_t * l_marker_handler;
OPJ_UINT32 l_size_unk = 2;
/* preconditions*/
assert(p_j2k != 00);
assert(p_manager != 00);
assert(p_stream != 00);
opj_event_msg(p_manager, EVT_WARNING, "Unknown marker\n");
for (;;) {
/* Try to read 2 bytes (the next marker ID) from stream and copy them into the buffer*/
if (opj_stream_read_data(p_stream,p_j2k->m_specific_param.m_decoder.m_header_data,2,p_manager) != 2) {
opj_event_msg(p_manager, EVT_ERROR, "Stream too short\n");
return OPJ_FALSE;
}
/* read 2 bytes as the new marker ID*/
opj_read_bytes(p_j2k->m_specific_param.m_decoder.m_header_data,&l_unknown_marker,2);
if (!(l_unknown_marker < 0xff00)) {
/* Get the marker handler from the marker ID*/
l_marker_handler = opj_j2k_get_marker_handler(l_unknown_marker);
if (!(p_j2k->m_specific_param.m_decoder.m_state & l_marker_handler->states)) {
opj_event_msg(p_manager, EVT_ERROR, "Marker is not compliant with its position\n");
return OPJ_FALSE;
}
else {
if (l_marker_handler->id != J2K_MS_UNK) {
/* Add the marker to the codestream index*/
if (l_marker_handler->id != J2K_MS_SOT)
{
OPJ_BOOL res = opj_j2k_add_mhmarker(p_j2k->cstr_index, J2K_MS_UNK,
(OPJ_UINT32) opj_stream_tell(p_stream) - l_size_unk,
l_size_unk);
if (res == OPJ_FALSE) {
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to add mh marker\n");
return OPJ_FALSE;
}
}
break; /* next marker is known and well located */
}
else
l_size_unk += 2;
}
}
}
*output_marker = l_marker_handler->id ;
return OPJ_TRUE;
}
static OPJ_BOOL opj_j2k_write_mct_record( opj_j2k_t *p_j2k,
opj_mct_data_t * p_mct_record,
struct opj_stream_private *p_stream,
struct opj_event_mgr * p_manager )
{
OPJ_UINT32 l_mct_size;
OPJ_BYTE * l_current_data = 00;
OPJ_UINT32 l_tmp;
/* preconditions */
assert(p_j2k != 00);
assert(p_manager != 00);
assert(p_stream != 00);
l_mct_size = 10 + p_mct_record->m_data_size;
if (l_mct_size > p_j2k->m_specific_param.m_encoder.m_header_tile_data_size) {
OPJ_BYTE *new_header_tile_data = (OPJ_BYTE *) opj_realloc(p_j2k->m_specific_param.m_encoder.m_header_tile_data, l_mct_size);
if (! new_header_tile_data) {
opj_free(p_j2k->m_specific_param.m_encoder.m_header_tile_data);
p_j2k->m_specific_param.m_encoder.m_header_tile_data = NULL;
p_j2k->m_specific_param.m_encoder.m_header_tile_data_size = 0;
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to write MCT marker\n");
return OPJ_FALSE;
}
p_j2k->m_specific_param.m_encoder.m_header_tile_data = new_header_tile_data;
p_j2k->m_specific_param.m_encoder.m_header_tile_data_size = l_mct_size;
}
l_current_data = p_j2k->m_specific_param.m_encoder.m_header_tile_data;
opj_write_bytes(l_current_data,J2K_MS_MCT,2); /* MCT */
l_current_data += 2;
opj_write_bytes(l_current_data,l_mct_size-2,2); /* Lmct */
l_current_data += 2;
opj_write_bytes(l_current_data,0,2); /* Zmct */
l_current_data += 2;
/* only one marker atm */
l_tmp = (p_mct_record->m_index & 0xff) | (p_mct_record->m_array_type << 8) | (p_mct_record->m_element_type << 10);
opj_write_bytes(l_current_data,l_tmp,2);
l_current_data += 2;
opj_write_bytes(l_current_data,0,2); /* Ymct */
l_current_data+=2;
memcpy(l_current_data,p_mct_record->m_data,p_mct_record->m_data_size);
if (opj_stream_write_data(p_stream,p_j2k->m_specific_param.m_encoder.m_header_tile_data,l_mct_size,p_manager) != l_mct_size) {
return OPJ_FALSE;
}
return OPJ_TRUE;
}
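/* Minimal sketch (editor's addition; hypothetical helper, not part of the
OpenJPEG API): the 16-bit Imct field written above packs three values:
bits 0-7 record index, bits 8-9 array type, bits 10-11 element type.
opj_j2k_read_mct below unpacks the same layout. */
static OPJ_UINT32 sketch_pack_imct(OPJ_UINT32 index, OPJ_UINT32 array_type, OPJ_UINT32 element_type)
{
	return (index & 0xff) | ((array_type & 3) << 8) | ((element_type & 3) << 10);
}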
/**
* Reads a MCT marker (Multiple Component Transform)
*
* @param p_header_data the data contained in the MCT box.
* @param p_j2k the jpeg2000 codec.
* @param p_header_size the size of the data contained in the MCT marker.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_mct ( opj_j2k_t *p_j2k,
OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
opj_event_mgr_t * p_manager
)
{
OPJ_UINT32 i;
opj_tcp_t *l_tcp = 00;
OPJ_UINT32 l_tmp;
OPJ_UINT32 l_indix;
opj_mct_data_t * l_mct_data;
/* preconditions */
assert(p_header_data != 00);
assert(p_j2k != 00);
l_tcp = p_j2k->m_specific_param.m_decoder.m_state == J2K_STATE_TPH ?
&p_j2k->m_cp.tcps[p_j2k->m_current_tile_number] :
p_j2k->m_specific_param.m_decoder.m_default_tcp;
if (p_header_size < 2) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading MCT marker\n");
return OPJ_FALSE;
}
/* first marker */
opj_read_bytes(p_header_data,&l_tmp,2); /* Zmct */
p_header_data += 2;
if (l_tmp != 0) {
opj_event_msg(p_manager, EVT_WARNING, "Cannot take in charge mct data within multiple MCT records\n");
return OPJ_TRUE;
}
if(p_header_size <= 6) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading MCT marker\n");
return OPJ_FALSE;
}
/* Imct -> no need for other values; take the first. Type "double with decorrelation" is binary 0000 1101 0000 0000 (0x0D00). */
opj_read_bytes(p_header_data,&l_tmp,2); /* Imct */
p_header_data += 2;
l_indix = l_tmp & 0xff;
l_mct_data = l_tcp->m_mct_records;
for (i=0;i<l_tcp->m_nb_mct_records;++i) {
if (l_mct_data->m_index == l_indix) {
break;
}
++l_mct_data;
}
/* NOT FOUND */
if (i == l_tcp->m_nb_mct_records) {
if (l_tcp->m_nb_mct_records == l_tcp->m_nb_max_mct_records) {
opj_mct_data_t *new_mct_records;
l_tcp->m_nb_max_mct_records += OPJ_J2K_MCT_DEFAULT_NB_RECORDS;
new_mct_records = (opj_mct_data_t *) opj_realloc(l_tcp->m_mct_records, l_tcp->m_nb_max_mct_records * sizeof(opj_mct_data_t));
if (! new_mct_records) {
opj_free(l_tcp->m_mct_records);
l_tcp->m_mct_records = NULL;
l_tcp->m_nb_max_mct_records = 0;
l_tcp->m_nb_mct_records = 0;
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to read MCT marker\n");
return OPJ_FALSE;
}
l_tcp->m_mct_records = new_mct_records;
l_mct_data = l_tcp->m_mct_records + l_tcp->m_nb_mct_records;
memset(l_mct_data ,0,(l_tcp->m_nb_max_mct_records - l_tcp->m_nb_mct_records) * sizeof(opj_mct_data_t));
}
l_mct_data = l_tcp->m_mct_records + l_tcp->m_nb_mct_records;
++l_tcp->m_nb_mct_records;
}
if (l_mct_data->m_data) {
opj_free(l_mct_data->m_data);
l_mct_data->m_data = 00;
}
l_mct_data->m_index = l_indix;
l_mct_data->m_array_type = (J2K_MCT_ARRAY_TYPE)((l_tmp >> 8) & 3);
l_mct_data->m_element_type = (J2K_MCT_ELEMENT_TYPE)((l_tmp >> 10) & 3);
opj_read_bytes(p_header_data,&l_tmp,2); /* Ymct */
p_header_data+=2;
if (l_tmp != 0) {
opj_event_msg(p_manager, EVT_WARNING, "Cannot take in charge multiple MCT markers\n");
return OPJ_TRUE;
}
p_header_size -= 6;
l_mct_data->m_data = (OPJ_BYTE*)opj_malloc(p_header_size);
if (! l_mct_data->m_data) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading MCT marker\n");
return OPJ_FALSE;
}
memcpy(l_mct_data->m_data,p_header_data,p_header_size);
l_mct_data->m_data_size = p_header_size;
return OPJ_TRUE;
}
static OPJ_BOOL opj_j2k_write_mcc_record( opj_j2k_t *p_j2k,
struct opj_simple_mcc_decorrelation_data * p_mcc_record,
struct opj_stream_private *p_stream,
struct opj_event_mgr * p_manager )
{
OPJ_UINT32 i;
OPJ_UINT32 l_mcc_size;
OPJ_BYTE * l_current_data = 00;
OPJ_UINT32 l_nb_bytes_for_comp;
OPJ_UINT32 l_mask;
OPJ_UINT32 l_tmcc;
/* preconditions */
assert(p_j2k != 00);
assert(p_manager != 00);
assert(p_stream != 00);
if (p_mcc_record->m_nb_comps > 255 ) {
l_nb_bytes_for_comp = 2;
l_mask = 0x8000;
}
else {
l_nb_bytes_for_comp = 1;
l_mask = 0;
}
l_mcc_size = p_mcc_record->m_nb_comps * 2 * l_nb_bytes_for_comp + 19;
if (l_mcc_size > p_j2k->m_specific_param.m_encoder.m_header_tile_data_size)
{
OPJ_BYTE *new_header_tile_data = (OPJ_BYTE *) opj_realloc(p_j2k->m_specific_param.m_encoder.m_header_tile_data, l_mcc_size);
if (! new_header_tile_data) {
opj_free(p_j2k->m_specific_param.m_encoder.m_header_tile_data);
p_j2k->m_specific_param.m_encoder.m_header_tile_data = NULL;
p_j2k->m_specific_param.m_encoder.m_header_tile_data_size = 0;
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to write MCC marker\n");
return OPJ_FALSE;
}
p_j2k->m_specific_param.m_encoder.m_header_tile_data = new_header_tile_data;
p_j2k->m_specific_param.m_encoder.m_header_tile_data_size = l_mcc_size;
}
l_current_data = p_j2k->m_specific_param.m_encoder.m_header_tile_data;
opj_write_bytes(l_current_data,J2K_MS_MCC,2); /* MCC */
l_current_data += 2;
opj_write_bytes(l_current_data,l_mcc_size-2,2); /* Lmcc */
l_current_data += 2;
/* first marker */
opj_write_bytes(l_current_data,0,2); /* Zmcc */
l_current_data += 2;
opj_write_bytes(l_current_data,p_mcc_record->m_index,1); /* Imcc -> no need for other values, take the first */
++l_current_data;
/* only one marker atm */
opj_write_bytes(l_current_data,0,2); /* Ymcc */
l_current_data+=2;
opj_write_bytes(l_current_data,1,2); /* Qmcc -> number of collections -> 1 */
l_current_data+=2;
opj_write_bytes(l_current_data,0x1,1); /* Xmcci type of component transformation -> array based decorrelation */
++l_current_data;
opj_write_bytes(l_current_data,p_mcc_record->m_nb_comps | l_mask,2); /* Nmcci number of input components involved and size for each component offset = 8 bits */
l_current_data+=2;
for (i=0;i<p_mcc_record->m_nb_comps;++i) {
opj_write_bytes(l_current_data,i,l_nb_bytes_for_comp); /* Cmccij Component offset*/
l_current_data+=l_nb_bytes_for_comp;
}
opj_write_bytes(l_current_data,p_mcc_record->m_nb_comps|l_mask,2); /* Mmcci number of output components involved and size for each component offset = 8 bits */
l_current_data+=2;
for (i=0;i<p_mcc_record->m_nb_comps;++i)
{
opj_write_bytes(l_current_data,i,l_nb_bytes_for_comp); /* Wmccij Component offset*/
l_current_data+=l_nb_bytes_for_comp;
}
l_tmcc = ((!p_mcc_record->m_is_irreversible)&1)<<16;
if (p_mcc_record->m_decorrelation_array) {
l_tmcc |= p_mcc_record->m_decorrelation_array->m_index;
}
if (p_mcc_record->m_offset_array) {
l_tmcc |= ((p_mcc_record->m_offset_array->m_index)<<8);
}
opj_write_bytes(l_current_data,l_tmcc,3); /* Tmcci : use MCT defined as number 1 and irreversible array based. */
l_current_data+=3;
if (opj_stream_write_data(p_stream,p_j2k->m_specific_param.m_encoder.m_header_tile_data,l_mcc_size,p_manager) != l_mcc_size) {
return OPJ_FALSE;
}
return OPJ_TRUE;
}
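/* Editor's note (layout recap, not in the original source): the 3-byte Tmcc
field written above packs, from the least significant bit upward,
bits 0-7 index of the decorrelation MCT record, bits 8-15 index of the
offset MCT record, and bit 16 the reversibility flag (1 = reversible);
opj_j2k_read_mcc below decodes the same layout. */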
static OPJ_BOOL opj_j2k_read_mcc ( opj_j2k_t *p_j2k,
OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
opj_event_mgr_t * p_manager )
{
OPJ_UINT32 i,j;
OPJ_UINT32 l_tmp;
OPJ_UINT32 l_indix;
opj_tcp_t * l_tcp;
opj_simple_mcc_decorrelation_data_t * l_mcc_record;
opj_mct_data_t * l_mct_data;
OPJ_UINT32 l_nb_collections;
OPJ_UINT32 l_nb_comps;
OPJ_UINT32 l_nb_bytes_by_comp;
/* preconditions */
assert(p_header_data != 00);
assert(p_j2k != 00);
assert(p_manager != 00);
l_tcp = p_j2k->m_specific_param.m_decoder.m_state == J2K_STATE_TPH ?
&p_j2k->m_cp.tcps[p_j2k->m_current_tile_number] :
p_j2k->m_specific_param.m_decoder.m_default_tcp;
if (p_header_size < 2) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading MCC marker\n");
return OPJ_FALSE;
}
/* first marker */
opj_read_bytes(p_header_data,&l_tmp,2); /* Zmcc */
p_header_data += 2;
if (l_tmp != 0) {
opj_event_msg(p_manager, EVT_WARNING, "Cannot take in charge multiple data spanning\n");
return OPJ_TRUE;
}
if (p_header_size < 7) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading MCC marker\n");
return OPJ_FALSE;
}
opj_read_bytes(p_header_data,&l_indix,1); /* Imcc -> no need for other values, take the first */
++p_header_data;
l_mcc_record = l_tcp->m_mcc_records;
for(i=0;i<l_tcp->m_nb_mcc_records;++i) {
if (l_mcc_record->m_index == l_indix) {
break;
}
++l_mcc_record;
}
/** NOT FOUND */
if (i == l_tcp->m_nb_mcc_records) {
if (l_tcp->m_nb_mcc_records == l_tcp->m_nb_max_mcc_records) {
opj_simple_mcc_decorrelation_data_t *new_mcc_records;
l_tcp->m_nb_max_mcc_records += OPJ_J2K_MCC_DEFAULT_NB_RECORDS;
new_mcc_records = (opj_simple_mcc_decorrelation_data_t *) opj_realloc(
l_tcp->m_mcc_records, l_tcp->m_nb_max_mcc_records * sizeof(opj_simple_mcc_decorrelation_data_t));
if (! new_mcc_records) {
opj_free(l_tcp->m_mcc_records);
l_tcp->m_mcc_records = NULL;
l_tcp->m_nb_max_mcc_records = 0;
l_tcp->m_nb_mcc_records = 0;
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to read MCC marker\n");
return OPJ_FALSE;
}
l_tcp->m_mcc_records = new_mcc_records;
l_mcc_record = l_tcp->m_mcc_records + l_tcp->m_nb_mcc_records;
memset(l_mcc_record,0,(l_tcp->m_nb_max_mcc_records-l_tcp->m_nb_mcc_records) * sizeof(opj_simple_mcc_decorrelation_data_t));
}
l_mcc_record = l_tcp->m_mcc_records + l_tcp->m_nb_mcc_records;
}
l_mcc_record->m_index = l_indix;
/* only one marker atm */
opj_read_bytes(p_header_data,&l_tmp,2); /* Ymcc */
p_header_data+=2;
if (l_tmp != 0) {
opj_event_msg(p_manager, EVT_WARNING, "Cannot take in charge multiple data spanning\n");
return OPJ_TRUE;
}
opj_read_bytes(p_header_data,&l_nb_collections,2); /* Qmcc -> number of collections -> 1 */
p_header_data+=2;
if (l_nb_collections > 1) {
opj_event_msg(p_manager, EVT_WARNING, "Cannot take in charge multiple collections\n");
return OPJ_TRUE;
}
p_header_size -= 7;
for (i=0;i<l_nb_collections;++i) {
if (p_header_size < 3) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading MCC marker\n");
return OPJ_FALSE;
}
opj_read_bytes(p_header_data,&l_tmp,1); /* Xmcci type of component transformation -> array based decorrelation */
++p_header_data;
if (l_tmp != 1) {
opj_event_msg(p_manager, EVT_WARNING, "Cannot take in charge collections other than array decorrelation\n");
return OPJ_TRUE;
}
opj_read_bytes(p_header_data,&l_nb_comps,2);
p_header_data+=2;
p_header_size-=3;
l_nb_bytes_by_comp = 1 + (l_nb_comps>>15);
l_mcc_record->m_nb_comps = l_nb_comps & 0x7fff;
if (p_header_size < (l_nb_bytes_by_comp * l_mcc_record->m_nb_comps + 2)) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading MCC marker\n");
return OPJ_FALSE;
}
p_header_size -= (l_nb_bytes_by_comp * l_mcc_record->m_nb_comps + 2);
for (j=0;j<l_mcc_record->m_nb_comps;++j) {
opj_read_bytes(p_header_data,&l_tmp,l_nb_bytes_by_comp); /* Cmccij Component offset*/
p_header_data+=l_nb_bytes_by_comp;
if (l_tmp != j) {
opj_event_msg(p_manager, EVT_WARNING, "Cannot take in charge collections with indix shuffle\n");
return OPJ_TRUE;
}
}
opj_read_bytes(p_header_data,&l_nb_comps,2);
p_header_data+=2;
l_nb_bytes_by_comp = 1 + (l_nb_comps>>15);
l_nb_comps &= 0x7fff;
if (l_nb_comps != l_mcc_record->m_nb_comps) {
opj_event_msg(p_manager, EVT_WARNING, "Cannot take in charge collections without same number of indixes\n");
return OPJ_TRUE;
}
if (p_header_size < (l_nb_bytes_by_comp * l_mcc_record->m_nb_comps + 3)) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading MCC marker\n");
return OPJ_FALSE;
}
p_header_size -= (l_nb_bytes_by_comp * l_mcc_record->m_nb_comps + 3);
for (j=0;j<l_mcc_record->m_nb_comps;++j) {
opj_read_bytes(p_header_data,&l_tmp,l_nb_bytes_by_comp); /* Wmccij Component offset*/
p_header_data+=l_nb_bytes_by_comp;
if (l_tmp != j) {
opj_event_msg(p_manager, EVT_WARNING, "Cannot take in charge collections with indix shuffle\n");
return OPJ_TRUE;
}
}
opj_read_bytes(p_header_data,&l_tmp,3); /* Wmccij Component offset*/
p_header_data += 3;
l_mcc_record->m_is_irreversible = ! ((l_tmp>>16) & 1);
l_mcc_record->m_decorrelation_array = 00;
l_mcc_record->m_offset_array = 00;
l_indix = l_tmp & 0xff;
if (l_indix != 0) {
l_mct_data = l_tcp->m_mct_records;
for (j=0;j<l_tcp->m_nb_mct_records;++j) {
if (l_mct_data->m_index == l_indix) {
l_mcc_record->m_decorrelation_array = l_mct_data;
break;
}
++l_mct_data;
}
if (l_mcc_record->m_decorrelation_array == 00) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading MCC marker\n");
return OPJ_FALSE;
}
}
l_indix = (l_tmp >> 8) & 0xff;
if (l_indix != 0) {
l_mct_data = l_tcp->m_mct_records;
for (j=0;j<l_tcp->m_nb_mct_records;++j) {
if (l_mct_data->m_index == l_indix) {
l_mcc_record->m_offset_array = l_mct_data;
break;
}
++l_mct_data;
}
if (l_mcc_record->m_offset_array == 00) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading MCC marker\n");
return OPJ_FALSE;
}
}
}
if (p_header_size != 0) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading MCC marker\n");
return OPJ_FALSE;
}
++l_tcp->m_nb_mcc_records;
return OPJ_TRUE;
}
static OPJ_BOOL opj_j2k_write_mco( opj_j2k_t *p_j2k,
struct opj_stream_private *p_stream,
struct opj_event_mgr * p_manager
)
{
OPJ_BYTE * l_current_data = 00;
OPJ_UINT32 l_mco_size;
opj_tcp_t * l_tcp = 00;
opj_simple_mcc_decorrelation_data_t * l_mcc_record;
OPJ_UINT32 i;
/* preconditions */
assert(p_j2k != 00);
assert(p_manager != 00);
assert(p_stream != 00);
l_tcp =&(p_j2k->m_cp.tcps[p_j2k->m_current_tile_number]);
l_mco_size = 5 + l_tcp->m_nb_mcc_records;
if (l_mco_size > p_j2k->m_specific_param.m_encoder.m_header_tile_data_size) {
OPJ_BYTE *new_header_tile_data = (OPJ_BYTE *) opj_realloc(p_j2k->m_specific_param.m_encoder.m_header_tile_data, l_mco_size);
if (! new_header_tile_data) {
opj_free(p_j2k->m_specific_param.m_encoder.m_header_tile_data);
p_j2k->m_specific_param.m_encoder.m_header_tile_data = NULL;
p_j2k->m_specific_param.m_encoder.m_header_tile_data_size = 0;
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to write MCO marker\n");
return OPJ_FALSE;
}
p_j2k->m_specific_param.m_encoder.m_header_tile_data = new_header_tile_data;
p_j2k->m_specific_param.m_encoder.m_header_tile_data_size = l_mco_size;
}
l_current_data = p_j2k->m_specific_param.m_encoder.m_header_tile_data;
opj_write_bytes(l_current_data,J2K_MS_MCO,2); /* MCO */
l_current_data += 2;
opj_write_bytes(l_current_data,l_mco_size-2,2); /* Lmco */
l_current_data += 2;
opj_write_bytes(l_current_data,l_tcp->m_nb_mcc_records,1); /* Nmco : only one transform stage*/
++l_current_data;
l_mcc_record = l_tcp->m_mcc_records;
for (i=0;i<l_tcp->m_nb_mcc_records;++i) {
opj_write_bytes(l_current_data,l_mcc_record->m_index,1);/* Imco -> use the mcc indicated by 1*/
++l_current_data;
++l_mcc_record;
}
if (opj_stream_write_data(p_stream,p_j2k->m_specific_param.m_encoder.m_header_tile_data,l_mco_size,p_manager) != l_mco_size) {
return OPJ_FALSE;
}
return OPJ_TRUE;
}
/**
* Reads a MCO marker (Multiple Component Transform Ordering)
*
* @param p_header_data the data contained in the MCO box.
* @param p_j2k the jpeg2000 codec.
* @param p_header_size the size of the data contained in the MCO marker.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_mco ( opj_j2k_t *p_j2k,
OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
opj_event_mgr_t * p_manager
)
{
OPJ_UINT32 l_tmp, i;
OPJ_UINT32 l_nb_stages;
opj_tcp_t * l_tcp;
opj_tccp_t * l_tccp;
opj_image_t * l_image;
/* preconditions */
assert(p_header_data != 00);
assert(p_j2k != 00);
assert(p_manager != 00);
l_image = p_j2k->m_private_image;
l_tcp = p_j2k->m_specific_param.m_decoder.m_state == J2K_STATE_TPH ?
&p_j2k->m_cp.tcps[p_j2k->m_current_tile_number] :
p_j2k->m_specific_param.m_decoder.m_default_tcp;
if (p_header_size < 1) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading MCO marker\n");
return OPJ_FALSE;
}
opj_read_bytes(p_header_data,&l_nb_stages,1); /* Nmco : only one tranform stage*/
++p_header_data;
if (l_nb_stages > 1) {
opj_event_msg(p_manager, EVT_WARNING, "Cannot take in charge multiple transformation stages.\n");
return OPJ_TRUE;
}
if (p_header_size != l_nb_stages + 1) {
opj_event_msg(p_manager, EVT_WARNING, "Error reading MCO marker\n");
return OPJ_FALSE;
}
l_tccp = l_tcp->tccps;
for (i=0;i<l_image->numcomps;++i) {
l_tccp->m_dc_level_shift = 0;
++l_tccp;
}
if (l_tcp->m_mct_decoding_matrix) {
opj_free(l_tcp->m_mct_decoding_matrix);
l_tcp->m_mct_decoding_matrix = 00;
}
for (i=0;i<l_nb_stages;++i) {
opj_read_bytes(p_header_data,&l_tmp,1);
++p_header_data;
if (! opj_j2k_add_mct(l_tcp,p_j2k->m_private_image,l_tmp)) {
return OPJ_FALSE;
}
}
return OPJ_TRUE;
}
static OPJ_BOOL opj_j2k_add_mct(opj_tcp_t * p_tcp, opj_image_t * p_image, OPJ_UINT32 p_index)
{
OPJ_UINT32 i;
opj_simple_mcc_decorrelation_data_t * l_mcc_record;
opj_mct_data_t * l_deco_array, * l_offset_array;
OPJ_UINT32 l_data_size,l_mct_size, l_offset_size;
OPJ_UINT32 l_nb_elem;
OPJ_UINT32 * l_offset_data, * l_current_offset_data;
opj_tccp_t * l_tccp;
/* preconditions */
assert(p_tcp != 00);
l_mcc_record = p_tcp->m_mcc_records;
for (i=0;i<p_tcp->m_nb_mcc_records;++i) {
if (l_mcc_record->m_index == p_index) {
break;
}
++l_mcc_record;
}
if (i==p_tcp->m_nb_mcc_records) {
/** element discarded **/
return OPJ_TRUE;
}
if (l_mcc_record->m_nb_comps != p_image->numcomps) {
/** do not support number of comps != image */
return OPJ_TRUE;
}
l_deco_array = l_mcc_record->m_decorrelation_array;
if (l_deco_array) {
l_data_size = MCT_ELEMENT_SIZE[l_deco_array->m_element_type] * p_image->numcomps * p_image->numcomps;
if (l_deco_array->m_data_size != l_data_size) {
return OPJ_FALSE;
}
l_nb_elem = p_image->numcomps * p_image->numcomps;
l_mct_size = l_nb_elem * (OPJ_UINT32)sizeof(OPJ_FLOAT32);
p_tcp->m_mct_decoding_matrix = (OPJ_FLOAT32*)opj_malloc(l_mct_size);
if (! p_tcp->m_mct_decoding_matrix ) {
return OPJ_FALSE;
}
j2k_mct_read_functions_to_float[l_deco_array->m_element_type](l_deco_array->m_data,p_tcp->m_mct_decoding_matrix,l_nb_elem);
}
l_offset_array = l_mcc_record->m_offset_array;
if (l_offset_array) {
l_data_size = MCT_ELEMENT_SIZE[l_offset_array->m_element_type] * p_image->numcomps;
if (l_offset_array->m_data_size != l_data_size) {
return OPJ_FALSE;
}
l_nb_elem = p_image->numcomps;
l_offset_size = l_nb_elem * (OPJ_UINT32)sizeof(OPJ_UINT32);
l_offset_data = (OPJ_UINT32*)opj_malloc(l_offset_size);
if (! l_offset_data ) {
return OPJ_FALSE;
}
j2k_mct_read_functions_to_int32[l_offset_array->m_element_type](l_offset_array->m_data,l_offset_data,l_nb_elem);
l_tccp = p_tcp->tccps;
l_current_offset_data = l_offset_data;
for (i=0;i<p_image->numcomps;++i) {
l_tccp->m_dc_level_shift = (OPJ_INT32)*(l_current_offset_data++);
++l_tccp;
}
opj_free(l_offset_data);
}
return OPJ_TRUE;
}
static OPJ_BOOL opj_j2k_write_cbd( opj_j2k_t *p_j2k,
struct opj_stream_private *p_stream,
struct opj_event_mgr * p_manager )
{
OPJ_UINT32 i;
OPJ_UINT32 l_cbd_size;
OPJ_BYTE * l_current_data = 00;
opj_image_t *l_image = 00;
opj_image_comp_t * l_comp = 00;
/* preconditions */
assert(p_j2k != 00);
assert(p_manager != 00);
assert(p_stream != 00);
l_image = p_j2k->m_private_image;
l_cbd_size = 6 + p_j2k->m_private_image->numcomps;
if (l_cbd_size > p_j2k->m_specific_param.m_encoder.m_header_tile_data_size) {
OPJ_BYTE *new_header_tile_data = (OPJ_BYTE *) opj_realloc(p_j2k->m_specific_param.m_encoder.m_header_tile_data, l_cbd_size);
if (! new_header_tile_data) {
opj_free(p_j2k->m_specific_param.m_encoder.m_header_tile_data);
p_j2k->m_specific_param.m_encoder.m_header_tile_data = NULL;
p_j2k->m_specific_param.m_encoder.m_header_tile_data_size = 0;
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to write CBD marker\n");
return OPJ_FALSE;
}
p_j2k->m_specific_param.m_encoder.m_header_tile_data = new_header_tile_data;
p_j2k->m_specific_param.m_encoder.m_header_tile_data_size = l_cbd_size;
}
l_current_data = p_j2k->m_specific_param.m_encoder.m_header_tile_data;
opj_write_bytes(l_current_data,J2K_MS_CBD,2); /* CBD */
l_current_data += 2;
opj_write_bytes(l_current_data,l_cbd_size-2,2); /* L_CBD */
l_current_data += 2;
opj_write_bytes(l_current_data,l_image->numcomps, 2); /* Ncbd */
l_current_data+=2;
l_comp = l_image->comps;
for (i=0;i<l_image->numcomps;++i) {
opj_write_bytes(l_current_data, (l_comp->sgnd << 7) | (l_comp->prec - 1), 1); /* Component bit depth */
++l_current_data;
++l_comp;
}
if (opj_stream_write_data(p_stream,p_j2k->m_specific_param.m_encoder.m_header_tile_data,l_cbd_size,p_manager) != l_cbd_size) {
return OPJ_FALSE;
}
return OPJ_TRUE;
}
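/* Minimal sketch (editor's addition; hypothetical helper, not part of the
OpenJPEG API): the per-component byte written above (and read back in
opj_j2k_read_cbd below) packs the sign flag in bit 7 and (precision - 1)
in bits 0-6, so 12-bit unsigned gives 0x0B and 8-bit signed gives 0x87. */
static OPJ_UINT32 sketch_pack_cbd_byte(OPJ_UINT32 sgnd, OPJ_UINT32 prec)
{
	return ((sgnd & 1) << 7) | ((prec - 1) & 0x7f);
}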
/**
* Reads a CBD marker (Component bit depth definition)
* @param p_header_data the data contained in the CBD box.
* @param p_j2k the jpeg2000 codec.
* @param p_header_size the size of the data contained in the CBD marker.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_read_cbd ( opj_j2k_t *p_j2k,
OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
opj_event_mgr_t * p_manager
)
{
OPJ_UINT32 l_nb_comp,l_num_comp;
OPJ_UINT32 l_comp_def;
OPJ_UINT32 i;
opj_image_comp_t * l_comp = 00;
/* preconditions */
assert(p_header_data != 00);
assert(p_j2k != 00);
assert(p_manager != 00);
l_num_comp = p_j2k->m_private_image->numcomps;
if (p_header_size != (p_j2k->m_private_image->numcomps + 2)) {
opj_event_msg(p_manager, EVT_ERROR, "Crror reading CBD marker\n");
return OPJ_FALSE;
}
opj_read_bytes(p_header_data,&l_nb_comp,2); /* Ncbd */
p_header_data+=2;
if (l_nb_comp != l_num_comp) {
opj_event_msg(p_manager, EVT_ERROR, "Crror reading CBD marker\n");
return OPJ_FALSE;
}
l_comp = p_j2k->m_private_image->comps;
for (i=0;i<l_num_comp;++i) {
opj_read_bytes(p_header_data,&l_comp_def,1); /* Component bit depth */
++p_header_data;
l_comp->sgnd = (l_comp_def>>7) & 1;
l_comp->prec = (l_comp_def&0x7f) + 1;
++l_comp;
}
return OPJ_TRUE;
}
/* ----------------------------------------------------------------------- */
/* J2K / JPT decoder interface */
/* ----------------------------------------------------------------------- */
void opj_j2k_setup_decoder(opj_j2k_t *j2k, opj_dparameters_t *parameters)
{
if(j2k && parameters) {
j2k->m_cp.m_specific_param.m_dec.m_layer = parameters->cp_layer;
j2k->m_cp.m_specific_param.m_dec.m_reduce = parameters->cp_reduce;
#ifdef USE_JPWL
j2k->m_cp.correct = parameters->jpwl_correct;
j2k->m_cp.exp_comps = parameters->jpwl_exp_comps;
j2k->m_cp.max_tiles = parameters->jpwl_max_tiles;
#endif /* USE_JPWL */
}
}
/* ----------------------------------------------------------------------- */
/* J2K encoder interface */
/* ----------------------------------------------------------------------- */
opj_j2k_t* opj_j2k_create_compress(void)
{
opj_j2k_t *l_j2k = (opj_j2k_t*) opj_calloc(1,sizeof(opj_j2k_t));
if (!l_j2k) {
return NULL;
}
l_j2k->m_is_decoder = 0;
l_j2k->m_cp.m_is_decoder = 0;
l_j2k->m_specific_param.m_encoder.m_header_tile_data = (OPJ_BYTE *) opj_malloc(OPJ_J2K_DEFAULT_HEADER_SIZE);
if (! l_j2k->m_specific_param.m_encoder.m_header_tile_data) {
opj_j2k_destroy(l_j2k);
return NULL;
}
l_j2k->m_specific_param.m_encoder.m_header_tile_data_size = OPJ_J2K_DEFAULT_HEADER_SIZE;
/* validation list creation*/
l_j2k->m_validation_list = opj_procedure_list_create();
if (! l_j2k->m_validation_list) {
opj_j2k_destroy(l_j2k);
return NULL;
}
/* execution list creation*/
l_j2k->m_procedure_list = opj_procedure_list_create();
if (! l_j2k->m_procedure_list) {
opj_j2k_destroy(l_j2k);
return NULL;
}
return l_j2k;
}
static int opj_j2k_initialise_4K_poc(opj_poc_t *POC, int numres){
POC[0].tile = 1;
POC[0].resno0 = 0;
POC[0].compno0 = 0;
POC[0].layno1 = 1;
POC[0].resno1 = (OPJ_UINT32)(numres-1);
POC[0].compno1 = 3;
POC[0].prg1 = OPJ_CPRL;
POC[1].tile = 1;
POC[1].resno0 = (OPJ_UINT32)(numres-1);
POC[1].compno0 = 0;
POC[1].layno1 = 1;
POC[1].resno1 = (OPJ_UINT32)numres;
POC[1].compno1 = 3;
POC[1].prg1 = OPJ_CPRL;
return 2;
}
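/* Editor's note (not in the original source): the two progression order
changes above split the 4K codestream so that resolutions 0..numres-2
(the 2K-compatible sub-image) are emitted first in CPRL order, followed by
the highest resolution level, which is what the 4K digital cinema profile
expects. */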
static void opj_j2k_set_cinema_parameters(opj_cparameters_t *parameters, opj_image_t *image, opj_event_mgr_t *p_manager)
{
/* Configure cinema parameters */
int i;
/* No tiling */
parameters->tile_size_on = OPJ_FALSE;
parameters->cp_tdx=1;
parameters->cp_tdy=1;
/* One tile part for each component */
parameters->tp_flag = 'C';
parameters->tp_on = 1;
/* Tile and Image shall be at (0,0) */
parameters->cp_tx0 = 0;
parameters->cp_ty0 = 0;
parameters->image_offset_x0 = 0;
parameters->image_offset_y0 = 0;
/* Codeblock size= 32*32 */
parameters->cblockw_init = 32;
parameters->cblockh_init = 32;
/* Codeblock style: no mode switch enabled */
parameters->mode = 0;
/* No ROI */
parameters->roi_compno = -1;
/* No subsampling */
parameters->subsampling_dx = 1;
parameters->subsampling_dy = 1;
/* 9-7 transform */
parameters->irreversible = 1;
/* Number of layers */
if (parameters->tcp_numlayers > 1){
opj_event_msg(p_manager, EVT_WARNING,
"JPEG 2000 Profile-3 and 4 (2k/4k dc profile) requires:\n"
"1 single quality layer"
"-> Number of layers forced to 1 (rather than %d)\n"
"-> Rate of the last layer (%3.1f) will be used",
parameters->tcp_numlayers, parameters->tcp_rates[parameters->tcp_numlayers-1]);
parameters->tcp_rates[0] = parameters->tcp_rates[parameters->tcp_numlayers-1];
parameters->tcp_numlayers = 1;
}
/* Resolution levels */
switch (parameters->rsiz){
case OPJ_PROFILE_CINEMA_2K:
if(parameters->numresolution > 6){
opj_event_msg(p_manager, EVT_WARNING,
"JPEG 2000 Profile-3 (2k dc profile) requires:\n"
"Number of decomposition levels <= 5\n"
"-> Number of decomposition levels forced to 5 (rather than %d)\n",
parameters->numresolution+1);
parameters->numresolution = 6;
}
break;
case OPJ_PROFILE_CINEMA_4K:
if(parameters->numresolution < 2){
opj_event_msg(p_manager, EVT_WARNING,
"JPEG 2000 Profile-4 (4k dc profile) requires:\n"
"Number of decomposition levels >= 1 && <= 6\n"
"-> Number of decomposition levels forced to 1 (rather than %d)\n",
parameters->numresolution+1);
parameters->numresolution = 1;
}else if(parameters->numresolution > 7){
opj_event_msg(p_manager, EVT_WARNING,
"JPEG 2000 Profile-4 (4k dc profile) requires:\n"
"Number of decomposition levels >= 1 && <= 6\n"
"-> Number of decomposition levels forced to 6 (rather than %d)\n",
parameters->numresolution+1);
parameters->numresolution = 7;
}
break;
default :
break;
}
/* Precincts */
parameters->csty |= 0x01;
parameters->res_spec = parameters->numresolution-1;
for (i = 0; i<parameters->res_spec; i++) {
parameters->prcw_init[i] = 256;
parameters->prch_init[i] = 256;
}
/* The progression order shall be CPRL */
parameters->prog_order = OPJ_CPRL;
/* Progression order changes for 4K, disallowed for 2K */
if (parameters->rsiz == OPJ_PROFILE_CINEMA_4K) {
parameters->numpocs = (OPJ_UINT32)opj_j2k_initialise_4K_poc(parameters->POC,parameters->numresolution);
} else {
parameters->numpocs = 0;
}
/* Limited bit-rate */
parameters->cp_disto_alloc = 1;
if (parameters->max_cs_size <= 0) {
/* No rate has been introduced, 24 fps is assumed */
parameters->max_cs_size = OPJ_CINEMA_24_CS;
opj_event_msg(p_manager, EVT_WARNING,
"JPEG 2000 Profile-3 and 4 (2k/4k dc profile) requires:\n"
"Maximum 1302083 compressed bytes @ 24fps\n"
"As no rate has been given, this limit will be used.\n");
} else if (parameters->max_cs_size > OPJ_CINEMA_24_CS) {
opj_event_msg(p_manager, EVT_WARNING,
"JPEG 2000 Profile-3 and 4 (2k/4k dc profile) requires:\n"
"Maximum 1302083 compressed bytes @ 24fps\n"
"-> Specified rate exceeds this limit. Rate will be forced to 1302083 bytes.\n");
parameters->max_cs_size = OPJ_CINEMA_24_CS;
}
if (parameters->max_comp_size <= 0) {
/* No rate has been introduced, 24 fps is assumed */
parameters->max_comp_size = OPJ_CINEMA_24_COMP;
opj_event_msg(p_manager, EVT_WARNING,
"JPEG 2000 Profile-3 and 4 (2k/4k dc profile) requires:\n"
"Maximum 1041666 compressed bytes @ 24fps\n"
"As no rate has been given, this limit will be used.\n");
} else if (parameters->max_comp_size > OPJ_CINEMA_24_COMP) {
opj_event_msg(p_manager, EVT_WARNING,
"JPEG 2000 Profile-3 and 4 (2k/4k dc profile) requires:\n"
"Maximum 1041666 compressed bytes @ 24fps\n"
"-> Specified rate exceeds this limit. Rate will be forced to 1041666 bytes.\n");
parameters->max_comp_size = OPJ_CINEMA_24_COMP;
}
parameters->tcp_rates[0] = (OPJ_FLOAT32) (image->numcomps * image->comps[0].w * image->comps[0].h * image->comps[0].prec)/
(OPJ_FLOAT32)(((OPJ_UINT32)parameters->max_cs_size) * 8 * image->comps[0].dx * image->comps[0].dy);
}
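/* Worked example (editor's note, not in the original source): for a
2048x1080 image with 3 components at 12 bits, dx = dy = 1 and the 24 fps
budget of 1302083 bytes quoted above, the expression evaluates to
(3 * 2048 * 1080 * 12) / (1302083 * 8) ~= 7.64, i.e. a compression ratio of
roughly 7.6:1 for the first (single) layer. */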
static OPJ_BOOL opj_j2k_is_cinema_compliant(opj_image_t *image, OPJ_UINT16 rsiz, opj_event_mgr_t *p_manager)
{
OPJ_UINT32 i;
/* Number of components */
if (image->numcomps != 3){
opj_event_msg(p_manager, EVT_WARNING,
"JPEG 2000 Profile-3 (2k dc profile) requires:\n"
"3 components"
"-> Number of components of input image (%d) is not compliant\n"
"-> Non-profile-3 codestream will be generated\n",
image->numcomps);
return OPJ_FALSE;
}
/* Bitdepth */
for (i = 0; i < image->numcomps; i++) {
if ((image->comps[i].bpp != 12) || (image->comps[i].sgnd)) {
char signed_str[] = "signed";
char unsigned_str[] = "unsigned";
char *tmp_str = image->comps[i].sgnd?signed_str:unsigned_str;
opj_event_msg(p_manager, EVT_WARNING,
"JPEG 2000 Profile-3 (2k dc profile) requires:\n"
"Precision of each component shall be 12 bits unsigned"
"-> At least component %d of input image (%d bits, %s) is not compliant\n"
"-> Non-profile-3 codestream will be generated\n",
i,image->comps[i].bpp, tmp_str);
return OPJ_FALSE;
}
}
/* Image size */
switch (rsiz){
case OPJ_PROFILE_CINEMA_2K:
if ((image->comps[0].w > 2048) || (image->comps[0].h > 1080)) {
opj_event_msg(p_manager, EVT_WARNING,
"JPEG 2000 Profile-3 (2k dc profile) requires:\n"
"width <= 2048 and height <= 1080\n"
"-> Input image size %d x %d is not compliant\n"
"-> Non-profile-3 codestream will be generated\n",
image->comps[0].w,image->comps[0].h);
return OPJ_FALSE;
}
break;
case OPJ_PROFILE_CINEMA_4K:
if ((image->comps[0].w > 4096) || (image->comps[0].h > 2160)) {
opj_event_msg(p_manager, EVT_WARNING,
"JPEG 2000 Profile-4 (4k dc profile) requires:\n"
"width <= 4096 and height <= 2160\n"
"-> Image size %d x %d is not compliant\n"
"-> Non-profile-4 codestream will be generated\n",
image->comps[0].w,image->comps[0].h);
return OPJ_FALSE;
}
break;
default :
break;
}
return OPJ_TRUE;
}
OPJ_BOOL opj_j2k_setup_encoder( opj_j2k_t *p_j2k,
opj_cparameters_t *parameters,
opj_image_t *image,
opj_event_mgr_t * p_manager)
{
OPJ_UINT32 i, j, tileno, numpocs_tile;
opj_cp_t *cp = 00;
if (!p_j2k || !parameters || !image) {
return OPJ_FALSE;
}
if ((parameters->numresolution <= 0) || (parameters->numresolution > OPJ_J2K_MAXRLVLS)) {
opj_event_msg(p_manager, EVT_ERROR, "Invalid number of resolutions : %d not in range [1,%d]\n", parameters->numresolution, OPJ_J2K_MAXRLVLS);
return OPJ_FALSE;
}
/* keep a link to cp so that we can destroy it later in j2k_destroy_compress */
cp = &(p_j2k->m_cp);
/* set default values for cp */
cp->tw = 1;
cp->th = 1;
/* FIXME ADE: to be removed once deprecated cp_cinema and cp_rsiz have been removed */
if (parameters->rsiz == OPJ_PROFILE_NONE) { /* consider deprecated fields only if RSIZ has not been set */
OPJ_BOOL deprecated_used = OPJ_FALSE;
switch (parameters->cp_cinema){
case OPJ_CINEMA2K_24:
parameters->rsiz = OPJ_PROFILE_CINEMA_2K;
parameters->max_cs_size = OPJ_CINEMA_24_CS;
parameters->max_comp_size = OPJ_CINEMA_24_COMP;
deprecated_used = OPJ_TRUE;
break;
case OPJ_CINEMA2K_48:
parameters->rsiz = OPJ_PROFILE_CINEMA_2K;
parameters->max_cs_size = OPJ_CINEMA_48_CS;
parameters->max_comp_size = OPJ_CINEMA_48_COMP;
deprecated_used = OPJ_TRUE;
break;
case OPJ_CINEMA4K_24:
parameters->rsiz = OPJ_PROFILE_CINEMA_4K;
parameters->max_cs_size = OPJ_CINEMA_24_CS;
parameters->max_comp_size = OPJ_CINEMA_24_COMP;
deprecated_used = OPJ_TRUE;
break;
case OPJ_OFF:
default:
break;
}
switch (parameters->cp_rsiz){
case OPJ_CINEMA2K:
parameters->rsiz = OPJ_PROFILE_CINEMA_2K;
deprecated_used = OPJ_TRUE;
break;
case OPJ_CINEMA4K:
parameters->rsiz = OPJ_PROFILE_CINEMA_4K;
deprecated_used = OPJ_TRUE;
break;
case OPJ_MCT:
parameters->rsiz = OPJ_PROFILE_PART2 | OPJ_EXTENSION_MCT;
deprecated_used = OPJ_TRUE;
break;
case OPJ_STD_RSIZ:
default:
break;
}
if (deprecated_used) {
opj_event_msg(p_manager, EVT_WARNING,
"Deprecated fields cp_cinema or cp_rsiz are used\n"
"Please consider using only the rsiz field\n"
"See openjpeg.h documentation for more details\n");
}
}
/* see if max_codestream_size does limit input rate */
if (parameters->max_cs_size <= 0) {
if ((parameters->tcp_numlayers > 0) && (parameters->tcp_rates[parameters->tcp_numlayers - 1] > 0)) {
OPJ_FLOAT32 temp_size;
temp_size =(OPJ_FLOAT32)(image->numcomps * image->comps[0].w * image->comps[0].h * image->comps[0].prec)/
(parameters->tcp_rates[parameters->tcp_numlayers-1] * 8 * (OPJ_FLOAT32)image->comps[0].dx * (OPJ_FLOAT32)image->comps[0].dy);
parameters->max_cs_size = (int) floor(temp_size);
} else {
parameters->max_cs_size = 0;
}
} else {
OPJ_FLOAT32 temp_rate;
OPJ_BOOL cap = OPJ_FALSE;
temp_rate = (OPJ_FLOAT32) (image->numcomps * image->comps[0].w * image->comps[0].h * image->comps[0].prec)/
(OPJ_FLOAT32)(((OPJ_UINT32)parameters->max_cs_size) * 8 * image->comps[0].dx * image->comps[0].dy);
for (i = 0; i < (OPJ_UINT32) parameters->tcp_numlayers; i++) {
if (parameters->tcp_rates[i] < temp_rate) {
parameters->tcp_rates[i] = temp_rate;
cap = OPJ_TRUE;
}
}
if (cap) {
opj_event_msg(p_manager, EVT_WARNING,
"The desired maximum codestream size has limited\n"
"at least one of the desired quality layers\n");
}
}
/* Manage profiles and applications and set RSIZ */
/* set cinema parameters if required */
if (OPJ_IS_CINEMA(parameters->rsiz)){
if ((parameters->rsiz == OPJ_PROFILE_CINEMA_S2K)
|| (parameters->rsiz == OPJ_PROFILE_CINEMA_S4K)){
opj_event_msg(p_manager, EVT_WARNING,
"JPEG 2000 Scalable Digital Cinema profiles not yet supported\n");
parameters->rsiz = OPJ_PROFILE_NONE;
} else {
opj_j2k_set_cinema_parameters(parameters,image,p_manager);
if (!opj_j2k_is_cinema_compliant(image,parameters->rsiz,p_manager)) {
parameters->rsiz = OPJ_PROFILE_NONE;
}
}
} else if (OPJ_IS_STORAGE(parameters->rsiz)) {
opj_event_msg(p_manager, EVT_WARNING,
"JPEG 2000 Long Term Storage profile not yet supported\n");
parameters->rsiz = OPJ_PROFILE_NONE;
} else if (OPJ_IS_BROADCAST(parameters->rsiz)) {
opj_event_msg(p_manager, EVT_WARNING,
"JPEG 2000 Broadcast profiles not yet supported\n");
parameters->rsiz = OPJ_PROFILE_NONE;
} else if (OPJ_IS_IMF(parameters->rsiz)) {
opj_event_msg(p_manager, EVT_WARNING,
"JPEG 2000 IMF profiles not yet supported\n");
parameters->rsiz = OPJ_PROFILE_NONE;
} else if (OPJ_IS_PART2(parameters->rsiz)) {
if (parameters->rsiz == ((OPJ_PROFILE_PART2) | (OPJ_EXTENSION_NONE))) {
opj_event_msg(p_manager, EVT_WARNING,
"JPEG 2000 Part-2 profile defined\n"
"but no Part-2 extension enabled.\n"
"Profile set to NONE.\n");
parameters->rsiz = OPJ_PROFILE_NONE;
} else if (parameters->rsiz != ((OPJ_PROFILE_PART2) | (OPJ_EXTENSION_MCT))) {
opj_event_msg(p_manager, EVT_WARNING,
"Unsupported Part-2 extension enabled\n"
"Profile set to NONE.\n");
parameters->rsiz = OPJ_PROFILE_NONE;
}
}
/*
copy user encoding parameters
*/
cp->m_specific_param.m_enc.m_max_comp_size = (OPJ_UINT32)parameters->max_comp_size;
cp->rsiz = parameters->rsiz;
cp->m_specific_param.m_enc.m_disto_alloc = (OPJ_UINT32)parameters->cp_disto_alloc & 1u;
cp->m_specific_param.m_enc.m_fixed_alloc = (OPJ_UINT32)parameters->cp_fixed_alloc & 1u;
cp->m_specific_param.m_enc.m_fixed_quality = (OPJ_UINT32)parameters->cp_fixed_quality & 1u;
/* mod fixed_quality */
if (parameters->cp_fixed_alloc && parameters->cp_matrice) {
size_t array_size = (size_t)parameters->tcp_numlayers * (size_t)parameters->numresolution * 3 * sizeof(OPJ_INT32);
cp->m_specific_param.m_enc.m_matrice = (OPJ_INT32 *) opj_malloc(array_size);
if (!cp->m_specific_param.m_enc.m_matrice) {
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to allocate copy of user encoding parameters matrix \n");
return OPJ_FALSE;
}
memcpy(cp->m_specific_param.m_enc.m_matrice, parameters->cp_matrice, array_size);
}
/* tiles */
cp->tdx = (OPJ_UINT32)parameters->cp_tdx;
cp->tdy = (OPJ_UINT32)parameters->cp_tdy;
/* tile offset */
cp->tx0 = (OPJ_UINT32)parameters->cp_tx0;
cp->ty0 = (OPJ_UINT32)parameters->cp_ty0;
/* comment string */
if(parameters->cp_comment) {
cp->comment = (char*)opj_malloc(strlen(parameters->cp_comment) + 1U);
if(!cp->comment) {
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to allocate copy of comment string\n");
return OPJ_FALSE;
}
strcpy(cp->comment, parameters->cp_comment);
} else {
/* Create default comment for codestream */
const char comment[] = "Created by OpenJPEG version ";
const size_t clen = strlen(comment);
const char *version = opj_version();
/* UniPG>> */
#ifdef USE_JPWL
cp->comment = (char*)opj_malloc(clen+strlen(version)+11);
if(!cp->comment) {
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to allocate comment string\n");
return OPJ_FALSE;
}
sprintf(cp->comment,"%s%s with JPWL", comment, version);
#else
cp->comment = (char*)opj_malloc(clen+strlen(version)+1);
if(!cp->comment) {
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to allocate comment string\n");
return OPJ_FALSE;
}
sprintf(cp->comment,"%s%s", comment, version);
#endif
/* <<UniPG */
}
/*
calculate other encoding parameters
*/
if (parameters->tile_size_on) {
cp->tw = (OPJ_UINT32)opj_int_ceildiv((OPJ_INT32)(image->x1 - cp->tx0), (OPJ_INT32)cp->tdx);
cp->th = (OPJ_UINT32)opj_int_ceildiv((OPJ_INT32)(image->y1 - cp->ty0), (OPJ_INT32)cp->tdy);
} else {
cp->tdx = image->x1 - cp->tx0;
cp->tdy = image->y1 - cp->ty0;
}
if (parameters->tp_on) {
cp->m_specific_param.m_enc.m_tp_flag = (OPJ_BYTE)parameters->tp_flag;
cp->m_specific_param.m_enc.m_tp_on = 1;
}
#ifdef USE_JPWL
/*
calculate JPWL encoding parameters
*/
if (parameters->jpwl_epc_on) {
OPJ_INT32 i;
/* set JPWL on */
cp->epc_on = OPJ_TRUE;
cp->info_on = OPJ_FALSE; /* no informative technique */
/* set EPB on */
if ((parameters->jpwl_hprot_MH > 0) || (parameters->jpwl_hprot_TPH[0] > 0)) {
cp->epb_on = OPJ_TRUE;
cp->hprot_MH = parameters->jpwl_hprot_MH;
for (i = 0; i < JPWL_MAX_NO_TILESPECS; i++) {
cp->hprot_TPH_tileno[i] = parameters->jpwl_hprot_TPH_tileno[i];
cp->hprot_TPH[i] = parameters->jpwl_hprot_TPH[i];
}
/* if tile specs are not specified, copy MH specs */
if (cp->hprot_TPH[0] == -1) {
cp->hprot_TPH_tileno[0] = 0;
cp->hprot_TPH[0] = parameters->jpwl_hprot_MH;
}
for (i = 0; i < JPWL_MAX_NO_PACKSPECS; i++) {
cp->pprot_tileno[i] = parameters->jpwl_pprot_tileno[i];
cp->pprot_packno[i] = parameters->jpwl_pprot_packno[i];
cp->pprot[i] = parameters->jpwl_pprot[i];
}
}
/* set ESD writing */
if ((parameters->jpwl_sens_size == 1) || (parameters->jpwl_sens_size == 2)) {
cp->esd_on = OPJ_TRUE;
cp->sens_size = parameters->jpwl_sens_size;
cp->sens_addr = parameters->jpwl_sens_addr;
cp->sens_range = parameters->jpwl_sens_range;
cp->sens_MH = parameters->jpwl_sens_MH;
for (i = 0; i < JPWL_MAX_NO_TILESPECS; i++) {
cp->sens_TPH_tileno[i] = parameters->jpwl_sens_TPH_tileno[i];
cp->sens_TPH[i] = parameters->jpwl_sens_TPH[i];
}
}
/* always set RED writing to false: we are at the encoder */
cp->red_on = OPJ_FALSE;
} else {
cp->epc_on = OPJ_FALSE;
}
#endif /* USE_JPWL */
/* initialize the multiple tiles */
/* ---------------------------- */
cp->tcps = (opj_tcp_t*) opj_calloc(cp->tw * cp->th, sizeof(opj_tcp_t));
if (!cp->tcps) {
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to allocate tile coding parameters\n");
return OPJ_FALSE;
}
if (parameters->numpocs) {
/* initialisation of POC */
opj_j2k_check_poc_val(parameters->POC,parameters->numpocs, (OPJ_UINT32)parameters->numresolution, image->numcomps, (OPJ_UINT32)parameters->tcp_numlayers, p_manager);
/* TODO MSD use the return value*/
}
for (tileno = 0; tileno < cp->tw * cp->th; tileno++) {
opj_tcp_t *tcp = &cp->tcps[tileno];
tcp->numlayers = (OPJ_UINT32)parameters->tcp_numlayers;
for (j = 0; j < tcp->numlayers; j++) {
if(OPJ_IS_CINEMA(cp->rsiz)){
if (cp->m_specific_param.m_enc.m_fixed_quality) {
tcp->distoratio[j] = parameters->tcp_distoratio[j];
}
tcp->rates[j] = parameters->tcp_rates[j];
} else {
if (cp->m_specific_param.m_enc.m_fixed_quality) { /* add fixed_quality */
tcp->distoratio[j] = parameters->tcp_distoratio[j];
} else {
tcp->rates[j] = parameters->tcp_rates[j];
}
}
}
tcp->csty = (OPJ_UINT32)parameters->csty;
tcp->prg = parameters->prog_order;
tcp->mct = (OPJ_UINT32)parameters->tcp_mct;
numpocs_tile = 0;
tcp->POC = 0;
if (parameters->numpocs) {
/* initialisation of POC */
tcp->POC = 1;
for (i = 0; i < parameters->numpocs; i++) {
if (tileno + 1 == parameters->POC[i].tile ) {
opj_poc_t *tcp_poc = &tcp->pocs[numpocs_tile];
tcp_poc->resno0 = parameters->POC[numpocs_tile].resno0;
tcp_poc->compno0 = parameters->POC[numpocs_tile].compno0;
tcp_poc->layno1 = parameters->POC[numpocs_tile].layno1;
tcp_poc->resno1 = parameters->POC[numpocs_tile].resno1;
tcp_poc->compno1 = parameters->POC[numpocs_tile].compno1;
tcp_poc->prg1 = parameters->POC[numpocs_tile].prg1;
tcp_poc->tile = parameters->POC[numpocs_tile].tile;
numpocs_tile++;
}
}
tcp->numpocs = numpocs_tile - 1;
} else {
tcp->numpocs = 0;
}
tcp->tccps = (opj_tccp_t*) opj_calloc(image->numcomps, sizeof(opj_tccp_t));
if (!tcp->tccps) {
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to allocate tile component coding parameters\n");
return OPJ_FALSE;
}
if (parameters->mct_data) {
OPJ_UINT32 lMctSize = image->numcomps * image->numcomps * (OPJ_UINT32)sizeof(OPJ_FLOAT32);
OPJ_FLOAT32 * lTmpBuf = (OPJ_FLOAT32*)opj_malloc(lMctSize);
OPJ_INT32 * l_dc_shift = (OPJ_INT32 *) ((OPJ_BYTE *) parameters->mct_data + lMctSize);
if (!lTmpBuf) {
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to allocate temp buffer\n");
return OPJ_FALSE;
}
tcp->mct = 2;
tcp->m_mct_coding_matrix = (OPJ_FLOAT32*)opj_malloc(lMctSize);
if (! tcp->m_mct_coding_matrix) {
opj_free(lTmpBuf);
lTmpBuf = NULL;
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to allocate encoder MCT coding matrix \n");
return OPJ_FALSE;
}
memcpy(tcp->m_mct_coding_matrix,parameters->mct_data,lMctSize);
memcpy(lTmpBuf,parameters->mct_data,lMctSize);
tcp->m_mct_decoding_matrix = (OPJ_FLOAT32*)opj_malloc(lMctSize);
if (! tcp->m_mct_decoding_matrix) {
opj_free(lTmpBuf);
lTmpBuf = NULL;
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to allocate encoder MCT decoding matrix \n");
return OPJ_FALSE;
}
if(opj_matrix_inversion_f(lTmpBuf,(tcp->m_mct_decoding_matrix),image->numcomps) == OPJ_FALSE) {
opj_free(lTmpBuf);
lTmpBuf = NULL;
opj_event_msg(p_manager, EVT_ERROR, "Failed to inverse encoder MCT decoding matrix \n");
return OPJ_FALSE;
}
tcp->mct_norms = (OPJ_FLOAT64*)
opj_malloc(image->numcomps * sizeof(OPJ_FLOAT64));
if (! tcp->mct_norms) {
opj_free(lTmpBuf);
lTmpBuf = NULL;
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to allocate encoder MCT norms \n");
return OPJ_FALSE;
}
opj_calculate_norms(tcp->mct_norms,image->numcomps,tcp->m_mct_decoding_matrix);
opj_free(lTmpBuf);
for (i = 0; i < image->numcomps; i++) {
opj_tccp_t *tccp = &tcp->tccps[i];
tccp->m_dc_level_shift = l_dc_shift[i];
}
if (opj_j2k_setup_mct_encoding(tcp,image) == OPJ_FALSE) {
/* free will be handled by opj_j2k_destroy */
opj_event_msg(p_manager, EVT_ERROR, "Failed to setup j2k mct encoding\n");
return OPJ_FALSE;
}
}
else {
if (tcp->mct == 1 && image->numcomps >= 3) { /* RGB->YCC MCT is enabled */
if ((image->comps[0].dx != image->comps[1].dx) ||
(image->comps[0].dx != image->comps[2].dx) ||
(image->comps[0].dy != image->comps[1].dy) ||
(image->comps[0].dy != image->comps[2].dy)) {
opj_event_msg(p_manager, EVT_WARNING, "Cannot perform MCT on components with different sizes. Disabling MCT.\n");
tcp->mct = 0;
}
}
for (i = 0; i < image->numcomps; i++) {
opj_tccp_t *tccp = &tcp->tccps[i];
opj_image_comp_t * l_comp = &(image->comps[i]);
if (! l_comp->sgnd) {
tccp->m_dc_level_shift = 1 << (l_comp->prec - 1);
}
}
}
for (i = 0; i < image->numcomps; i++) {
opj_tccp_t *tccp = &tcp->tccps[i];
tccp->csty = parameters->csty & 0x01; /* 0 => one precinct || 1 => custom precinct */
tccp->numresolutions = (OPJ_UINT32)parameters->numresolution;
tccp->cblkw = (OPJ_UINT32)opj_int_floorlog2(parameters->cblockw_init);
tccp->cblkh = (OPJ_UINT32)opj_int_floorlog2(parameters->cblockh_init);
tccp->cblksty = (OPJ_UINT32)parameters->mode;
tccp->qmfbid = parameters->irreversible ? 0 : 1;
tccp->qntsty = parameters->irreversible ? J2K_CCP_QNTSTY_SEQNT : J2K_CCP_QNTSTY_NOQNT;
tccp->numgbits = 2;
if ((OPJ_INT32)i == parameters->roi_compno) {
tccp->roishift = parameters->roi_shift;
} else {
tccp->roishift = 0;
}
if (parameters->csty & J2K_CCP_CSTY_PRT) {
OPJ_INT32 p = 0, it_res;
assert( tccp->numresolutions > 0 );
for (it_res = (OPJ_INT32)tccp->numresolutions - 1; it_res >= 0; it_res--) {
if (p < parameters->res_spec) {
if (parameters->prcw_init[p] < 1) {
tccp->prcw[it_res] = 1;
} else {
tccp->prcw[it_res] = (OPJ_UINT32)opj_int_floorlog2(parameters->prcw_init[p]);
}
if (parameters->prch_init[p] < 1) {
tccp->prch[it_res] = 1;
} else {
tccp->prch[it_res] = (OPJ_UINT32)opj_int_floorlog2(parameters->prch_init[p]);
}
} else {
OPJ_INT32 res_spec = parameters->res_spec;
OPJ_INT32 size_prcw = 0;
OPJ_INT32 size_prch = 0;
assert(res_spec>0); /* issue 189 */
size_prcw = parameters->prcw_init[res_spec - 1] >> (p - (res_spec - 1));
size_prch = parameters->prch_init[res_spec - 1] >> (p - (res_spec - 1));
if (size_prcw < 1) {
tccp->prcw[it_res] = 1;
} else {
tccp->prcw[it_res] = (OPJ_UINT32)opj_int_floorlog2(size_prcw);
}
if (size_prch < 1) {
tccp->prch[it_res] = 1;
} else {
tccp->prch[it_res] = (OPJ_UINT32)opj_int_floorlog2(size_prch);
}
}
p++;
/*printf("\nsize precinct for level %d : %d,%d\n", it_res,tccp->prcw[it_res], tccp->prch[it_res]); */
} /*end for*/
} else {
for (j = 0; j < tccp->numresolutions; j++) {
tccp->prcw[j] = 15;
tccp->prch[j] = 15;
}
}
opj_dwt_calc_explicit_stepsizes(tccp, image->comps[i].prec);
}
}
if (parameters->mct_data) {
opj_free(parameters->mct_data);
parameters->mct_data = 00;
}
return OPJ_TRUE;
}
static OPJ_BOOL opj_j2k_add_mhmarker(opj_codestream_index_t *cstr_index, OPJ_UINT32 type, OPJ_OFF_T pos, OPJ_UINT32 len)
{
assert(cstr_index != 00);
/* expand the list? */
if ((cstr_index->marknum + 1) > cstr_index->maxmarknum) {
opj_marker_info_t *new_marker;
cstr_index->maxmarknum += 100;
new_marker = (opj_marker_info_t *) opj_realloc(cstr_index->marker, cstr_index->maxmarknum *sizeof(opj_marker_info_t));
if (! new_marker) {
opj_free(cstr_index->marker);
cstr_index->marker = NULL;
cstr_index->maxmarknum = 0;
cstr_index->marknum = 0;
/* opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to add mh marker\n"); */
return OPJ_FALSE;
}
cstr_index->marker = new_marker;
}
/* add the marker */
cstr_index->marker[cstr_index->marknum].type = (OPJ_UINT16)type;
cstr_index->marker[cstr_index->marknum].pos = (OPJ_INT32)pos;
cstr_index->marker[cstr_index->marknum].len = (OPJ_INT32)len;
cstr_index->marknum++;
return OPJ_TRUE;
}
static OPJ_BOOL opj_j2k_add_tlmarker(OPJ_UINT32 tileno, opj_codestream_index_t *cstr_index, OPJ_UINT32 type, OPJ_OFF_T pos, OPJ_UINT32 len)
{
assert(cstr_index != 00);
assert(cstr_index->tile_index != 00);
/* expand the list? */
if ((cstr_index->tile_index[tileno].marknum + 1) > cstr_index->tile_index[tileno].maxmarknum) {
opj_marker_info_t *new_marker;
cstr_index->tile_index[tileno].maxmarknum += 100;
new_marker = (opj_marker_info_t *) opj_realloc(
cstr_index->tile_index[tileno].marker,
cstr_index->tile_index[tileno].maxmarknum *sizeof(opj_marker_info_t));
if (! new_marker) {
opj_free(cstr_index->tile_index[tileno].marker);
cstr_index->tile_index[tileno].marker = NULL;
cstr_index->tile_index[tileno].maxmarknum = 0;
cstr_index->tile_index[tileno].marknum = 0;
/* opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to add tl marker\n"); */
return OPJ_FALSE;
}
cstr_index->tile_index[tileno].marker = new_marker;
}
/* add the marker */
cstr_index->tile_index[tileno].marker[cstr_index->tile_index[tileno].marknum].type = (OPJ_UINT16)type;
cstr_index->tile_index[tileno].marker[cstr_index->tile_index[tileno].marknum].pos = (OPJ_INT32)pos;
cstr_index->tile_index[tileno].marker[cstr_index->tile_index[tileno].marknum].len = (OPJ_INT32)len;
cstr_index->tile_index[tileno].marknum++;
if (type == J2K_MS_SOT) {
OPJ_UINT32 l_current_tile_part = cstr_index->tile_index[tileno].current_tpsno;
if (cstr_index->tile_index[tileno].tp_index)
cstr_index->tile_index[tileno].tp_index[l_current_tile_part].start_pos = pos;
}
return OPJ_TRUE;
}
/*
* -----------------------------------------------------------------------
* -----------------------------------------------------------------------
* -----------------------------------------------------------------------
*/
OPJ_BOOL opj_j2k_end_decompress(opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager
)
{
(void)p_j2k;
(void)p_stream;
(void)p_manager;
return OPJ_TRUE;
}
OPJ_BOOL opj_j2k_read_header( opj_stream_private_t *p_stream,
opj_j2k_t* p_j2k,
opj_image_t** p_image,
opj_event_mgr_t* p_manager )
{
/* preconditions */
assert(p_j2k != 00);
assert(p_stream != 00);
assert(p_manager != 00);
/* create an empty image header */
p_j2k->m_private_image = opj_image_create0();
if (! p_j2k->m_private_image) {
return OPJ_FALSE;
}
/* customization of the validation */
if (! opj_j2k_setup_decoding_validation(p_j2k, p_manager)) {
opj_image_destroy(p_j2k->m_private_image);
p_j2k->m_private_image = NULL;
return OPJ_FALSE;
}
/* validation of the parameters codec */
if (! opj_j2k_exec(p_j2k, p_j2k->m_validation_list, p_stream,p_manager)) {
opj_image_destroy(p_j2k->m_private_image);
p_j2k->m_private_image = NULL;
return OPJ_FALSE;
}
/* customization of the header reading */
if (! opj_j2k_setup_header_reading(p_j2k, p_manager)) {
opj_image_destroy(p_j2k->m_private_image);
p_j2k->m_private_image = NULL;
return OPJ_FALSE;
}
/* read header */
if (! opj_j2k_exec (p_j2k,p_j2k->m_procedure_list,p_stream,p_manager)) {
opj_image_destroy(p_j2k->m_private_image);
p_j2k->m_private_image = NULL;
return OPJ_FALSE;
}
*p_image = opj_image_create0();
if (! (*p_image)) {
return OPJ_FALSE;
}
/* Copy codestream image information to the output image */
opj_copy_image_header(p_j2k->m_private_image, *p_image);
/* Allocate and initialize some elements of the codestream index */
if (!opj_j2k_allocate_tile_element_cstr_index(p_j2k)){
return OPJ_FALSE;
}
return OPJ_TRUE;
}
static OPJ_BOOL opj_j2k_setup_header_reading (opj_j2k_t *p_j2k, opj_event_mgr_t * p_manager)
{
/* preconditions*/
assert(p_j2k != 00);
assert(p_manager != 00);
if (! opj_procedure_list_add_procedure(p_j2k->m_procedure_list,(opj_procedure)opj_j2k_read_header_procedure, p_manager)) {
return OPJ_FALSE;
}
/* DEVELOPER CORNER, add your custom procedures */
if (! opj_procedure_list_add_procedure(p_j2k->m_procedure_list,(opj_procedure)opj_j2k_copy_default_tcp_and_create_tcd, p_manager)) {
return OPJ_FALSE;
}
return OPJ_TRUE;
}
static OPJ_BOOL opj_j2k_setup_decoding_validation (opj_j2k_t *p_j2k, opj_event_mgr_t * p_manager)
{
/* preconditions*/
assert(p_j2k != 00);
assert(p_manager != 00);
if (! opj_procedure_list_add_procedure(p_j2k->m_validation_list,(opj_procedure)opj_j2k_build_decoder, p_manager)) {
return OPJ_FALSE;
}
if (! opj_procedure_list_add_procedure(p_j2k->m_validation_list,(opj_procedure)opj_j2k_decoding_validation, p_manager)) {
return OPJ_FALSE;
}
/* DEVELOPER CORNER, add your custom validation procedure */
return OPJ_TRUE;
}
static OPJ_BOOL opj_j2k_mct_validation ( opj_j2k_t * p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager )
{
OPJ_BOOL l_is_valid = OPJ_TRUE;
OPJ_UINT32 i,j;
/* preconditions */
assert(p_j2k != 00);
assert(p_stream != 00);
assert(p_manager != 00);
if ((p_j2k->m_cp.rsiz & 0x8200) == 0x8200) {
OPJ_UINT32 l_nb_tiles = p_j2k->m_cp.th * p_j2k->m_cp.tw;
opj_tcp_t * l_tcp = p_j2k->m_cp.tcps;
for (i=0;i<l_nb_tiles;++i) {
if (l_tcp->mct == 2) {
opj_tccp_t * l_tccp = l_tcp->tccps;
l_is_valid &= (l_tcp->m_mct_coding_matrix != 00);
for (j=0;j<p_j2k->m_private_image->numcomps;++j) {
l_is_valid &= ! (l_tccp->qmfbid & 1);
++l_tccp;
}
}
++l_tcp;
}
}
return l_is_valid;
}
OPJ_BOOL opj_j2k_setup_mct_encoding(opj_tcp_t * p_tcp, opj_image_t * p_image)
{
OPJ_UINT32 i;
OPJ_UINT32 l_indix = 1;
opj_mct_data_t * l_mct_deco_data = 00,* l_mct_offset_data = 00;
opj_simple_mcc_decorrelation_data_t * l_mcc_data;
OPJ_UINT32 l_mct_size,l_nb_elem;
OPJ_FLOAT32 * l_data, * l_current_data;
opj_tccp_t * l_tccp;
/* preconditions */
assert(p_tcp != 00);
if (p_tcp->mct != 2) {
return OPJ_TRUE;
}
if (p_tcp->m_mct_decoding_matrix) {
if (p_tcp->m_nb_mct_records == p_tcp->m_nb_max_mct_records) {
opj_mct_data_t *new_mct_records;
p_tcp->m_nb_max_mct_records += OPJ_J2K_MCT_DEFAULT_NB_RECORDS;
new_mct_records = (opj_mct_data_t *) opj_realloc(p_tcp->m_mct_records, p_tcp->m_nb_max_mct_records * sizeof(opj_mct_data_t));
if (! new_mct_records) {
opj_free(p_tcp->m_mct_records);
p_tcp->m_mct_records = NULL;
p_tcp->m_nb_max_mct_records = 0;
p_tcp->m_nb_mct_records = 0;
/* opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to setup mct encoding\n"); */
return OPJ_FALSE;
}
p_tcp->m_mct_records = new_mct_records;
l_mct_deco_data = p_tcp->m_mct_records + p_tcp->m_nb_mct_records;
memset(l_mct_deco_data ,0,(p_tcp->m_nb_max_mct_records - p_tcp->m_nb_mct_records) * sizeof(opj_mct_data_t));
}
l_mct_deco_data = p_tcp->m_mct_records + p_tcp->m_nb_mct_records;
if (l_mct_deco_data->m_data) {
opj_free(l_mct_deco_data->m_data);
l_mct_deco_data->m_data = 00;
}
l_mct_deco_data->m_index = l_indix++;
l_mct_deco_data->m_array_type = MCT_TYPE_DECORRELATION;
l_mct_deco_data->m_element_type = MCT_TYPE_FLOAT;
l_nb_elem = p_image->numcomps * p_image->numcomps;
l_mct_size = l_nb_elem * MCT_ELEMENT_SIZE[l_mct_deco_data->m_element_type];
l_mct_deco_data->m_data = (OPJ_BYTE*)opj_malloc(l_mct_size );
if (! l_mct_deco_data->m_data) {
return OPJ_FALSE;
}
j2k_mct_write_functions_from_float[l_mct_deco_data->m_element_type](p_tcp->m_mct_decoding_matrix,l_mct_deco_data->m_data,l_nb_elem);
l_mct_deco_data->m_data_size = l_mct_size;
++p_tcp->m_nb_mct_records;
}
if (p_tcp->m_nb_mct_records == p_tcp->m_nb_max_mct_records) {
opj_mct_data_t *new_mct_records;
p_tcp->m_nb_max_mct_records += OPJ_J2K_MCT_DEFAULT_NB_RECORDS;
new_mct_records = (opj_mct_data_t *) opj_realloc(p_tcp->m_mct_records, p_tcp->m_nb_max_mct_records * sizeof(opj_mct_data_t));
if (! new_mct_records) {
opj_free(p_tcp->m_mct_records);
p_tcp->m_mct_records = NULL;
p_tcp->m_nb_max_mct_records = 0;
p_tcp->m_nb_mct_records = 0;
/* opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to setup mct encoding\n"); */
return OPJ_FALSE;
}
p_tcp->m_mct_records = new_mct_records;
l_mct_offset_data = p_tcp->m_mct_records + p_tcp->m_nb_mct_records;
memset(l_mct_offset_data ,0,(p_tcp->m_nb_max_mct_records - p_tcp->m_nb_mct_records) * sizeof(opj_mct_data_t));
if (l_mct_deco_data) {
l_mct_deco_data = l_mct_offset_data - 1;
}
}
l_mct_offset_data = p_tcp->m_mct_records + p_tcp->m_nb_mct_records;
if (l_mct_offset_data->m_data) {
opj_free(l_mct_offset_data->m_data);
l_mct_offset_data->m_data = 00;
}
l_mct_offset_data->m_index = l_indix++;
l_mct_offset_data->m_array_type = MCT_TYPE_OFFSET;
l_mct_offset_data->m_element_type = MCT_TYPE_FLOAT;
l_nb_elem = p_image->numcomps;
l_mct_size = l_nb_elem * MCT_ELEMENT_SIZE[l_mct_offset_data->m_element_type];
l_mct_offset_data->m_data = (OPJ_BYTE*)opj_malloc(l_mct_size );
if (! l_mct_offset_data->m_data) {
return OPJ_FALSE;
}
l_data = (OPJ_FLOAT32*)opj_malloc(l_nb_elem * sizeof(OPJ_FLOAT32));
if (! l_data) {
opj_free(l_mct_offset_data->m_data);
l_mct_offset_data->m_data = 00;
return OPJ_FALSE;
}
l_tccp = p_tcp->tccps;
l_current_data = l_data;
for (i=0;i<l_nb_elem;++i) {
*(l_current_data++) = (OPJ_FLOAT32) (l_tccp->m_dc_level_shift);
++l_tccp;
}
j2k_mct_write_functions_from_float[l_mct_offset_data->m_element_type](l_data,l_mct_offset_data->m_data,l_nb_elem);
opj_free(l_data);
l_mct_offset_data->m_data_size = l_mct_size;
++p_tcp->m_nb_mct_records;
if (p_tcp->m_nb_mcc_records == p_tcp->m_nb_max_mcc_records) {
opj_simple_mcc_decorrelation_data_t *new_mcc_records;
p_tcp->m_nb_max_mcc_records += OPJ_J2K_MCT_DEFAULT_NB_RECORDS;
new_mcc_records = (opj_simple_mcc_decorrelation_data_t *) opj_realloc(
p_tcp->m_mcc_records, p_tcp->m_nb_max_mcc_records * sizeof(opj_simple_mcc_decorrelation_data_t));
if (! new_mcc_records) {
opj_free(p_tcp->m_mcc_records);
p_tcp->m_mcc_records = NULL;
p_tcp->m_nb_max_mcc_records = 0;
p_tcp->m_nb_mcc_records = 0;
/* opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to setup mct encoding\n"); */
return OPJ_FALSE;
}
p_tcp->m_mcc_records = new_mcc_records;
l_mcc_data = p_tcp->m_mcc_records + p_tcp->m_nb_mcc_records;
memset(l_mcc_data ,0,(p_tcp->m_nb_max_mcc_records - p_tcp->m_nb_mcc_records) * sizeof(opj_simple_mcc_decorrelation_data_t));
}
l_mcc_data = p_tcp->m_mcc_records + p_tcp->m_nb_mcc_records;
l_mcc_data->m_decorrelation_array = l_mct_deco_data;
l_mcc_data->m_is_irreversible = 1;
l_mcc_data->m_nb_comps = p_image->numcomps;
l_mcc_data->m_index = l_indix++;
l_mcc_data->m_offset_array = l_mct_offset_data;
++p_tcp->m_nb_mcc_records;
return OPJ_TRUE;
}
static OPJ_BOOL opj_j2k_build_decoder (opj_j2k_t * p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager )
{
/* add here initialization of cp
copy paste of setup_decoder */
(void)p_j2k;
(void)p_stream;
(void)p_manager;
return OPJ_TRUE;
}
static OPJ_BOOL opj_j2k_build_encoder (opj_j2k_t * p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager )
{
/* add here initialization of cp
copy paste of setup_encoder */
(void)p_j2k;
(void)p_stream;
(void)p_manager;
return OPJ_TRUE;
}
static OPJ_BOOL opj_j2k_encoding_validation ( opj_j2k_t * p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager )
{
OPJ_BOOL l_is_valid = OPJ_TRUE;
/* preconditions */
assert(p_j2k != 00);
assert(p_stream != 00);
assert(p_manager != 00);
/* STATE checking */
/* make sure the state is at 0 */
l_is_valid &= (p_j2k->m_specific_param.m_decoder.m_state == J2K_STATE_NONE);
/* POINTER validation */
/* make sure a p_j2k codec is present */
l_is_valid &= (p_j2k->m_procedure_list != 00);
/* make sure a validation list is present */
l_is_valid &= (p_j2k->m_validation_list != 00);
/* ISO 15444-1:2004 states between 1 & 33 (0 -> 32) */
/* 33 (32) would always fail the check below (if a cast to 64bits was done) */
/* FIXME Shall we change OPJ_J2K_MAXRLVLS to 32 ? */
if ((p_j2k->m_cp.tcps->tccps->numresolutions <= 0) || (p_j2k->m_cp.tcps->tccps->numresolutions > 32)) {
opj_event_msg(p_manager, EVT_ERROR, "Number of resolutions is too high in comparison to the size of tiles\n");
return OPJ_FALSE;
}
if ((p_j2k->m_cp.tdx) < (OPJ_UINT32) (1 << (p_j2k->m_cp.tcps->tccps->numresolutions - 1U))) {
opj_event_msg(p_manager, EVT_ERROR, "Number of resolutions is too high in comparison to the size of tiles\n");
return OPJ_FALSE;
}
if ((p_j2k->m_cp.tdy) < (OPJ_UINT32) (1 << (p_j2k->m_cp.tcps->tccps->numresolutions - 1U))) {
opj_event_msg(p_manager, EVT_ERROR, "Number of resolutions is too high in comparison to the size of tiles\n");
return OPJ_FALSE;
}
/* PARAMETER VALIDATION */
return l_is_valid;
}
static OPJ_BOOL opj_j2k_decoding_validation ( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager
)
{
OPJ_BOOL l_is_valid = OPJ_TRUE;
/* preconditions*/
assert(p_j2k != 00);
assert(p_stream != 00);
assert(p_manager != 00);
/* STATE checking */
/* make sure the state is at 0 */
#ifdef TODO_MSD
l_is_valid &= (p_j2k->m_specific_param.m_decoder.m_state == J2K_DEC_STATE_NONE);
#endif
l_is_valid &= (p_j2k->m_specific_param.m_decoder.m_state == 0x0000);
/* POINTER validation */
/* make sure a p_j2k codec is present */
/* make sure a procedure list is present */
l_is_valid &= (p_j2k->m_procedure_list != 00);
/* make sure a validation list is present */
l_is_valid &= (p_j2k->m_validation_list != 00);
/* PARAMETER VALIDATION */
return l_is_valid;
}
static OPJ_BOOL opj_j2k_read_header_procedure( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager)
{
OPJ_UINT32 l_current_marker;
OPJ_UINT32 l_marker_size;
const opj_dec_memory_marker_handler_t * l_marker_handler = 00;
OPJ_BOOL l_has_siz = 0;
OPJ_BOOL l_has_cod = 0;
OPJ_BOOL l_has_qcd = 0;
/* preconditions */
assert(p_stream != 00);
assert(p_j2k != 00);
assert(p_manager != 00);
/* We enter the main header */
p_j2k->m_specific_param.m_decoder.m_state = J2K_STATE_MHSOC;
/* Try to read the SOC marker, the codestream must begin with SOC marker */
if (! opj_j2k_read_soc(p_j2k,p_stream,p_manager)) {
opj_event_msg(p_manager, EVT_ERROR, "Expected a SOC marker \n");
return OPJ_FALSE;
}
/* Try to read 2 bytes (the next marker ID) from stream and copy them into the buffer */
if (opj_stream_read_data(p_stream,p_j2k->m_specific_param.m_decoder.m_header_data,2,p_manager) != 2) {
opj_event_msg(p_manager, EVT_ERROR, "Stream too short\n");
return OPJ_FALSE;
}
/* Read 2 bytes as the new marker ID */
opj_read_bytes(p_j2k->m_specific_param.m_decoder.m_header_data,&l_current_marker,2);
/* Try to read until the SOT is detected */
while (l_current_marker != J2K_MS_SOT) {
/* Check if the current marker ID is valid */
if (l_current_marker < 0xff00) {
opj_event_msg(p_manager, EVT_ERROR, "A marker ID was expected (0xff--) instead of %.8x\n", l_current_marker);
return OPJ_FALSE;
}
/* Get the marker handler from the marker ID */
l_marker_handler = opj_j2k_get_marker_handler(l_current_marker);
/* Manage case where marker is unknown */
if (l_marker_handler->id == J2K_MS_UNK) {
if (! opj_j2k_read_unk(p_j2k, p_stream, &l_current_marker, p_manager)){
opj_event_msg(p_manager, EVT_ERROR, "Unknow marker have been detected and generated error.\n");
return OPJ_FALSE;
}
if (l_current_marker == J2K_MS_SOT)
break; /* SOT marker is detected main header is completely read */
else /* Get the marker handler from the marker ID */
l_marker_handler = opj_j2k_get_marker_handler(l_current_marker);
}
if (l_marker_handler->id == J2K_MS_SIZ) {
/* Mark required SIZ marker as found */
l_has_siz = 1;
}
if (l_marker_handler->id == J2K_MS_COD) {
/* Mark required COD marker as found */
l_has_cod = 1;
}
if (l_marker_handler->id == J2K_MS_QCD) {
/* Mark required QCD marker as found */
l_has_qcd = 1;
}
/* Check if the marker is known and if it is the right place to find it */
if (! (p_j2k->m_specific_param.m_decoder.m_state & l_marker_handler->states) ) {
opj_event_msg(p_manager, EVT_ERROR, "Marker is not compliant with its position\n");
return OPJ_FALSE;
}
/* Try to read 2 bytes (the marker size) from stream and copy them into the buffer */
if (opj_stream_read_data(p_stream,p_j2k->m_specific_param.m_decoder.m_header_data,2,p_manager) != 2) {
opj_event_msg(p_manager, EVT_ERROR, "Stream too short\n");
return OPJ_FALSE;
}
/* read 2 bytes as the marker size */
opj_read_bytes(p_j2k->m_specific_param.m_decoder.m_header_data,&l_marker_size,2);
l_marker_size -= 2; /* Subtract the size of the marker ID already read */
/* Check if the marker size is compatible with the header data size */
if (l_marker_size > p_j2k->m_specific_param.m_decoder.m_header_data_size) {
OPJ_BYTE *new_header_data = (OPJ_BYTE *) opj_realloc(p_j2k->m_specific_param.m_decoder.m_header_data, l_marker_size);
if (! new_header_data) {
opj_free(p_j2k->m_specific_param.m_decoder.m_header_data);
p_j2k->m_specific_param.m_decoder.m_header_data = NULL;
p_j2k->m_specific_param.m_decoder.m_header_data_size = 0;
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to read header\n");
return OPJ_FALSE;
}
p_j2k->m_specific_param.m_decoder.m_header_data = new_header_data;
p_j2k->m_specific_param.m_decoder.m_header_data_size = l_marker_size;
}
/* Try to read the rest of the marker segment from stream and copy them into the buffer */
if (opj_stream_read_data(p_stream,p_j2k->m_specific_param.m_decoder.m_header_data,l_marker_size,p_manager) != l_marker_size) {
opj_event_msg(p_manager, EVT_ERROR, "Stream too short\n");
return OPJ_FALSE;
}
/* Read the marker segment with the correct marker handler */
if (! (*(l_marker_handler->handler))(p_j2k,p_j2k->m_specific_param.m_decoder.m_header_data,l_marker_size,p_manager)) {
opj_event_msg(p_manager, EVT_ERROR, "Marker handler function failed to read the marker segment\n");
return OPJ_FALSE;
}
/* Add the marker to the codestream index*/
if (OPJ_FALSE == opj_j2k_add_mhmarker(
p_j2k->cstr_index,
l_marker_handler->id,
(OPJ_UINT32) opj_stream_tell(p_stream) - l_marker_size - 4,
l_marker_size + 4 )) {
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to add mh marker\n");
return OPJ_FALSE;
}
/* Try to read 2 bytes (the next marker ID) from stream and copy them into the buffer */
if (opj_stream_read_data(p_stream,p_j2k->m_specific_param.m_decoder.m_header_data,2,p_manager) != 2) {
opj_event_msg(p_manager, EVT_ERROR, "Stream too short\n");
return OPJ_FALSE;
}
/* read 2 bytes as the new marker ID */
opj_read_bytes(p_j2k->m_specific_param.m_decoder.m_header_data,&l_current_marker,2);
}
if (l_has_siz == 0) {
opj_event_msg(p_manager, EVT_ERROR, "required SIZ marker not found in main header\n");
return OPJ_FALSE;
}
if (l_has_cod == 0) {
opj_event_msg(p_manager, EVT_ERROR, "required COD marker not found in main header\n");
return OPJ_FALSE;
}
if (l_has_qcd == 0) {
opj_event_msg(p_manager, EVT_ERROR, "required QCD marker not found in main header\n");
return OPJ_FALSE;
}
if (! opj_j2k_merge_ppm(&(p_j2k->m_cp), p_manager)) {
opj_event_msg(p_manager, EVT_ERROR, "Failed to merge PPM data\n");
return OPJ_FALSE;
}
opj_event_msg(p_manager, EVT_INFO, "Main header has been correctly decoded.\n");
/* Position of the last element of the main header */
p_j2k->cstr_index->main_head_end = (OPJ_UINT32) opj_stream_tell(p_stream) - 2;
/* Next step: read a tile-part header */
p_j2k->m_specific_param.m_decoder.m_state = J2K_STATE_TPHSOT;
return OPJ_TRUE;
}
static OPJ_BOOL opj_j2k_exec ( opj_j2k_t * p_j2k,
opj_procedure_list_t * p_procedure_list,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager )
{
OPJ_BOOL (** l_procedure) (opj_j2k_t * ,opj_stream_private_t *,opj_event_mgr_t *) = 00;
OPJ_BOOL l_result = OPJ_TRUE;
OPJ_UINT32 l_nb_proc, i;
/* preconditions*/
assert(p_procedure_list != 00);
assert(p_j2k != 00);
assert(p_stream != 00);
assert(p_manager != 00);
l_nb_proc = opj_procedure_list_get_nb_procedures(p_procedure_list);
l_procedure = (OPJ_BOOL (**) (opj_j2k_t * ,opj_stream_private_t *,opj_event_mgr_t *)) opj_procedure_list_get_first_procedure(p_procedure_list);
for (i=0;i<l_nb_proc;++i) {
l_result = l_result && ((*l_procedure) (p_j2k,p_stream,p_manager));
++l_procedure;
}
/* and clear the procedure list at the end.*/
opj_procedure_list_clear(p_procedure_list);
return l_result;
}
/* FIXME DOC*/
static OPJ_BOOL opj_j2k_copy_default_tcp_and_create_tcd ( opj_j2k_t * p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager
)
{
opj_tcp_t * l_tcp = 00;
opj_tcp_t * l_default_tcp = 00;
OPJ_UINT32 l_nb_tiles;
OPJ_UINT32 i,j;
opj_tccp_t *l_current_tccp = 00;
OPJ_UINT32 l_tccp_size;
OPJ_UINT32 l_mct_size;
opj_image_t * l_image;
OPJ_UINT32 l_mcc_records_size,l_mct_records_size;
opj_mct_data_t * l_src_mct_rec, *l_dest_mct_rec;
opj_simple_mcc_decorrelation_data_t * l_src_mcc_rec, *l_dest_mcc_rec;
OPJ_UINT32 l_offset;
/* preconditions */
assert(p_j2k != 00);
assert(p_stream != 00);
assert(p_manager != 00);
l_image = p_j2k->m_private_image;
l_nb_tiles = p_j2k->m_cp.th * p_j2k->m_cp.tw;
l_tcp = p_j2k->m_cp.tcps;
l_tccp_size = l_image->numcomps * (OPJ_UINT32)sizeof(opj_tccp_t);
l_default_tcp = p_j2k->m_specific_param.m_decoder.m_default_tcp;
l_mct_size = l_image->numcomps * l_image->numcomps * (OPJ_UINT32)sizeof(OPJ_FLOAT32);
/* For each tile */
for (i=0; i<l_nb_tiles; ++i) {
/* keep the tile-compo coding parameters pointer of the current tile coding parameters*/
l_current_tccp = l_tcp->tccps;
/*Copy default coding parameters into the current tile coding parameters*/
memcpy(l_tcp, l_default_tcp, sizeof(opj_tcp_t));
/* Initialize some values of the current tile coding parameters*/
l_tcp->cod = 0;
l_tcp->ppt = 0;
l_tcp->ppt_data = 00;
/* Remove memory not owned by this tile in case of early error return. */
l_tcp->m_mct_decoding_matrix = 00;
l_tcp->m_nb_max_mct_records = 0;
l_tcp->m_mct_records = 00;
l_tcp->m_nb_max_mcc_records = 0;
l_tcp->m_mcc_records = 00;
/* Reconnect the tile-compo coding parameters pointer to the current tile coding parameters*/
l_tcp->tccps = l_current_tccp;
/* Get the mct_decoding_matrix of the dflt_tile_cp and copy them into the current tile cp*/
if (l_default_tcp->m_mct_decoding_matrix) {
l_tcp->m_mct_decoding_matrix = (OPJ_FLOAT32*)opj_malloc(l_mct_size);
if (! l_tcp->m_mct_decoding_matrix ) {
return OPJ_FALSE;
}
memcpy(l_tcp->m_mct_decoding_matrix,l_default_tcp->m_mct_decoding_matrix,l_mct_size);
}
/* Get the mct_record of the dflt_tile_cp and copy them into the current tile cp*/
l_mct_records_size = l_default_tcp->m_nb_max_mct_records * (OPJ_UINT32)sizeof(opj_mct_data_t);
l_tcp->m_mct_records = (opj_mct_data_t*)opj_malloc(l_mct_records_size);
if (! l_tcp->m_mct_records) {
return OPJ_FALSE;
}
memcpy(l_tcp->m_mct_records, l_default_tcp->m_mct_records,l_mct_records_size);
/* Copy the mct record data from dflt_tile_cp to the current tile*/
l_src_mct_rec = l_default_tcp->m_mct_records;
l_dest_mct_rec = l_tcp->m_mct_records;
for (j=0;j<l_default_tcp->m_nb_mct_records;++j) {
if (l_src_mct_rec->m_data) {
l_dest_mct_rec->m_data = (OPJ_BYTE*) opj_malloc(l_src_mct_rec->m_data_size);
if(! l_dest_mct_rec->m_data) {
return OPJ_FALSE;
}
memcpy(l_dest_mct_rec->m_data,l_src_mct_rec->m_data,l_src_mct_rec->m_data_size);
}
++l_src_mct_rec;
++l_dest_mct_rec;
/* Update with each pass to free exactly what has been allocated on early return. */
l_tcp->m_nb_max_mct_records += 1;
}
/* Get the mcc_record of the dflt_tile_cp and copy them into the current tile cp*/
l_mcc_records_size = l_default_tcp->m_nb_max_mcc_records * (OPJ_UINT32)sizeof(opj_simple_mcc_decorrelation_data_t);
l_tcp->m_mcc_records = (opj_simple_mcc_decorrelation_data_t*) opj_malloc(l_mcc_records_size);
if (! l_tcp->m_mcc_records) {
return OPJ_FALSE;
}
memcpy(l_tcp->m_mcc_records,l_default_tcp->m_mcc_records,l_mcc_records_size);
l_tcp->m_nb_max_mcc_records = l_default_tcp->m_nb_max_mcc_records;
/* Copy the mcc record data from dflt_tile_cp to the current tile*/
l_src_mcc_rec = l_default_tcp->m_mcc_records;
l_dest_mcc_rec = l_tcp->m_mcc_records;
for (j=0;j<l_default_tcp->m_nb_max_mcc_records;++j) {
if (l_src_mcc_rec->m_decorrelation_array) {
l_offset = (OPJ_UINT32)(l_src_mcc_rec->m_decorrelation_array - l_default_tcp->m_mct_records);
l_dest_mcc_rec->m_decorrelation_array = l_tcp->m_mct_records + l_offset;
}
if (l_src_mcc_rec->m_offset_array) {
l_offset = (OPJ_UINT32)(l_src_mcc_rec->m_offset_array - l_default_tcp->m_mct_records);
l_dest_mcc_rec->m_offset_array = l_tcp->m_mct_records + l_offset;
}
++l_src_mcc_rec;
++l_dest_mcc_rec;
}
/* Copy all the dflt_tile_compo_cp to the current tile cp */
memcpy(l_current_tccp,l_default_tcp->tccps,l_tccp_size);
/* Move to next tile cp*/
++l_tcp;
}
/* Create the current tile decoder*/
p_j2k->m_tcd = (opj_tcd_t*)opj_tcd_create(OPJ_TRUE); /* FIXME why a cast ? */
if (! p_j2k->m_tcd ) {
return OPJ_FALSE;
}
if ( !opj_tcd_init(p_j2k->m_tcd, l_image, &(p_j2k->m_cp)) ) {
opj_tcd_destroy(p_j2k->m_tcd);
p_j2k->m_tcd = 00;
opj_event_msg(p_manager, EVT_ERROR, "Cannot decode tile, memory error\n");
return OPJ_FALSE;
}
return OPJ_TRUE;
}
static const opj_dec_memory_marker_handler_t * opj_j2k_get_marker_handler (OPJ_UINT32 p_id)
{
const opj_dec_memory_marker_handler_t *e;
for (e = j2k_memory_marker_handler_tab; e->id != 0; ++e) {
if (e->id == p_id) {
break; /* we found a handler corresponding to the marker ID */
}
}
return e;
}
void opj_j2k_destroy (opj_j2k_t *p_j2k)
{
if (p_j2k == 00) {
return;
}
if (p_j2k->m_is_decoder) {
if (p_j2k->m_specific_param.m_decoder.m_default_tcp != 00) {
opj_j2k_tcp_destroy(p_j2k->m_specific_param.m_decoder.m_default_tcp);
opj_free(p_j2k->m_specific_param.m_decoder.m_default_tcp);
p_j2k->m_specific_param.m_decoder.m_default_tcp = 00;
}
if (p_j2k->m_specific_param.m_decoder.m_header_data != 00) {
opj_free(p_j2k->m_specific_param.m_decoder.m_header_data);
p_j2k->m_specific_param.m_decoder.m_header_data = 00;
p_j2k->m_specific_param.m_decoder.m_header_data_size = 0;
}
}
else {
if (p_j2k->m_specific_param.m_encoder.m_encoded_tile_data) {
opj_free(p_j2k->m_specific_param.m_encoder.m_encoded_tile_data);
p_j2k->m_specific_param.m_encoder.m_encoded_tile_data = 00;
}
if (p_j2k->m_specific_param.m_encoder.m_tlm_sot_offsets_buffer) {
opj_free(p_j2k->m_specific_param.m_encoder.m_tlm_sot_offsets_buffer);
p_j2k->m_specific_param.m_encoder.m_tlm_sot_offsets_buffer = 00;
p_j2k->m_specific_param.m_encoder.m_tlm_sot_offsets_current = 00;
}
if (p_j2k->m_specific_param.m_encoder.m_header_tile_data) {
opj_free(p_j2k->m_specific_param.m_encoder.m_header_tile_data);
p_j2k->m_specific_param.m_encoder.m_header_tile_data = 00;
p_j2k->m_specific_param.m_encoder.m_header_tile_data_size = 0;
}
}
opj_tcd_destroy(p_j2k->m_tcd);
opj_j2k_cp_destroy(&(p_j2k->m_cp));
memset(&(p_j2k->m_cp),0,sizeof(opj_cp_t));
opj_procedure_list_destroy(p_j2k->m_procedure_list);
p_j2k->m_procedure_list = 00;
opj_procedure_list_destroy(p_j2k->m_validation_list);
p_j2k->m_validation_list = 00;
j2k_destroy_cstr_index(p_j2k->cstr_index);
p_j2k->cstr_index = NULL;
opj_image_destroy(p_j2k->m_private_image);
p_j2k->m_private_image = NULL;
opj_image_destroy(p_j2k->m_output_image);
p_j2k->m_output_image = NULL;
opj_free(p_j2k);
}
void j2k_destroy_cstr_index (opj_codestream_index_t *p_cstr_ind)
{
if (p_cstr_ind) {
if (p_cstr_ind->marker) {
opj_free(p_cstr_ind->marker);
p_cstr_ind->marker = NULL;
}
if (p_cstr_ind->tile_index) {
OPJ_UINT32 it_tile = 0;
for (it_tile=0; it_tile < p_cstr_ind->nb_of_tiles; it_tile++) {
if(p_cstr_ind->tile_index[it_tile].packet_index) {
opj_free(p_cstr_ind->tile_index[it_tile].packet_index);
p_cstr_ind->tile_index[it_tile].packet_index = NULL;
}
if(p_cstr_ind->tile_index[it_tile].tp_index){
opj_free(p_cstr_ind->tile_index[it_tile].tp_index);
p_cstr_ind->tile_index[it_tile].tp_index = NULL;
}
if(p_cstr_ind->tile_index[it_tile].marker){
opj_free(p_cstr_ind->tile_index[it_tile].marker);
p_cstr_ind->tile_index[it_tile].marker = NULL;
}
}
opj_free( p_cstr_ind->tile_index);
p_cstr_ind->tile_index = NULL;
}
opj_free(p_cstr_ind);
}
}
static void opj_j2k_tcp_destroy (opj_tcp_t *p_tcp)
{
if (p_tcp == 00) {
return;
}
if (p_tcp->ppt_markers != 00) {
OPJ_UINT32 i;
for (i = 0U; i < p_tcp->ppt_markers_count; ++i) {
if (p_tcp->ppt_markers[i].m_data != NULL) {
opj_free(p_tcp->ppt_markers[i].m_data);
}
}
p_tcp->ppt_markers_count = 0U;
opj_free(p_tcp->ppt_markers);
p_tcp->ppt_markers = NULL;
}
if (p_tcp->ppt_buffer != 00) {
opj_free(p_tcp->ppt_buffer);
p_tcp->ppt_buffer = 00;
}
if (p_tcp->tccps != 00) {
opj_free(p_tcp->tccps);
p_tcp->tccps = 00;
}
if (p_tcp->m_mct_coding_matrix != 00) {
opj_free(p_tcp->m_mct_coding_matrix);
p_tcp->m_mct_coding_matrix = 00;
}
if (p_tcp->m_mct_decoding_matrix != 00) {
opj_free(p_tcp->m_mct_decoding_matrix);
p_tcp->m_mct_decoding_matrix = 00;
}
if (p_tcp->m_mcc_records) {
opj_free(p_tcp->m_mcc_records);
p_tcp->m_mcc_records = 00;
p_tcp->m_nb_max_mcc_records = 0;
p_tcp->m_nb_mcc_records = 0;
}
if (p_tcp->m_mct_records) {
opj_mct_data_t * l_mct_data = p_tcp->m_mct_records;
OPJ_UINT32 i;
for (i=0;i<p_tcp->m_nb_mct_records;++i) {
if (l_mct_data->m_data) {
opj_free(l_mct_data->m_data);
l_mct_data->m_data = 00;
}
++l_mct_data;
}
opj_free(p_tcp->m_mct_records);
p_tcp->m_mct_records = 00;
}
if (p_tcp->mct_norms != 00) {
opj_free(p_tcp->mct_norms);
p_tcp->mct_norms = 00;
}
opj_j2k_tcp_data_destroy(p_tcp);
}
static void opj_j2k_tcp_data_destroy (opj_tcp_t *p_tcp)
{
if (p_tcp->m_data) {
opj_free(p_tcp->m_data);
p_tcp->m_data = NULL;
p_tcp->m_data_size = 0;
}
}
static void opj_j2k_cp_destroy (opj_cp_t *p_cp)
{
OPJ_UINT32 l_nb_tiles;
opj_tcp_t * l_current_tile = 00;
if (p_cp == 00)
{
return;
}
if (p_cp->tcps != 00)
{
OPJ_UINT32 i;
l_current_tile = p_cp->tcps;
l_nb_tiles = p_cp->th * p_cp->tw;
for (i = 0U; i < l_nb_tiles; ++i)
{
opj_j2k_tcp_destroy(l_current_tile);
++l_current_tile;
}
opj_free(p_cp->tcps);
p_cp->tcps = 00;
}
if (p_cp->ppm_markers != 00) {
OPJ_UINT32 i;
for (i = 0U; i < p_cp->ppm_markers_count; ++i) {
if (p_cp->ppm_markers[i].m_data != NULL) {
opj_free(p_cp->ppm_markers[i].m_data);
}
}
p_cp->ppm_markers_count = 0U;
opj_free(p_cp->ppm_markers);
p_cp->ppm_markers = NULL;
}
opj_free(p_cp->ppm_buffer);
p_cp->ppm_buffer = 00;
p_cp->ppm_data = NULL; /* ppm_data belongs to the allocated buffer pointed by ppm_buffer */
opj_free(p_cp->comment);
p_cp->comment = 00;
if (! p_cp->m_is_decoder)
{
opj_free(p_cp->m_specific_param.m_enc.m_matrice);
p_cp->m_specific_param.m_enc.m_matrice = 00;
}
}
static OPJ_BOOL opj_j2k_need_nb_tile_parts_correction(opj_stream_private_t *p_stream, OPJ_UINT32 tile_no, OPJ_BOOL* p_correction_needed, opj_event_mgr_t * p_manager )
{
OPJ_BYTE l_header_data[10];
OPJ_OFF_T l_stream_pos_backup;
OPJ_UINT32 l_current_marker;
OPJ_UINT32 l_marker_size;
OPJ_UINT32 l_tile_no, l_tot_len, l_current_part, l_num_parts;
/* initialize to no correction needed */
*p_correction_needed = OPJ_FALSE;
l_stream_pos_backup = opj_stream_tell(p_stream);
if (l_stream_pos_backup == -1) {
/* let's do nothing */
return OPJ_TRUE;
}
for (;;) {
/* Try to read 2 bytes (the next marker ID) from stream and copy them into the buffer */
if (opj_stream_read_data(p_stream,l_header_data, 2, p_manager) != 2) {
/* assume all is OK */
if (! opj_stream_seek(p_stream, l_stream_pos_backup, p_manager)) {
return OPJ_FALSE;
}
return OPJ_TRUE;
}
/* Read 2 bytes from buffer as the new marker ID */
opj_read_bytes(l_header_data, &l_current_marker, 2);
if (l_current_marker != J2K_MS_SOT) {
/* assume all is OK */
if (! opj_stream_seek(p_stream, l_stream_pos_backup, p_manager)) {
return OPJ_FALSE;
}
return OPJ_TRUE;
}
/* Try to read 2 bytes (the marker size) from stream and copy them into the buffer */
if (opj_stream_read_data(p_stream, l_header_data, 2, p_manager) != 2) {
opj_event_msg(p_manager, EVT_ERROR, "Stream too short\n");
return OPJ_FALSE;
}
/* Read 2 bytes from the buffer as the marker size */
opj_read_bytes(l_header_data, &l_marker_size, 2);
/* Check marker size for SOT Marker */
if (l_marker_size != 10) {
opj_event_msg(p_manager, EVT_ERROR, "Inconsistent marker size\n");
return OPJ_FALSE;
}
l_marker_size -= 2;
if (opj_stream_read_data(p_stream, l_header_data, l_marker_size, p_manager) != l_marker_size) {
opj_event_msg(p_manager, EVT_ERROR, "Stream too short\n");
return OPJ_FALSE;
}
if (! opj_j2k_get_sot_values(l_header_data, l_marker_size, &l_tile_no, &l_tot_len, &l_current_part, &l_num_parts, p_manager)) {
return OPJ_FALSE;
}
if (l_tile_no == tile_no) {
/* we found what we were looking for */
break;
}
if ((l_tot_len == 0U) || (l_tot_len < 14U)) {
/* last SOT until EOC or invalid Psot value */
/* assume all is OK */
if (! opj_stream_seek(p_stream, l_stream_pos_backup, p_manager)) {
return OPJ_FALSE;
}
return OPJ_TRUE;
}
l_tot_len -= 12U;
/* look for next SOT marker */
if (opj_stream_skip(p_stream, (OPJ_OFF_T)(l_tot_len), p_manager) != (OPJ_OFF_T)(l_tot_len)) {
/* assume all is OK */
if (! opj_stream_seek(p_stream, l_stream_pos_backup, p_manager)) {
return OPJ_FALSE;
}
return OPJ_TRUE;
}
}
/* check for correction */
if (l_current_part == l_num_parts) {
*p_correction_needed = OPJ_TRUE;
}
if (! opj_stream_seek(p_stream, l_stream_pos_backup, p_manager)) {
return OPJ_FALSE;
}
return OPJ_TRUE;
}
OPJ_BOOL opj_j2k_read_tile_header( opj_j2k_t * p_j2k,
OPJ_UINT32 * p_tile_index,
OPJ_UINT32 * p_data_size,
OPJ_INT32 * p_tile_x0, OPJ_INT32 * p_tile_y0,
OPJ_INT32 * p_tile_x1, OPJ_INT32 * p_tile_y1,
OPJ_UINT32 * p_nb_comps,
OPJ_BOOL * p_go_on,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager )
{
OPJ_UINT32 l_current_marker = J2K_MS_SOT;
OPJ_UINT32 l_marker_size;
const opj_dec_memory_marker_handler_t * l_marker_handler = 00;
opj_tcp_t * l_tcp = NULL;
/* preconditions */
assert(p_stream != 00);
assert(p_j2k != 00);
assert(p_manager != 00);
/* Have we reached the End Of Codestream? */
if (p_j2k->m_specific_param.m_decoder.m_state == J2K_STATE_EOC){
l_current_marker = J2K_MS_EOC;
}
/* We need to encounter a SOT marker (a new tile-part header) */
else if (p_j2k->m_specific_param.m_decoder.m_state != J2K_STATE_TPHSOT){
return OPJ_FALSE;
}
/* Read from the codestream until the EOC is reached or ! can_decode ??? FIXME */
while ( (!p_j2k->m_specific_param.m_decoder.m_can_decode) && (l_current_marker != J2K_MS_EOC) ) {
/* Try to read until the Start Of Data is detected */
while (l_current_marker != J2K_MS_SOD) {
if(opj_stream_get_number_byte_left(p_stream) == 0)
{
p_j2k->m_specific_param.m_decoder.m_state = J2K_STATE_NEOC;
break;
}
/* Try to read 2 bytes (the marker size) from stream and copy them into the buffer */
if (opj_stream_read_data(p_stream,p_j2k->m_specific_param.m_decoder.m_header_data,2,p_manager) != 2) {
opj_event_msg(p_manager, EVT_ERROR, "Stream too short\n");
return OPJ_FALSE;
}
/* Read 2 bytes from the buffer as the marker size */
opj_read_bytes(p_j2k->m_specific_param.m_decoder.m_header_data,&l_marker_size,2);
/* Check marker size (does not include marker ID but includes marker size) */
if (l_marker_size < 2) {
opj_event_msg(p_manager, EVT_ERROR, "Inconsistent marker size\n");
return OPJ_FALSE;
}
/* cf. https://code.google.com/p/openjpeg/issues/detail?id=226 */
if (l_current_marker == 0x8080 && opj_stream_get_number_byte_left(p_stream) == 0) {
p_j2k->m_specific_param.m_decoder.m_state = J2K_STATE_NEOC;
break;
}
/* Why this condition? FIXME */
if (p_j2k->m_specific_param.m_decoder.m_state & J2K_STATE_TPH){
p_j2k->m_specific_param.m_decoder.m_sot_length -= (l_marker_size + 2);
}
l_marker_size -= 2; /* Subtract the size of the marker ID already read */
/* Get the marker handler from the marker ID */
l_marker_handler = opj_j2k_get_marker_handler(l_current_marker);
/* Check if the marker is known and if it is the right place to find it */
if (! (p_j2k->m_specific_param.m_decoder.m_state & l_marker_handler->states) ) {
opj_event_msg(p_manager, EVT_ERROR, "Marker is not compliant with its position\n");
return OPJ_FALSE;
}
/* FIXME manage case of unknown marker as in the main header ? */
/* Check if the marker size is compatible with the header data size */
if (l_marker_size > p_j2k->m_specific_param.m_decoder.m_header_data_size) {
OPJ_BYTE *new_header_data = NULL;
/* If we are here, this means we consider this marker as known & we will read it */
/* Check enough bytes left in stream before allocation */
if ((OPJ_OFF_T)l_marker_size > opj_stream_get_number_byte_left(p_stream)) {
opj_event_msg(p_manager, EVT_ERROR, "Marker size inconsistent with stream length\n");
return OPJ_FALSE;
}
new_header_data = (OPJ_BYTE *) opj_realloc(p_j2k->m_specific_param.m_decoder.m_header_data, l_marker_size);
if (! new_header_data) {
opj_free(p_j2k->m_specific_param.m_decoder.m_header_data);
p_j2k->m_specific_param.m_decoder.m_header_data = NULL;
p_j2k->m_specific_param.m_decoder.m_header_data_size = 0;
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to read header\n");
return OPJ_FALSE;
}
p_j2k->m_specific_param.m_decoder.m_header_data = new_header_data;
p_j2k->m_specific_param.m_decoder.m_header_data_size = l_marker_size;
}
/* Try to read the rest of the marker segment from stream and copy them into the buffer */
if (opj_stream_read_data(p_stream,p_j2k->m_specific_param.m_decoder.m_header_data,l_marker_size,p_manager) != l_marker_size) {
opj_event_msg(p_manager, EVT_ERROR, "Stream too short\n");
return OPJ_FALSE;
}
if (!l_marker_handler->handler) {
/* See issue #175 */
opj_event_msg(p_manager, EVT_ERROR, "Not sure how that happened.\n");
return OPJ_FALSE;
}
/* Read the marker segment with the correct marker handler */
if (! (*(l_marker_handler->handler))(p_j2k,p_j2k->m_specific_param.m_decoder.m_header_data,l_marker_size,p_manager)) {
opj_event_msg(p_manager, EVT_ERROR, "Fail to read the current marker segment (%#x)\n", l_current_marker);
return OPJ_FALSE;
}
/* Add the marker to the codestream index*/
if (OPJ_FALSE == opj_j2k_add_tlmarker(p_j2k->m_current_tile_number,
p_j2k->cstr_index,
l_marker_handler->id,
(OPJ_UINT32) opj_stream_tell(p_stream) - l_marker_size - 4,
l_marker_size + 4 )) {
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to add tl marker\n");
return OPJ_FALSE;
}
/* Keep the position of the last SOT marker read */
if ( l_marker_handler->id == J2K_MS_SOT ) {
OPJ_UINT32 sot_pos = (OPJ_UINT32) opj_stream_tell(p_stream) - l_marker_size - 4 ;
if (sot_pos > p_j2k->m_specific_param.m_decoder.m_last_sot_read_pos)
{
p_j2k->m_specific_param.m_decoder.m_last_sot_read_pos = sot_pos;
}
}
if (p_j2k->m_specific_param.m_decoder.m_skip_data) {
/* Skip the rest of the tile part header*/
if (opj_stream_skip(p_stream,p_j2k->m_specific_param.m_decoder.m_sot_length,p_manager) != p_j2k->m_specific_param.m_decoder.m_sot_length) {
opj_event_msg(p_manager, EVT_ERROR, "Stream too short\n");
return OPJ_FALSE;
}
l_current_marker = J2K_MS_SOD; /* Normally we reached a SOD */
}
else {
/* Try to read 2 bytes (the next marker ID) from stream and copy them into the buffer*/
if (opj_stream_read_data(p_stream,p_j2k->m_specific_param.m_decoder.m_header_data,2,p_manager) != 2) {
opj_event_msg(p_manager, EVT_ERROR, "Stream too short\n");
return OPJ_FALSE;
}
/* Read 2 bytes from the buffer as the new marker ID */
opj_read_bytes(p_j2k->m_specific_param.m_decoder.m_header_data,&l_current_marker,2);
}
}
if (opj_stream_get_number_byte_left(p_stream) == 0
&& p_j2k->m_specific_param.m_decoder.m_state == J2K_STATE_NEOC) {
break;
}
/* If we didn't skip data before, we need to read the SOD marker*/
if (! p_j2k->m_specific_param.m_decoder.m_skip_data) {
/* Try to read the SOD marker and skip data ? FIXME */
if (! opj_j2k_read_sod(p_j2k, p_stream, p_manager)) {
return OPJ_FALSE;
}
if (p_j2k->m_specific_param.m_decoder.m_can_decode && !p_j2k->m_specific_param.m_decoder.m_nb_tile_parts_correction_checked) {
/* Issue 254 */
OPJ_BOOL l_correction_needed;
p_j2k->m_specific_param.m_decoder.m_nb_tile_parts_correction_checked = 1;
if(!opj_j2k_need_nb_tile_parts_correction(p_stream, p_j2k->m_current_tile_number, &l_correction_needed, p_manager)) {
opj_event_msg(p_manager, EVT_ERROR, "opj_j2k_apply_nb_tile_parts_correction error\n");
return OPJ_FALSE;
}
if (l_correction_needed) {
OPJ_UINT32 l_nb_tiles = p_j2k->m_cp.tw * p_j2k->m_cp.th;
OPJ_UINT32 l_tile_no;
p_j2k->m_specific_param.m_decoder.m_can_decode = 0;
p_j2k->m_specific_param.m_decoder.m_nb_tile_parts_correction = 1;
/* correct tiles */
for (l_tile_no = 0U; l_tile_no < l_nb_tiles; ++l_tile_no) {
if (p_j2k->m_cp.tcps[l_tile_no].m_nb_tile_parts != 0U) {
p_j2k->m_cp.tcps[l_tile_no].m_nb_tile_parts+=1;
}
}
opj_event_msg(p_manager, EVT_WARNING, "Non conformant codestream TPsot==TNsot.\n");
}
}
if (! p_j2k->m_specific_param.m_decoder.m_can_decode){
/* Try to read 2 bytes (the next marker ID) from stream and copy them into the buffer */
if (opj_stream_read_data(p_stream,p_j2k->m_specific_param.m_decoder.m_header_data,2,p_manager) != 2) {
opj_event_msg(p_manager, EVT_ERROR, "Stream too short\n");
return OPJ_FALSE;
}
/* Read 2 bytes from buffer as the new marker ID */
opj_read_bytes(p_j2k->m_specific_param.m_decoder.m_header_data,&l_current_marker,2);
}
}
else {
/* Indicate we will try to read a new tile-part header*/
p_j2k->m_specific_param.m_decoder.m_skip_data = 0;
p_j2k->m_specific_param.m_decoder.m_can_decode = 0;
p_j2k->m_specific_param.m_decoder.m_state = J2K_STATE_TPHSOT;
/* Try to read 2 bytes (the next marker ID) from stream and copy them into the buffer */
if (opj_stream_read_data(p_stream,p_j2k->m_specific_param.m_decoder.m_header_data,2,p_manager) != 2) {
opj_event_msg(p_manager, EVT_ERROR, "Stream too short\n");
return OPJ_FALSE;
}
/* Read 2 bytes from buffer as the new marker ID */
opj_read_bytes(p_j2k->m_specific_param.m_decoder.m_header_data,&l_current_marker,2);
}
}
/* Current marker is the EOC marker ?*/
if (l_current_marker == J2K_MS_EOC) {
if (p_j2k->m_specific_param.m_decoder.m_state != J2K_STATE_EOC ){
p_j2k->m_current_tile_number = 0;
p_j2k->m_specific_param.m_decoder.m_state = J2K_STATE_EOC;
}
}
/* FIXME DOC ???*/
if ( ! p_j2k->m_specific_param.m_decoder.m_can_decode) {
OPJ_UINT32 l_nb_tiles = p_j2k->m_cp.th * p_j2k->m_cp.tw;
l_tcp = p_j2k->m_cp.tcps + p_j2k->m_current_tile_number;
while( (p_j2k->m_current_tile_number < l_nb_tiles) && (l_tcp->m_data == 00) ) {
++p_j2k->m_current_tile_number;
++l_tcp;
}
if (p_j2k->m_current_tile_number == l_nb_tiles) {
*p_go_on = OPJ_FALSE;
return OPJ_TRUE;
}
}
if (! opj_j2k_merge_ppt(p_j2k->m_cp.tcps + p_j2k->m_current_tile_number, p_manager)) {
opj_event_msg(p_manager, EVT_ERROR, "Failed to merge PPT data\n");
return OPJ_FALSE;
}
/*FIXME ???*/
if (! opj_tcd_init_decode_tile(p_j2k->m_tcd, p_j2k->m_current_tile_number, p_manager)) {
opj_event_msg(p_manager, EVT_ERROR, "Cannot decode tile, memory error\n");
return OPJ_FALSE;
}
opj_event_msg(p_manager, EVT_INFO, "Header of tile %d / %d has been read.\n",
p_j2k->m_current_tile_number+1, (p_j2k->m_cp.th * p_j2k->m_cp.tw));
*p_tile_index = p_j2k->m_current_tile_number;
*p_go_on = OPJ_TRUE;
*p_data_size = opj_tcd_get_decoded_tile_size(p_j2k->m_tcd);
*p_tile_x0 = p_j2k->m_tcd->tcd_image->tiles->x0;
*p_tile_y0 = p_j2k->m_tcd->tcd_image->tiles->y0;
*p_tile_x1 = p_j2k->m_tcd->tcd_image->tiles->x1;
*p_tile_y1 = p_j2k->m_tcd->tcd_image->tiles->y1;
*p_nb_comps = p_j2k->m_tcd->tcd_image->tiles->numcomps;
p_j2k->m_specific_param.m_decoder.m_state |= 0x0080;/* FIXME J2K_DEC_STATE_DATA;*/
return OPJ_TRUE;
}
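/**
 * Decodes the current tile, whose header was previously read by
 * opj_j2k_read_tile_header(). The tile-part data held in the tcp is consumed
 * and then released, so a tile can only be decoded once per header read.
 *
 * @param p_j2k the jpeg2000 codec.
 * @param p_tile_index the index of the tile to decode; must match the current tile number.
 * @param p_data the destination buffer for the decoded data.
 * @param p_data_size the size of the destination buffer, in bytes.
 * @param p_stream the stream to read from.
 * @param p_manager the user event manager.
 *
 * @return OPJ_TRUE on success.
 */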
OPJ_BOOL opj_j2k_decode_tile ( opj_j2k_t * p_j2k,
OPJ_UINT32 p_tile_index,
OPJ_BYTE * p_data,
OPJ_UINT32 p_data_size,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager )
{
OPJ_UINT32 l_current_marker;
OPJ_BYTE l_data [2];
opj_tcp_t * l_tcp;
/* preconditions */
assert(p_stream != 00);
assert(p_j2k != 00);
assert(p_manager != 00);
if ( !(p_j2k->m_specific_param.m_decoder.m_state & 0x0080/*FIXME J2K_DEC_STATE_DATA*/)
|| (p_tile_index != p_j2k->m_current_tile_number) ) {
return OPJ_FALSE;
}
l_tcp = &(p_j2k->m_cp.tcps[p_tile_index]);
if (! l_tcp->m_data) {
opj_j2k_tcp_destroy(l_tcp);
return OPJ_FALSE;
}
if (! opj_tcd_decode_tile( p_j2k->m_tcd,
l_tcp->m_data,
l_tcp->m_data_size,
p_tile_index,
p_j2k->cstr_index, p_manager) ) {
opj_j2k_tcp_destroy(l_tcp);
p_j2k->m_specific_param.m_decoder.m_state |= 0x8000;/*FIXME J2K_DEC_STATE_ERR;*/
opj_event_msg(p_manager, EVT_ERROR, "Failed to decode.\n");
return OPJ_FALSE;
}
if (! opj_tcd_update_tile_data(p_j2k->m_tcd,p_data,p_data_size)) {
return OPJ_FALSE;
}
/* To avoid destroying the tcp, which can still be useful when decoding a tile
 * that has already been decoded (cf. j2k_random_tile_access), we destroy only
 * the data, which will be re-read in read_tile_header */
/*opj_j2k_tcp_destroy(l_tcp);
p_j2k->m_tcd->tcp = 0;*/
opj_j2k_tcp_data_destroy(l_tcp);
p_j2k->m_specific_param.m_decoder.m_can_decode = 0;
p_j2k->m_specific_param.m_decoder.m_state &= (~ (0x0080u));/* FIXME J2K_DEC_STATE_DATA);*/
if(opj_stream_get_number_byte_left(p_stream) == 0
&& p_j2k->m_specific_param.m_decoder.m_state == J2K_STATE_NEOC){
return OPJ_TRUE;
}
if (p_j2k->m_specific_param.m_decoder.m_state != 0x0100){ /*FIXME J2K_DEC_STATE_EOC)*/
if (opj_stream_read_data(p_stream,l_data,2,p_manager) != 2) {
opj_event_msg(p_manager, EVT_ERROR, "Stream too short\n");
return OPJ_FALSE;
}
opj_read_bytes(l_data,&l_current_marker,2);
if (l_current_marker == J2K_MS_EOC) {
p_j2k->m_current_tile_number = 0;
p_j2k->m_specific_param.m_decoder.m_state = 0x0100;/*FIXME J2K_DEC_STATE_EOC;*/
}
else if (l_current_marker != J2K_MS_SOT)
{
if(opj_stream_get_number_byte_left(p_stream) == 0) {
p_j2k->m_specific_param.m_decoder.m_state = J2K_STATE_NEOC;
opj_event_msg(p_manager, EVT_WARNING, "Stream does not end with EOC\n");
return OPJ_TRUE;
}
opj_event_msg(p_manager, EVT_ERROR, "Stream too short, expected SOT\n");
return OPJ_FALSE;
}
}
return OPJ_TRUE;
}
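/**
 * Copies the decoded tile components from p_data into the matching components
 * of p_output_image, restricted to the intersection of the tile area and the
 * requested decode area. Source samples of 1, 2 or 4 bytes, signed or
 * unsigned, are widened to OPJ_INT32 in the destination buffer.
 *
 * @return OPJ_TRUE on success, OPJ_FALSE on allocation failure or inconsistent offsets.
 */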
static OPJ_BOOL opj_j2k_update_image_data (opj_tcd_t * p_tcd, OPJ_BYTE * p_data, opj_image_t* p_output_image)
{
OPJ_UINT32 i,j,k = 0;
OPJ_UINT32 l_width_src,l_height_src;
OPJ_UINT32 l_width_dest,l_height_dest;
OPJ_INT32 l_offset_x0_src, l_offset_y0_src, l_offset_x1_src, l_offset_y1_src;
OPJ_SIZE_T l_start_offset_src, l_line_offset_src, l_end_offset_src ;
OPJ_UINT32 l_start_x_dest , l_start_y_dest;
OPJ_UINT32 l_x0_dest, l_y0_dest, l_x1_dest, l_y1_dest;
OPJ_SIZE_T l_start_offset_dest, l_line_offset_dest;
opj_image_comp_t * l_img_comp_src = 00;
opj_image_comp_t * l_img_comp_dest = 00;
opj_tcd_tilecomp_t * l_tilec = 00;
opj_image_t * l_image_src = 00;
OPJ_UINT32 l_size_comp, l_remaining;
OPJ_INT32 * l_dest_ptr;
opj_tcd_resolution_t* l_res= 00;
l_tilec = p_tcd->tcd_image->tiles->comps;
l_image_src = p_tcd->image;
l_img_comp_src = l_image_src->comps;
l_img_comp_dest = p_output_image->comps;
for (i=0; i<l_image_src->numcomps; i++) {
/* Allocate output component buffer if necessary */
if (!l_img_comp_dest->data) {
l_img_comp_dest->data = (OPJ_INT32*) opj_calloc((OPJ_SIZE_T)l_img_comp_dest->w * (OPJ_SIZE_T)l_img_comp_dest->h, sizeof(OPJ_INT32));
if (! l_img_comp_dest->data) {
return OPJ_FALSE;
}
}
/* Copy info from decoded comp image to output image */
l_img_comp_dest->resno_decoded = l_img_comp_src->resno_decoded;
/*-----*/
/* Compute the precision of the output buffer */
l_size_comp = l_img_comp_src->prec >> 3; /*(/ 8)*/
l_remaining = l_img_comp_src->prec & 7; /* (%8) */
l_res = l_tilec->resolutions + l_img_comp_src->resno_decoded;
if (l_remaining) {
++l_size_comp;
}
if (l_size_comp == 3) {
l_size_comp = 4;
}
/*-----*/
/* Current tile component size*/
/*if (i == 0) {
fprintf(stdout, "SRC: l_res_x0=%d, l_res_x1=%d, l_res_y0=%d, l_res_y1=%d\n",
l_res->x0, l_res->x1, l_res->y0, l_res->y1);
}*/
l_width_src = (OPJ_UINT32)(l_res->x1 - l_res->x0);
l_height_src = (OPJ_UINT32)(l_res->y1 - l_res->y0);
/* Border of the current output component*/
l_x0_dest = opj_uint_ceildivpow2(l_img_comp_dest->x0, l_img_comp_dest->factor);
l_y0_dest = opj_uint_ceildivpow2(l_img_comp_dest->y0, l_img_comp_dest->factor);
l_x1_dest = l_x0_dest + l_img_comp_dest->w; /* can't overflow given that image->x1 is uint32 */
l_y1_dest = l_y0_dest + l_img_comp_dest->h;
/*if (i == 0) {
fprintf(stdout, "DEST: l_x0_dest=%d, l_x1_dest=%d, l_y0_dest=%d, l_y1_dest=%d (%d)\n",
l_x0_dest, l_x1_dest, l_y0_dest, l_y1_dest, l_img_comp_dest->factor );
}*/
/*-----*/
/* Compute the area (l_offset_x0_src, l_offset_y0_src, l_offset_x1_src, l_offset_y1_src)
* of the input buffer (decoded tile component) which will be moved
* into the output buffer. Compute the area of the output buffer (l_start_x_dest,
* l_start_y_dest, l_width_dest, l_height_dest) which will be modified
* by this input area.
*/
assert( l_res->x0 >= 0);
assert( l_res->x1 >= 0);
if ( l_x0_dest < (OPJ_UINT32)l_res->x0 ) {
l_start_x_dest = (OPJ_UINT32)l_res->x0 - l_x0_dest;
l_offset_x0_src = 0;
if ( l_x1_dest >= (OPJ_UINT32)l_res->x1 ) {
l_width_dest = l_width_src;
l_offset_x1_src = 0;
}
else {
l_width_dest = l_x1_dest - (OPJ_UINT32)l_res->x0 ;
l_offset_x1_src = (OPJ_INT32)(l_width_src - l_width_dest);
}
}
else {
l_start_x_dest = 0U;
l_offset_x0_src = (OPJ_INT32)l_x0_dest - l_res->x0;
if ( l_x1_dest >= (OPJ_UINT32)l_res->x1 ) {
l_width_dest = l_width_src - (OPJ_UINT32)l_offset_x0_src;
l_offset_x1_src = 0;
}
else {
l_width_dest = l_img_comp_dest->w ;
l_offset_x1_src = l_res->x1 - (OPJ_INT32)l_x1_dest;
}
}
if ( l_y0_dest < (OPJ_UINT32)l_res->y0 ) {
l_start_y_dest = (OPJ_UINT32)l_res->y0 - l_y0_dest;
l_offset_y0_src = 0;
if ( l_y1_dest >= (OPJ_UINT32)l_res->y1 ) {
l_height_dest = l_height_src;
l_offset_y1_src = 0;
}
else {
l_height_dest = l_y1_dest - (OPJ_UINT32)l_res->y0 ;
l_offset_y1_src = (OPJ_INT32)(l_height_src - l_height_dest);
}
}
else {
l_start_y_dest = 0U;
l_offset_y0_src = (OPJ_INT32)l_y0_dest - l_res->y0;
if ( l_y1_dest >= (OPJ_UINT32)l_res->y1 ) {
l_height_dest = l_height_src - (OPJ_UINT32)l_offset_y0_src;
l_offset_y1_src = 0;
}
else {
l_height_dest = l_img_comp_dest->h ;
l_offset_y1_src = l_res->y1 - (OPJ_INT32)l_y1_dest;
}
}
if( (l_offset_x0_src < 0 ) || (l_offset_y0_src < 0 ) || (l_offset_x1_src < 0 ) || (l_offset_y1_src < 0 ) ){
return OPJ_FALSE;
}
/* testcase 2977.pdf.asan.67.2198 */
if ((OPJ_INT32)l_width_dest < 0 || (OPJ_INT32)l_height_dest < 0) {
return OPJ_FALSE;
}
/*-----*/
/* Compute the input buffer offset */
l_start_offset_src = (OPJ_SIZE_T)l_offset_x0_src + (OPJ_SIZE_T)l_offset_y0_src * (OPJ_SIZE_T)l_width_src;
l_line_offset_src = (OPJ_SIZE_T)l_offset_x1_src + (OPJ_SIZE_T)l_offset_x0_src;
l_end_offset_src = (OPJ_SIZE_T)l_offset_y1_src * (OPJ_SIZE_T)l_width_src - (OPJ_SIZE_T)l_offset_x0_src;
/* Compute the output buffer offset */
l_start_offset_dest = (OPJ_SIZE_T)l_start_x_dest + (OPJ_SIZE_T)l_start_y_dest * (OPJ_SIZE_T)l_img_comp_dest->w;
l_line_offset_dest = (OPJ_SIZE_T)l_img_comp_dest->w - (OPJ_SIZE_T)l_width_dest;
/* Move the output buffer to the first place where we will write*/
l_dest_ptr = l_img_comp_dest->data + l_start_offset_dest;
/*if (i == 0) {
fprintf(stdout, "COMPO[%d]:\n",i);
fprintf(stdout, "SRC: l_start_x_src=%d, l_start_y_src=%d, l_width_src=%d, l_height_src=%d\n"
"\t tile offset:%d, %d, %d, %d\n"
"\t buffer offset: %d; %d, %d\n",
l_res->x0, l_res->y0, l_width_src, l_height_src,
l_offset_x0_src, l_offset_y0_src, l_offset_x1_src, l_offset_y1_src,
l_start_offset_src, l_line_offset_src, l_end_offset_src);
fprintf(stdout, "DEST: l_start_x_dest=%d, l_start_y_dest=%d, l_width_dest=%d, l_height_dest=%d\n"
"\t start offset: %d, line offset= %d\n",
l_start_x_dest, l_start_y_dest, l_width_dest, l_height_dest, l_start_offset_dest, l_line_offset_dest);
}*/
switch (l_size_comp) {
case 1:
{
OPJ_CHAR * l_src_ptr = (OPJ_CHAR*) p_data;
l_src_ptr += l_start_offset_src; /* Move to the first place where we will read*/
if (l_img_comp_src->sgnd) {
for (j = 0 ; j < l_height_dest ; ++j) {
for ( k = 0 ; k < l_width_dest ; ++k) {
*(l_dest_ptr++) = (OPJ_INT32) (*(l_src_ptr++)); /* Copy only the data needed for the output image */
}
l_dest_ptr+= l_line_offset_dest; /* Move to the next place where we will write */
l_src_ptr += l_line_offset_src ; /* Move to the next place where we will read */
}
}
else {
for ( j = 0 ; j < l_height_dest ; ++j ) {
for ( k = 0 ; k < l_width_dest ; ++k) {
*(l_dest_ptr++) = (OPJ_INT32) ((*(l_src_ptr++))&0xff);
}
l_dest_ptr+= l_line_offset_dest;
l_src_ptr += l_line_offset_src;
}
}
l_src_ptr += l_end_offset_src; /* Move to the end of this component-part of the input buffer */
p_data = (OPJ_BYTE*) l_src_ptr; /* Keep the current position for the next component-part */
}
break;
case 2:
{
OPJ_INT16 * l_src_ptr = (OPJ_INT16 *) p_data;
l_src_ptr += l_start_offset_src;
if (l_img_comp_src->sgnd) {
for (j=0;j<l_height_dest;++j) {
for (k=0;k<l_width_dest;++k) {
*(l_dest_ptr++) = *(l_src_ptr++);
}
l_dest_ptr+= l_line_offset_dest;
l_src_ptr += l_line_offset_src ;
}
}
else {
for (j=0;j<l_height_dest;++j) {
for (k=0;k<l_width_dest;++k) {
*(l_dest_ptr++) = (*(l_src_ptr++))&0xffff;
}
l_dest_ptr+= l_line_offset_dest;
l_src_ptr += l_line_offset_src ;
}
}
l_src_ptr += l_end_offset_src;
p_data = (OPJ_BYTE*) l_src_ptr;
}
break;
case 4:
{
OPJ_INT32 * l_src_ptr = (OPJ_INT32 *) p_data;
l_src_ptr += l_start_offset_src;
for (j=0;j<l_height_dest;++j) {
for (k=0;k<l_width_dest;++k) {
*(l_dest_ptr++) = (*(l_src_ptr++));
}
l_dest_ptr+= l_line_offset_dest;
l_src_ptr += l_line_offset_src ;
}
l_src_ptr += l_end_offset_src;
p_data = (OPJ_BYTE*) l_src_ptr;
}
break;
}
++l_img_comp_dest;
++l_img_comp_src;
++l_tilec;
}
return OPJ_TRUE;
}
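/**
 * Sets the area to be decoded, in image coordinates. The main header must have
 * been read first. Coordinates outside the image area are clamped with a
 * warning; the tile range of the decoder and the per-component windows of
 * p_image are updated accordingly.
 *
 * @return OPJ_TRUE on success.
 */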
OPJ_BOOL opj_j2k_set_decode_area( opj_j2k_t *p_j2k,
opj_image_t* p_image,
OPJ_INT32 p_start_x, OPJ_INT32 p_start_y,
OPJ_INT32 p_end_x, OPJ_INT32 p_end_y,
opj_event_mgr_t * p_manager )
{
opj_cp_t * l_cp = &(p_j2k->m_cp);
opj_image_t * l_image = p_j2k->m_private_image;
OPJ_UINT32 it_comp;
OPJ_INT32 l_comp_x1, l_comp_y1;
opj_image_comp_t* l_img_comp = NULL;
/* Check that the main header has been read */
if (p_j2k->m_specific_param.m_decoder.m_state != J2K_STATE_TPHSOT) { /* FIXME J2K_DEC_STATE_TPHSOT)*/
opj_event_msg(p_manager, EVT_ERROR, "Need to decode the main header before begin to decode the remaining codestream");
return OPJ_FALSE;
}
if ( !p_start_x && !p_start_y && !p_end_x && !p_end_y){
opj_event_msg(p_manager, EVT_INFO, "No decoded area parameters, set the decoded area to the whole image\n");
p_j2k->m_specific_param.m_decoder.m_start_tile_x = 0;
p_j2k->m_specific_param.m_decoder.m_start_tile_y = 0;
p_j2k->m_specific_param.m_decoder.m_end_tile_x = l_cp->tw;
p_j2k->m_specific_param.m_decoder.m_end_tile_y = l_cp->th;
return OPJ_TRUE;
}
/* ----- */
/* Check if the positions provided by the user are correct */
/* Left */
assert(p_start_x >= 0 );
assert(p_start_y >= 0 );
if ((OPJ_UINT32)p_start_x > l_image->x1 ) {
opj_event_msg(p_manager, EVT_ERROR,
"Left position of the decoded area (region_x0=%d) is outside the image area (Xsiz=%d).\n",
p_start_x, l_image->x1);
return OPJ_FALSE;
}
else if ((OPJ_UINT32)p_start_x < l_image->x0){
opj_event_msg(p_manager, EVT_WARNING,
"Left position of the decoded area (region_x0=%d) is outside the image area (XOsiz=%d).\n",
p_start_x, l_image->x0);
p_j2k->m_specific_param.m_decoder.m_start_tile_x = 0;
p_image->x0 = l_image->x0;
}
else {
p_j2k->m_specific_param.m_decoder.m_start_tile_x = ((OPJ_UINT32)p_start_x - l_cp->tx0) / l_cp->tdx;
p_image->x0 = (OPJ_UINT32)p_start_x;
}
/* Up */
if ((OPJ_UINT32)p_start_y > l_image->y1){
opj_event_msg(p_manager, EVT_ERROR,
"Up position of the decoded area (region_y0=%d) is outside the image area (Ysiz=%d).\n",
p_start_y, l_image->y1);
return OPJ_FALSE;
}
else if ((OPJ_UINT32)p_start_y < l_image->y0){
opj_event_msg(p_manager, EVT_WARNING,
"Up position of the decoded area (region_y0=%d) is outside the image area (YOsiz=%d).\n",
p_start_y, l_image->y0);
p_j2k->m_specific_param.m_decoder.m_start_tile_y = 0;
p_image->y0 = l_image->y0;
}
else {
p_j2k->m_specific_param.m_decoder.m_start_tile_y = ((OPJ_UINT32)p_start_y - l_cp->ty0) / l_cp->tdy;
p_image->y0 = (OPJ_UINT32)p_start_y;
}
/* Right */
assert((OPJ_UINT32)p_end_x > 0);
assert((OPJ_UINT32)p_end_y > 0);
if ((OPJ_UINT32)p_end_x < l_image->x0) {
opj_event_msg(p_manager, EVT_ERROR,
"Right position of the decoded area (region_x1=%d) is outside the image area (XOsiz=%d).\n",
p_end_x, l_image->x0);
return OPJ_FALSE;
}
else if ((OPJ_UINT32)p_end_x > l_image->x1) {
opj_event_msg(p_manager, EVT_WARNING,
"Right position of the decoded area (region_x1=%d) is outside the image area (Xsiz=%d).\n",
p_end_x, l_image->x1);
p_j2k->m_specific_param.m_decoder.m_end_tile_x = l_cp->tw;
p_image->x1 = l_image->x1;
}
else {
p_j2k->m_specific_param.m_decoder.m_end_tile_x = (OPJ_UINT32)opj_int_ceildiv(p_end_x - (OPJ_INT32)l_cp->tx0, (OPJ_INT32)l_cp->tdx);
p_image->x1 = (OPJ_UINT32)p_end_x;
}
/* Bottom */
if ((OPJ_UINT32)p_end_y < l_image->y0) {
opj_event_msg(p_manager, EVT_ERROR,
"Bottom position of the decoded area (region_y1=%d) is outside the image area (YOsiz=%d).\n",
p_end_y, l_image->y0);
return OPJ_FALSE;
}
if ((OPJ_UINT32)p_end_y > l_image->y1){
opj_event_msg(p_manager, EVT_WARNING,
"Bottom position of the decoded area (region_y1=%d) is outside the image area (Ysiz=%d).\n",
p_end_y, l_image->y1);
p_j2k->m_specific_param.m_decoder.m_end_tile_y = l_cp->th;
p_image->y1 = l_image->y1;
}
else{
p_j2k->m_specific_param.m_decoder.m_end_tile_y = (OPJ_UINT32)opj_int_ceildiv(p_end_y - (OPJ_INT32)l_cp->ty0, (OPJ_INT32)l_cp->tdy);
p_image->y1 = (OPJ_UINT32)p_end_y;
}
/* ----- */
p_j2k->m_specific_param.m_decoder.m_discard_tiles = 1;
l_img_comp = p_image->comps;
for (it_comp=0; it_comp < p_image->numcomps; ++it_comp)
{
OPJ_INT32 l_h,l_w;
l_img_comp->x0 = (OPJ_UINT32)opj_int_ceildiv((OPJ_INT32)p_image->x0, (OPJ_INT32)l_img_comp->dx);
l_img_comp->y0 = (OPJ_UINT32)opj_int_ceildiv((OPJ_INT32)p_image->y0, (OPJ_INT32)l_img_comp->dy);
l_comp_x1 = opj_int_ceildiv((OPJ_INT32)p_image->x1, (OPJ_INT32)l_img_comp->dx);
l_comp_y1 = opj_int_ceildiv((OPJ_INT32)p_image->y1, (OPJ_INT32)l_img_comp->dy);
l_w = opj_int_ceildivpow2(l_comp_x1, (OPJ_INT32)l_img_comp->factor)
- opj_int_ceildivpow2((OPJ_INT32)l_img_comp->x0, (OPJ_INT32)l_img_comp->factor);
if (l_w < 0){
opj_event_msg(p_manager, EVT_ERROR,
"Size x of the decoded component image is incorrect (comp[%d].w=%d).\n",
it_comp, l_w);
return OPJ_FALSE;
}
l_img_comp->w = (OPJ_UINT32)l_w;
l_h = opj_int_ceildivpow2(l_comp_y1, (OPJ_INT32)l_img_comp->factor)
- opj_int_ceildivpow2((OPJ_INT32)l_img_comp->y0, (OPJ_INT32)l_img_comp->factor);
if (l_h < 0){
opj_event_msg(p_manager, EVT_ERROR,
"Size y of the decoded component image is incorrect (comp[%d].h=%d).\n",
it_comp, l_h);
return OPJ_FALSE;
}
l_img_comp->h = (OPJ_UINT32)l_h;
l_img_comp++;
}
opj_event_msg( p_manager, EVT_INFO,"Setting decoding area to %d,%d,%d,%d\n",
p_image->x0, p_image->y0, p_image->x1, p_image->y1);
return OPJ_TRUE;
}
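/**
 * Creates a J2K decompressor with its default decoder state: a default
 * tile-part coding parameter set, a header scratch buffer of
 * OPJ_J2K_DEFAULT_HEADER_SIZE bytes, a codestream index and empty validation
 * and procedure lists.
 *
 * @return the new codec, to be released with opj_j2k_destroy(), or 00 on failure.
 */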
opj_j2k_t* opj_j2k_create_decompress(void)
{
opj_j2k_t *l_j2k = (opj_j2k_t*) opj_calloc(1,sizeof(opj_j2k_t));
if (!l_j2k) {
return 00;
}
l_j2k->m_is_decoder = 1;
l_j2k->m_cp.m_is_decoder = 1;
#ifdef OPJ_DISABLE_TPSOT_FIX
l_j2k->m_specific_param.m_decoder.m_nb_tile_parts_correction_checked = 1;
#endif
l_j2k->m_specific_param.m_decoder.m_default_tcp = (opj_tcp_t*) opj_calloc(1,sizeof(opj_tcp_t));
if (!l_j2k->m_specific_param.m_decoder.m_default_tcp) {
opj_j2k_destroy(l_j2k);
return 00;
}
l_j2k->m_specific_param.m_decoder.m_header_data = (OPJ_BYTE *) opj_calloc(1,OPJ_J2K_DEFAULT_HEADER_SIZE);
if (! l_j2k->m_specific_param.m_decoder.m_header_data) {
opj_j2k_destroy(l_j2k);
return 00;
}
l_j2k->m_specific_param.m_decoder.m_header_data_size = OPJ_J2K_DEFAULT_HEADER_SIZE;
l_j2k->m_specific_param.m_decoder.m_tile_ind_to_dec = -1 ;
l_j2k->m_specific_param.m_decoder.m_last_sot_read_pos = 0 ;
/* codestream index creation */
l_j2k->cstr_index = opj_j2k_create_cstr_index();
if (!l_j2k->cstr_index){
opj_j2k_destroy(l_j2k);
return 00;
}
/* validation list creation */
l_j2k->m_validation_list = opj_procedure_list_create();
if (! l_j2k->m_validation_list) {
opj_j2k_destroy(l_j2k);
return 00;
}
/* execution list creation */
l_j2k->m_procedure_list = opj_procedure_list_create();
if (! l_j2k->m_procedure_list) {
opj_j2k_destroy(l_j2k);
return 00;
}
return l_j2k;
}
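/**
 * Allocates a codestream index with room for 100 markers and no tile index yet.
 *
 * @return the new index, or NULL on allocation failure.
 */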
static opj_codestream_index_t* opj_j2k_create_cstr_index(void)
{
opj_codestream_index_t* cstr_index = (opj_codestream_index_t*)
opj_calloc(1,sizeof(opj_codestream_index_t));
if (!cstr_index)
return NULL;
cstr_index->maxmarknum = 100;
cstr_index->marknum = 0;
cstr_index->marker = (opj_marker_info_t*)
opj_calloc(cstr_index->maxmarknum, sizeof(opj_marker_info_t));
if (!cstr_index->marker) {
opj_free(cstr_index);
return NULL;
}
cstr_index->tile_index = NULL;
return cstr_index;
}
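/**
 * Gets the size, in bytes, of the SPCod/SPCoc element of the given tile
 * component: 5 fixed bytes, plus one precinct-size byte per resolution when
 * custom precincts are used.
 */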
static OPJ_UINT32 opj_j2k_get_SPCod_SPCoc_size ( opj_j2k_t *p_j2k,
OPJ_UINT32 p_tile_no,
OPJ_UINT32 p_comp_no )
{
opj_cp_t *l_cp = 00;
opj_tcp_t *l_tcp = 00;
opj_tccp_t *l_tccp = 00;
/* preconditions */
assert(p_j2k != 00);
l_cp = &(p_j2k->m_cp);
l_tcp = &l_cp->tcps[p_tile_no];
l_tccp = &l_tcp->tccps[p_comp_no];
/* preconditions again */
assert(p_tile_no < (l_cp->tw * l_cp->th));
assert(p_comp_no < p_j2k->m_private_image->numcomps);
if (l_tccp->csty & J2K_CCP_CSTY_PRT) {
return 5 + l_tccp->numresolutions;
}
else {
return 5;
}
}
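/**
 * Writes the SPCod/SPCoc element of the given tile component into p_data and
 * decrements *p_header_size by the number of bytes written.
 *
 * @return OPJ_TRUE on success, OPJ_FALSE if the remaining header room is too small.
 */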
static OPJ_BOOL opj_j2k_write_SPCod_SPCoc( opj_j2k_t *p_j2k,
OPJ_UINT32 p_tile_no,
OPJ_UINT32 p_comp_no,
OPJ_BYTE * p_data,
OPJ_UINT32 * p_header_size,
struct opj_event_mgr * p_manager )
{
OPJ_UINT32 i;
opj_cp_t *l_cp = 00;
opj_tcp_t *l_tcp = 00;
opj_tccp_t *l_tccp = 00;
/* preconditions */
assert(p_j2k != 00);
assert(p_header_size != 00);
assert(p_manager != 00);
assert(p_data != 00);
l_cp = &(p_j2k->m_cp);
l_tcp = &l_cp->tcps[p_tile_no];
l_tccp = &l_tcp->tccps[p_comp_no];
/* preconditions again */
assert(p_tile_no < (l_cp->tw * l_cp->th));
assert(p_comp_no <(p_j2k->m_private_image->numcomps));
if (*p_header_size < 5) {
opj_event_msg(p_manager, EVT_ERROR, "Error writing SPCod SPCoc element\n");
return OPJ_FALSE;
}
opj_write_bytes(p_data,l_tccp->numresolutions - 1, 1); /* SPcoc (D) */
++p_data;
opj_write_bytes(p_data,l_tccp->cblkw - 2, 1); /* SPcoc (E) */
++p_data;
opj_write_bytes(p_data,l_tccp->cblkh - 2, 1); /* SPcoc (F) */
++p_data;
opj_write_bytes(p_data,l_tccp->cblksty, 1); /* SPcoc (G) */
++p_data;
opj_write_bytes(p_data,l_tccp->qmfbid, 1); /* SPcoc (H) */
++p_data;
*p_header_size = *p_header_size - 5;
if (l_tccp->csty & J2K_CCP_CSTY_PRT) {
if (*p_header_size < l_tccp->numresolutions) {
opj_event_msg(p_manager, EVT_ERROR, "Error writing SPCod SPCoc element\n");
return OPJ_FALSE;
}
for (i = 0; i < l_tccp->numresolutions; ++i) {
opj_write_bytes(p_data,l_tccp->prcw[i] + (l_tccp->prch[i] << 4), 1); /* SPcoc (I_i) */
++p_data;
}
*p_header_size = *p_header_size - l_tccp->numresolutions;
}
return OPJ_TRUE;
}
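/**
 * Reads the SPCod/SPCoc element of a COD or COC marker segment and fills in
 * the coding parameters of the tile component (number of resolutions,
 * code-block size and style, wavelet filter, precinct sizes). Values are
 * validated against OPJ_J2K_MAXRLVLS, the code-block size limits and the
 * precinct-size rules of Table A.21.
 *
 * @return OPJ_TRUE on success.
 */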
static OPJ_BOOL opj_j2k_read_SPCod_SPCoc( opj_j2k_t *p_j2k,
OPJ_UINT32 compno,
OPJ_BYTE * p_header_data,
OPJ_UINT32 * p_header_size,
opj_event_mgr_t * p_manager)
{
OPJ_UINT32 i, l_tmp;
opj_cp_t *l_cp = NULL;
opj_tcp_t *l_tcp = NULL;
opj_tccp_t *l_tccp = NULL;
OPJ_BYTE * l_current_ptr = NULL;
/* preconditions */
assert(p_j2k != 00);
assert(p_manager != 00);
assert(p_header_data != 00);
l_cp = &(p_j2k->m_cp);
l_tcp = (p_j2k->m_specific_param.m_decoder.m_state == J2K_STATE_TPH) ?
&l_cp->tcps[p_j2k->m_current_tile_number] :
p_j2k->m_specific_param.m_decoder.m_default_tcp;
/* precondition again */
assert(compno < p_j2k->m_private_image->numcomps);
l_tccp = &l_tcp->tccps[compno];
l_current_ptr = p_header_data;
/* make sure room is sufficient */
if (*p_header_size < 5) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading SPCod SPCoc element\n");
return OPJ_FALSE;
}
opj_read_bytes(l_current_ptr, &l_tccp->numresolutions ,1); /* SPcox (D) */
++l_tccp->numresolutions; /* tccp->numresolutions = read() + 1 */
if (l_tccp->numresolutions > OPJ_J2K_MAXRLVLS) {
opj_event_msg(p_manager, EVT_ERROR,
"Invalid value for numresolutions : %d, max value is set in openjpeg.h at %d\n",
l_tccp->numresolutions, OPJ_J2K_MAXRLVLS);
return OPJ_FALSE;
}
++l_current_ptr;
/* If user wants to remove more resolutions than the codestream contains, return error */
if (l_cp->m_specific_param.m_dec.m_reduce >= l_tccp->numresolutions) {
opj_event_msg(p_manager, EVT_ERROR, "Error decoding component %d.\nThe number of resolutions to remove is higher than the number "
"of resolutions of this component\nModify the cp_reduce parameter.\n\n", compno);
p_j2k->m_specific_param.m_decoder.m_state |= 0x8000;/* FIXME J2K_DEC_STATE_ERR;*/
return OPJ_FALSE;
}
opj_read_bytes(l_current_ptr,&l_tccp->cblkw ,1); /* SPcoc (E) */
++l_current_ptr;
l_tccp->cblkw += 2;
opj_read_bytes(l_current_ptr,&l_tccp->cblkh ,1); /* SPcoc (F) */
++l_current_ptr;
l_tccp->cblkh += 2;
if ((l_tccp->cblkw > 10) || (l_tccp->cblkh > 10) || ((l_tccp->cblkw + l_tccp->cblkh) > 12)) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading SPCod SPCoc element, Invalid cblkw/cblkh combination\n");
return OPJ_FALSE;
}
opj_read_bytes(l_current_ptr,&l_tccp->cblksty ,1); /* SPcoc (G) */
++l_current_ptr;
opj_read_bytes(l_current_ptr,&l_tccp->qmfbid ,1); /* SPcoc (H) */
++l_current_ptr;
*p_header_size = *p_header_size - 5;
/* use custom precinct size ? */
if (l_tccp->csty & J2K_CCP_CSTY_PRT) {
if (*p_header_size < l_tccp->numresolutions) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading SPCod SPCoc element\n");
return OPJ_FALSE;
}
for (i = 0; i < l_tccp->numresolutions; ++i) {
opj_read_bytes(l_current_ptr,&l_tmp ,1); /* SPcoc (I_i) */
++l_current_ptr;
/* Precinct exponent 0 is only allowed for the lowest resolution level (Table A.21) */
if ((i != 0) && (((l_tmp & 0xf) == 0) || ((l_tmp >> 4) == 0))) {
opj_event_msg(p_manager, EVT_ERROR, "Invalid precinct size\n");
return OPJ_FALSE;
}
l_tccp->prcw[i] = l_tmp & 0xf;
l_tccp->prch[i] = l_tmp >> 4;
}
*p_header_size = *p_header_size - l_tccp->numresolutions;
}
else {
/* set default size for the precinct width and height */
for (i = 0; i < l_tccp->numresolutions; ++i) {
l_tccp->prcw[i] = 15;
l_tccp->prch[i] = 15;
}
}
#ifdef WIP_REMOVE_MSD
/* INDEX >> */
if (p_j2k->cstr_info && compno == 0) {
OPJ_UINT32 l_data_size = l_tccp->numresolutions * sizeof(OPJ_UINT32);
p_j2k->cstr_info->tile[p_j2k->m_current_tile_number].tccp_info[compno].cblkh = l_tccp->cblkh;
p_j2k->cstr_info->tile[p_j2k->m_current_tile_number].tccp_info[compno].cblkw = l_tccp->cblkw;
p_j2k->cstr_info->tile[p_j2k->m_current_tile_number].tccp_info[compno].numresolutions = l_tccp->numresolutions;
p_j2k->cstr_info->tile[p_j2k->m_current_tile_number].tccp_info[compno].cblksty = l_tccp->cblksty;
p_j2k->cstr_info->tile[p_j2k->m_current_tile_number].tccp_info[compno].qmfbid = l_tccp->qmfbid;
memcpy(p_j2k->cstr_info->tile[p_j2k->m_current_tile_number].pdx,l_tccp->prcw, l_data_size);
memcpy(p_j2k->cstr_info->tile[p_j2k->m_current_tile_number].pdy,l_tccp->prch, l_data_size);
}
/* << INDEX */
#endif
return OPJ_TRUE;
}
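/**
 * Copies the coding parameters of the first tile component to all the other
 * components of the current tile (or of the default tile when reading the
 * main header).
 */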
static void opj_j2k_copy_tile_component_parameters( opj_j2k_t *p_j2k )
{
/* loop */
OPJ_UINT32 i;
opj_cp_t *l_cp = NULL;
opj_tcp_t *l_tcp = NULL;
opj_tccp_t *l_ref_tccp = NULL, *l_copied_tccp = NULL;
OPJ_UINT32 l_prc_size;
/* preconditions */
assert(p_j2k != 00);
l_cp = &(p_j2k->m_cp);
l_tcp = (p_j2k->m_specific_param.m_decoder.m_state == J2K_STATE_TPH) ? /* FIXME J2K_DEC_STATE_TPH*/
&l_cp->tcps[p_j2k->m_current_tile_number] :
p_j2k->m_specific_param.m_decoder.m_default_tcp;
l_ref_tccp = &l_tcp->tccps[0];
l_copied_tccp = l_ref_tccp + 1;
l_prc_size = l_ref_tccp->numresolutions * (OPJ_UINT32)sizeof(OPJ_UINT32);
for (i=1; i<p_j2k->m_private_image->numcomps; ++i) {
l_copied_tccp->numresolutions = l_ref_tccp->numresolutions;
l_copied_tccp->cblkw = l_ref_tccp->cblkw;
l_copied_tccp->cblkh = l_ref_tccp->cblkh;
l_copied_tccp->cblksty = l_ref_tccp->cblksty;
l_copied_tccp->qmfbid = l_ref_tccp->qmfbid;
memcpy(l_copied_tccp->prcw,l_ref_tccp->prcw,l_prc_size);
memcpy(l_copied_tccp->prch,l_ref_tccp->prch,l_prc_size);
++l_copied_tccp;
}
}
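/**
 * Gets the size, in bytes, of the SQcd/SQcc element of the given tile
 * component: one Sqcx byte, plus one byte per band without quantization or
 * two bytes per band otherwise.
 */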
static OPJ_UINT32 opj_j2k_get_SQcd_SQcc_size ( opj_j2k_t *p_j2k,
OPJ_UINT32 p_tile_no,
OPJ_UINT32 p_comp_no )
{
OPJ_UINT32 l_num_bands;
opj_cp_t *l_cp = 00;
opj_tcp_t *l_tcp = 00;
opj_tccp_t *l_tccp = 00;
/* preconditions */
assert(p_j2k != 00);
l_cp = &(p_j2k->m_cp);
l_tcp = &l_cp->tcps[p_tile_no];
l_tccp = &l_tcp->tccps[p_comp_no];
/* preconditions again */
assert(p_tile_no < l_cp->tw * l_cp->th);
assert(p_comp_no < p_j2k->m_private_image->numcomps);
l_num_bands = (l_tccp->qntsty == J2K_CCP_QNTSTY_SIQNT) ? 1 : (l_tccp->numresolutions * 3 - 2);
if (l_tccp->qntsty == J2K_CCP_QNTSTY_NOQNT) {
return 1 + l_num_bands;
}
else {
return 1 + 2*l_num_bands;
}
}
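/**
 * Writes the SQcd/SQcc element (quantization style, guard bits and step sizes)
 * of the given tile component into p_data and decrements *p_header_size.
 *
 * @return OPJ_TRUE on success, OPJ_FALSE if the remaining header room is too small.
 */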
static OPJ_BOOL opj_j2k_write_SQcd_SQcc( opj_j2k_t *p_j2k,
OPJ_UINT32 p_tile_no,
OPJ_UINT32 p_comp_no,
OPJ_BYTE * p_data,
OPJ_UINT32 * p_header_size,
struct opj_event_mgr * p_manager )
{
OPJ_UINT32 l_header_size;
OPJ_UINT32 l_band_no, l_num_bands;
OPJ_UINT32 l_expn,l_mant;
opj_cp_t *l_cp = 00;
opj_tcp_t *l_tcp = 00;
opj_tccp_t *l_tccp = 00;
/* preconditions */
assert(p_j2k != 00);
assert(p_header_size != 00);
assert(p_manager != 00);
assert(p_data != 00);
l_cp = &(p_j2k->m_cp);
l_tcp = &l_cp->tcps[p_tile_no];
l_tccp = &l_tcp->tccps[p_comp_no];
/* preconditions again */
assert(p_tile_no < l_cp->tw * l_cp->th);
assert(p_comp_no <p_j2k->m_private_image->numcomps);
l_num_bands = (l_tccp->qntsty == J2K_CCP_QNTSTY_SIQNT) ? 1 : (l_tccp->numresolutions * 3 - 2);
if (l_tccp->qntsty == J2K_CCP_QNTSTY_NOQNT) {
l_header_size = 1 + l_num_bands;
if (*p_header_size < l_header_size) {
opj_event_msg(p_manager, EVT_ERROR, "Error writing SQcd SQcc element\n");
return OPJ_FALSE;
}
opj_write_bytes(p_data,l_tccp->qntsty + (l_tccp->numgbits << 5), 1); /* Sqcx */
++p_data;
for (l_band_no = 0; l_band_no < l_num_bands; ++l_band_no) {
l_expn = (OPJ_UINT32)l_tccp->stepsizes[l_band_no].expn;
opj_write_bytes(p_data, l_expn << 3, 1); /* SPqcx_i */
++p_data;
}
}
else {
l_header_size = 1 + 2*l_num_bands;
if (*p_header_size < l_header_size) {
opj_event_msg(p_manager, EVT_ERROR, "Error writing SQcd SQcc element\n");
return OPJ_FALSE;
}
opj_write_bytes(p_data,l_tccp->qntsty + (l_tccp->numgbits << 5), 1); /* Sqcx */
++p_data;
for (l_band_no = 0; l_band_no < l_num_bands; ++l_band_no) {
l_expn = (OPJ_UINT32)l_tccp->stepsizes[l_band_no].expn;
l_mant = (OPJ_UINT32)l_tccp->stepsizes[l_band_no].mant;
opj_write_bytes(p_data, (l_expn << 11) + l_mant, 2); /* SPqcx_i */
p_data += 2;
}
}
*p_header_size = *p_header_size - l_header_size;
return OPJ_TRUE;
}
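/**
 * Reads the SQcd/SQcc element of a QCD or QCC marker segment and fills in the
 * quantization parameters of the tile component. With the scalar-derived
 * style, the step sizes of all bands are derived from the one of band 0.
 *
 * @return OPJ_TRUE on success.
 */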
static OPJ_BOOL opj_j2k_read_SQcd_SQcc(opj_j2k_t *p_j2k,
OPJ_UINT32 p_comp_no,
OPJ_BYTE* p_header_data,
OPJ_UINT32 * p_header_size,
opj_event_mgr_t * p_manager
)
{
/* loop*/
OPJ_UINT32 l_band_no;
opj_cp_t *l_cp = 00;
opj_tcp_t *l_tcp = 00;
opj_tccp_t *l_tccp = 00;
OPJ_BYTE * l_current_ptr = 00;
OPJ_UINT32 l_tmp, l_num_band;
/* preconditions*/
assert(p_j2k != 00);
assert(p_manager != 00);
assert(p_header_data != 00);
l_cp = &(p_j2k->m_cp);
/* do we come from a tile-part header or from the main header? */
l_tcp = (p_j2k->m_specific_param.m_decoder.m_state == J2K_STATE_TPH) ? /*FIXME J2K_DEC_STATE_TPH*/
&l_cp->tcps[p_j2k->m_current_tile_number] :
p_j2k->m_specific_param.m_decoder.m_default_tcp;
/* precondition again*/
assert(p_comp_no < p_j2k->m_private_image->numcomps);
l_tccp = &l_tcp->tccps[p_comp_no];
l_current_ptr = p_header_data;
if (*p_header_size < 1) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading SQcd or SQcc element\n");
return OPJ_FALSE;
}
*p_header_size -= 1;
opj_read_bytes(l_current_ptr, &l_tmp ,1); /* Sqcx */
++l_current_ptr;
l_tccp->qntsty = l_tmp & 0x1f;
l_tccp->numgbits = l_tmp >> 5;
if (l_tccp->qntsty == J2K_CCP_QNTSTY_SIQNT) {
l_num_band = 1;
}
else {
l_num_band = (l_tccp->qntsty == J2K_CCP_QNTSTY_NOQNT) ?
(*p_header_size) :
(*p_header_size) / 2;
if( l_num_band > OPJ_J2K_MAXBANDS ) {
opj_event_msg(p_manager, EVT_WARNING, "While reading CCP_QNTSTY element inside QCD or QCC marker segment, "
"number of subbands (%d) is greater to OPJ_J2K_MAXBANDS (%d). So we limit the number of elements stored to "
"OPJ_J2K_MAXBANDS (%d) and skip the rest. \n", l_num_band, OPJ_J2K_MAXBANDS, OPJ_J2K_MAXBANDS);
/*return OPJ_FALSE;*/
}
}
#ifdef USE_JPWL
if (l_cp->correct) {
/* if JPWL is on, we check whether there are too many subbands */
if (/*(l_num_band < 0) ||*/ (l_num_band >= OPJ_J2K_MAXBANDS)) {
opj_event_msg(p_manager, JPWL_ASSUME ? EVT_WARNING : EVT_ERROR,
"JPWL: bad number of subbands in Sqcx (%d)\n",
l_num_band);
if (!JPWL_ASSUME) {
opj_event_msg(p_manager, EVT_ERROR, "JPWL: giving up\n");
return OPJ_FALSE;
}
/* we try to correct */
l_num_band = 1;
opj_event_msg(p_manager, EVT_WARNING, "- trying to adjust them\n"
"- setting number of bands to %d => HYPOTHESIS!!!\n",
l_num_band);
};
};
#endif /* USE_JPWL */
if (l_tccp->qntsty == J2K_CCP_QNTSTY_NOQNT) {
for (l_band_no = 0; l_band_no < l_num_band; l_band_no++) {
opj_read_bytes(l_current_ptr, &l_tmp ,1); /* SPqcx_i */
++l_current_ptr;
if (l_band_no < OPJ_J2K_MAXBANDS){
l_tccp->stepsizes[l_band_no].expn = (OPJ_INT32)(l_tmp >> 3);
l_tccp->stepsizes[l_band_no].mant = 0;
}
}
*p_header_size = *p_header_size - l_num_band;
}
else {
for (l_band_no = 0; l_band_no < l_num_band; l_band_no++) {
opj_read_bytes(l_current_ptr, &l_tmp ,2); /* SPqcx_i */
l_current_ptr+=2;
if (l_band_no < OPJ_J2K_MAXBANDS){
l_tccp->stepsizes[l_band_no].expn = (OPJ_INT32)(l_tmp >> 11);
l_tccp->stepsizes[l_band_no].mant = l_tmp & 0x7ff;
}
}
*p_header_size = *p_header_size - 2*l_num_band;
}
/* Add Antonin : if scalar_derived -> compute other stepsizes */
if (l_tccp->qntsty == J2K_CCP_QNTSTY_SIQNT) {
for (l_band_no = 1; l_band_no < OPJ_J2K_MAXBANDS; l_band_no++) {
l_tccp->stepsizes[l_band_no].expn =
((OPJ_INT32)(l_tccp->stepsizes[0].expn) - (OPJ_INT32)((l_band_no - 1) / 3) > 0) ?
(OPJ_INT32)(l_tccp->stepsizes[0].expn) - (OPJ_INT32)((l_band_no - 1) / 3) : 0;
l_tccp->stepsizes[l_band_no].mant = l_tccp->stepsizes[0].mant;
}
}
return OPJ_TRUE;
}
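/**
 * Copies the quantization parameters of the first tile component to all the
 * other components of the current tile (or of the default tile when reading
 * the main header).
 */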
static void opj_j2k_copy_tile_quantization_parameters( opj_j2k_t *p_j2k )
{
OPJ_UINT32 i;
opj_cp_t *l_cp = NULL;
opj_tcp_t *l_tcp = NULL;
opj_tccp_t *l_ref_tccp = NULL;
opj_tccp_t *l_copied_tccp = NULL;
OPJ_UINT32 l_size;
/* preconditions */
assert(p_j2k != 00);
l_cp = &(p_j2k->m_cp);
l_tcp = p_j2k->m_specific_param.m_decoder.m_state == J2K_STATE_TPH ?
&l_cp->tcps[p_j2k->m_current_tile_number] :
p_j2k->m_specific_param.m_decoder.m_default_tcp;
l_ref_tccp = &l_tcp->tccps[0];
l_copied_tccp = l_ref_tccp + 1;
l_size = OPJ_J2K_MAXBANDS * sizeof(opj_stepsize_t);
for (i=1;i<p_j2k->m_private_image->numcomps;++i) {
l_copied_tccp->qntsty = l_ref_tccp->qntsty;
l_copied_tccp->numgbits = l_ref_tccp->numgbits;
memcpy(l_copied_tccp->stepsizes,l_ref_tccp->stepsizes,l_size);
++l_copied_tccp;
}
}
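/**
 * Dumps the coding and quantization parameters of a tile (here the default
 * tile) to the given output stream, one block per component.
 */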
static void opj_j2k_dump_tile_info( opj_tcp_t * l_default_tile,OPJ_INT32 numcomps,FILE* out_stream)
{
if (l_default_tile)
{
OPJ_INT32 compno;
fprintf(out_stream, "\t default tile {\n");
fprintf(out_stream, "\t\t csty=%#x\n", l_default_tile->csty);
fprintf(out_stream, "\t\t prg=%#x\n", l_default_tile->prg);
fprintf(out_stream, "\t\t numlayers=%d\n", l_default_tile->numlayers);
fprintf(out_stream, "\t\t mct=%x\n", l_default_tile->mct);
for (compno = 0; compno < numcomps; compno++) {
opj_tccp_t *l_tccp = &(l_default_tile->tccps[compno]);
OPJ_UINT32 resno;
OPJ_INT32 bandno, numbands;
/* coding style*/
fprintf(out_stream, "\t\t comp %d {\n", compno);
fprintf(out_stream, "\t\t\t csty=%#x\n", l_tccp->csty);
fprintf(out_stream, "\t\t\t numresolutions=%d\n", l_tccp->numresolutions);
fprintf(out_stream, "\t\t\t cblkw=2^%d\n", l_tccp->cblkw);
fprintf(out_stream, "\t\t\t cblkh=2^%d\n", l_tccp->cblkh);
fprintf(out_stream, "\t\t\t cblksty=%#x\n", l_tccp->cblksty);
fprintf(out_stream, "\t\t\t qmfbid=%d\n", l_tccp->qmfbid);
fprintf(out_stream, "\t\t\t preccintsize (w,h)=");
for (resno = 0; resno < l_tccp->numresolutions; resno++) {
fprintf(out_stream, "(%d,%d) ", l_tccp->prcw[resno], l_tccp->prch[resno]);
}
fprintf(out_stream, "\n");
/* quantization style*/
fprintf(out_stream, "\t\t\t qntsty=%d\n", l_tccp->qntsty);
fprintf(out_stream, "\t\t\t numgbits=%d\n", l_tccp->numgbits);
fprintf(out_stream, "\t\t\t stepsizes (m,e)=");
numbands = (l_tccp->qntsty == J2K_CCP_QNTSTY_SIQNT) ? 1 : (OPJ_INT32)l_tccp->numresolutions * 3 - 2;
for (bandno = 0; bandno < numbands; bandno++) {
fprintf(out_stream, "(%d,%d) ", l_tccp->stepsizes[bandno].mant,
l_tccp->stepsizes[bandno].expn);
}
fprintf(out_stream, "\n");
/* RGN value*/
fprintf(out_stream, "\t\t\t roishift=%d\n", l_tccp->roishift);
fprintf(out_stream, "\t\t }\n");
} /*end of component of default tile*/
fprintf(out_stream, "\t }\n"); /*end of default tile*/
}
}
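/**
 * Dumps, according to flag, the image header, the main-header codestream info,
 * the per-tile info and/or the main-header codestream index of a J2K codec to
 * the given output stream.
 */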
void j2k_dump (opj_j2k_t* p_j2k, OPJ_INT32 flag, FILE* out_stream)
{
/* Check that the flag is compatible with a j2k file */
if ( (flag & OPJ_JP2_INFO) || (flag & OPJ_JP2_IND)){
fprintf(out_stream, "Wrong flag\n");
return;
}
/* Dump the image_header */
if (flag & OPJ_IMG_INFO){
if (p_j2k->m_private_image)
j2k_dump_image_header(p_j2k->m_private_image, 0, out_stream);
}
/* Dump the codestream info from main header */
if (flag & OPJ_J2K_MH_INFO){
opj_j2k_dump_MH_info(p_j2k, out_stream);
}
/* Dump all tile/codestream info */
if (flag & OPJ_J2K_TCH_INFO){
OPJ_UINT32 l_nb_tiles = p_j2k->m_cp.th * p_j2k->m_cp.tw;
OPJ_UINT32 i;
opj_tcp_t * l_tcp = p_j2k->m_cp.tcps;
for (i=0;i<l_nb_tiles;++i) {
opj_j2k_dump_tile_info( l_tcp,(OPJ_INT32)p_j2k->m_private_image->numcomps, out_stream);
++l_tcp;
}
}
/* Dump the codestream info of the current tile */
if (flag & OPJ_J2K_TH_INFO){
}
/* Dump the codestream index from main header */
if (flag & OPJ_J2K_MH_IND){
opj_j2k_dump_MH_index(p_j2k, out_stream);
}
/* Dump the codestream index of the current tile */
if (flag & OPJ_J2K_TH_IND){
}
}
static void opj_j2k_dump_MH_index(opj_j2k_t* p_j2k, FILE* out_stream)
{
opj_codestream_index_t* cstr_index = p_j2k->cstr_index;
OPJ_UINT32 it_marker, it_tile, it_tile_part;
fprintf(out_stream, "Codestream index from main header: {\n");
fprintf(out_stream, "\t Main header start position=%" PRIi64 "\n"
"\t Main header end position=%" PRIi64 "\n",
cstr_index->main_head_start, cstr_index->main_head_end);
fprintf(out_stream, "\t Marker list: {\n");
if (cstr_index->marker){
for (it_marker=0; it_marker < cstr_index->marknum ; it_marker++){
fprintf(out_stream, "\t\t type=%#x, pos=%" PRIi64 ", len=%d\n",
cstr_index->marker[it_marker].type,
cstr_index->marker[it_marker].pos,
cstr_index->marker[it_marker].len );
}
}
fprintf(out_stream, "\t }\n");
if (cstr_index->tile_index){
/* Simple test to avoid writing empty information */
OPJ_UINT32 l_acc_nb_of_tile_part = 0;
for (it_tile=0; it_tile < cstr_index->nb_of_tiles ; it_tile++){
l_acc_nb_of_tile_part += cstr_index->tile_index[it_tile].nb_tps;
}
if (l_acc_nb_of_tile_part)
{
fprintf(out_stream, "\t Tile index: {\n");
for (it_tile=0; it_tile < cstr_index->nb_of_tiles ; it_tile++){
OPJ_UINT32 nb_of_tile_part = cstr_index->tile_index[it_tile].nb_tps;
fprintf(out_stream, "\t\t nb of tile-part in tile [%d]=%d\n", it_tile, nb_of_tile_part);
if (cstr_index->tile_index[it_tile].tp_index){
for (it_tile_part =0; it_tile_part < nb_of_tile_part; it_tile_part++){
fprintf(out_stream, "\t\t\t tile-part[%d]: star_pos=%" PRIi64 ", end_header=%" PRIi64 ", end_pos=%" PRIi64 ".\n",
it_tile_part,
cstr_index->tile_index[it_tile].tp_index[it_tile_part].start_pos,
cstr_index->tile_index[it_tile].tp_index[it_tile_part].end_header,
cstr_index->tile_index[it_tile].tp_index[it_tile_part].end_pos);
}
}
if (cstr_index->tile_index[it_tile].marker){
for (it_marker=0; it_marker < cstr_index->tile_index[it_tile].marknum ; it_marker++){
fprintf(out_stream, "\t\t type=%#x, pos=%" PRIi64 ", len=%d\n",
cstr_index->tile_index[it_tile].marker[it_marker].type,
cstr_index->tile_index[it_tile].marker[it_marker].pos,
cstr_index->tile_index[it_tile].marker[it_marker].len );
}
}
}
fprintf(out_stream,"\t }\n");
}
}
fprintf(out_stream,"}\n");
}
static void opj_j2k_dump_MH_info(opj_j2k_t* p_j2k, FILE* out_stream)
{
fprintf(out_stream, "Codestream info from main header: {\n");
fprintf(out_stream, "\t tx0=%d, ty0=%d\n", p_j2k->m_cp.tx0, p_j2k->m_cp.ty0);
fprintf(out_stream, "\t tdx=%d, tdy=%d\n", p_j2k->m_cp.tdx, p_j2k->m_cp.tdy);
fprintf(out_stream, "\t tw=%d, th=%d\n", p_j2k->m_cp.tw, p_j2k->m_cp.th);
opj_j2k_dump_tile_info(p_j2k->m_specific_param.m_decoder.m_default_tcp,(OPJ_INT32)p_j2k->m_private_image->numcomps, out_stream);
fprintf(out_stream, "}\n");
}
void j2k_dump_image_header(opj_image_t* img_header, OPJ_BOOL dev_dump_flag, FILE* out_stream)
{
char tab[2];
if (dev_dump_flag){
fprintf(stdout, "[DEV] Dump an image_header struct {\n");
tab[0] = '\0';
}
else {
fprintf(out_stream, "Image info {\n");
tab[0] = '\t';tab[1] = '\0';
}
fprintf(out_stream, "%s x0=%d, y0=%d\n", tab, img_header->x0, img_header->y0);
fprintf(out_stream, "%s x1=%d, y1=%d\n", tab, img_header->x1, img_header->y1);
fprintf(out_stream, "%s numcomps=%d\n", tab, img_header->numcomps);
if (img_header->comps){
OPJ_UINT32 compno;
for (compno = 0; compno < img_header->numcomps; compno++) {
fprintf(out_stream, "%s\t component %d {\n", tab, compno);
j2k_dump_image_comp_header(&(img_header->comps[compno]), dev_dump_flag, out_stream);
fprintf(out_stream,"%s}\n",tab);
}
}
fprintf(out_stream, "}\n");
}
void j2k_dump_image_comp_header(opj_image_comp_t* comp_header, OPJ_BOOL dev_dump_flag, FILE* out_stream)
{
char tab[3];
if (dev_dump_flag){
fprintf(stdout, "[DEV] Dump an image_comp_header struct {\n");
tab[0] = '\0';
} else {
tab[0] = '\t';tab[1] = '\t';tab[2] = '\0';
}
fprintf(out_stream, "%s dx=%d, dy=%d\n", tab, comp_header->dx, comp_header->dy);
fprintf(out_stream, "%s prec=%d\n", tab, comp_header->prec);
fprintf(out_stream, "%s sgnd=%d\n", tab, comp_header->sgnd);
if (dev_dump_flag)
fprintf(out_stream, "}\n");
}
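/**
 * Builds an opj_codestream_info_v2_t from the main-header coding parameters
 * and the default tile. The result must be released with opj_destroy_cstr_info().
 *
 * @return the new info structure, or NULL on allocation failure.
 */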
opj_codestream_info_v2_t* j2k_get_cstr_info(opj_j2k_t* p_j2k)
{
OPJ_UINT32 compno;
OPJ_UINT32 numcomps = p_j2k->m_private_image->numcomps;
opj_tcp_t *l_default_tile;
opj_codestream_info_v2_t* cstr_info = (opj_codestream_info_v2_t*) opj_calloc(1,sizeof(opj_codestream_info_v2_t));
if (!cstr_info)
return NULL;
cstr_info->nbcomps = p_j2k->m_private_image->numcomps;
cstr_info->tx0 = p_j2k->m_cp.tx0;
cstr_info->ty0 = p_j2k->m_cp.ty0;
cstr_info->tdx = p_j2k->m_cp.tdx;
cstr_info->tdy = p_j2k->m_cp.tdy;
cstr_info->tw = p_j2k->m_cp.tw;
cstr_info->th = p_j2k->m_cp.th;
cstr_info->tile_info = NULL; /* Not filled from the main header */
l_default_tile = p_j2k->m_specific_param.m_decoder.m_default_tcp;
cstr_info->m_default_tile_info.csty = l_default_tile->csty;
cstr_info->m_default_tile_info.prg = l_default_tile->prg;
cstr_info->m_default_tile_info.numlayers = l_default_tile->numlayers;
cstr_info->m_default_tile_info.mct = l_default_tile->mct;
cstr_info->m_default_tile_info.tccp_info = (opj_tccp_info_t*) opj_calloc(cstr_info->nbcomps, sizeof(opj_tccp_info_t));
if (!cstr_info->m_default_tile_info.tccp_info)
{
opj_destroy_cstr_info(&cstr_info);
return NULL;
}
for (compno = 0; compno < numcomps; compno++) {
opj_tccp_t *l_tccp = &(l_default_tile->tccps[compno]);
opj_tccp_info_t *l_tccp_info = &(cstr_info->m_default_tile_info.tccp_info[compno]);
OPJ_INT32 bandno, numbands;
/* coding style*/
l_tccp_info->csty = l_tccp->csty;
l_tccp_info->numresolutions = l_tccp->numresolutions;
l_tccp_info->cblkw = l_tccp->cblkw;
l_tccp_info->cblkh = l_tccp->cblkh;
l_tccp_info->cblksty = l_tccp->cblksty;
l_tccp_info->qmfbid = l_tccp->qmfbid;
if (l_tccp->numresolutions < OPJ_J2K_MAXRLVLS)
{
/* prcw/prch are arrays of OPJ_UINT32, and memcpy takes a byte count, so scale by the element size */
memcpy(l_tccp_info->prch, l_tccp->prch, l_tccp->numresolutions * sizeof(OPJ_UINT32));
memcpy(l_tccp_info->prcw, l_tccp->prcw, l_tccp->numresolutions * sizeof(OPJ_UINT32));
}
/* quantization style*/
l_tccp_info->qntsty = l_tccp->qntsty;
l_tccp_info->numgbits = l_tccp->numgbits;
numbands = (l_tccp->qntsty == J2K_CCP_QNTSTY_SIQNT) ? 1 : (OPJ_INT32)l_tccp->numresolutions * 3 - 2;
if (numbands < OPJ_J2K_MAXBANDS) {
for (bandno = 0; bandno < numbands; bandno++) {
l_tccp_info->stepsizes_mant[bandno] = (OPJ_UINT32)l_tccp->stepsizes[bandno].mant;
l_tccp_info->stepsizes_expn[bandno] = (OPJ_UINT32)l_tccp->stepsizes[bandno].expn;
}
}
/* RGN value*/
l_tccp_info->roishift = l_tccp->roishift;
}
return cstr_info;
}
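/**
 * Returns a deep copy of the codec's codestream index (markers, tile index and
 * tile-part index). Everything allocated so far is released on failure.
 *
 * @return the copy, or NULL on allocation failure.
 */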
opj_codestream_index_t* j2k_get_cstr_index(opj_j2k_t* p_j2k)
{
opj_codestream_index_t* l_cstr_index = (opj_codestream_index_t*)
opj_calloc(1,sizeof(opj_codestream_index_t));
if (!l_cstr_index)
return NULL;
l_cstr_index->main_head_start = p_j2k->cstr_index->main_head_start;
l_cstr_index->main_head_end = p_j2k->cstr_index->main_head_end;
l_cstr_index->codestream_size = p_j2k->cstr_index->codestream_size;
l_cstr_index->marknum = p_j2k->cstr_index->marknum;
l_cstr_index->marker = (opj_marker_info_t*)opj_malloc(l_cstr_index->marknum*sizeof(opj_marker_info_t));
if (!l_cstr_index->marker){
opj_free( l_cstr_index);
return NULL;
}
if (p_j2k->cstr_index->marker)
memcpy(l_cstr_index->marker, p_j2k->cstr_index->marker, l_cstr_index->marknum * sizeof(opj_marker_info_t) );
else{
opj_free(l_cstr_index->marker);
l_cstr_index->marker = NULL;
}
l_cstr_index->nb_of_tiles = p_j2k->cstr_index->nb_of_tiles;
l_cstr_index->tile_index = (opj_tile_index_t*)opj_calloc(l_cstr_index->nb_of_tiles, sizeof(opj_tile_index_t) );
if (!l_cstr_index->tile_index){
opj_free( l_cstr_index->marker);
opj_free( l_cstr_index);
return NULL;
}
if (!p_j2k->cstr_index->tile_index){
opj_free(l_cstr_index->tile_index);
l_cstr_index->tile_index = NULL;
}
else {
OPJ_UINT32 it_tile = 0;
for (it_tile = 0; it_tile < l_cstr_index->nb_of_tiles; it_tile++ ){
/* Tile Marker*/
l_cstr_index->tile_index[it_tile].marknum = p_j2k->cstr_index->tile_index[it_tile].marknum;
l_cstr_index->tile_index[it_tile].marker =
(opj_marker_info_t*)opj_malloc(l_cstr_index->tile_index[it_tile].marknum*sizeof(opj_marker_info_t));
if (!l_cstr_index->tile_index[it_tile].marker) {
OPJ_UINT32 it_tile_free;
for (it_tile_free=0; it_tile_free < it_tile; it_tile_free++){
opj_free(l_cstr_index->tile_index[it_tile_free].marker);
}
opj_free( l_cstr_index->tile_index);
opj_free( l_cstr_index->marker);
opj_free( l_cstr_index);
return NULL;
}
if (p_j2k->cstr_index->tile_index[it_tile].marker)
memcpy( l_cstr_index->tile_index[it_tile].marker,
p_j2k->cstr_index->tile_index[it_tile].marker,
l_cstr_index->tile_index[it_tile].marknum * sizeof(opj_marker_info_t) );
else{
opj_free(l_cstr_index->tile_index[it_tile].marker);
l_cstr_index->tile_index[it_tile].marker = NULL;
}
/* Tile part index*/
l_cstr_index->tile_index[it_tile].nb_tps = p_j2k->cstr_index->tile_index[it_tile].nb_tps;
l_cstr_index->tile_index[it_tile].tp_index =
(opj_tp_index_t*)opj_malloc(l_cstr_index->tile_index[it_tile].nb_tps*sizeof(opj_tp_index_t));
if(!l_cstr_index->tile_index[it_tile].tp_index){
OPJ_UINT32 it_tile_free;
for (it_tile_free=0; it_tile_free < it_tile; it_tile_free++){
opj_free(l_cstr_index->tile_index[it_tile_free].marker);
opj_free(l_cstr_index->tile_index[it_tile_free].tp_index);
}
opj_free( l_cstr_index->tile_index);
opj_free( l_cstr_index->marker);
opj_free( l_cstr_index);
return NULL;
}
if (p_j2k->cstr_index->tile_index[it_tile].tp_index){
memcpy( l_cstr_index->tile_index[it_tile].tp_index,
p_j2k->cstr_index->tile_index[it_tile].tp_index,
l_cstr_index->tile_index[it_tile].nb_tps * sizeof(opj_tp_index_t) );
}
else{
opj_free(l_cstr_index->tile_index[it_tile].tp_index);
l_cstr_index->tile_index[it_tile].tp_index = NULL;
}
/* Packet index (NOT USED)*/
l_cstr_index->tile_index[it_tile].nb_packet = 0;
l_cstr_index->tile_index[it_tile].packet_index = NULL;
}
}
return l_cstr_index;
}
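/**
 * Allocates the per-tile entries of the codestream index, with room for 100
 * markers in each tile.
 *
 * @return OPJ_TRUE on success.
 */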
static OPJ_BOOL opj_j2k_allocate_tile_element_cstr_index(opj_j2k_t *p_j2k)
{
OPJ_UINT32 it_tile=0;
p_j2k->cstr_index->nb_of_tiles = p_j2k->m_cp.tw * p_j2k->m_cp.th;
p_j2k->cstr_index->tile_index = (opj_tile_index_t*)opj_calloc(p_j2k->cstr_index->nb_of_tiles, sizeof(opj_tile_index_t));
if (!p_j2k->cstr_index->tile_index)
return OPJ_FALSE;
for (it_tile=0; it_tile < p_j2k->cstr_index->nb_of_tiles; it_tile++){
p_j2k->cstr_index->tile_index[it_tile].maxmarknum = 100;
p_j2k->cstr_index->tile_index[it_tile].marknum = 0;
p_j2k->cstr_index->tile_index[it_tile].marker = (opj_marker_info_t*)
opj_calloc(p_j2k->cstr_index->tile_index[it_tile].maxmarknum, sizeof(opj_marker_info_t));
if (!p_j2k->cstr_index->tile_index[it_tile].marker)
return OPJ_FALSE;
}
return OPJ_TRUE;
}
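/**
 * Reads and decodes every tile of the codestream in order, growing a shared
 * tile buffer as needed and merging each decoded tile into the output image.
 * This is the main procedure of the full decode.
 *
 * @return OPJ_TRUE on success.
 */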
static OPJ_BOOL opj_j2k_decode_tiles ( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager)
{
OPJ_BOOL l_go_on = OPJ_TRUE;
OPJ_UINT32 l_current_tile_no;
OPJ_UINT32 l_data_size,l_max_data_size;
OPJ_INT32 l_tile_x0,l_tile_y0,l_tile_x1,l_tile_y1;
OPJ_UINT32 l_nb_comps;
OPJ_BYTE * l_current_data;
OPJ_UINT32 nr_tiles = 0;
l_current_data = (OPJ_BYTE*)opj_malloc(1000);
if (! l_current_data) {
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to decode tiles\n");
return OPJ_FALSE;
}
l_max_data_size = 1000;
for (;;) {
if (! opj_j2k_read_tile_header( p_j2k,
&l_current_tile_no,
&l_data_size,
&l_tile_x0, &l_tile_y0,
&l_tile_x1, &l_tile_y1,
&l_nb_comps,
&l_go_on,
p_stream,
p_manager)) {
opj_free(l_current_data);
return OPJ_FALSE;
}
if (! l_go_on) {
break;
}
if (l_data_size > l_max_data_size) {
OPJ_BYTE *l_new_current_data = (OPJ_BYTE *) opj_realloc(l_current_data, l_data_size);
if (! l_new_current_data) {
opj_free(l_current_data);
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to decode tile %d/%d\n", l_current_tile_no +1, p_j2k->m_cp.th * p_j2k->m_cp.tw);
return OPJ_FALSE;
}
l_current_data = l_new_current_data;
l_max_data_size = l_data_size;
}
if (! opj_j2k_decode_tile(p_j2k,l_current_tile_no,l_current_data,l_data_size,p_stream,p_manager)) {
opj_free(l_current_data);
opj_event_msg(p_manager, EVT_ERROR, "Failed to decode tile %d/%d\n", l_current_tile_no +1, p_j2k->m_cp.th * p_j2k->m_cp.tw);
return OPJ_FALSE;
}
opj_event_msg(p_manager, EVT_INFO, "Tile %d/%d has been decoded.\n", l_current_tile_no +1, p_j2k->m_cp.th * p_j2k->m_cp.tw);
if (! opj_j2k_update_image_data(p_j2k->m_tcd,l_current_data, p_j2k->m_output_image)) {
opj_free(l_current_data);
return OPJ_FALSE;
}
opj_event_msg(p_manager, EVT_INFO, "Image data has been updated with tile %d.\n\n", l_current_tile_no + 1);
if(opj_stream_get_number_byte_left(p_stream) == 0
&& p_j2k->m_specific_param.m_decoder.m_state == J2K_STATE_NEOC)
break;
if(++nr_tiles == p_j2k->m_cp.th * p_j2k->m_cp.tw)
break;
}
opj_free(l_current_data);
return OPJ_TRUE;
}
/**
* Sets up the procedures to be done when decoding data. Developers wanting to extend the library can add their own reading procedures.
*/
static OPJ_BOOL opj_j2k_setup_decoding (opj_j2k_t *p_j2k, opj_event_mgr_t * p_manager)
{
/* preconditions*/
assert(p_j2k != 00);
assert(p_manager != 00);
if (! opj_procedure_list_add_procedure(p_j2k->m_procedure_list,(opj_procedure)opj_j2k_decode_tiles, p_manager)) {
return OPJ_FALSE;
}
/* DEVELOPER CORNER, add your custom procedures */
return OPJ_TRUE;
}
/*
* Read and decode one tile.
*/
static OPJ_BOOL opj_j2k_decode_one_tile ( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager)
{
OPJ_BOOL l_go_on = OPJ_TRUE;
OPJ_UINT32 l_current_tile_no;
OPJ_UINT32 l_tile_no_to_dec;
OPJ_UINT32 l_data_size,l_max_data_size;
OPJ_INT32 l_tile_x0,l_tile_y0,l_tile_x1,l_tile_y1;
OPJ_UINT32 l_nb_comps;
OPJ_BYTE * l_current_data;
l_current_data = (OPJ_BYTE*)opj_malloc(1000);
if (! l_current_data) {
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to decode one tile\n");
return OPJ_FALSE;
}
l_max_data_size = 1000;
/* Allocate and initialize some elements of the codestream index if not already done */
if( !p_j2k->cstr_index->tile_index)
{
if (!opj_j2k_allocate_tile_element_cstr_index(p_j2k)){
opj_free(l_current_data);
return OPJ_FALSE;
}
}
/* Move into the codestream to the first SOT used to decode the desired tile */
l_tile_no_to_dec = (OPJ_UINT32)p_j2k->m_specific_param.m_decoder.m_tile_ind_to_dec;
if (p_j2k->cstr_index->tile_index)
if(p_j2k->cstr_index->tile_index->tp_index)
{
if ( ! p_j2k->cstr_index->tile_index[l_tile_no_to_dec].nb_tps) {
/* the index for this tile has not been built,
* so move to the last SOT read */
if ( !(opj_stream_read_seek(p_stream, p_j2k->m_specific_param.m_decoder.m_last_sot_read_pos+2, p_manager)) ){
opj_event_msg(p_manager, EVT_ERROR, "Problem with seek function\n");
opj_free(l_current_data);
return OPJ_FALSE;
}
}
else{
if ( !(opj_stream_read_seek(p_stream, p_j2k->cstr_index->tile_index[l_tile_no_to_dec].tp_index[0].start_pos+2, p_manager)) ) {
opj_event_msg(p_manager, EVT_ERROR, "Problem with seek function\n");
opj_free(l_current_data);
return OPJ_FALSE;
}
}
/* Special case if we have previously read the EOC marker (i.e. the previously fetched tile was the last one) */
if(p_j2k->m_specific_param.m_decoder.m_state == J2K_STATE_EOC)
p_j2k->m_specific_param.m_decoder.m_state = J2K_STATE_TPHSOT;
}
for (;;) {
if (! opj_j2k_read_tile_header( p_j2k,
&l_current_tile_no,
&l_data_size,
&l_tile_x0, &l_tile_y0,
&l_tile_x1, &l_tile_y1,
&l_nb_comps,
&l_go_on,
p_stream,
p_manager)) {
opj_free(l_current_data);
return OPJ_FALSE;
}
if (! l_go_on) {
break;
}
if (l_data_size > l_max_data_size) {
OPJ_BYTE *l_new_current_data = (OPJ_BYTE *) opj_realloc(l_current_data, l_data_size);
if (! l_new_current_data) {
opj_free(l_current_data);
l_current_data = NULL;
/* TODO: LH: why tile numbering policy used in messages differs from
the one used in opj_j2k_decode_tiles() ? */
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to decode tile %d/%d\n", l_current_tile_no, (p_j2k->m_cp.th * p_j2k->m_cp.tw) - 1);
return OPJ_FALSE;
}
l_current_data = l_new_current_data;
l_max_data_size = l_data_size;
}
if (! opj_j2k_decode_tile(p_j2k,l_current_tile_no,l_current_data,l_data_size,p_stream,p_manager)) {
opj_free(l_current_data);
return OPJ_FALSE;
}
opj_event_msg(p_manager, EVT_INFO, "Tile %d/%d has been decoded.\n", l_current_tile_no, (p_j2k->m_cp.th * p_j2k->m_cp.tw) - 1);
if (! opj_j2k_update_image_data(p_j2k->m_tcd,l_current_data, p_j2k->m_output_image)) {
opj_free(l_current_data);
return OPJ_FALSE;
}
opj_event_msg(p_manager, EVT_INFO, "Image data has been updated with tile %d.\n\n", l_current_tile_no);
if(l_current_tile_no == l_tile_no_to_dec)
{
/* move into the codestream to the first SOT (FIXME or not move?) */
if (!(opj_stream_read_seek(p_stream, p_j2k->cstr_index->main_head_end + 2, p_manager) ) ) {
opj_event_msg(p_manager, EVT_ERROR, "Problem with seek function\n");
return OPJ_FALSE;
}
break;
}
else {
opj_event_msg(p_manager, EVT_WARNING, "Tile read, decode and updated is not the desired (%d vs %d).\n", l_current_tile_no, l_tile_no_to_dec);
}
}
opj_free(l_current_data);
return OPJ_TRUE;
}
/**
* Sets up the procedures to be done when decoding one tile. Developers wanting to extend the library can add their own reading procedures.
*/
static OPJ_BOOL opj_j2k_setup_decoding_tile (opj_j2k_t *p_j2k, opj_event_mgr_t * p_manager)
{
/* preconditions*/
assert(p_j2k != 00);
assert(p_manager != 00);
if (! opj_procedure_list_add_procedure(p_j2k->m_procedure_list,(opj_procedure)opj_j2k_decode_one_tile, p_manager)) {
return OPJ_FALSE;
}
/* DEVELOPER CORNER, add your custom procedures */
return OPJ_TRUE;
}
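/**
 * Decodes the whole codestream into p_image: creates the internal output
 * image, runs the decoding procedure list and transfers the component data
 * from the codec to p_image.
 *
 * @return OPJ_TRUE on success.
 */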
OPJ_BOOL opj_j2k_decode(opj_j2k_t * p_j2k,
opj_stream_private_t * p_stream,
opj_image_t * p_image,
opj_event_mgr_t * p_manager)
{
OPJ_UINT32 compno;
if (!p_image)
return OPJ_FALSE;
p_j2k->m_output_image = opj_image_create0();
if (! (p_j2k->m_output_image)) {
return OPJ_FALSE;
}
opj_copy_image_header(p_image, p_j2k->m_output_image);
/* customization of the decoding */
if (! opj_j2k_setup_decoding(p_j2k, p_manager)) {
opj_image_destroy(p_j2k->m_output_image);
p_j2k->m_output_image = NULL;
return OPJ_FALSE;
}
/* Decode the codestream */
if (! opj_j2k_exec (p_j2k,p_j2k->m_procedure_list,p_stream,p_manager)) {
opj_image_destroy(p_j2k->m_private_image);
p_j2k->m_private_image = NULL;
return OPJ_FALSE;
}
/* Move data and copy one information from codec to output image*/
for (compno = 0; compno < p_image->numcomps; compno++) {
p_image->comps[compno].resno_decoded = p_j2k->m_output_image->comps[compno].resno_decoded;
p_image->comps[compno].data = p_j2k->m_output_image->comps[compno].data;
#if 0
char fn[256];
sprintf( fn, "/tmp/%d.raw", compno );
FILE *debug = fopen( fn, "wb" );
fwrite( p_image->comps[compno].data, sizeof(OPJ_INT32), p_image->comps[compno].w * p_image->comps[compno].h, debug );
fclose( debug );
#endif
p_j2k->m_output_image->comps[compno].data = NULL;
}
return OPJ_TRUE;
}
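/**
 * Decodes the single tile given by tile_index into p_image: the image window
 * is restricted to the tile area, the internal output image is re-created and
 * the one-tile decoding procedure list is executed.
 *
 * @return OPJ_TRUE on success.
 */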
OPJ_BOOL opj_j2k_get_tile( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_image_t* p_image,
opj_event_mgr_t * p_manager,
OPJ_UINT32 tile_index )
{
OPJ_UINT32 compno;
OPJ_UINT32 l_tile_x, l_tile_y;
opj_image_comp_t* l_img_comp;
if (!p_image) {
opj_event_msg(p_manager, EVT_ERROR, "We need an image previously created.\n");
return OPJ_FALSE;
}
if ( /*(tile_index < 0) &&*/ (tile_index >= p_j2k->m_cp.tw * p_j2k->m_cp.th) ){
opj_event_msg(p_manager, EVT_ERROR, "Tile index provided by the user is incorrect %d (max = %d) \n", tile_index, (p_j2k->m_cp.tw * p_j2k->m_cp.th) - 1);
return OPJ_FALSE;
}
/* Compute the dimensions of the desired tile */
l_tile_x = tile_index % p_j2k->m_cp.tw;
l_tile_y = tile_index / p_j2k->m_cp.tw;
p_image->x0 = l_tile_x * p_j2k->m_cp.tdx + p_j2k->m_cp.tx0;
if (p_image->x0 < p_j2k->m_private_image->x0)
p_image->x0 = p_j2k->m_private_image->x0;
p_image->x1 = (l_tile_x + 1) * p_j2k->m_cp.tdx + p_j2k->m_cp.tx0;
if (p_image->x1 > p_j2k->m_private_image->x1)
p_image->x1 = p_j2k->m_private_image->x1;
p_image->y0 = l_tile_y * p_j2k->m_cp.tdy + p_j2k->m_cp.ty0;
if (p_image->y0 < p_j2k->m_private_image->y0)
p_image->y0 = p_j2k->m_private_image->y0;
p_image->y1 = (l_tile_y + 1) * p_j2k->m_cp.tdy + p_j2k->m_cp.ty0;
if (p_image->y1 > p_j2k->m_private_image->y1)
p_image->y1 = p_j2k->m_private_image->y1;
l_img_comp = p_image->comps;
for (compno=0; compno < p_image->numcomps; ++compno)
{
OPJ_INT32 l_comp_x1, l_comp_y1;
l_img_comp->factor = p_j2k->m_private_image->comps[compno].factor;
l_img_comp->x0 = (OPJ_UINT32)opj_int_ceildiv((OPJ_INT32)p_image->x0, (OPJ_INT32)l_img_comp->dx);
l_img_comp->y0 = (OPJ_UINT32)opj_int_ceildiv((OPJ_INT32)p_image->y0, (OPJ_INT32)l_img_comp->dy);
l_comp_x1 = opj_int_ceildiv((OPJ_INT32)p_image->x1, (OPJ_INT32)l_img_comp->dx);
l_comp_y1 = opj_int_ceildiv((OPJ_INT32)p_image->y1, (OPJ_INT32)l_img_comp->dy);
l_img_comp->w = (OPJ_UINT32)(opj_int_ceildivpow2(l_comp_x1, (OPJ_INT32)l_img_comp->factor) - opj_int_ceildivpow2((OPJ_INT32)l_img_comp->x0, (OPJ_INT32)l_img_comp->factor));
l_img_comp->h = (OPJ_UINT32)(opj_int_ceildivpow2(l_comp_y1, (OPJ_INT32)l_img_comp->factor) - opj_int_ceildivpow2((OPJ_INT32)l_img_comp->y0, (OPJ_INT32)l_img_comp->factor));
l_img_comp++;
}
/* Destroy the previous output image*/
if (p_j2k->m_output_image)
opj_image_destroy(p_j2k->m_output_image);
/* Create the output image from the information previously computed */
p_j2k->m_output_image = opj_image_create0();
if (! (p_j2k->m_output_image)) {
return OPJ_FALSE;
}
opj_copy_image_header(p_image, p_j2k->m_output_image);
p_j2k->m_specific_param.m_decoder.m_tile_ind_to_dec = (OPJ_INT32)tile_index;
/* customization of the decoding */
if (! opj_j2k_setup_decoding_tile(p_j2k, p_manager)) {
opj_image_destroy(p_j2k->m_output_image);
p_j2k->m_output_image = NULL;
return OPJ_FALSE;
}
/* Decode the codestream */
if (! opj_j2k_exec (p_j2k,p_j2k->m_procedure_list,p_stream,p_manager)) {
opj_image_destroy(p_j2k->m_private_image);
p_j2k->m_private_image = NULL;
return OPJ_FALSE;
}
/* Move the component data and the number of decoded resolutions from the codec to the output image */
for (compno = 0; compno < p_image->numcomps; compno++) {
p_image->comps[compno].resno_decoded = p_j2k->m_output_image->comps[compno].resno_decoded;
if (p_image->comps[compno].data)
opj_free(p_image->comps[compno].data);
p_image->comps[compno].data = p_j2k->m_output_image->comps[compno].data;
p_j2k->m_output_image->comps[compno].data = NULL;
}
return OPJ_TRUE;
}
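/**
* Stores res_factor as the decoder reduce factor; when the main header has
* already been read, also checks it against each component's resolution
* count and applies it to every component, failing if the factor is not
* strictly smaller than the number of resolutions.
*/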
OPJ_BOOL opj_j2k_set_decoded_resolution_factor(opj_j2k_t *p_j2k,
OPJ_UINT32 res_factor,
opj_event_mgr_t * p_manager)
{
OPJ_UINT32 it_comp;
p_j2k->m_cp.m_specific_param.m_dec.m_reduce = res_factor;
if (p_j2k->m_private_image) {
if (p_j2k->m_private_image->comps) {
if (p_j2k->m_specific_param.m_decoder.m_default_tcp) {
if (p_j2k->m_specific_param.m_decoder.m_default_tcp->tccps) {
for (it_comp = 0 ; it_comp < p_j2k->m_private_image->numcomps; it_comp++) {
OPJ_UINT32 max_res = p_j2k->m_specific_param.m_decoder.m_default_tcp->tccps[it_comp].numresolutions;
if ( res_factor >= max_res){
opj_event_msg(p_manager, EVT_ERROR, "Resolution factor is greater than the maximum resolution in the component.\n");
return OPJ_FALSE;
}
p_j2k->m_private_image->comps[it_comp].factor = res_factor;
}
return OPJ_TRUE;
}
}
}
}
return OPJ_FALSE;
}
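/**
* Encodes every tile of the image. With a single tile, the tile component
* buffers alias the image buffers directly; with several tiles, a scratch
* buffer grown to the largest encoded tile size stages the contiguous,
* all-component sample data before it is copied into the tile coder.
*/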
OPJ_BOOL opj_j2k_encode(opj_j2k_t * p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager )
{
OPJ_UINT32 i, j;
OPJ_UINT32 l_nb_tiles;
OPJ_UINT32 l_max_tile_size = 0, l_current_tile_size;
OPJ_BYTE * l_current_data = 00;
opj_tcd_t* p_tcd = 00;
/* preconditions */
assert(p_j2k != 00);
assert(p_stream != 00);
assert(p_manager != 00);
p_tcd = p_j2k->m_tcd;
l_nb_tiles = p_j2k->m_cp.th * p_j2k->m_cp.tw;
for (i=0;i<l_nb_tiles;++i) {
if (! opj_j2k_pre_write_tile(p_j2k,i,p_stream,p_manager)) {
if (l_current_data) {
opj_free(l_current_data);
}
return OPJ_FALSE;
}
/* if we only have one tile, then simply set tile component data equal to image component data */
/* otherwise, allocate the data */
for (j=0;j<p_j2k->m_tcd->image->numcomps;++j) {
opj_tcd_tilecomp_t* l_tilec = p_tcd->tcd_image->tiles->comps + j;
if (l_nb_tiles == 1) {
opj_image_comp_t * l_img_comp = p_tcd->image->comps + j;
l_tilec->data = l_img_comp->data;
l_tilec->ownsData = OPJ_FALSE;
} else {
if(! opj_alloc_tile_component_data(l_tilec)) {
opj_event_msg(p_manager, EVT_ERROR, "Error allocating tile component data." );
if (l_current_data) {
opj_free(l_current_data);
}
return OPJ_FALSE;
}
}
}
l_current_tile_size = opj_tcd_get_encoded_tile_size(p_j2k->m_tcd);
if (l_nb_tiles > 1) {
if (l_current_tile_size > l_max_tile_size) {
OPJ_BYTE *l_new_current_data = (OPJ_BYTE *) opj_realloc(l_current_data, l_current_tile_size);
if (! l_new_current_data) {
if (l_current_data) {
opj_free(l_current_data);
}
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to encode all tiles\n");
return OPJ_FALSE;
}
l_current_data = l_new_current_data;
l_max_tile_size = l_current_tile_size;
}
/* copy image data (32 bit) to l_current_data as contiguous, all-component, zero offset buffer */
/* 32 bit components @ 8 bit precision get converted to 8 bit */
/* 32 bit components @ 16 bit precision get converted to 16 bit */
opj_j2k_get_tile_data(p_j2k->m_tcd,l_current_data);
/* now copy this data into the tile component */
if (! opj_tcd_copy_tile_data(p_j2k->m_tcd,l_current_data,l_current_tile_size)) {
opj_event_msg(p_manager, EVT_ERROR, "Size mismatch between tile data and sent data." );
return OPJ_FALSE;
}
}
if (! opj_j2k_post_write_tile (p_j2k,p_stream,p_manager)) {
return OPJ_FALSE;
}
}
if (l_current_data) {
opj_free(l_current_data);
}
return OPJ_TRUE;
}
OPJ_BOOL opj_j2k_end_compress( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager)
{
/* customization of the encoding */
if (! opj_j2k_setup_end_compress(p_j2k, p_manager)) {
return OPJ_FALSE;
}
if (! opj_j2k_exec (p_j2k, p_j2k->m_procedure_list, p_stream, p_manager))
{
return OPJ_FALSE;
}
return OPJ_TRUE;
}
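/**
* Starts the compression: takes ownership of the image component data,
* runs the validation procedures and writes the codestream main header.
*/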
OPJ_BOOL opj_j2k_start_compress(opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_image_t * p_image,
opj_event_mgr_t * p_manager)
{
/* preconditions */
assert(p_j2k != 00);
assert(p_stream != 00);
assert(p_manager != 00);
p_j2k->m_private_image = opj_image_create0();
if (! p_j2k->m_private_image) {
opj_event_msg(p_manager, EVT_ERROR, "Failed to allocate image header." );
return OPJ_FALSE;
}
opj_copy_image_header(p_image, p_j2k->m_private_image);
/* TODO_MSD: Find a better way */
if (p_image->comps) {
OPJ_UINT32 it_comp;
for (it_comp = 0 ; it_comp < p_image->numcomps; it_comp++) {
if (p_image->comps[it_comp].data) {
p_j2k->m_private_image->comps[it_comp].data =p_image->comps[it_comp].data;
p_image->comps[it_comp].data = NULL;
}
}
}
/* customization of the validation */
if (! opj_j2k_setup_encoding_validation (p_j2k, p_manager)) {
return OPJ_FALSE;
}
/* validation of the parameters codec */
if (! opj_j2k_exec(p_j2k,p_j2k->m_validation_list,p_stream,p_manager)) {
return OPJ_FALSE;
}
/* customization of the encoding */
if (! opj_j2k_setup_header_writing(p_j2k, p_manager)) {
return OPJ_FALSE;
}
/* write header */
if (! opj_j2k_exec (p_j2k,p_j2k->m_procedure_list,p_stream,p_manager)) {
return OPJ_FALSE;
}
return OPJ_TRUE;
}
static OPJ_BOOL opj_j2k_pre_write_tile ( opj_j2k_t * p_j2k,
OPJ_UINT32 p_tile_index,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager )
{
(void)p_stream;
if (p_tile_index != p_j2k->m_current_tile_number) {
opj_event_msg(p_manager, EVT_ERROR, "The given tile index does not match." );
return OPJ_FALSE;
}
opj_event_msg(p_manager, EVT_INFO, "tile number %d / %d\n", p_j2k->m_current_tile_number + 1, p_j2k->m_cp.tw * p_j2k->m_cp.th);
p_j2k->m_specific_param.m_encoder.m_current_tile_part_number = 0;
p_j2k->m_tcd->cur_totnum_tp = p_j2k->m_cp.tcps[p_tile_index].m_nb_tile_parts;
p_j2k->m_specific_param.m_encoder.m_current_poc_tile_part_number = 0;
/* initialisation before tile encoding */
if (! opj_tcd_init_encode_tile(p_j2k->m_tcd, p_j2k->m_current_tile_number, p_manager)) {
return OPJ_FALSE;
}
return OPJ_TRUE;
}
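/**
* Computes, for one tile component, the geometry used to copy samples
* between image and tile buffers: sample size in bytes (1, 2 or 4), tile
* width and height, image origin offsets, image width in samples, the
* stride to skip after each tile row, and the offset of the tile's first
* sample within the image buffer.
*/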
static void opj_get_tile_dimensions(opj_image_t * l_image,
opj_tcd_tilecomp_t * l_tilec,
opj_image_comp_t * l_img_comp,
OPJ_UINT32* l_size_comp,
OPJ_UINT32* l_width,
OPJ_UINT32* l_height,
OPJ_UINT32* l_offset_x,
OPJ_UINT32* l_offset_y,
OPJ_UINT32* l_image_width,
OPJ_UINT32* l_stride,
OPJ_UINT32* l_tile_offset) {
OPJ_UINT32 l_remaining;
*l_size_comp = l_img_comp->prec >> 3; /* (/8) */
l_remaining = l_img_comp->prec & 7; /* (%8) */
if (l_remaining) {
*l_size_comp += 1;
}
if (*l_size_comp == 3) {
*l_size_comp = 4;
}
*l_width = (OPJ_UINT32)(l_tilec->x1 - l_tilec->x0);
*l_height = (OPJ_UINT32)(l_tilec->y1 - l_tilec->y0);
*l_offset_x = (OPJ_UINT32)opj_int_ceildiv((OPJ_INT32)l_image->x0, (OPJ_INT32)l_img_comp->dx);
*l_offset_y = (OPJ_UINT32)opj_int_ceildiv((OPJ_INT32)l_image->y0, (OPJ_INT32)l_img_comp->dy);
*l_image_width = (OPJ_UINT32)opj_int_ceildiv((OPJ_INT32)l_image->x1 - (OPJ_INT32)l_image->x0, (OPJ_INT32)l_img_comp->dx);
*l_stride = *l_image_width - *l_width;
*l_tile_offset = ((OPJ_UINT32)l_tilec->x0 - *l_offset_x) + ((OPJ_UINT32)l_tilec->y0 - *l_offset_y) * *l_image_width;
}
static void opj_j2k_get_tile_data (opj_tcd_t * p_tcd, OPJ_BYTE * p_data)
{
OPJ_UINT32 i,j,k = 0;
for (i=0;i<p_tcd->image->numcomps;++i) {
opj_image_t * l_image = p_tcd->image;
OPJ_INT32 * l_src_ptr;
opj_tcd_tilecomp_t * l_tilec = p_tcd->tcd_image->tiles->comps + i;
opj_image_comp_t * l_img_comp = l_image->comps + i;
OPJ_UINT32 l_size_comp,l_width,l_height,l_offset_x,l_offset_y, l_image_width,l_stride,l_tile_offset;
opj_get_tile_dimensions(l_image,
l_tilec,
l_img_comp,
&l_size_comp,
&l_width,
&l_height,
&l_offset_x,
&l_offset_y,
&l_image_width,
&l_stride,
&l_tile_offset);
l_src_ptr = l_img_comp->data + l_tile_offset;
switch (l_size_comp) {
case 1:
{
OPJ_CHAR * l_dest_ptr = (OPJ_CHAR*) p_data;
if (l_img_comp->sgnd) {
for (j=0;j<l_height;++j) {
for (k=0;k<l_width;++k) {
*(l_dest_ptr) = (OPJ_CHAR) (*l_src_ptr);
++l_dest_ptr;
++l_src_ptr;
}
l_src_ptr += l_stride;
}
}
else {
for (j=0;j<l_height;++j) {
for (k=0;k<l_width;++k) {
*(l_dest_ptr) = (OPJ_CHAR)((*l_src_ptr)&0xff);
++l_dest_ptr;
++l_src_ptr;
}
l_src_ptr += l_stride;
}
}
p_data = (OPJ_BYTE*) l_dest_ptr;
}
break;
case 2:
{
OPJ_INT16 * l_dest_ptr = (OPJ_INT16 *) p_data;
if (l_img_comp->sgnd) {
for (j=0;j<l_height;++j) {
for (k=0;k<l_width;++k) {
*(l_dest_ptr++) = (OPJ_INT16) (*(l_src_ptr++));
}
l_src_ptr += l_stride;
}
}
else {
for (j=0;j<l_height;++j) {
for (k=0;k<l_width;++k) {
*(l_dest_ptr++) = (OPJ_INT16)((*(l_src_ptr++)) & 0xffff);
}
l_src_ptr += l_stride;
}
}
p_data = (OPJ_BYTE*) l_dest_ptr;
}
break;
case 4:
{
OPJ_INT32 * l_dest_ptr = (OPJ_INT32 *) p_data;
for (j=0;j<l_height;++j) {
for (k=0;k<l_width;++k) {
*(l_dest_ptr++) = *(l_src_ptr++);
}
l_src_ptr += l_stride;
}
p_data = (OPJ_BYTE*) l_dest_ptr;
}
break;
}
}
}
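/**
* Encodes the current tile into the preallocated tile buffer (first
* tile-part, then the remaining tile-parts) and flushes the buffer to the
* output stream, advancing the current tile number.
*/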
static OPJ_BOOL opj_j2k_post_write_tile ( opj_j2k_t * p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager )
{
OPJ_UINT32 l_nb_bytes_written;
OPJ_BYTE * l_current_data = 00;
OPJ_UINT32 l_tile_size = 0;
OPJ_UINT32 l_available_data;
/* preconditions */
assert(p_j2k->m_specific_param.m_encoder.m_encoded_tile_data);
l_tile_size = p_j2k->m_specific_param.m_encoder.m_encoded_tile_size;
l_available_data = l_tile_size;
l_current_data = p_j2k->m_specific_param.m_encoder.m_encoded_tile_data;
l_nb_bytes_written = 0;
if (! opj_j2k_write_first_tile_part(p_j2k,l_current_data,&l_nb_bytes_written,l_available_data,p_stream,p_manager)) {
return OPJ_FALSE;
}
l_current_data += l_nb_bytes_written;
l_available_data -= l_nb_bytes_written;
l_nb_bytes_written = 0;
if (! opj_j2k_write_all_tile_parts(p_j2k,l_current_data,&l_nb_bytes_written,l_available_data,p_stream,p_manager)) {
return OPJ_FALSE;
}
l_available_data -= l_nb_bytes_written;
l_nb_bytes_written = l_tile_size - l_available_data;
if ( opj_stream_write_data( p_stream,
p_j2k->m_specific_param.m_encoder.m_encoded_tile_data,
l_nb_bytes_written,p_manager) != l_nb_bytes_written) {
return OPJ_FALSE;
}
++p_j2k->m_current_tile_number;
return OPJ_TRUE;
}
static OPJ_BOOL opj_j2k_setup_end_compress (opj_j2k_t *p_j2k, opj_event_mgr_t * p_manager)
{
/* preconditions */
assert(p_j2k != 00);
assert(p_manager != 00);
/* DEVELOPER CORNER, insert your custom procedures */
if (! opj_procedure_list_add_procedure(p_j2k->m_procedure_list,(opj_procedure)opj_j2k_write_eoc, p_manager)) {
return OPJ_FALSE;
}
if (OPJ_IS_CINEMA(p_j2k->m_cp.rsiz)) {
if (! opj_procedure_list_add_procedure(p_j2k->m_procedure_list,(opj_procedure)opj_j2k_write_updated_tlm, p_manager)) {
return OPJ_FALSE;
}
}
if (! opj_procedure_list_add_procedure(p_j2k->m_procedure_list,(opj_procedure)opj_j2k_write_epc, p_manager)) {
return OPJ_FALSE;
}
if (! opj_procedure_list_add_procedure(p_j2k->m_procedure_list,(opj_procedure)opj_j2k_end_encoding, p_manager)) {
return OPJ_FALSE;
}
if (! opj_procedure_list_add_procedure(p_j2k->m_procedure_list,(opj_procedure)opj_j2k_destroy_header_memory, p_manager)) {
return OPJ_FALSE;
}
return OPJ_TRUE;
}
static OPJ_BOOL opj_j2k_setup_encoding_validation (opj_j2k_t *p_j2k, opj_event_mgr_t * p_manager)
{
/* preconditions */
assert(p_j2k != 00);
assert(p_manager != 00);
if (! opj_procedure_list_add_procedure(p_j2k->m_validation_list, (opj_procedure)opj_j2k_build_encoder, p_manager)) {
return OPJ_FALSE;
}
if (! opj_procedure_list_add_procedure(p_j2k->m_validation_list, (opj_procedure)opj_j2k_encoding_validation, p_manager)) {
return OPJ_FALSE;
}
/* DEVELOPER CORNER, add your custom validation procedure */
if (! opj_procedure_list_add_procedure(p_j2k->m_validation_list, (opj_procedure)opj_j2k_mct_validation, p_manager)) {
return OPJ_FALSE;
}
return OPJ_TRUE;
}
static OPJ_BOOL opj_j2k_setup_header_writing (opj_j2k_t *p_j2k, opj_event_mgr_t * p_manager)
{
/* preconditions */
assert(p_j2k != 00);
assert(p_manager != 00);
if (! opj_procedure_list_add_procedure(p_j2k->m_procedure_list,(opj_procedure)opj_j2k_init_info, p_manager)) {
return OPJ_FALSE;
}
if (! opj_procedure_list_add_procedure(p_j2k->m_procedure_list,(opj_procedure)opj_j2k_write_soc, p_manager)) {
return OPJ_FALSE;
}
if (! opj_procedure_list_add_procedure(p_j2k->m_procedure_list,(opj_procedure)opj_j2k_write_siz, p_manager)) {
return OPJ_FALSE;
}
if (! opj_procedure_list_add_procedure(p_j2k->m_procedure_list,(opj_procedure)opj_j2k_write_cod, p_manager)) {
return OPJ_FALSE;
}
if (! opj_procedure_list_add_procedure(p_j2k->m_procedure_list,(opj_procedure)opj_j2k_write_qcd, p_manager)) {
return OPJ_FALSE;
}
if (OPJ_IS_CINEMA(p_j2k->m_cp.rsiz)) {
/* No need for COC or QCC, QCD and COD are used
if (! opj_procedure_list_add_procedure(p_j2k->m_procedure_list,(opj_procedure)opj_j2k_write_all_coc, p_manager)) {
return OPJ_FALSE;
}
if (! opj_procedure_list_add_procedure(p_j2k->m_procedure_list,(opj_procedure)opj_j2k_write_all_qcc, p_manager)) {
return OPJ_FALSE;
}
*/
if (! opj_procedure_list_add_procedure(p_j2k->m_procedure_list,(opj_procedure)opj_j2k_write_tlm, p_manager)) {
return OPJ_FALSE;
}
if (p_j2k->m_cp.rsiz == OPJ_PROFILE_CINEMA_4K) {
if (! opj_procedure_list_add_procedure(p_j2k->m_procedure_list,(opj_procedure)opj_j2k_write_poc, p_manager)) {
return OPJ_FALSE;
}
}
}
if (! opj_procedure_list_add_procedure(p_j2k->m_procedure_list,(opj_procedure)opj_j2k_write_regions, p_manager)) {
return OPJ_FALSE;
}
if (p_j2k->m_cp.comment != 00) {
if (! opj_procedure_list_add_procedure(p_j2k->m_procedure_list,(opj_procedure)opj_j2k_write_com, p_manager)) {
return OPJ_FALSE;
}
}
/* DEVELOPER CORNER, insert your custom procedures */
if (p_j2k->m_cp.rsiz & OPJ_EXTENSION_MCT) {
if (! opj_procedure_list_add_procedure(p_j2k->m_procedure_list,(opj_procedure)opj_j2k_write_mct_data_group, p_manager)) {
return OPJ_FALSE;
}
}
/* End of Developer Corner */
if (p_j2k->cstr_index) {
if (! opj_procedure_list_add_procedure(p_j2k->m_procedure_list,(opj_procedure)opj_j2k_get_end_header, p_manager)) {
return OPJ_FALSE;
}
}
if (! opj_procedure_list_add_procedure(p_j2k->m_procedure_list,(opj_procedure)opj_j2k_create_tcd, p_manager)) {
return OPJ_FALSE;
}
if (! opj_procedure_list_add_procedure(p_j2k->m_procedure_list,(opj_procedure)opj_j2k_update_rates, p_manager)) {
return OPJ_FALSE;
}
return OPJ_TRUE;
}
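/**
* Writes the first tile-part of the current tile into p_data: the SOT
* marker, an optional POC marker, then SOD and the packet data. The Psot
* field of the SOT marker is patched in once the tile-part length is known.
*/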
static OPJ_BOOL opj_j2k_write_first_tile_part (opj_j2k_t *p_j2k,
OPJ_BYTE * p_data,
OPJ_UINT32 * p_data_written,
OPJ_UINT32 p_total_data_size,
opj_stream_private_t *p_stream,
struct opj_event_mgr * p_manager )
{
OPJ_UINT32 l_nb_bytes_written = 0;
OPJ_UINT32 l_current_nb_bytes_written;
OPJ_BYTE * l_begin_data = 00;
opj_tcd_t * l_tcd = 00;
opj_cp_t * l_cp = 00;
l_tcd = p_j2k->m_tcd;
l_cp = &(p_j2k->m_cp);
l_tcd->cur_pino = 0;
/*Get number of tile parts*/
p_j2k->m_specific_param.m_encoder.m_current_poc_tile_part_number = 0;
/* INDEX >> */
/* << INDEX */
l_current_nb_bytes_written = 0;
l_begin_data = p_data;
if (! opj_j2k_write_sot(p_j2k,p_data,&l_current_nb_bytes_written,p_stream,p_manager))
{
return OPJ_FALSE;
}
l_nb_bytes_written += l_current_nb_bytes_written;
p_data += l_current_nb_bytes_written;
p_total_data_size -= l_current_nb_bytes_written;
if (!OPJ_IS_CINEMA(l_cp->rsiz)) {
#if 0
for (compno = 1; compno < p_j2k->m_private_image->numcomps; compno++) {
l_current_nb_bytes_written = 0;
opj_j2k_write_coc_in_memory(p_j2k,compno,p_data,&l_current_nb_bytes_written,p_manager);
l_nb_bytes_written += l_current_nb_bytes_written;
p_data += l_current_nb_bytes_written;
p_total_data_size -= l_current_nb_bytes_written;
l_current_nb_bytes_written = 0;
opj_j2k_write_qcc_in_memory(p_j2k,compno,p_data,&l_current_nb_bytes_written,p_manager);
l_nb_bytes_written += l_current_nb_bytes_written;
p_data += l_current_nb_bytes_written;
p_total_data_size -= l_current_nb_bytes_written;
}
#endif
if (l_cp->tcps[p_j2k->m_current_tile_number].numpocs) {
l_current_nb_bytes_written = 0;
opj_j2k_write_poc_in_memory(p_j2k,p_data,&l_current_nb_bytes_written,p_manager);
l_nb_bytes_written += l_current_nb_bytes_written;
p_data += l_current_nb_bytes_written;
p_total_data_size -= l_current_nb_bytes_written;
}
}
l_current_nb_bytes_written = 0;
if (! opj_j2k_write_sod(p_j2k,l_tcd,p_data,&l_current_nb_bytes_written,p_total_data_size,p_stream,p_manager)) {
return OPJ_FALSE;
}
l_nb_bytes_written += l_current_nb_bytes_written;
* p_data_written = l_nb_bytes_written;
/* Writing Psot in SOT marker */
opj_write_bytes(l_begin_data + 6,l_nb_bytes_written,4); /* PSOT */
if (OPJ_IS_CINEMA(l_cp->rsiz)){
opj_j2k_update_tlm(p_j2k,l_nb_bytes_written);
}
return OPJ_TRUE;
}
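/**
* Writes the remaining tile-parts of the current tile, including those of
* each progression order change, patching the Psot field of every SOT
* marker with the actual tile-part length.
*/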
static OPJ_BOOL opj_j2k_write_all_tile_parts( opj_j2k_t *p_j2k,
OPJ_BYTE * p_data,
OPJ_UINT32 * p_data_written,
OPJ_UINT32 p_total_data_size,
opj_stream_private_t *p_stream,
struct opj_event_mgr * p_manager
)
{
OPJ_UINT32 tilepartno=0;
OPJ_UINT32 l_nb_bytes_written = 0;
OPJ_UINT32 l_current_nb_bytes_written;
OPJ_UINT32 l_part_tile_size;
OPJ_UINT32 tot_num_tp;
OPJ_UINT32 pino;
OPJ_BYTE * l_begin_data;
opj_tcp_t *l_tcp = 00;
opj_tcd_t * l_tcd = 00;
opj_cp_t * l_cp = 00;
l_tcd = p_j2k->m_tcd;
l_cp = &(p_j2k->m_cp);
l_tcp = l_cp->tcps + p_j2k->m_current_tile_number;
/*Get number of tile parts*/
tot_num_tp = opj_j2k_get_num_tp(l_cp,0,p_j2k->m_current_tile_number);
/* start writing remaining tile parts */
++p_j2k->m_specific_param.m_encoder.m_current_tile_part_number;
for (tilepartno = 1; tilepartno < tot_num_tp ; ++tilepartno) {
p_j2k->m_specific_param.m_encoder.m_current_poc_tile_part_number = tilepartno;
l_current_nb_bytes_written = 0;
l_part_tile_size = 0;
l_begin_data = p_data;
if (! opj_j2k_write_sot(p_j2k,p_data,&l_current_nb_bytes_written,p_stream,p_manager)) {
return OPJ_FALSE;
}
l_nb_bytes_written += l_current_nb_bytes_written;
p_data += l_current_nb_bytes_written;
p_total_data_size -= l_current_nb_bytes_written;
l_part_tile_size += l_current_nb_bytes_written;
l_current_nb_bytes_written = 0;
if (! opj_j2k_write_sod(p_j2k,l_tcd,p_data,&l_current_nb_bytes_written,p_total_data_size,p_stream,p_manager)) {
return OPJ_FALSE;
}
p_data += l_current_nb_bytes_written;
l_nb_bytes_written += l_current_nb_bytes_written;
p_total_data_size -= l_current_nb_bytes_written;
l_part_tile_size += l_current_nb_bytes_written;
/* Writing Psot in SOT marker */
opj_write_bytes(l_begin_data + 6,l_part_tile_size,4); /* PSOT */
if (OPJ_IS_CINEMA(l_cp->rsiz)) {
opj_j2k_update_tlm(p_j2k,l_part_tile_size);
}
++p_j2k->m_specific_param.m_encoder.m_current_tile_part_number;
}
for (pino = 1; pino <= l_tcp->numpocs; ++pino) {
l_tcd->cur_pino = pino;
/*Get number of tile parts*/
tot_num_tp = opj_j2k_get_num_tp(l_cp,pino,p_j2k->m_current_tile_number);
for (tilepartno = 0; tilepartno < tot_num_tp ; ++tilepartno) {
p_j2k->m_specific_param.m_encoder.m_current_poc_tile_part_number = tilepartno;
l_current_nb_bytes_written = 0;
l_part_tile_size = 0;
l_begin_data = p_data;
if (! opj_j2k_write_sot(p_j2k,p_data,&l_current_nb_bytes_written,p_stream,p_manager)) {
return OPJ_FALSE;
}
l_nb_bytes_written += l_current_nb_bytes_written;
p_data += l_current_nb_bytes_written;
p_total_data_size -= l_current_nb_bytes_written;
l_part_tile_size += l_current_nb_bytes_written;
l_current_nb_bytes_written = 0;
if (! opj_j2k_write_sod(p_j2k,l_tcd,p_data,&l_current_nb_bytes_written,p_total_data_size,p_stream,p_manager)) {
return OPJ_FALSE;
}
l_nb_bytes_written += l_current_nb_bytes_written;
p_data += l_current_nb_bytes_written;
p_total_data_size -= l_current_nb_bytes_written;
l_part_tile_size += l_current_nb_bytes_written;
/* Writing Psot in SOT marker */
opj_write_bytes(l_begin_data + 6,l_part_tile_size,4); /* PSOT */
if (OPJ_IS_CINEMA(l_cp->rsiz)) {
opj_j2k_update_tlm(p_j2k,l_part_tile_size);
}
++p_j2k->m_specific_param.m_encoder.m_current_tile_part_number;
}
}
*p_data_written = l_nb_bytes_written;
return OPJ_TRUE;
}
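/**
* Rewrites the TLM marker with the tile-part lengths gathered while
* encoding: seeks back to the TLM data (6 bytes past the marker start),
* overwrites it, and restores the stream position.
*/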
static OPJ_BOOL opj_j2k_write_updated_tlm( opj_j2k_t *p_j2k,
struct opj_stream_private *p_stream,
struct opj_event_mgr * p_manager )
{
OPJ_UINT32 l_tlm_size;
OPJ_OFF_T l_tlm_position, l_current_position;
/* preconditions */
assert(p_j2k != 00);
assert(p_manager != 00);
assert(p_stream != 00);
l_tlm_size = 5 * p_j2k->m_specific_param.m_encoder.m_total_tile_parts;
l_tlm_position = 6 + p_j2k->m_specific_param.m_encoder.m_tlm_start;
l_current_position = opj_stream_tell(p_stream);
if (! opj_stream_seek(p_stream,l_tlm_position,p_manager)) {
return OPJ_FALSE;
}
if (opj_stream_write_data(p_stream,p_j2k->m_specific_param.m_encoder.m_tlm_sot_offsets_buffer,l_tlm_size,p_manager) != l_tlm_size) {
return OPJ_FALSE;
}
if (! opj_stream_seek(p_stream,l_current_position,p_manager)) {
return OPJ_FALSE;
}
return OPJ_TRUE;
}
static OPJ_BOOL opj_j2k_end_encoding( opj_j2k_t *p_j2k,
struct opj_stream_private *p_stream,
struct opj_event_mgr * p_manager )
{
/* preconditions */
assert(p_j2k != 00);
assert(p_manager != 00);
assert(p_stream != 00);
opj_tcd_destroy(p_j2k->m_tcd);
p_j2k->m_tcd = 00;
if (p_j2k->m_specific_param.m_encoder.m_tlm_sot_offsets_buffer) {
opj_free(p_j2k->m_specific_param.m_encoder.m_tlm_sot_offsets_buffer);
p_j2k->m_specific_param.m_encoder.m_tlm_sot_offsets_buffer = 0;
p_j2k->m_specific_param.m_encoder.m_tlm_sot_offsets_current = 0;
}
if (p_j2k->m_specific_param.m_encoder.m_encoded_tile_data) {
opj_free(p_j2k->m_specific_param.m_encoder.m_encoded_tile_data);
p_j2k->m_specific_param.m_encoder.m_encoded_tile_data = 0;
}
p_j2k->m_specific_param.m_encoder.m_encoded_tile_size = 0;
return OPJ_TRUE;
}
/**
* Destroys the memory used for writing the codestream headers.
*/
static OPJ_BOOL opj_j2k_destroy_header_memory ( opj_j2k_t * p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager
)
{
/* preconditions */
assert(p_j2k != 00);
assert(p_stream != 00);
assert(p_manager != 00);
if (p_j2k->m_specific_param.m_encoder.m_header_tile_data) {
opj_free(p_j2k->m_specific_param.m_encoder.m_header_tile_data);
p_j2k->m_specific_param.m_encoder.m_header_tile_data = 0;
}
p_j2k->m_specific_param.m_encoder.m_header_tile_data_size = 0;
return OPJ_TRUE;
}
static OPJ_BOOL opj_j2k_init_info( opj_j2k_t *p_j2k,
struct opj_stream_private *p_stream,
struct opj_event_mgr * p_manager )
{
opj_codestream_info_t * l_cstr_info = 00;
/* preconditions */
assert(p_j2k != 00);
assert(p_manager != 00);
assert(p_stream != 00);
(void)l_cstr_info;
/* TODO mergeV2: check this part which uses cstr_info */
/*l_cstr_info = p_j2k->cstr_info;
if (l_cstr_info) {
OPJ_UINT32 compno;
l_cstr_info->tile = (opj_tile_info_t *) opj_malloc(p_j2k->m_cp.tw * p_j2k->m_cp.th * sizeof(opj_tile_info_t));
l_cstr_info->image_w = p_j2k->m_image->x1 - p_j2k->m_image->x0;
l_cstr_info->image_h = p_j2k->m_image->y1 - p_j2k->m_image->y0;
l_cstr_info->prog = (&p_j2k->m_cp.tcps[0])->prg;
l_cstr_info->tw = p_j2k->m_cp.tw;
l_cstr_info->th = p_j2k->m_cp.th;
l_cstr_info->tile_x = p_j2k->m_cp.tdx;*/ /* new version parser */
/*l_cstr_info->tile_y = p_j2k->m_cp.tdy;*/ /* new version parser */
/*l_cstr_info->tile_Ox = p_j2k->m_cp.tx0;*/ /* new version parser */
/*l_cstr_info->tile_Oy = p_j2k->m_cp.ty0;*/ /* new version parser */
/*l_cstr_info->numcomps = p_j2k->m_image->numcomps;
l_cstr_info->numlayers = (&p_j2k->m_cp.tcps[0])->numlayers;
l_cstr_info->numdecompos = (OPJ_INT32*) opj_malloc(p_j2k->m_image->numcomps * sizeof(OPJ_INT32));
for (compno=0; compno < p_j2k->m_image->numcomps; compno++) {
l_cstr_info->numdecompos[compno] = (&p_j2k->m_cp.tcps[0])->tccps->numresolutions - 1;
}
l_cstr_info->D_max = 0.0; */ /* ADD Marcela */
/*l_cstr_info->main_head_start = opj_stream_tell(p_stream);*/ /* position of SOC */
/*l_cstr_info->maxmarknum = 100;
l_cstr_info->marker = (opj_marker_info_t *) opj_malloc(l_cstr_info->maxmarknum * sizeof(opj_marker_info_t));
l_cstr_info->marknum = 0;
}*/
return opj_j2k_calculate_tp(p_j2k,&(p_j2k->m_cp),&p_j2k->m_specific_param.m_encoder.m_total_tile_parts,p_j2k->m_private_image,p_manager);
}
/**
* Creates the tile coder/decoder (TCD) used for encoding.
*
* @param p_j2k J2K codec.
* @param p_stream the stream to write data to.
* @param p_manager the user event manager.
*/
static OPJ_BOOL opj_j2k_create_tcd( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager
)
{
/* preconditions */
assert(p_j2k != 00);
assert(p_manager != 00);
assert(p_stream != 00);
p_j2k->m_tcd = opj_tcd_create(OPJ_FALSE);
if (! p_j2k->m_tcd) {
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to create Tile Coder\n");
return OPJ_FALSE;
}
if (!opj_tcd_init(p_j2k->m_tcd,p_j2k->m_private_image,&p_j2k->m_cp)) {
opj_tcd_destroy(p_j2k->m_tcd);
p_j2k->m_tcd = 00;
return OPJ_FALSE;
}
return OPJ_TRUE;
}
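/**
* Writes one tile of uncompressed component data supplied by the caller:
* allocates the tile component buffers, copies p_data into them, then
* encodes and flushes the tile to the stream.
*/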
OPJ_BOOL opj_j2k_write_tile (opj_j2k_t * p_j2k,
OPJ_UINT32 p_tile_index,
OPJ_BYTE * p_data,
OPJ_UINT32 p_data_size,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager )
{
if (! opj_j2k_pre_write_tile(p_j2k,p_tile_index,p_stream,p_manager)) {
opj_event_msg(p_manager, EVT_ERROR, "Error while opj_j2k_pre_write_tile with tile index = %d\n", p_tile_index);
return OPJ_FALSE;
}
else {
OPJ_UINT32 j;
/* Allocate data */
for (j=0;j<p_j2k->m_tcd->image->numcomps;++j) {
opj_tcd_tilecomp_t* l_tilec = p_j2k->m_tcd->tcd_image->tiles->comps + j;
if(! opj_alloc_tile_component_data(l_tilec)) {
opj_event_msg(p_manager, EVT_ERROR, "Error allocating tile component data." );
return OPJ_FALSE;
}
}
/* now copy this data into the tile component */
if (! opj_tcd_copy_tile_data(p_j2k->m_tcd,p_data,p_data_size)) {
opj_event_msg(p_manager, EVT_ERROR, "Size mismatch between tile data and sent data." );
return OPJ_FALSE;
}
if (! opj_j2k_post_write_tile(p_j2k,p_stream,p_manager)) {
opj_event_msg(p_manager, EVT_ERROR, "Error while opj_j2k_post_write_tile with tile index = %d\n", p_tile_index);
return OPJ_FALSE;
}
}
return OPJ_TRUE;
}
| ./CrossVul/dataset_final_sorted/CWE-416/c/good_1830_0 |
crossvul-cpp_data_good_5336_0 | /* tuner-xc2028
*
* Copyright (c) 2007-2008 Mauro Carvalho Chehab (mchehab@infradead.org)
*
* Copyright (c) 2007 Michel Ludwig (michel.ludwig@gmail.com)
* - frontend interface
*
* This code is placed under the terms of the GNU General Public License v2
*/
#include <linux/i2c.h>
#include <asm/div64.h>
#include <linux/firmware.h>
#include <linux/videodev2.h>
#include <linux/delay.h>
#include <media/tuner.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <asm/unaligned.h>
#include "tuner-i2c.h"
#include "tuner-xc2028.h"
#include "tuner-xc2028-types.h"
#include <linux/dvb/frontend.h>
#include "dvb_frontend.h"
/* Max transfer size done by I2C transfer functions */
#define MAX_XFER_SIZE 80
/* Registers (Write-only) */
#define XREG_INIT 0x00
#define XREG_RF_FREQ 0x02
#define XREG_POWER_DOWN 0x08
/* Registers (Read-only) */
#define XREG_FREQ_ERROR 0x01
#define XREG_LOCK 0x02
#define XREG_VERSION 0x04
#define XREG_PRODUCT_ID 0x08
#define XREG_HSYNC_FREQ 0x10
#define XREG_FRAME_LINES 0x20
#define XREG_SNR 0x40
#define XREG_ADC_ENV 0x0100
static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "enable verbose debug messages");
static int no_poweroff;
module_param(no_poweroff, int, 0644);
MODULE_PARM_DESC(no_poweroff, "0 (default) powers device off when not used.\n"
"1 keep device energized and with tuner ready all the times.\n"
" Faster, but consumes more power and keeps the device hotter\n");
static char audio_std[8];
module_param_string(audio_std, audio_std, sizeof(audio_std), 0);
MODULE_PARM_DESC(audio_std,
"Audio standard. XC3028 audio decoder explicitly "
"needs to know what audio\n"
"standard is needed for some video standards with audio A2 or NICAM.\n"
"The valid values are:\n"
"A2\n"
"A2/A\n"
"A2/B\n"
"NICAM\n"
"NICAM/A\n"
"NICAM/B\n");
static char firmware_name[30];
module_param_string(firmware_name, firmware_name, sizeof(firmware_name), 0);
MODULE_PARM_DESC(firmware_name, "Firmware file name. Allows overriding the "
"default firmware name\n");
static LIST_HEAD(hybrid_tuner_instance_list);
static DEFINE_MUTEX(xc2028_list_mutex);
/* struct for storing firmware table */
struct firmware_description {
unsigned int type;
v4l2_std_id id;
__u16 int_freq;
unsigned char *ptr;
unsigned int size;
};
struct firmware_properties {
unsigned int type;
v4l2_std_id id;
v4l2_std_id std_req;
__u16 int_freq;
unsigned int scode_table;
int scode_nr;
};
enum xc2028_state {
XC2028_NO_FIRMWARE = 0,
XC2028_WAITING_FIRMWARE,
XC2028_ACTIVE,
XC2028_SLEEP,
XC2028_NODEV,
};
struct xc2028_data {
struct list_head hybrid_tuner_instance_list;
struct tuner_i2c_props i2c_props;
__u32 frequency;
enum xc2028_state state;
const char *fname;
struct firmware_description *firm;
int firm_size;
__u16 firm_version;
__u16 hwmodel;
__u16 hwvers;
struct xc2028_ctrl ctrl;
struct firmware_properties cur_fw;
struct mutex lock;
};
#define i2c_send(priv, buf, size) ({ \
int _rc; \
_rc = tuner_i2c_xfer_send(&priv->i2c_props, buf, size); \
if (size != _rc) \
tuner_info("i2c output error: rc = %d (should be %d)\n",\
_rc, (int)size); \
if (priv->ctrl.msleep) \
msleep(priv->ctrl.msleep); \
_rc; \
})
#define i2c_send_recv(priv, obuf, osize, ibuf, isize) ({ \
int _rc; \
_rc = tuner_i2c_xfer_send_recv(&priv->i2c_props, obuf, osize, \
ibuf, isize); \
if (isize != _rc) \
tuner_err("i2c input error: rc = %d (should be %d)\n", \
_rc, (int)isize); \
if (priv->ctrl.msleep) \
msleep(priv->ctrl.msleep); \
_rc; \
})
#define send_seq(priv, data...) ({ \
static u8 _val[] = data; \
int _rc; \
if (sizeof(_val) != \
(_rc = tuner_i2c_xfer_send(&priv->i2c_props, \
_val, sizeof(_val)))) { \
tuner_err("Error on line %d: %d\n", __LINE__, _rc); \
} else if (priv->ctrl.msleep) \
msleep(priv->ctrl.msleep); \
_rc; \
})
static int xc2028_get_reg(struct xc2028_data *priv, u16 reg, u16 *val)
{
unsigned char buf[2];
unsigned char ibuf[2];
tuner_dbg("%s %04x called\n", __func__, reg);
buf[0] = reg >> 8;
buf[1] = (unsigned char) reg;
if (i2c_send_recv(priv, buf, 2, ibuf, 2) != 2)
return -EIO;
*val = (ibuf[1]) | (ibuf[0] << 8);
return 0;
}
#define dump_firm_type(t) dump_firm_type_and_int_freq(t, 0)
static void dump_firm_type_and_int_freq(unsigned int type, u16 int_freq)
{
if (type & BASE)
printk("BASE ");
if (type & INIT1)
printk("INIT1 ");
if (type & F8MHZ)
printk("F8MHZ ");
if (type & MTS)
printk("MTS ");
if (type & D2620)
printk("D2620 ");
if (type & D2633)
printk("D2633 ");
if (type & DTV6)
printk("DTV6 ");
if (type & QAM)
printk("QAM ");
if (type & DTV7)
printk("DTV7 ");
if (type & DTV78)
printk("DTV78 ");
if (type & DTV8)
printk("DTV8 ");
if (type & FM)
printk("FM ");
if (type & INPUT1)
printk("INPUT1 ");
if (type & LCD)
printk("LCD ");
if (type & NOGD)
printk("NOGD ");
if (type & MONO)
printk("MONO ");
if (type & ATSC)
printk("ATSC ");
if (type & IF)
printk("IF ");
if (type & LG60)
printk("LG60 ");
if (type & ATI638)
printk("ATI638 ");
if (type & OREN538)
printk("OREN538 ");
if (type & OREN36)
printk("OREN36 ");
if (type & TOYOTA388)
printk("TOYOTA388 ");
if (type & TOYOTA794)
printk("TOYOTA794 ");
if (type & DIBCOM52)
printk("DIBCOM52 ");
if (type & ZARLINK456)
printk("ZARLINK456 ");
if (type & CHINA)
printk("CHINA ");
if (type & F6MHZ)
printk("F6MHZ ");
if (type & INPUT2)
printk("INPUT2 ");
if (type & SCODE)
printk("SCODE ");
if (type & HAS_IF)
printk("HAS_IF_%d ", int_freq);
}
static v4l2_std_id parse_audio_std_option(void)
{
if (strcasecmp(audio_std, "A2") == 0)
return V4L2_STD_A2;
if (strcasecmp(audio_std, "A2/A") == 0)
return V4L2_STD_A2_A;
if (strcasecmp(audio_std, "A2/B") == 0)
return V4L2_STD_A2_B;
if (strcasecmp(audio_std, "NICAM") == 0)
return V4L2_STD_NICAM;
if (strcasecmp(audio_std, "NICAM/A") == 0)
return V4L2_STD_NICAM_A;
if (strcasecmp(audio_std, "NICAM/B") == 0)
return V4L2_STD_NICAM_B;
return 0;
}
static int check_device_status(struct xc2028_data *priv)
{
switch (priv->state) {
case XC2028_NO_FIRMWARE:
case XC2028_WAITING_FIRMWARE:
return -EAGAIN;
case XC2028_ACTIVE:
return 1;
case XC2028_SLEEP:
return 0;
case XC2028_NODEV:
return -ENODEV;
}
return 0;
}
static void free_firmware(struct xc2028_data *priv)
{
int i;
tuner_dbg("%s called\n", __func__);
if (!priv->firm)
return;
for (i = 0; i < priv->firm_size; i++)
kfree(priv->firm[i].ptr);
kfree(priv->firm);
priv->firm = NULL;
priv->firm_size = 0;
priv->state = XC2028_NO_FIRMWARE;
memset(&priv->cur_fw, 0, sizeof(priv->cur_fw));
}
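/*
* Parse a firmware file and fill priv->firm with one entry per image.
* File layout (little endian): a 32-byte name string, a 2-byte firmware
* version, a 2-byte image count, then, for each image, a 4-byte type, an
* 8-byte standard id, a 2-byte IF frequency (present only when the type
* has HAS_IF set), a 4-byte size and the image data itself.
*/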
static int load_all_firmwares(struct dvb_frontend *fe,
const struct firmware *fw)
{
struct xc2028_data *priv = fe->tuner_priv;
const unsigned char *p, *endp;
int rc = 0;
int n, n_array;
char name[33];
tuner_dbg("%s called\n", __func__);
p = fw->data;
endp = p + fw->size;
if (fw->size < sizeof(name) - 1 + 2 + 2) {
tuner_err("Error: firmware file %s has invalid size!\n",
priv->fname);
goto corrupt;
}
memcpy(name, p, sizeof(name) - 1);
name[sizeof(name) - 1] = 0;
p += sizeof(name) - 1;
priv->firm_version = get_unaligned_le16(p);
p += 2;
n_array = get_unaligned_le16(p);
p += 2;
tuner_info("Loading %d firmware images from %s, type: %s, ver %d.%d\n",
n_array, priv->fname, name,
priv->firm_version >> 8, priv->firm_version & 0xff);
priv->firm = kcalloc(n_array, sizeof(*priv->firm), GFP_KERNEL);
if (priv->firm == NULL) {
tuner_err("Not enough memory to load firmware file.\n");
rc = -ENOMEM;
goto err;
}
priv->firm_size = n_array;
n = -1;
while (p < endp) {
__u32 type, size;
v4l2_std_id id;
__u16 int_freq = 0;
n++;
if (n >= n_array) {
tuner_err("More firmware images in file than "
"were expected!\n");
goto corrupt;
}
/* Check if there are enough bytes to read */
if (endp - p < sizeof(type) + sizeof(id) + sizeof(size))
goto header;
type = get_unaligned_le32(p);
p += sizeof(type);
id = get_unaligned_le64(p);
p += sizeof(id);
if (type & HAS_IF) {
int_freq = get_unaligned_le16(p);
p += sizeof(int_freq);
if (endp - p < sizeof(size))
goto header;
}
size = get_unaligned_le32(p);
p += sizeof(size);
if (!size || size > endp - p) {
tuner_err("Firmware type ");
dump_firm_type(type);
printk("(%x), id %llx is corrupted "
"(size=%d, expected %d)\n",
type, (unsigned long long)id,
(unsigned)(endp - p), size);
goto corrupt;
}
priv->firm[n].ptr = kzalloc(size, GFP_KERNEL);
if (priv->firm[n].ptr == NULL) {
tuner_err("Not enough memory to load firmware file.\n");
rc = -ENOMEM;
goto err;
}
tuner_dbg("Reading firmware type ");
if (debug) {
dump_firm_type_and_int_freq(type, int_freq);
printk("(%x), id %llx, size=%d.\n",
type, (unsigned long long)id, size);
}
memcpy(priv->firm[n].ptr, p, size);
priv->firm[n].type = type;
priv->firm[n].id = id;
priv->firm[n].size = size;
priv->firm[n].int_freq = int_freq;
p += size;
}
if (n + 1 != priv->firm_size) {
tuner_err("Firmware file is incomplete!\n");
goto corrupt;
}
goto done;
header:
tuner_err("Firmware header is incomplete!\n");
corrupt:
rc = -EINVAL;
tuner_err("Error: firmware file is corrupted!\n");
err:
tuner_info("Releasing partially loaded firmware file.\n");
free_firmware(priv);
done:
if (rc == 0)
tuner_dbg("Firmware files loaded.\n");
else
priv->state = XC2028_NODEV;
return rc;
}
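/*
* Search the firmware table for the image that best fits the wanted type
* and video standard: first an exact match, then the generic image whose
* standard mask covers the largest number of the requested standard bits.
* On success, *id is updated to the id of the chosen image.
*/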
static int seek_firmware(struct dvb_frontend *fe, unsigned int type,
v4l2_std_id *id)
{
struct xc2028_data *priv = fe->tuner_priv;
int i, best_i = -1, best_nr_matches = 0;
unsigned int type_mask = 0;
tuner_dbg("%s called, want type=", __func__);
if (debug) {
dump_firm_type(type);
printk("(%x), id %016llx.\n", type, (unsigned long long)*id);
}
if (!priv->firm) {
tuner_err("Error! firmware not loaded\n");
return -EINVAL;
}
if (((type & ~SCODE) == 0) && (*id == 0))
*id = V4L2_STD_PAL;
if (type & BASE)
type_mask = BASE_TYPES;
else if (type & SCODE) {
type &= SCODE_TYPES;
type_mask = SCODE_TYPES & ~HAS_IF;
} else if (type & DTV_TYPES)
type_mask = DTV_TYPES;
else if (type & STD_SPECIFIC_TYPES)
type_mask = STD_SPECIFIC_TYPES;
type &= type_mask;
if (!(type & SCODE))
type_mask = ~0;
/* Seek for exact match */
for (i = 0; i < priv->firm_size; i++) {
if ((type == (priv->firm[i].type & type_mask)) &&
(*id == priv->firm[i].id))
goto found;
}
/* Seek for generic video standard match */
for (i = 0; i < priv->firm_size; i++) {
v4l2_std_id match_mask;
int nr_matches;
if (type != (priv->firm[i].type & type_mask))
continue;
match_mask = *id & priv->firm[i].id;
if (!match_mask)
continue;
if ((*id & match_mask) == *id)
goto found; /* Supports all the requested standards */
nr_matches = hweight64(match_mask);
if (nr_matches > best_nr_matches) {
best_nr_matches = nr_matches;
best_i = i;
}
}
if (best_nr_matches > 0) {
tuner_dbg("Selecting best matching firmware (%d bits) for "
"type=", best_nr_matches);
dump_firm_type(type);
printk("(%x), id %016llx:\n", type, (unsigned long long)*id);
i = best_i;
goto found;
}
/*FIXME: Would make sense to seek for type "hint" match ? */
i = -ENOENT;
goto ret;
found:
*id = priv->firm[i].id;
ret:
tuner_dbg("%s firmware for type=", (i < 0) ? "Can't find" : "Found");
if (debug) {
dump_firm_type(type);
printk("(%x), id %016llx.\n", type, (unsigned long long)*id);
}
return i;
}
static inline int do_tuner_callback(struct dvb_frontend *fe, int cmd, int arg)
{
struct xc2028_data *priv = fe->tuner_priv;
/* analog side (tuner-core) uses i2c_adap->algo_data.
* digital side is not guaranteed to have algo_data defined.
*
* digital side will always have fe->dvb defined.
* analog side (tuner-core) doesn't (yet) define fe->dvb.
*/
return (!fe->callback) ? -EINVAL :
fe->callback(((fe->dvb) && (fe->dvb->priv)) ?
fe->dvb->priv : priv->i2c_props.adap->algo_data,
DVB_FRONTEND_COMPONENT_TUNER, cmd, arg);
}
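/*
* Upload one firmware image to the tuner. Each chunk starts with a 16-bit
* size word: 0xffff ends the image, 0 requests a tuner reset, 0xff00
* requests a clock reset, a value with bit 15 set is a sleep time in ms,
* and anything else is the length of an I2C payload, sent in pieces of at
* most ctrl.max_len bytes.
*/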
static int load_firmware(struct dvb_frontend *fe, unsigned int type,
v4l2_std_id *id)
{
struct xc2028_data *priv = fe->tuner_priv;
int pos, rc;
unsigned char *p, *endp, buf[MAX_XFER_SIZE];
if (priv->ctrl.max_len > sizeof(buf))
priv->ctrl.max_len = sizeof(buf);
tuner_dbg("%s called\n", __func__);
pos = seek_firmware(fe, type, id);
if (pos < 0)
return pos;
tuner_info("Loading firmware for type=");
dump_firm_type(priv->firm[pos].type);
printk("(%x), id %016llx.\n", priv->firm[pos].type,
(unsigned long long)*id);
p = priv->firm[pos].ptr;
endp = p + priv->firm[pos].size;
while (p < endp) {
__u16 size;
/* Check if there are enough bytes to read */
if (p + sizeof(size) > endp) {
tuner_err("Firmware chunk size is wrong\n");
return -EINVAL;
}
size = le16_to_cpu(*(__le16 *) p);
p += sizeof(size);
if (size == 0xffff)
return 0;
if (!size) {
/* Special callback command received */
rc = do_tuner_callback(fe, XC2028_TUNER_RESET, 0);
if (rc < 0) {
tuner_err("Error at RESET code %d\n",
(*p) & 0x7f);
return -EINVAL;
}
continue;
}
if (size >= 0xff00) {
switch (size) {
case 0xff00:
rc = do_tuner_callback(fe, XC2028_RESET_CLK, 0);
if (rc < 0) {
tuner_err("Error at RESET code %d\n",
(*p) & 0x7f);
return -EINVAL;
}
break;
default:
tuner_info("Invalid RESET code %d\n",
size & 0x7f);
return -EINVAL;
}
continue;
}
/* Checks for a sleep command */
if (size & 0x8000) {
msleep(size & 0x7fff);
continue;
}
if (size + p > endp) {
tuner_err("missing bytes: need %d, have %d\n",
size, (int)(endp - p));
return -EINVAL;
}
buf[0] = *p;
p++;
size--;
/* Sends message chunks */
while (size > 0) {
int len = (size < priv->ctrl.max_len - 1) ?
size : priv->ctrl.max_len - 1;
memcpy(buf + 1, p, len);
rc = i2c_send(priv, buf, len + 1);
if (rc < 0) {
tuner_err("%d returned from send\n", rc);
return -EINVAL;
}
p += len;
size -= len;
}
/* silently fail if the frontend doesn't support I2C flush */
rc = do_tuner_callback(fe, XC2028_I2C_FLUSH, 0);
if ((rc < 0) && (rc != -EINVAL)) {
tuner_err("error executing flush: %d\n", rc);
return rc;
}
}
return 0;
}
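/*
* Load one 12-byte SCODE entry, selected either by firmware type and
* standard or by IF frequency, and activate it on the tuner.
*/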
static int load_scode(struct dvb_frontend *fe, unsigned int type,
v4l2_std_id *id, __u16 int_freq, int scode)
{
struct xc2028_data *priv = fe->tuner_priv;
int pos, rc;
unsigned char *p;
tuner_dbg("%s called\n", __func__);
if (!int_freq) {
pos = seek_firmware(fe, type, id);
if (pos < 0)
return pos;
} else {
for (pos = 0; pos < priv->firm_size; pos++) {
if ((priv->firm[pos].int_freq == int_freq) &&
(priv->firm[pos].type & HAS_IF))
break;
}
if (pos == priv->firm_size)
return -ENOENT;
}
p = priv->firm[pos].ptr;
if (priv->firm[pos].type & HAS_IF) {
if (priv->firm[pos].size != 12 * 16 || scode >= 16)
return -EINVAL;
p += 12 * scode;
} else {
/* 16 SCODE entries per file; each SCODE entry is 12 bytes and
* has a 2-byte size header in the firmware format. */
if (priv->firm[pos].size != 14 * 16 || scode >= 16 ||
le16_to_cpu(*(__le16 *)(p + 14 * scode)) != 12)
return -EINVAL;
p += 14 * scode + 2;
}
tuner_info("Loading SCODE for type=");
dump_firm_type_and_int_freq(priv->firm[pos].type,
priv->firm[pos].int_freq);
printk("(%x), id %016llx.\n", priv->firm[pos].type,
(unsigned long long)*id);
if (priv->firm_version < 0x0202)
rc = send_seq(priv, {0x20, 0x00, 0x00, 0x00});
else
rc = send_seq(priv, {0xa0, 0x00, 0x00, 0x00});
if (rc < 0)
return -EIO;
rc = i2c_send(priv, p, 12);
if (rc < 0)
return -EIO;
rc = send_seq(priv, {0x00, 0x8c});
if (rc < 0)
return -EIO;
return 0;
}
static int xc2028_sleep(struct dvb_frontend *fe);
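/*
* Make sure the firmware matching the requested type, standard and IF
* frequency is loaded, reloading the BASE, INIT1, standard-specific and
* SCODE images only when needed, then verify the device by reading back
* its version registers. Retries up to 8 times before giving up.
*/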
static int check_firmware(struct dvb_frontend *fe, unsigned int type,
v4l2_std_id std, __u16 int_freq)
{
struct xc2028_data *priv = fe->tuner_priv;
struct firmware_properties new_fw;
int rc, retry_count = 0;
u16 version, hwmodel;
v4l2_std_id std0;
tuner_dbg("%s called\n", __func__);
rc = check_device_status(priv);
if (rc < 0)
return rc;
if (priv->ctrl.mts && !(type & FM))
type |= MTS;
retry:
new_fw.type = type;
new_fw.id = std;
new_fw.std_req = std;
new_fw.scode_table = SCODE | priv->ctrl.scode_table;
new_fw.scode_nr = 0;
new_fw.int_freq = int_freq;
tuner_dbg("checking firmware, user requested type=");
if (debug) {
dump_firm_type(new_fw.type);
printk("(%x), id %016llx, ", new_fw.type,
(unsigned long long)new_fw.std_req);
if (!int_freq) {
printk("scode_tbl ");
dump_firm_type(priv->ctrl.scode_table);
printk("(%x), ", priv->ctrl.scode_table);
} else
printk("int_freq %d, ", new_fw.int_freq);
printk("scode_nr %d\n", new_fw.scode_nr);
}
/*
* No need to reload base firmware if it matches and if the tuner
* is not in sleep mode
*/
if ((priv->state == XC2028_ACTIVE) &&
(((BASE | new_fw.type) & BASE_TYPES) ==
(priv->cur_fw.type & BASE_TYPES))) {
tuner_dbg("BASE firmware not changed.\n");
goto skip_base;
}
/* Updating BASE - forget about all currently loaded firmware */
memset(&priv->cur_fw, 0, sizeof(priv->cur_fw));
/* Reset is needed before loading firmware */
rc = do_tuner_callback(fe, XC2028_TUNER_RESET, 0);
if (rc < 0)
goto fail;
/* BASE firmwares are all std0 */
std0 = 0;
rc = load_firmware(fe, BASE | new_fw.type, &std0);
if (rc < 0) {
tuner_err("Error %d while loading base firmware\n",
rc);
goto fail;
}
/* Load INIT1, if needed */
tuner_dbg("Load init1 firmware, if exists\n");
rc = load_firmware(fe, BASE | INIT1 | new_fw.type, &std0);
if (rc == -ENOENT)
rc = load_firmware(fe, (BASE | INIT1 | new_fw.type) & ~F8MHZ,
&std0);
if (rc < 0 && rc != -ENOENT) {
tuner_err("Error %d while loading init1 firmware\n",
rc);
goto fail;
}
skip_base:
/*
* No need to reload standard specific firmware if base firmware
* was not reloaded and requested video standards have not changed.
*/
if (priv->cur_fw.type == (BASE | new_fw.type) &&
priv->cur_fw.std_req == std) {
tuner_dbg("Std-specific firmware already loaded.\n");
goto skip_std_specific;
}
/* Reloading std-specific firmware forces a SCODE update */
priv->cur_fw.scode_table = 0;
rc = load_firmware(fe, new_fw.type, &new_fw.id);
if (rc == -ENOENT)
rc = load_firmware(fe, new_fw.type & ~F8MHZ, &new_fw.id);
if (rc < 0)
goto fail;
skip_std_specific:
if (priv->cur_fw.scode_table == new_fw.scode_table &&
priv->cur_fw.scode_nr == new_fw.scode_nr) {
tuner_dbg("SCODE firmware already loaded.\n");
goto check_device;
}
if (new_fw.type & FM)
goto check_device;
/* Load SCODE firmware, if exists */
tuner_dbg("Trying to load scode %d\n", new_fw.scode_nr);
rc = load_scode(fe, new_fw.type | new_fw.scode_table, &new_fw.id,
new_fw.int_freq, new_fw.scode_nr);
check_device:
if (xc2028_get_reg(priv, 0x0004, &version) < 0 ||
xc2028_get_reg(priv, 0x0008, &hwmodel) < 0) {
tuner_err("Unable to read tuner registers.\n");
goto fail;
}
tuner_dbg("Device is Xceive %d version %d.%d, "
"firmware version %d.%d\n",
hwmodel, (version & 0xf000) >> 12, (version & 0xf00) >> 8,
(version & 0xf0) >> 4, version & 0xf);
if (priv->ctrl.read_not_reliable)
goto read_not_reliable;
/* Check firmware version against what we downloaded. */
if (priv->firm_version != ((version & 0xf0) << 4 | (version & 0x0f))) {
if (!priv->ctrl.read_not_reliable) {
tuner_err("Incorrect readback of firmware version.\n");
goto fail;
} else {
tuner_err("Returned an incorrect version. However, "
"read is not reliable enough. Ignoring it.\n");
hwmodel = 3028;
}
}
/* Check that the tuner hardware model remains consistent over time. */
if (priv->hwmodel == 0 && (hwmodel == 2028 || hwmodel == 3028)) {
priv->hwmodel = hwmodel;
priv->hwvers = version & 0xff00;
} else if (priv->hwmodel == 0 || priv->hwmodel != hwmodel ||
priv->hwvers != (version & 0xff00)) {
tuner_err("Read invalid device hardware information - tuner "
"hung?\n");
goto fail;
}
read_not_reliable:
priv->cur_fw = new_fw;
/*
* By setting BASE in cur_fw.type only after successfully loading all
* firmwares, we can:
* 1. Identify that BASE firmware with type=0 has been loaded;
* 2. Tell whether BASE firmware was just changed the next time through.
*/
priv->cur_fw.type |= BASE;
priv->state = XC2028_ACTIVE;
return 0;
fail:
priv->state = XC2028_NO_FIRMWARE;
memset(&priv->cur_fw, 0, sizeof(priv->cur_fw));
if (retry_count < 8) {
msleep(50);
retry_count++;
tuner_dbg("Retrying firmware load\n");
goto retry;
}
/* Firmware didn't load. Put the device to sleep */
xc2028_sleep(fe);
if (rc == -ENOENT)
rc = -EINVAL;
return rc;
}
static int xc2028_signal(struct dvb_frontend *fe, u16 *strength)
{
struct xc2028_data *priv = fe->tuner_priv;
u16 frq_lock, signal = 0;
int rc, i;
tuner_dbg("%s called\n", __func__);
rc = check_device_status(priv);
if (rc < 0)
return rc;
/* If the device is sleeping, no channel is tuned */
if (!rc) {
*strength = 0;
return 0;
}
mutex_lock(&priv->lock);
/* Sync Lock Indicator */
for (i = 0; i < 3; i++) {
rc = xc2028_get_reg(priv, XREG_LOCK, &frq_lock);
if (rc < 0)
goto ret;
if (frq_lock)
break;
msleep(6);
}
/* Frequency didn't lock */
if (frq_lock == 2)
goto ret;
/* Get SNR of the video signal */
rc = xc2028_get_reg(priv, XREG_SNR, &signal);
if (rc < 0)
goto ret;
/* Signal level is 3 bits only */
signal = ((1 << 12) - 1) | ((signal & 0x07) << 12);
ret:
mutex_unlock(&priv->lock);
*strength = signal;
tuner_dbg("signal strength is %d\n", signal);
return rc;
}
static int xc2028_get_afc(struct dvb_frontend *fe, s32 *afc)
{
struct xc2028_data *priv = fe->tuner_priv;
int i, rc;
u16 frq_lock = 0;
s16 afc_reg = 0;
rc = check_device_status(priv);
if (rc < 0)
return rc;
/* If the device is sleeping, no channel is tuned */
if (!rc) {
*afc = 0;
return 0;
}
mutex_lock(&priv->lock);
/* Sync Lock Indicator */
for (i = 0; i < 3; i++) {
rc = xc2028_get_reg(priv, XREG_LOCK, &frq_lock);
if (rc < 0)
goto ret;
if (frq_lock)
break;
msleep(6);
}
/* Frequency didn't lock */
if (frq_lock == 2)
goto ret;
/* Get AFC */
rc = xc2028_get_reg(priv, XREG_FREQ_ERROR, (u16 *) &afc_reg);
if (rc < 0)
goto ret;
*afc = afc_reg * 15625; /* Hz */
tuner_dbg("AFC is %d Hz\n", *afc);
ret:
mutex_unlock(&priv->lock);
return rc;
}
#define DIV 15625
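/*
* Common tune routine for analog TV, radio and digital modes: load the
* right firmware, apply the mode-dependent center-frequency offset and
* program the RF frequency register in steps of 15625 Hz.
*/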
static int generic_set_freq(struct dvb_frontend *fe, u32 freq /* in HZ */,
enum v4l2_tuner_type new_type,
unsigned int type,
v4l2_std_id std,
u16 int_freq)
{
struct xc2028_data *priv = fe->tuner_priv;
int rc = -EINVAL;
unsigned char buf[4];
u32 div, offset = 0;
tuner_dbg("%s called\n", __func__);
mutex_lock(&priv->lock);
tuner_dbg("should set frequency %d kHz\n", freq / 1000);
if (check_firmware(fe, type, std, int_freq) < 0)
goto ret;
/* On some cases xc2028 can disable video output, if
* very weak signals are received. By sending a soft
* reset, this is re-enabled. So, it is better to always
* send a soft reset before changing channels, to be sure
* that xc2028 will be in a safe state.
* Maybe this might also be needed for DTV.
*/
switch (new_type) {
case V4L2_TUNER_ANALOG_TV:
rc = send_seq(priv, {0x00, 0x00});
/* Analog mode requires offset = 0 */
break;
case V4L2_TUNER_RADIO:
/* Radio mode requires offset = 0 */
break;
case V4L2_TUNER_DIGITAL_TV:
/*
* Digital modes require an offset to adjust to the
* proper frequency. The offset depends on what
* firmware version is used.
*/
/*
* Adjust to the center frequency. This is calculated by the
* formula: offset = 1.25MHz - BW/2
* For DTV 7/8, the firmware uses BW = 8000, so it needs a
* further adjustment to get the frequency center on VHF
*/
/*
* The firmware DTV78 used to work fine in UHF band (8 MHz
* bandwidth) but not at all in VHF band (7 MHz bandwidth).
* The real problem was connected to the formula used to
* calculate the center frequency offset in VHF band.
* In fact, removing the 500KHz adjustment fixed the problem.
* This is coherent to what was implemented for the DTV7
* firmware.
* In the end, now the center frequency is the same for all 3
* firmwares (DTV7, DTV8, DTV78) and doesn't depend on channel
* bandwidth.
*/
if (priv->cur_fw.type & DTV6)
offset = 1750000;
else /* DTV7 or DTV8 or DTV78 */
offset = 2750000;
/*
* xc3028 additional "magic"
* Depending on the firmware version, it needs some adjustments
* to properly centralize the frequency. This seems to be
* needed to compensate the SCODE table adjustments made by
* newer firmwares
*/
/*
* The proper adjustment would be to do it at s-code table.
* However, this didn't work, as reported by
* Robert Lowery <rglowery@exemail.com.au>
*/
#if 0
/*
* Still need tests for XC3028L (firmware 3.2 or upper)
* So, for now, let's just comment the per-firmware
* version of this change. Reports with xc3028l working
* with and without the lines below are welcome
*/
if (priv->firm_version < 0x0302) {
if (priv->cur_fw.type & DTV7)
offset += 500000;
} else {
if (priv->cur_fw.type & DTV7)
offset -= 300000;
else if (type != ATSC) /* DVB @6MHz, DTV 8 and DTV 7/8 */
offset += 200000;
}
#endif
break;
default:
tuner_err("Unsupported tuner type %d.\n", new_type);
break;
}
div = (freq - offset + DIV / 2) / DIV;
/* CMD= Set frequency */
if (priv->firm_version < 0x0202)
rc = send_seq(priv, {0x00, XREG_RF_FREQ, 0x00, 0x00});
else
rc = send_seq(priv, {0x80, XREG_RF_FREQ, 0x00, 0x00});
if (rc < 0)
goto ret;
/* Return code shouldn't be checked.
The reset CLK is needed only with tm6000.
Driver should work fine even if this fails.
*/
if (priv->ctrl.msleep)
msleep(priv->ctrl.msleep);
do_tuner_callback(fe, XC2028_RESET_CLK, 1);
msleep(10);
buf[0] = 0xff & (div >> 24);
buf[1] = 0xff & (div >> 16);
buf[2] = 0xff & (div >> 8);
buf[3] = 0xff & (div);
rc = i2c_send(priv, buf, sizeof(buf));
if (rc < 0)
goto ret;
msleep(100);
priv->frequency = freq;
tuner_dbg("divisor= %*ph (freq=%d.%03d)\n", 4, buf,
freq / 1000000, (freq % 1000000) / 1000);
rc = 0;
ret:
mutex_unlock(&priv->lock);
return rc;
}
static int xc2028_set_analog_freq(struct dvb_frontend *fe,
struct analog_parameters *p)
{
struct xc2028_data *priv = fe->tuner_priv;
unsigned int type=0;
tuner_dbg("%s called\n", __func__);
if (p->mode == V4L2_TUNER_RADIO) {
type |= FM;
if (priv->ctrl.input1)
type |= INPUT1;
return generic_set_freq(fe, (625l * p->frequency) / 10,
V4L2_TUNER_RADIO, type, 0, 0);
}
/* if std is not defined, choose one */
if (!p->std)
p->std = V4L2_STD_MN;
/* PAL/M, PAL/N, PAL/Nc and NTSC variants should use 6MHz firmware */
if (!(p->std & V4L2_STD_MN))
type |= F8MHZ;
/* Add audio hack to std mask */
p->std |= parse_audio_std_option();
return generic_set_freq(fe, 62500l * p->frequency,
V4L2_TUNER_ANALOG_TV, type, p->std, 0);
}
static int xc2028_set_params(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
u32 delsys = c->delivery_system;
u32 bw = c->bandwidth_hz;
struct xc2028_data *priv = fe->tuner_priv;
int rc;
unsigned int type = 0;
u16 demod = 0;
tuner_dbg("%s called\n", __func__);
rc = check_device_status(priv);
if (rc < 0)
return rc;
switch (delsys) {
case SYS_DVBT:
case SYS_DVBT2:
/*
* The only countries with 6MHz seem to be Taiwan/Uruguay.
* Both seem to require QAM firmware for OFDM decoding
* Tested in Taiwan by Terry Wu <terrywu2009@gmail.com>
*/
if (bw <= 6000000)
type |= QAM;
switch (priv->ctrl.type) {
case XC2028_D2633:
type |= D2633;
break;
case XC2028_D2620:
type |= D2620;
break;
case XC2028_AUTO:
default:
/* Zarlink seems to need D2633 */
if (priv->ctrl.demod == XC3028_FE_ZARLINK456)
type |= D2633;
else
type |= D2620;
}
break;
case SYS_ATSC:
/* The only ATSC firmware (at least on v2.7) is D2633 */
type |= ATSC | D2633;
break;
/* DVB-S and pure QAM (FE_QAM) are not supported */
default:
return -EINVAL;
}
if (bw <= 6000000) {
type |= DTV6;
priv->ctrl.vhfbw7 = 0;
priv->ctrl.uhfbw8 = 0;
} else if (bw <= 7000000) {
if (c->frequency < 470000000)
priv->ctrl.vhfbw7 = 1;
else
priv->ctrl.uhfbw8 = 0;
type |= (priv->ctrl.vhfbw7 && priv->ctrl.uhfbw8) ? DTV78 : DTV7;
type |= F8MHZ;
} else {
if (c->frequency < 470000000)
priv->ctrl.vhfbw7 = 0;
else
priv->ctrl.uhfbw8 = 1;
type |= (priv->ctrl.vhfbw7 && priv->ctrl.uhfbw8) ? DTV78 : DTV8;
type |= F8MHZ;
}
/* All S-code tables need a 200kHz shift */
if (priv->ctrl.demod) {
demod = priv->ctrl.demod;
/*
* Newer firmwares require a 200 kHz offset only for ATSC
*/
if (type == ATSC || priv->firm_version < 0x0302)
demod += 200;
/*
* The DTV7 S-code table needs a 700 kHz shift.
*
* DTV7 is only used in Australia. Germany or Italy may also
* use this firmware after initialization, but a tune to a UHF
* channel should then cause DTV78 to be used.
*
* Unfortunately, on real-field tests, the s-code offset
* didn't work as expected, as reported by
* Robert Lowery <rglowery@exemail.com.au>
*/
}
return generic_set_freq(fe, c->frequency,
V4L2_TUNER_DIGITAL_TV, type, 0, demod);
}
static int xc2028_sleep(struct dvb_frontend *fe)
{
struct xc2028_data *priv = fe->tuner_priv;
int rc;
rc = check_device_status(priv);
if (rc < 0)
return rc;
/* Device is already in sleep mode */
if (!rc)
return 0;
/* Avoid firmware reload on slow devices or if PM disabled */
if (no_poweroff || priv->ctrl.disable_power_mgmt)
return 0;
tuner_dbg("Putting xc2028/3028 into poweroff mode.\n");
if (debug > 1) {
tuner_dbg("Printing sleep stack trace:\n");
dump_stack();
}
mutex_lock(&priv->lock);
if (priv->firm_version < 0x0202)
rc = send_seq(priv, {0x00, XREG_POWER_DOWN, 0x00, 0x00});
else
rc = send_seq(priv, {0x80, XREG_POWER_DOWN, 0x00, 0x00});
if (rc >= 0)
priv->state = XC2028_SLEEP;
mutex_unlock(&priv->lock);
return rc;
}
static int xc2028_dvb_release(struct dvb_frontend *fe)
{
struct xc2028_data *priv = fe->tuner_priv;
tuner_dbg("%s called\n", __func__);
mutex_lock(&xc2028_list_mutex);
/* only perform final cleanup if this is the last instance */
if (hybrid_tuner_report_instance_count(priv) == 1) {
free_firmware(priv);
kfree(priv->ctrl.fname);
priv->ctrl.fname = NULL;
}
if (priv)
hybrid_tuner_release_state(priv);
mutex_unlock(&xc2028_list_mutex);
fe->tuner_priv = NULL;
return 0;
}
static int xc2028_get_frequency(struct dvb_frontend *fe, u32 *frequency)
{
struct xc2028_data *priv = fe->tuner_priv;
int rc;
tuner_dbg("%s called\n", __func__);
rc = check_device_status(priv);
if (rc < 0)
return rc;
*frequency = priv->frequency;
return 0;
}
static void load_firmware_cb(const struct firmware *fw,
void *context)
{
struct dvb_frontend *fe = context;
struct xc2028_data *priv = fe->tuner_priv;
int rc;
tuner_dbg("request_firmware_nowait(): %s\n", fw ? "OK" : "error");
if (!fw) {
tuner_err("Could not load firmware %s.\n", priv->fname);
priv->state = XC2028_NODEV;
return;
}
rc = load_all_firmwares(fe, fw);
release_firmware(fw);
if (rc < 0)
return;
priv->state = XC2028_ACTIVE;
}
static int xc2028_set_config(struct dvb_frontend *fe, void *priv_cfg)
{
struct xc2028_data *priv = fe->tuner_priv;
struct xc2028_ctrl *p = priv_cfg;
int rc = 0;
tuner_dbg("%s called\n", __func__);
mutex_lock(&priv->lock);
/*
* Copy the config data.
* For the firmware name, keep a local copy of the string,
* in order to avoid troubles during device release.
*/
kfree(priv->ctrl.fname);
priv->ctrl.fname = NULL;
memcpy(&priv->ctrl, p, sizeof(priv->ctrl));
if (p->fname) {
priv->ctrl.fname = kstrdup(p->fname, GFP_KERNEL);
if (priv->ctrl.fname == NULL) {
/* don't leave priv->lock held on the error path */
mutex_unlock(&priv->lock);
return -ENOMEM;
}
}
/*
* If firmware name changed, frees firmware. As free_firmware will
* reset the status to NO_FIRMWARE, this forces a new request_firmware
*/
if (!firmware_name[0] && p->fname &&
priv->fname && strcmp(p->fname, priv->fname))
free_firmware(priv);
if (priv->ctrl.max_len < 9)
priv->ctrl.max_len = 13;
if (priv->state == XC2028_NO_FIRMWARE) {
if (!firmware_name[0])
priv->fname = priv->ctrl.fname;
else
priv->fname = firmware_name;
rc = request_firmware_nowait(THIS_MODULE, 1,
priv->fname,
priv->i2c_props.adap->dev.parent,
GFP_KERNEL,
fe, load_firmware_cb);
if (rc < 0) {
tuner_err("Failed to request firmware %s\n",
priv->fname);
priv->state = XC2028_NODEV;
} else
priv->state = XC2028_WAITING_FIRMWARE;
}
mutex_unlock(&priv->lock);
return rc;
}
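/*
 * Editor's illustration, not part of the original driver: a typical
 * bridge-driver call sequence, with hypothetical variable names.
 * xc2028_set_config() duplicates ctrl.fname with kstrdup(), so the
 * caller keeps ownership of its own string:
 *
 * struct xc2028_ctrl ctl = {
 * .fname = XC2028_DEFAULT_FIRMWARE,
 * .max_len = 64,
 * };
 * struct xc2028_config cfg = {
 * .i2c_adap = adap,
 * .i2c_addr = 0x61,
 * .ctrl = &ctl,
 * };
 * fe = xc2028_attach(fe, &cfg);
 */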
static const struct dvb_tuner_ops xc2028_dvb_tuner_ops = {
.info = {
.name = "Xceive XC3028",
.frequency_min = 42000000,
.frequency_max = 864000000,
.frequency_step = 50000,
},
.set_config = xc2028_set_config,
.set_analog_params = xc2028_set_analog_freq,
.release = xc2028_dvb_release,
.get_frequency = xc2028_get_frequency,
.get_rf_strength = xc2028_signal,
.get_afc = xc2028_get_afc,
.set_params = xc2028_set_params,
.sleep = xc2028_sleep,
};
struct dvb_frontend *xc2028_attach(struct dvb_frontend *fe,
struct xc2028_config *cfg)
{
struct xc2028_data *priv;
int instance;
if (debug)
printk(KERN_DEBUG "xc2028: Xcv2028/3028 init called!\n");
if (NULL == cfg)
return NULL;
if (!fe) {
printk(KERN_ERR "xc2028: No frontend!\n");
return NULL;
}
mutex_lock(&xc2028_list_mutex);
instance = hybrid_tuner_request_state(struct xc2028_data, priv,
hybrid_tuner_instance_list,
cfg->i2c_adap, cfg->i2c_addr,
"xc2028");
switch (instance) {
case 0:
/* memory allocation failure */
goto fail;
case 1:
/* new tuner instance */
priv->ctrl.max_len = 13;
mutex_init(&priv->lock);
fe->tuner_priv = priv;
break;
case 2:
/* existing tuner instance */
fe->tuner_priv = priv;
break;
}
memcpy(&fe->ops.tuner_ops, &xc2028_dvb_tuner_ops,
sizeof(xc2028_dvb_tuner_ops));
tuner_info("type set to %s\n", "XCeive xc2028/xc3028 tuner");
if (cfg->ctrl)
xc2028_set_config(fe, cfg->ctrl);
mutex_unlock(&xc2028_list_mutex);
return fe;
fail:
mutex_unlock(&xc2028_list_mutex);
xc2028_dvb_release(fe);
return NULL;
}
EXPORT_SYMBOL(xc2028_attach);
MODULE_DESCRIPTION("Xceive xc2028/xc3028 tuner driver");
MODULE_AUTHOR("Michel Ludwig <michel.ludwig@gmail.com>");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(XC2028_DEFAULT_FIRMWARE);
MODULE_FIRMWARE(XC3028L_DEFAULT_FIRMWARE);
| ./CrossVul/dataset_final_sorted/CWE-416/c/good_5336_0 |
crossvul-cpp_data_bad_3348_1 | /*
Copyright (c) 2013-2014. The YARA Authors. All Rights Reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define _GNU_SOURCE
#include <string.h>
#include <assert.h>
#include <time.h>
#include <math.h>
#include <yara/endian.h>
#include <yara/exec.h>
#include <yara/limits.h>
#include <yara/error.h>
#include <yara/object.h>
#include <yara/modules.h>
#include <yara/re.h>
#include <yara/strutils.h>
#include <yara/utils.h>
#include <yara/mem.h>
#include <yara.h>
#define MEM_SIZE MAX_LOOP_NESTING * LOOP_LOCAL_VARS
#define push(x) \
if (sp < stack_size) \
{ \
stack[sp++] = (x); \
} \
else \
{ \
result = ERROR_EXEC_STACK_OVERFLOW; \
stop = TRUE; \
break; \
}
#define pop(x) x = stack[--sp]
#define is_undef(x) IS_UNDEFINED((x).i)
#define ensure_defined(x) \
if (is_undef(x)) \
{ \
r1.i = UNDEFINED; \
push(r1); \
break; \
}
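/*
 * Editor's note, not part of the original file: these macros are
 * statement fragments that only make sense inside the main opcode
 * switch of yr_execute_code() below. A typical binary handler expands
 * to the following pattern:
 *
 * pop(r2);
 * pop(r1);
 * ensure_defined(r1); breaks out pushing UNDEFINED if r1 is undefined
 * r1.i = r1.i + r2.i;
 * push(r1); bounds-checked, may set ERROR_EXEC_STACK_OVERFLOW
 */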
#define little_endian_uint8_t(x) (x)
#define little_endian_int8_t(x) (x)
#define little_endian_uint16_t(x) yr_le16toh(x)
#define little_endian_int16_t(x) yr_le16toh(x)
#define little_endian_uint32_t(x) yr_le32toh(x)
#define little_endian_int32_t(x) yr_le32toh(x)
#define big_endian_uint8_t(x) (x)
#define big_endian_int8_t(x) (x)
#define big_endian_uint16_t(x) yr_be16toh(x)
#define big_endian_int16_t(x) yr_be16toh(x)
#define big_endian_uint32_t(x) yr_be32toh(x)
#define big_endian_int32_t(x) yr_be32toh(x)
#define function_read(type, endianess) \
int64_t read_##type##_##endianess(YR_MEMORY_BLOCK_ITERATOR* iterator, size_t offset) \
{ \
YR_MEMORY_BLOCK* block = iterator->first(iterator); \
while (block != NULL) \
{ \
if (offset >= block->base && \
block->size >= sizeof(type) && \
offset <= block->base + block->size - sizeof(type)) \
{ \
type result; \
uint8_t* data = block->fetch_data(block); \
if (data == NULL) \
return UNDEFINED; \
result = *(type *)(data + offset - block->base); \
result = endianess##_##type(result); \
return result; \
} \
block = iterator->next(iterator); \
} \
return UNDEFINED; \
};
function_read(uint8_t, little_endian)
function_read(uint16_t, little_endian)
function_read(uint32_t, little_endian)
function_read(int8_t, little_endian)
function_read(int16_t, little_endian)
function_read(int32_t, little_endian)
function_read(uint8_t, big_endian)
function_read(uint16_t, big_endian)
function_read(uint32_t, big_endian)
function_read(int8_t, big_endian)
function_read(int16_t, big_endian)
function_read(int32_t, big_endian)
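/*
 * Editor's note, not part of the original file: the generated readers
 * return UNDEFINED instead of faulting when the requested offset is not
 * fully contained in a single memory block, or when fetch_data() fails,
 * so the OP_INTxx/OP_UINTxx handlers below can call them unconditionally:
 *
 * int64_t v = read_uint16_t_little_endian(context->iterator, offset);
 * if (v == UNDEFINED)
 * ... offset out of range, or fetch_data() failed ...
 */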
static uint8_t* jmp_if(
int condition,
uint8_t* ip)
{
uint8_t* result;
if (condition)
{
result = *(uint8_t**)(ip + 1);
// ip will be incremented at the end of the execution loop,
// decrement it here to compensate.
result--;
}
else
{
result = ip + sizeof(uint64_t);
}
return result;
}
int yr_execute_code(
YR_RULES* rules,
YR_SCAN_CONTEXT* context,
int timeout,
time_t start_time)
{
int64_t mem[MEM_SIZE];
int32_t sp = 0;
uint8_t* ip = rules->code_start;
YR_VALUE args[MAX_FUNCTION_ARGS];
YR_VALUE *stack;
YR_VALUE r1;
YR_VALUE r2;
YR_VALUE r3;
#ifdef PROFILING_ENABLED
YR_RULE* current_rule = NULL;
#endif
YR_RULE* rule;
YR_MATCH* match;
YR_OBJECT_FUNCTION* function;
char* identifier;
char* args_fmt;
int i;
int found;
int count;
int result = ERROR_SUCCESS;
int stop = FALSE;
int cycle = 0;
int tidx = context->tidx;
int stack_size;
#ifdef PROFILING_ENABLED
clock_t start = clock();
#endif
yr_get_configuration(YR_CONFIG_STACK_SIZE, (void*) &stack_size);
stack = (YR_VALUE*) yr_malloc(stack_size * sizeof(YR_VALUE));
if (stack == NULL)
return ERROR_INSUFFICIENT_MEMORY;
while(!stop)
{
switch(*ip)
{
case OP_NOP:
break;
case OP_HALT:
assert(sp == 0); // When HALT is reached the stack should be empty.
stop = TRUE;
break;
case OP_PUSH:
r1.i = *(uint64_t*)(ip + 1);
ip += sizeof(uint64_t);
push(r1);
break;
case OP_POP:
pop(r1);
break;
case OP_CLEAR_M:
r1.i = *(uint64_t*)(ip + 1);
ip += sizeof(uint64_t);
mem[r1.i] = 0;
break;
case OP_ADD_M:
r1.i = *(uint64_t*)(ip + 1);
ip += sizeof(uint64_t);
pop(r2);
if (!is_undef(r2))
mem[r1.i] += r2.i;
break;
case OP_INCR_M:
r1.i = *(uint64_t*)(ip + 1);
ip += sizeof(uint64_t);
mem[r1.i]++;
break;
case OP_PUSH_M:
r1.i = *(uint64_t*)(ip + 1);
ip += sizeof(uint64_t);
r1.i = mem[r1.i];
push(r1);
break;
case OP_POP_M:
r1.i = *(uint64_t*)(ip + 1);
ip += sizeof(uint64_t);
pop(r2);
mem[r1.i] = r2.i;
break;
case OP_SWAPUNDEF:
r1.i = *(uint64_t*)(ip + 1);
ip += sizeof(uint64_t);
pop(r2);
if (is_undef(r2))
{
r1.i = mem[r1.i];
push(r1);
}
else
{
push(r2);
}
break;
case OP_JNUNDEF:
pop(r1);
push(r1);
ip = jmp_if(!is_undef(r1), ip);
break;
case OP_JLE:
pop(r2);
pop(r1);
push(r1);
push(r2);
ip = jmp_if(r1.i <= r2.i, ip);
break;
case OP_JTRUE:
pop(r1);
push(r1);
ip = jmp_if(!is_undef(r1) && r1.i, ip);
break;
case OP_JFALSE:
pop(r1);
push(r1);
ip = jmp_if(is_undef(r1) || !r1.i, ip);
break;
case OP_AND:
pop(r2);
pop(r1);
if (is_undef(r1) || is_undef(r2))
r1.i = 0;
else
r1.i = r1.i && r2.i;
push(r1);
break;
case OP_OR:
pop(r2);
pop(r1);
if (is_undef(r1))
{
push(r2);
}
else if (is_undef(r2))
{
push(r1);
}
else
{
r1.i = r1.i || r2.i;
push(r1);
}
break;
case OP_NOT:
pop(r1);
if (is_undef(r1))
r1.i = UNDEFINED;
else
r1.i = !r1.i;
push(r1);
break;
case OP_MOD:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
if (r2.i != 0)
r1.i = r1.i % r2.i;
else
r1.i = UNDEFINED;
push(r1);
break;
case OP_SHR:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.i >> r2.i;
push(r1);
break;
case OP_SHL:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.i << r2.i;
push(r1);
break;
case OP_BITWISE_NOT:
pop(r1);
ensure_defined(r1);
r1.i = ~r1.i;
push(r1);
break;
case OP_BITWISE_AND:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.i & r2.i;
push(r1);
break;
case OP_BITWISE_OR:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.i | r2.i;
push(r1);
break;
case OP_BITWISE_XOR:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.i ^ r2.i;
push(r1);
break;
case OP_PUSH_RULE:
rule = *(YR_RULE**)(ip + 1);
ip += sizeof(uint64_t);
r1.i = rule->t_flags[tidx] & RULE_TFLAGS_MATCH ? 1 : 0;
push(r1);
break;
case OP_INIT_RULE:
#ifdef PROFILING_ENABLED
current_rule = *(YR_RULE**)(ip + 1);
#endif
ip += sizeof(uint64_t);
break;
case OP_MATCH_RULE:
pop(r1);
rule = *(YR_RULE**)(ip + 1);
ip += sizeof(uint64_t);
if (!is_undef(r1) && r1.i)
rule->t_flags[tidx] |= RULE_TFLAGS_MATCH;
else if (RULE_IS_GLOBAL(rule))
rule->ns->t_flags[tidx] |= NAMESPACE_TFLAGS_UNSATISFIED_GLOBAL;
#ifdef PROFILING_ENABLED
rule->clock_ticks += clock() - start;
start = clock();
#endif
break;
case OP_OBJ_LOAD:
identifier = *(char**)(ip + 1);
ip += sizeof(uint64_t);
r1.o = (YR_OBJECT*) yr_hash_table_lookup(
context->objects_table,
identifier,
NULL);
assert(r1.o != NULL);
push(r1);
break;
case OP_OBJ_FIELD:
identifier = *(char**)(ip + 1);
ip += sizeof(uint64_t);
pop(r1);
ensure_defined(r1);
r1.o = yr_object_lookup_field(r1.o, identifier);
assert(r1.o != NULL);
push(r1);
break;
case OP_OBJ_VALUE:
pop(r1);
ensure_defined(r1);
switch(r1.o->type)
{
case OBJECT_TYPE_INTEGER:
r1.i = ((YR_OBJECT_INTEGER*) r1.o)->value;
break;
case OBJECT_TYPE_FLOAT:
if (isnan(((YR_OBJECT_DOUBLE*) r1.o)->value))
r1.i = UNDEFINED;
else
r1.d = ((YR_OBJECT_DOUBLE*) r1.o)->value;
break;
case OBJECT_TYPE_STRING:
if (((YR_OBJECT_STRING*) r1.o)->value == NULL)
r1.i = UNDEFINED;
else
r1.p = ((YR_OBJECT_STRING*) r1.o)->value;
break;
default:
assert(FALSE);
}
push(r1);
break;
case OP_INDEX_ARRAY:
pop(r1); // index
pop(r2); // array
ensure_defined(r1);
ensure_defined(r2);
assert(r2.o->type == OBJECT_TYPE_ARRAY);
r1.o = yr_object_array_get_item(r2.o, 0, (int) r1.i);
if (r1.o == NULL)
r1.i = UNDEFINED;
push(r1);
break;
case OP_LOOKUP_DICT:
pop(r1); // key
pop(r2); // dictionary
ensure_defined(r1);
ensure_defined(r2);
assert(r2.o->type == OBJECT_TYPE_DICTIONARY);
r1.o = yr_object_dict_get_item(
r2.o, 0, r1.ss->c_string);
if (r1.o == NULL)
r1.i = UNDEFINED;
push(r1);
break;
case OP_CALL:
args_fmt = *(char**)(ip + 1);
ip += sizeof(uint64_t);
i = (int) strlen(args_fmt);
count = 0;
// pop arguments from stack and copy them to args array
while (i > 0)
{
pop(r1);
if (is_undef(r1)) // count the number of undefined args
count++;
args[i - 1] = r1;
i--;
}
pop(r2);
ensure_defined(r2);
if (count > 0)
{
// if there are undefined args, result for function call
// is undefined as well.
r1.i = UNDEFINED;
push(r1);
break;
}
function = (YR_OBJECT_FUNCTION*) r2.o;
result = ERROR_INTERNAL_FATAL_ERROR;
for (i = 0; i < MAX_OVERLOADED_FUNCTIONS; i++)
{
if (function->prototypes[i].arguments_fmt == NULL)
break;
if (strcmp(function->prototypes[i].arguments_fmt, args_fmt) == 0)
{
result = function->prototypes[i].code(args, context, function);
break;
}
}
assert(i < MAX_OVERLOADED_FUNCTIONS);
if (result == ERROR_SUCCESS)
{
r1.o = function->return_obj;
push(r1);
}
else
{
stop = TRUE;
}
break;
case OP_FOUND:
pop(r1);
r1.i = r1.s->matches[tidx].tail != NULL ? 1 : 0;
push(r1);
break;
case OP_FOUND_AT:
pop(r2);
pop(r1);
if (is_undef(r1))
{
r1.i = 0;
push(r1);
break;
}
match = r2.s->matches[tidx].head;
r3.i = FALSE;
while (match != NULL)
{
if (r1.i == match->base + match->offset)
{
r3.i = TRUE;
break;
}
if (r1.i < match->base + match->offset)
break;
match = match->next;
}
push(r3);
break;
case OP_FOUND_IN:
pop(r3);
pop(r2);
pop(r1);
ensure_defined(r1);
ensure_defined(r2);
match = r3.s->matches[tidx].head;
r3.i = FALSE;
while (match != NULL && !r3.i)
{
if (match->base + match->offset >= r1.i &&
match->base + match->offset <= r2.i)
{
r3.i = TRUE;
}
if (match->base + match->offset > r2.i)
break;
match = match->next;
}
push(r3);
break;
case OP_COUNT:
pop(r1);
r1.i = r1.s->matches[tidx].count;
push(r1);
break;
case OP_OFFSET:
pop(r2);
pop(r1);
ensure_defined(r1);
match = r2.s->matches[tidx].head;
i = 1;
r3.i = UNDEFINED;
while (match != NULL && r3.i == UNDEFINED)
{
if (r1.i == i)
r3.i = match->base + match->offset;
i++;
match = match->next;
}
push(r3);
break;
case OP_LENGTH:
pop(r2);
pop(r1);
ensure_defined(r1);
match = r2.s->matches[tidx].head;
i = 1;
r3.i = UNDEFINED;
while (match != NULL && r3.i == UNDEFINED)
{
if (r1.i == i)
r3.i = match->match_length;
i++;
match = match->next;
}
push(r3);
break;
case OP_OF:
found = 0;
count = 0;
pop(r1);
while (!is_undef(r1))
{
if (r1.s->matches[tidx].tail != NULL)
found++;
count++;
pop(r1);
}
pop(r2);
if (is_undef(r2))
r1.i = found >= count ? 1 : 0;
else
r1.i = found >= r2.i ? 1 : 0;
push(r1);
break;
case OP_FILESIZE:
r1.i = context->file_size;
push(r1);
break;
case OP_ENTRYPOINT:
r1.i = context->entry_point;
push(r1);
break;
case OP_INT8:
pop(r1);
r1.i = read_int8_t_little_endian(context->iterator, (size_t) r1.i);
push(r1);
break;
case OP_INT16:
pop(r1);
r1.i = read_int16_t_little_endian(context->iterator, (size_t) r1.i);
push(r1);
break;
case OP_INT32:
pop(r1);
r1.i = read_int32_t_little_endian(context->iterator, (size_t) r1.i);
push(r1);
break;
case OP_UINT8:
pop(r1);
r1.i = read_uint8_t_little_endian(context->iterator, (size_t) r1.i);
push(r1);
break;
case OP_UINT16:
pop(r1);
r1.i = read_uint16_t_little_endian(context->iterator, (size_t) r1.i);
push(r1);
break;
case OP_UINT32:
pop(r1);
r1.i = read_uint32_t_little_endian(context->iterator, (size_t) r1.i);
push(r1);
break;
case OP_INT8BE:
pop(r1);
r1.i = read_int8_t_big_endian(context->iterator, (size_t) r1.i);
push(r1);
break;
case OP_INT16BE:
pop(r1);
r1.i = read_int16_t_big_endian(context->iterator, (size_t) r1.i);
push(r1);
break;
case OP_INT32BE:
pop(r1);
r1.i = read_int32_t_big_endian(context->iterator, (size_t) r1.i);
push(r1);
break;
case OP_UINT8BE:
pop(r1);
r1.i = read_uint8_t_big_endian(context->iterator, (size_t) r1.i);
push(r1);
break;
case OP_UINT16BE:
pop(r1);
r1.i = read_uint16_t_big_endian(context->iterator, (size_t) r1.i);
push(r1);
break;
case OP_UINT32BE:
pop(r1);
r1.i = read_uint32_t_big_endian(context->iterator, (size_t) r1.i);
push(r1);
break;
case OP_CONTAINS:
pop(r2);
pop(r1);
ensure_defined(r1);
ensure_defined(r2);
r1.i = memmem(r1.ss->c_string, r1.ss->length,
r2.ss->c_string, r2.ss->length) != NULL;
push(r1);
break;
case OP_IMPORT:
r1.i = *(uint64_t*)(ip + 1);
ip += sizeof(uint64_t);
result = yr_modules_load((char*) r1.p, context);
if (result != ERROR_SUCCESS)
stop = TRUE;
break;
case OP_MATCHES:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
if (r1.ss->length == 0)
{
r1.i = FALSE;
push(r1);
break;
}
result = yr_re_exec(
(uint8_t*) r2.re->code,
(uint8_t*) r1.ss->c_string,
r1.ss->length,
0,
r2.re->flags | RE_FLAGS_SCAN,
NULL,
NULL,
&found);
if (result != ERROR_SUCCESS)
stop = TRUE;
r1.i = found >= 0;
push(r1);
break;
case OP_INT_TO_DBL:
r1.i = *(uint64_t*)(ip + 1);
ip += sizeof(uint64_t);
r2 = stack[sp - r1.i];
if (is_undef(r2))
stack[sp - r1.i].i = UNDEFINED;
else
stack[sp - r1.i].d = (double) r2.i;
break;
case OP_STR_TO_BOOL:
pop(r1);
ensure_defined(r1);
r1.i = r1.ss->length > 0;
push(r1);
break;
case OP_INT_EQ:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.i == r2.i;
push(r1);
break;
case OP_INT_NEQ:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.i != r2.i;
push(r1);
break;
case OP_INT_LT:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.i < r2.i;
push(r1);
break;
case OP_INT_GT:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.i > r2.i;
push(r1);
break;
case OP_INT_LE:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.i <= r2.i;
push(r1);
break;
case OP_INT_GE:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.i >= r2.i;
push(r1);
break;
case OP_INT_ADD:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.i + r2.i;
push(r1);
break;
case OP_INT_SUB:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.i - r2.i;
push(r1);
break;
case OP_INT_MUL:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.i * r2.i;
push(r1);
break;
case OP_INT_DIV:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
if (r2.i != 0)
r1.i = r1.i / r2.i;
else
r1.i = UNDEFINED;
push(r1);
break;
case OP_INT_MINUS:
pop(r1);
ensure_defined(r1);
r1.i = -r1.i;
push(r1);
break;
case OP_DBL_LT:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.d < r2.d;
push(r1);
break;
case OP_DBL_GT:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.d > r2.d;
push(r1);
break;
case OP_DBL_LE:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.d <= r2.d;
push(r1);
break;
case OP_DBL_GE:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.d >= r2.d;
push(r1);
break;
case OP_DBL_EQ:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.d == r2.d;
push(r1);
break;
case OP_DBL_NEQ:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.d != r2.d;
push(r1);
break;
case OP_DBL_ADD:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.d = r1.d + r2.d;
push(r1);
break;
case OP_DBL_SUB:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.d = r1.d - r2.d;
push(r1);
break;
case OP_DBL_MUL:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.d = r1.d * r2.d;
push(r1);
break;
case OP_DBL_DIV:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.d = r1.d / r2.d;
push(r1);
break;
case OP_DBL_MINUS:
pop(r1);
ensure_defined(r1);
r1.d = -r1.d;
push(r1);
break;
case OP_STR_EQ:
case OP_STR_NEQ:
case OP_STR_LT:
case OP_STR_LE:
case OP_STR_GT:
case OP_STR_GE:
pop(r2);
pop(r1);
ensure_defined(r1);
ensure_defined(r2);
switch(*ip)
{
case OP_STR_EQ:
r1.i = (sized_string_cmp(r1.ss, r2.ss) == 0);
break;
case OP_STR_NEQ:
r1.i = (sized_string_cmp(r1.ss, r2.ss) != 0);
break;
case OP_STR_LT:
r1.i = (sized_string_cmp(r1.ss, r2.ss) < 0);
break;
case OP_STR_LE:
r1.i = (sized_string_cmp(r1.ss, r2.ss) <= 0);
break;
case OP_STR_GT:
r1.i = (sized_string_cmp(r1.ss, r2.ss) > 0);
break;
case OP_STR_GE:
r1.i = (sized_string_cmp(r1.ss, r2.ss) >= 0);
break;
}
push(r1);
break;
default:
// Unknown instruction, this shouldn't happen.
assert(FALSE);
}
if (timeout > 0) // timeout == 0 means no timeout
{
// Check for timeout every 10 instruction cycles.
if (++cycle == 10)
{
if (difftime(time(NULL), start_time) > timeout)
{
#ifdef PROFILING_ENABLED
assert(current_rule != NULL);
current_rule->clock_ticks += clock() - start;
#endif
result = ERROR_SCAN_TIMEOUT;
stop = TRUE;
}
cycle = 0;
}
}
ip++;
}
yr_modules_unload_all(context);
yr_free(stack);
return result;
}
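/*
 * Editor's note, not part of the original file: the timeout check in the
 * main loop above runs only once every 10 opcodes, so with timeout = 1
 * the interpreter may execute up to 9 instructions past the deadline
 * before returning ERROR_SCAN_TIMEOUT - a deliberate trade of precision
 * for one difftime() call per 10 cycles.
 */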
| ./CrossVul/dataset_final_sorted/CWE-416/c/bad_3348_1 |
crossvul-cpp_data_good_5333_0 | /*
* gendisk handling
*/
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/kdev_t.h>
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/kobj_map.h>
#include <linux/mutex.h>
#include <linux/idr.h>
#include <linux/log2.h>
#include <linux/pm_runtime.h>
#include <linux/badblocks.h>
#include "blk.h"
static DEFINE_MUTEX(block_class_lock);
struct kobject *block_depr;
/* for extended dynamic devt allocation, currently only one major is used */
#define NR_EXT_DEVT (1 << MINORBITS)
/* For extended devt allocation. ext_devt_lock prevents look up
* results from going away underneath its user.
*/
static DEFINE_SPINLOCK(ext_devt_lock);
static DEFINE_IDR(ext_devt_idr);
static struct device_type disk_type;
static void disk_check_events(struct disk_events *ev,
unsigned int *clearing_ptr);
static void disk_alloc_events(struct gendisk *disk);
static void disk_add_events(struct gendisk *disk);
static void disk_del_events(struct gendisk *disk);
static void disk_release_events(struct gendisk *disk);
/**
* disk_get_part - get partition
* @disk: disk to look partition from
* @partno: partition number
*
* Look for partition @partno from @disk. If found, increment
* reference count and return it.
*
* CONTEXT:
* Don't care.
*
* RETURNS:
* Pointer to the found partition on success, NULL if not found.
*/
struct hd_struct *disk_get_part(struct gendisk *disk, int partno)
{
struct hd_struct *part = NULL;
struct disk_part_tbl *ptbl;
if (unlikely(partno < 0))
return NULL;
rcu_read_lock();
ptbl = rcu_dereference(disk->part_tbl);
if (likely(partno < ptbl->len)) {
part = rcu_dereference(ptbl->part[partno]);
if (part)
get_device(part_to_dev(part));
}
rcu_read_unlock();
return part;
}
EXPORT_SYMBOL_GPL(disk_get_part);
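/*
 * Editor's illustration, not part of the original file: every successful
 * disk_get_part() must be balanced by disk_put_part(), and
 * disk_put_part(NULL) is a no-op, so the pair can bracket the lookup
 * unconditionally. A minimal sketch with a hypothetical name:
 */
static inline bool example_part_exists(struct gendisk *disk, int partno)
{
struct hd_struct *part = disk_get_part(disk, partno);
disk_put_part(part); /* safe even when part is NULL */
return part != NULL;
}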
/**
* disk_part_iter_init - initialize partition iterator
* @piter: iterator to initialize
* @disk: disk to iterate over
* @flags: DISK_PITER_* flags
*
* Initialize @piter so that it iterates over partitions of @disk.
*
* CONTEXT:
* Don't care.
*/
void disk_part_iter_init(struct disk_part_iter *piter, struct gendisk *disk,
unsigned int flags)
{
struct disk_part_tbl *ptbl;
rcu_read_lock();
ptbl = rcu_dereference(disk->part_tbl);
piter->disk = disk;
piter->part = NULL;
if (flags & DISK_PITER_REVERSE)
piter->idx = ptbl->len - 1;
else if (flags & (DISK_PITER_INCL_PART0 | DISK_PITER_INCL_EMPTY_PART0))
piter->idx = 0;
else
piter->idx = 1;
piter->flags = flags;
rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(disk_part_iter_init);
/**
* disk_part_iter_next - proceed iterator to the next partition and return it
* @piter: iterator of interest
*
* Proceed @piter to the next partition and return it.
*
* CONTEXT:
* Don't care.
*/
struct hd_struct *disk_part_iter_next(struct disk_part_iter *piter)
{
struct disk_part_tbl *ptbl;
int inc, end;
/* put the last partition */
disk_put_part(piter->part);
piter->part = NULL;
/* get part_tbl */
rcu_read_lock();
ptbl = rcu_dereference(piter->disk->part_tbl);
/* determine iteration parameters */
if (piter->flags & DISK_PITER_REVERSE) {
inc = -1;
if (piter->flags & (DISK_PITER_INCL_PART0 |
DISK_PITER_INCL_EMPTY_PART0))
end = -1;
else
end = 0;
} else {
inc = 1;
end = ptbl->len;
}
/* iterate to the next partition */
for (; piter->idx != end; piter->idx += inc) {
struct hd_struct *part;
part = rcu_dereference(ptbl->part[piter->idx]);
if (!part)
continue;
if (!part_nr_sects_read(part) &&
!(piter->flags & DISK_PITER_INCL_EMPTY) &&
!(piter->flags & DISK_PITER_INCL_EMPTY_PART0 &&
piter->idx == 0))
continue;
get_device(part_to_dev(part));
piter->part = part;
piter->idx += inc;
break;
}
rcu_read_unlock();
return piter->part;
}
EXPORT_SYMBOL_GPL(disk_part_iter_next);
/**
* disk_part_iter_exit - finish up partition iteration
* @piter: iter of interest
*
* Called when iteration is over. Cleans up @piter.
*
* CONTEXT:
* Don't care.
*/
void disk_part_iter_exit(struct disk_part_iter *piter)
{
disk_put_part(piter->part);
piter->part = NULL;
}
EXPORT_SYMBOL_GPL(disk_part_iter_exit);
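/*
 * Editor's illustration, not part of the original file: the canonical
 * init/next/exit pattern for the iterator above, with a hypothetical
 * function name. disk_part_iter_next() takes its own reference on each
 * partition and drops the RCU lock before returning, so the loop body
 * may sleep:
 */
static inline void example_print_partnos(struct gendisk *disk)
{
struct disk_part_iter piter;
struct hd_struct *part;
disk_part_iter_init(&piter, disk, DISK_PITER_INCL_EMPTY);
while ((part = disk_part_iter_next(&piter)))
pr_info("partno %d\n", part->partno);
disk_part_iter_exit(&piter);
}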
static inline int sector_in_part(struct hd_struct *part, sector_t sector)
{
return part->start_sect <= sector &&
sector < part->start_sect + part_nr_sects_read(part);
}
/**
* disk_map_sector_rcu - map sector to partition
* @disk: gendisk of interest
* @sector: sector to map
*
* Find out which partition @sector maps to on @disk. This is
* primarily used for stats accounting.
*
* CONTEXT:
* RCU read locked. The returned partition pointer is valid only
* while preemption is disabled.
*
* RETURNS:
* Found partition on success, part0 is returned if no partition matches
*/
struct hd_struct *disk_map_sector_rcu(struct gendisk *disk, sector_t sector)
{
struct disk_part_tbl *ptbl;
struct hd_struct *part;
int i;
ptbl = rcu_dereference(disk->part_tbl);
part = rcu_dereference(ptbl->last_lookup);
if (part && sector_in_part(part, sector))
return part;
for (i = 1; i < ptbl->len; i++) {
part = rcu_dereference(ptbl->part[i]);
if (part && sector_in_part(part, sector)) {
rcu_assign_pointer(ptbl->last_lookup, part);
return part;
}
}
return &disk->part0;
}
EXPORT_SYMBOL_GPL(disk_map_sector_rcu);
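/*
 * Editor's illustration, not part of the original file: a minimal caller
 * sketch with a hypothetical name. The returned partition is only stable
 * while the RCU read lock is held, so it must be dereferenced before
 * unlocking:
 */
static inline int example_partno_of_sector(struct gendisk *disk,
sector_t sector)
{
int partno;
rcu_read_lock();
partno = disk_map_sector_rcu(disk, sector)->partno;
rcu_read_unlock();
return partno;
}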
/*
* Can be deleted altogether. Later.
*
*/
static struct blk_major_name {
struct blk_major_name *next;
int major;
char name[16];
} *major_names[BLKDEV_MAJOR_HASH_SIZE];
/* index in the above - for now: assume no multimajor ranges */
static inline int major_to_index(unsigned major)
{
return major % BLKDEV_MAJOR_HASH_SIZE;
}
#ifdef CONFIG_PROC_FS
void blkdev_show(struct seq_file *seqf, off_t offset)
{
struct blk_major_name *dp;
if (offset < BLKDEV_MAJOR_HASH_SIZE) {
mutex_lock(&block_class_lock);
for (dp = major_names[offset]; dp; dp = dp->next)
seq_printf(seqf, "%3d %s\n", dp->major, dp->name);
mutex_unlock(&block_class_lock);
}
}
#endif /* CONFIG_PROC_FS */
/**
* register_blkdev - register a new block device
*
* @major: the requested major device number [1..255]. If @major=0, try to
* allocate any unused major number.
* @name: the name of the new block device as a zero terminated string
*
* The @name must be unique within the system.
*
* The return value depends on the @major input parameter.
* - if a major device number was requested in range [1..255] then the
* function returns zero on success, or a negative error code
* - if any unused major number was requested with @major=0 parameter
* then the return value is the allocated major number in range
* [1..255] or a negative error code otherwise
*/
int register_blkdev(unsigned int major, const char *name)
{
struct blk_major_name **n, *p;
int index, ret = 0;
mutex_lock(&block_class_lock);
/* temporary */
if (major == 0) {
for (index = ARRAY_SIZE(major_names)-1; index > 0; index--) {
if (major_names[index] == NULL)
break;
}
if (index == 0) {
printk("register_blkdev: failed to get major for %s\n",
name);
ret = -EBUSY;
goto out;
}
major = index;
ret = major;
}
p = kmalloc(sizeof(struct blk_major_name), GFP_KERNEL);
if (p == NULL) {
ret = -ENOMEM;
goto out;
}
p->major = major;
strlcpy(p->name, name, sizeof(p->name));
p->next = NULL;
index = major_to_index(major);
for (n = &major_names[index]; *n; n = &(*n)->next) {
if ((*n)->major == major)
break;
}
if (!*n)
*n = p;
else
ret = -EBUSY;
if (ret < 0) {
printk("register_blkdev: cannot get major %d for %s\n",
major, name);
kfree(p);
}
out:
mutex_unlock(&block_class_lock);
return ret;
}
EXPORT_SYMBOL(register_blkdev);
void unregister_blkdev(unsigned int major, const char *name)
{
struct blk_major_name **n;
struct blk_major_name *p = NULL;
int index = major_to_index(major);
mutex_lock(&block_class_lock);
for (n = &major_names[index]; *n; n = &(*n)->next)
if ((*n)->major == major)
break;
if (!*n || strcmp((*n)->name, name)) {
WARN_ON(1);
} else {
p = *n;
*n = p->next;
}
mutex_unlock(&block_class_lock);
kfree(p);
}
EXPORT_SYMBOL(unregister_blkdev);
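/*
 * Editor's illustration, not part of the original file: the two usage
 * modes described in the register_blkdev() kerneldoc above, with a
 * hypothetical driver name:
 *
 * int major = register_blkdev(0, "exampledrv"); dynamic major
 * if (major < 0)
 * return major;
 * ...
 * unregister_blkdev(major, "exampledrv");
 */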
static struct kobj_map *bdev_map;
/**
* blk_mangle_minor - scatter minor numbers apart
* @minor: minor number to mangle
*
* Scatter consecutively allocated @minor number apart if MANGLE_DEVT
* is enabled. Mangling twice gives the original value.
*
* RETURNS:
* Mangled value.
*
* CONTEXT:
* Don't care.
*/
static int blk_mangle_minor(int minor)
{
#ifdef CONFIG_DEBUG_BLOCK_EXT_DEVT
int i;
for (i = 0; i < MINORBITS / 2; i++) {
int low = minor & (1 << i);
int high = minor & (1 << (MINORBITS - 1 - i));
int distance = MINORBITS - 1 - 2 * i;
minor ^= low | high; /* clear both bits */
low <<= distance; /* swap the positions */
high >>= distance;
minor |= low | high; /* and set */
}
#endif
return minor;
}
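/*
 * Editor's note, not part of the original file: with
 * CONFIG_DEBUG_BLOCK_EXT_DEVT enabled, bit i is swapped with bit
 * (MINORBITS - 1 - i), so the function is an involution, as the
 * kerneldoc above states:
 *
 * WARN_ON(blk_mangle_minor(blk_mangle_minor(42)) != 42);
 *
 * blk_free_devt() relies on this to recover the idr index from a
 * mangled minor.
 */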
/**
* blk_alloc_devt - allocate a dev_t for a partition
* @part: partition to allocate dev_t for
* @devt: out parameter for resulting dev_t
*
* Allocate a dev_t for block device.
*
* RETURNS:
* 0 on success, allocated dev_t is returned in *@devt. -errno on
* failure.
*
* CONTEXT:
* Might sleep.
*/
int blk_alloc_devt(struct hd_struct *part, dev_t *devt)
{
struct gendisk *disk = part_to_disk(part);
int idx;
/* in consecutive minor range? */
if (part->partno < disk->minors) {
*devt = MKDEV(disk->major, disk->first_minor + part->partno);
return 0;
}
/* allocate ext devt */
idr_preload(GFP_KERNEL);
spin_lock_bh(&ext_devt_lock);
idx = idr_alloc(&ext_devt_idr, part, 0, NR_EXT_DEVT, GFP_NOWAIT);
spin_unlock_bh(&ext_devt_lock);
idr_preload_end();
if (idx < 0)
return idx == -ENOSPC ? -EBUSY : idx;
*devt = MKDEV(BLOCK_EXT_MAJOR, blk_mangle_minor(idx));
return 0;
}
/**
* blk_free_devt - free a dev_t
* @devt: dev_t to free
*
* Free @devt which was allocated using blk_alloc_devt().
*
* CONTEXT:
* Might sleep.
*/
void blk_free_devt(dev_t devt)
{
if (devt == MKDEV(0, 0))
return;
if (MAJOR(devt) == BLOCK_EXT_MAJOR) {
spin_lock_bh(&ext_devt_lock);
idr_remove(&ext_devt_idr, blk_mangle_minor(MINOR(devt)));
spin_unlock_bh(&ext_devt_lock);
}
}
static char *bdevt_str(dev_t devt, char *buf)
{
if (MAJOR(devt) <= 0xff && MINOR(devt) <= 0xff) {
char tbuf[BDEVT_SIZE];
snprintf(tbuf, BDEVT_SIZE, "%02x%02x", MAJOR(devt), MINOR(devt));
snprintf(buf, BDEVT_SIZE, "%-9s", tbuf);
} else
snprintf(buf, BDEVT_SIZE, "%03x:%05x", MAJOR(devt), MINOR(devt));
return buf;
}
/*
* Register device numbers dev..(dev+range-1)
* range must be nonzero
* The hash chain is sorted on range, so that subranges can override.
*/
void blk_register_region(dev_t devt, unsigned long range, struct module *module,
struct kobject *(*probe)(dev_t, int *, void *),
int (*lock)(dev_t, void *), void *data)
{
kobj_map(bdev_map, devt, range, module, probe, lock, data);
}
EXPORT_SYMBOL(blk_register_region);
void blk_unregister_region(dev_t devt, unsigned long range)
{
kobj_unmap(bdev_map, devt, range);
}
EXPORT_SYMBOL(blk_unregister_region);
static struct kobject *exact_match(dev_t devt, int *partno, void *data)
{
struct gendisk *p = data;
return &disk_to_dev(p)->kobj;
}
static int exact_lock(dev_t devt, void *data)
{
struct gendisk *p = data;
if (!get_disk(p))
return -1;
return 0;
}
static void register_disk(struct device *parent, struct gendisk *disk)
{
struct device *ddev = disk_to_dev(disk);
struct block_device *bdev;
struct disk_part_iter piter;
struct hd_struct *part;
int err;
ddev->parent = parent;
dev_set_name(ddev, "%s", disk->disk_name);
/* delay uevents, until we scanned partition table */
dev_set_uevent_suppress(ddev, 1);
if (device_add(ddev))
return;
if (!sysfs_deprecated) {
err = sysfs_create_link(block_depr, &ddev->kobj,
kobject_name(&ddev->kobj));
if (err) {
device_del(ddev);
return;
}
}
/*
* avoid probable deadlock caused by allocating memory with
* GFP_KERNEL in the runtime_resume callback of all of its
* ancestor devices
*/
pm_runtime_set_memalloc_noio(ddev, true);
disk->part0.holder_dir = kobject_create_and_add("holders", &ddev->kobj);
disk->slave_dir = kobject_create_and_add("slaves", &ddev->kobj);
/* No minors to use for partitions */
if (!disk_part_scan_enabled(disk))
goto exit;
/* No such device (e.g., media were just removed) */
if (!get_capacity(disk))
goto exit;
bdev = bdget_disk(disk, 0);
if (!bdev)
goto exit;
bdev->bd_invalidated = 1;
err = blkdev_get(bdev, FMODE_READ, NULL);
if (err < 0)
goto exit;
blkdev_put(bdev, FMODE_READ);
exit:
/* announce disk after possible partitions are created */
dev_set_uevent_suppress(ddev, 0);
kobject_uevent(&ddev->kobj, KOBJ_ADD);
/* announce possible partitions */
disk_part_iter_init(&piter, disk, 0);
while ((part = disk_part_iter_next(&piter)))
kobject_uevent(&part_to_dev(part)->kobj, KOBJ_ADD);
disk_part_iter_exit(&piter);
}
/**
* device_add_disk - add partitioning information to kernel list
* @parent: parent device for the disk
* @disk: per-device partitioning information
*
* This function registers the partitioning information in @disk
* with the kernel.
*
* FIXME: error handling
*/
void device_add_disk(struct device *parent, struct gendisk *disk)
{
struct backing_dev_info *bdi;
dev_t devt;
int retval;
/* minors == 0 indicates to use ext devt from part0 and should
* be accompanied with EXT_DEVT flag. Make sure all
* parameters make sense.
*/
WARN_ON(disk->minors && !(disk->major || disk->first_minor));
WARN_ON(!disk->minors && !(disk->flags & GENHD_FL_EXT_DEVT));
disk->flags |= GENHD_FL_UP;
retval = blk_alloc_devt(&disk->part0, &devt);
if (retval) {
WARN_ON(1);
return;
}
disk_to_dev(disk)->devt = devt;
/* ->major and ->first_minor aren't supposed to be
* dereferenced from here on, but set them just in case.
*/
disk->major = MAJOR(devt);
disk->first_minor = MINOR(devt);
disk_alloc_events(disk);
/* Register BDI before referencing it from bdev */
bdi = &disk->queue->backing_dev_info;
bdi_register_dev(bdi, disk_devt(disk));
blk_register_region(disk_devt(disk), disk->minors, NULL,
exact_match, exact_lock, disk);
register_disk(parent, disk);
blk_register_queue(disk);
/*
* Take an extra ref on queue which will be put on disk_release()
* so that it sticks around as long as @disk is there.
*/
WARN_ON_ONCE(!blk_get_queue(disk->queue));
retval = sysfs_create_link(&disk_to_dev(disk)->kobj, &bdi->dev->kobj,
"bdi");
WARN_ON(retval);
disk_add_events(disk);
blk_integrity_add(disk);
}
EXPORT_SYMBOL(device_add_disk);
void del_gendisk(struct gendisk *disk)
{
struct disk_part_iter piter;
struct hd_struct *part;
blk_integrity_del(disk);
disk_del_events(disk);
/* invalidate stuff */
disk_part_iter_init(&piter, disk,
DISK_PITER_INCL_EMPTY | DISK_PITER_REVERSE);
while ((part = disk_part_iter_next(&piter))) {
invalidate_partition(disk, part->partno);
delete_partition(disk, part->partno);
}
disk_part_iter_exit(&piter);
invalidate_partition(disk, 0);
set_capacity(disk, 0);
disk->flags &= ~GENHD_FL_UP;
sysfs_remove_link(&disk_to_dev(disk)->kobj, "bdi");
blk_unregister_queue(disk);
blk_unregister_region(disk_devt(disk), disk->minors);
part_stat_set_all(&disk->part0, 0);
disk->part0.stamp = 0;
kobject_put(disk->part0.holder_dir);
kobject_put(disk->slave_dir);
if (!sysfs_deprecated)
sysfs_remove_link(block_depr, dev_name(disk_to_dev(disk)));
pm_runtime_set_memalloc_noio(disk_to_dev(disk), false);
device_del(disk_to_dev(disk));
}
EXPORT_SYMBOL(del_gendisk);
/* sysfs access to bad-blocks list. */
static ssize_t disk_badblocks_show(struct device *dev,
struct device_attribute *attr,
char *page)
{
struct gendisk *disk = dev_to_disk(dev);
if (!disk->bb)
return sprintf(page, "\n");
return badblocks_show(disk->bb, page, 0);
}
static ssize_t disk_badblocks_store(struct device *dev,
struct device_attribute *attr,
const char *page, size_t len)
{
struct gendisk *disk = dev_to_disk(dev);
if (!disk->bb)
return -ENXIO;
return badblocks_store(disk->bb, page, len, 0);
}
/**
* get_gendisk - get partitioning information for a given device
* @devt: device to get partitioning information for
* @partno: returned partition index
*
* This function gets the structure containing partitioning
* information for the given device @devt.
*/
struct gendisk *get_gendisk(dev_t devt, int *partno)
{
struct gendisk *disk = NULL;
if (MAJOR(devt) != BLOCK_EXT_MAJOR) {
struct kobject *kobj;
kobj = kobj_lookup(bdev_map, devt, partno);
if (kobj)
disk = dev_to_disk(kobj_to_dev(kobj));
} else {
struct hd_struct *part;
spin_lock_bh(&ext_devt_lock);
part = idr_find(&ext_devt_idr, blk_mangle_minor(MINOR(devt)));
if (part && get_disk(part_to_disk(part))) {
*partno = part->partno;
disk = part_to_disk(part);
}
spin_unlock_bh(&ext_devt_lock);
}
return disk;
}
EXPORT_SYMBOL(get_gendisk);
/**
* bdget_disk - do bdget() by gendisk and partition number
* @disk: gendisk of interest
* @partno: partition number
*
* Find partition @partno from @disk, do bdget() on it.
*
* CONTEXT:
* Don't care.
*
* RETURNS:
* Resulting block_device on success, NULL on failure.
*/
struct block_device *bdget_disk(struct gendisk *disk, int partno)
{
struct hd_struct *part;
struct block_device *bdev = NULL;
part = disk_get_part(disk, partno);
if (part)
bdev = bdget(part_devt(part));
disk_put_part(part);
return bdev;
}
EXPORT_SYMBOL(bdget_disk);
/*
* print a full list of all partitions - intended for places where the root
* filesystem can't be mounted and thus to give the victim some idea of what
* went wrong
*/
void __init printk_all_partitions(void)
{
struct class_dev_iter iter;
struct device *dev;
class_dev_iter_init(&iter, &block_class, NULL, &disk_type);
while ((dev = class_dev_iter_next(&iter))) {
struct gendisk *disk = dev_to_disk(dev);
struct disk_part_iter piter;
struct hd_struct *part;
char name_buf[BDEVNAME_SIZE];
char devt_buf[BDEVT_SIZE];
/*
* Don't show empty devices or things that have been
* suppressed
*/
if (get_capacity(disk) == 0 ||
(disk->flags & GENHD_FL_SUPPRESS_PARTITION_INFO))
continue;
/*
* Note, unlike /proc/partitions, I am showing the
* numbers in hex - the same format as the root=
* option takes.
*/
disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
while ((part = disk_part_iter_next(&piter))) {
bool is_part0 = part == &disk->part0;
printk("%s%s %10llu %s %s", is_part0 ? "" : " ",
bdevt_str(part_devt(part), devt_buf),
(unsigned long long)part_nr_sects_read(part) >> 1
, disk_name(disk, part->partno, name_buf),
part->info ? part->info->uuid : "");
if (is_part0) {
if (dev->parent && dev->parent->driver)
printk(" driver: %s\n",
dev->parent->driver->name);
else
printk(" (driver?)\n");
} else
printk("\n");
}
disk_part_iter_exit(&piter);
}
class_dev_iter_exit(&iter);
}
#ifdef CONFIG_PROC_FS
/* iterator */
static void *disk_seqf_start(struct seq_file *seqf, loff_t *pos)
{
loff_t skip = *pos;
struct class_dev_iter *iter;
struct device *dev;
iter = kmalloc(sizeof(*iter), GFP_KERNEL);
if (!iter)
return ERR_PTR(-ENOMEM);
seqf->private = iter;
class_dev_iter_init(iter, &block_class, NULL, &disk_type);
do {
dev = class_dev_iter_next(iter);
if (!dev)
return NULL;
} while (skip--);
return dev_to_disk(dev);
}
static void *disk_seqf_next(struct seq_file *seqf, void *v, loff_t *pos)
{
struct device *dev;
(*pos)++;
dev = class_dev_iter_next(seqf->private);
if (dev)
return dev_to_disk(dev);
return NULL;
}
static void disk_seqf_stop(struct seq_file *seqf, void *v)
{
struct class_dev_iter *iter = seqf->private;
/* stop is called even after start failed :-( */
if (iter) {
class_dev_iter_exit(iter);
kfree(iter);
seqf->private = NULL;
}
}
static void *show_partition_start(struct seq_file *seqf, loff_t *pos)
{
void *p;
p = disk_seqf_start(seqf, pos);
if (!IS_ERR_OR_NULL(p) && !*pos)
seq_puts(seqf, "major minor #blocks name\n\n");
return p;
}
static int show_partition(struct seq_file *seqf, void *v)
{
struct gendisk *sgp = v;
struct disk_part_iter piter;
struct hd_struct *part;
char buf[BDEVNAME_SIZE];
/* Don't show non-partitionable removable devices or empty devices */
if (!get_capacity(sgp) || (!disk_max_parts(sgp) &&
(sgp->flags & GENHD_FL_REMOVABLE)))
return 0;
if (sgp->flags & GENHD_FL_SUPPRESS_PARTITION_INFO)
return 0;
/* show the full disk and all non-0 size partitions of it */
disk_part_iter_init(&piter, sgp, DISK_PITER_INCL_PART0);
while ((part = disk_part_iter_next(&piter)))
seq_printf(seqf, "%4d %7d %10llu %s\n",
MAJOR(part_devt(part)), MINOR(part_devt(part)),
(unsigned long long)part_nr_sects_read(part) >> 1,
disk_name(sgp, part->partno, buf));
disk_part_iter_exit(&piter);
return 0;
}
static const struct seq_operations partitions_op = {
.start = show_partition_start,
.next = disk_seqf_next,
.stop = disk_seqf_stop,
.show = show_partition
};
static int partitions_open(struct inode *inode, struct file *file)
{
return seq_open(file, &partitions_op);
}
static const struct file_operations proc_partitions_operations = {
.open = partitions_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
#endif
static struct kobject *base_probe(dev_t devt, int *partno, void *data)
{
if (request_module("block-major-%d-%d", MAJOR(devt), MINOR(devt)) > 0)
/* Make old-style 2.4 aliases work */
request_module("block-major-%d", MAJOR(devt));
return NULL;
}
static int __init genhd_device_init(void)
{
int error;
block_class.dev_kobj = sysfs_dev_block_kobj;
error = class_register(&block_class);
if (unlikely(error))
return error;
bdev_map = kobj_map_init(base_probe, &block_class_lock);
blk_dev_init();
register_blkdev(BLOCK_EXT_MAJOR, "blkext");
/* create top-level block dir */
if (!sysfs_deprecated)
block_depr = kobject_create_and_add("block", NULL);
return 0;
}
subsys_initcall(genhd_device_init);
static ssize_t disk_range_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct gendisk *disk = dev_to_disk(dev);
return sprintf(buf, "%d\n", disk->minors);
}
static ssize_t disk_ext_range_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct gendisk *disk = dev_to_disk(dev);
return sprintf(buf, "%d\n", disk_max_parts(disk));
}
static ssize_t disk_removable_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct gendisk *disk = dev_to_disk(dev);
return sprintf(buf, "%d\n",
(disk->flags & GENHD_FL_REMOVABLE ? 1 : 0));
}
static ssize_t disk_ro_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct gendisk *disk = dev_to_disk(dev);
return sprintf(buf, "%d\n", get_disk_ro(disk) ? 1 : 0);
}
static ssize_t disk_capability_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct gendisk *disk = dev_to_disk(dev);
return sprintf(buf, "%x\n", disk->flags);
}
static ssize_t disk_alignment_offset_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct gendisk *disk = dev_to_disk(dev);
return sprintf(buf, "%d\n", queue_alignment_offset(disk->queue));
}
static ssize_t disk_discard_alignment_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct gendisk *disk = dev_to_disk(dev);
return sprintf(buf, "%d\n", queue_discard_alignment(disk->queue));
}
static DEVICE_ATTR(range, S_IRUGO, disk_range_show, NULL);
static DEVICE_ATTR(ext_range, S_IRUGO, disk_ext_range_show, NULL);
static DEVICE_ATTR(removable, S_IRUGO, disk_removable_show, NULL);
static DEVICE_ATTR(ro, S_IRUGO, disk_ro_show, NULL);
static DEVICE_ATTR(size, S_IRUGO, part_size_show, NULL);
static DEVICE_ATTR(alignment_offset, S_IRUGO, disk_alignment_offset_show, NULL);
static DEVICE_ATTR(discard_alignment, S_IRUGO, disk_discard_alignment_show,
NULL);
static DEVICE_ATTR(capability, S_IRUGO, disk_capability_show, NULL);
static DEVICE_ATTR(stat, S_IRUGO, part_stat_show, NULL);
static DEVICE_ATTR(inflight, S_IRUGO, part_inflight_show, NULL);
static DEVICE_ATTR(badblocks, S_IRUGO | S_IWUSR, disk_badblocks_show,
disk_badblocks_store);
#ifdef CONFIG_FAIL_MAKE_REQUEST
static struct device_attribute dev_attr_fail =
__ATTR(make-it-fail, S_IRUGO|S_IWUSR, part_fail_show, part_fail_store);
#endif
#ifdef CONFIG_FAIL_IO_TIMEOUT
static struct device_attribute dev_attr_fail_timeout =
__ATTR(io-timeout-fail, S_IRUGO|S_IWUSR, part_timeout_show,
part_timeout_store);
#endif
static struct attribute *disk_attrs[] = {
&dev_attr_range.attr,
&dev_attr_ext_range.attr,
&dev_attr_removable.attr,
&dev_attr_ro.attr,
&dev_attr_size.attr,
&dev_attr_alignment_offset.attr,
&dev_attr_discard_alignment.attr,
&dev_attr_capability.attr,
&dev_attr_stat.attr,
&dev_attr_inflight.attr,
&dev_attr_badblocks.attr,
#ifdef CONFIG_FAIL_MAKE_REQUEST
&dev_attr_fail.attr,
#endif
#ifdef CONFIG_FAIL_IO_TIMEOUT
&dev_attr_fail_timeout.attr,
#endif
NULL
};
static struct attribute_group disk_attr_group = {
.attrs = disk_attrs,
};
static const struct attribute_group *disk_attr_groups[] = {
&disk_attr_group,
NULL
};
/**
* disk_replace_part_tbl - replace disk->part_tbl in RCU-safe way
* @disk: disk to replace part_tbl for
* @new_ptbl: new part_tbl to install
*
* Replace disk->part_tbl with @new_ptbl in RCU-safe way. The
* original ptbl is freed using RCU callback.
*
* LOCKING:
* Matching bd_mutex locked.
*/
static void disk_replace_part_tbl(struct gendisk *disk,
struct disk_part_tbl *new_ptbl)
{
struct disk_part_tbl *old_ptbl = disk->part_tbl;
rcu_assign_pointer(disk->part_tbl, new_ptbl);
if (old_ptbl) {
rcu_assign_pointer(old_ptbl->last_lookup, NULL);
kfree_rcu(old_ptbl, rcu_head);
}
}
/**
* disk_expand_part_tbl - expand disk->part_tbl
* @disk: disk to expand part_tbl for
* @partno: expand such that this partno can fit in
*
* Expand disk->part_tbl such that @partno can fit in. disk->part_tbl
* uses RCU to allow unlocked dereferencing for stats and other stuff.
*
* LOCKING:
* Matching bd_mutex locked, might sleep.
*
* RETURNS:
* 0 on success, -errno on failure.
*/
int disk_expand_part_tbl(struct gendisk *disk, int partno)
{
struct disk_part_tbl *old_ptbl = disk->part_tbl;
struct disk_part_tbl *new_ptbl;
int len = old_ptbl ? old_ptbl->len : 0;
int i, target;
size_t size;
/*
* check for int overflow, since we can get here from blkpg_ioctl()
* with a user passed 'partno'.
*/
target = partno + 1;
if (target < 0)
return -EINVAL;
/* disk_max_parts() is zero during initialization, ignore if so */
if (disk_max_parts(disk) && target > disk_max_parts(disk))
return -EINVAL;
if (target <= len)
return 0;
size = sizeof(*new_ptbl) + target * sizeof(new_ptbl->part[0]);
new_ptbl = kzalloc_node(size, GFP_KERNEL, disk->node_id);
if (!new_ptbl)
return -ENOMEM;
new_ptbl->len = target;
for (i = 0; i < len; i++)
rcu_assign_pointer(new_ptbl->part[i], old_ptbl->part[i]);
disk_replace_part_tbl(disk, new_ptbl);
return 0;
}
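/*
 * Editor's note, not part of the original file: the "target < 0" test
 * above is the entire overflow guard. For example, a user passing
 * partno == INT_MAX through blkpg_ioctl() makes target wrap to INT_MIN,
 * which is rejected with -EINVAL instead of sizing the table from a
 * negative length.
 */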
static void disk_release(struct device *dev)
{
struct gendisk *disk = dev_to_disk(dev);
blk_free_devt(dev->devt);
disk_release_events(disk);
kfree(disk->random);
disk_replace_part_tbl(disk, NULL);
hd_free_part(&disk->part0);
if (disk->queue)
blk_put_queue(disk->queue);
kfree(disk);
}
struct class block_class = {
.name = "block",
};
static char *block_devnode(struct device *dev, umode_t *mode,
kuid_t *uid, kgid_t *gid)
{
struct gendisk *disk = dev_to_disk(dev);
if (disk->devnode)
return disk->devnode(disk, mode);
return NULL;
}
static struct device_type disk_type = {
.name = "disk",
.groups = disk_attr_groups,
.release = disk_release,
.devnode = block_devnode,
};
#ifdef CONFIG_PROC_FS
/*
* aggregate disk stat collector. Uses the same stats that the sysfs
* entries do, above, but makes them available through one seq_file.
*
* The output looks suspiciously like /proc/partitions with a bunch of
* extra fields.
*/
static int diskstats_show(struct seq_file *seqf, void *v)
{
struct gendisk *gp = v;
struct disk_part_iter piter;
struct hd_struct *hd;
char buf[BDEVNAME_SIZE];
int cpu;
/*
if (&disk_to_dev(gp)->kobj.entry == block_class.devices.next)
seq_puts(seqf, "major minor name"
" rio rmerge rsect ruse wio wmerge "
"wsect wuse running use aveq"
"\n\n");
*/
disk_part_iter_init(&piter, gp, DISK_PITER_INCL_EMPTY_PART0);
while ((hd = disk_part_iter_next(&piter))) {
cpu = part_stat_lock();
part_round_stats(cpu, hd);
part_stat_unlock();
seq_printf(seqf, "%4d %7d %s %lu %lu %lu "
"%u %lu %lu %lu %u %u %u %u\n",
MAJOR(part_devt(hd)), MINOR(part_devt(hd)),
disk_name(gp, hd->partno, buf),
part_stat_read(hd, ios[READ]),
part_stat_read(hd, merges[READ]),
part_stat_read(hd, sectors[READ]),
jiffies_to_msecs(part_stat_read(hd, ticks[READ])),
part_stat_read(hd, ios[WRITE]),
part_stat_read(hd, merges[WRITE]),
part_stat_read(hd, sectors[WRITE]),
jiffies_to_msecs(part_stat_read(hd, ticks[WRITE])),
part_in_flight(hd),
jiffies_to_msecs(part_stat_read(hd, io_ticks)),
jiffies_to_msecs(part_stat_read(hd, time_in_queue))
);
}
disk_part_iter_exit(&piter);
return 0;
}
static const struct seq_operations diskstats_op = {
.start = disk_seqf_start,
.next = disk_seqf_next,
.stop = disk_seqf_stop,
.show = diskstats_show
};
static int diskstats_open(struct inode *inode, struct file *file)
{
return seq_open(file, &diskstats_op);
}
static const struct file_operations proc_diskstats_operations = {
.open = diskstats_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
static int __init proc_genhd_init(void)
{
proc_create("diskstats", 0, NULL, &proc_diskstats_operations);
proc_create("partitions", 0, NULL, &proc_partitions_operations);
return 0;
}
module_init(proc_genhd_init);
#endif /* CONFIG_PROC_FS */
dev_t blk_lookup_devt(const char *name, int partno)
{
dev_t devt = MKDEV(0, 0);
struct class_dev_iter iter;
struct device *dev;
class_dev_iter_init(&iter, &block_class, NULL, &disk_type);
while ((dev = class_dev_iter_next(&iter))) {
struct gendisk *disk = dev_to_disk(dev);
struct hd_struct *part;
if (strcmp(dev_name(dev), name))
continue;
if (partno < disk->minors) {
/* We need to return the right devno, even
* if the partition doesn't exist yet.
*/
devt = MKDEV(MAJOR(dev->devt),
MINOR(dev->devt) + partno);
break;
}
part = disk_get_part(disk, partno);
if (part) {
devt = part_devt(part);
disk_put_part(part);
break;
}
}
class_dev_iter_exit(&iter);
return devt;
}
EXPORT_SYMBOL(blk_lookup_devt);
struct gendisk *alloc_disk(int minors)
{
return alloc_disk_node(minors, NUMA_NO_NODE);
}
EXPORT_SYMBOL(alloc_disk);
struct gendisk *alloc_disk_node(int minors, int node_id)
{
struct gendisk *disk;
disk = kzalloc_node(sizeof(struct gendisk), GFP_KERNEL, node_id);
if (disk) {
if (!init_part_stats(&disk->part0)) {
kfree(disk);
return NULL;
}
disk->node_id = node_id;
if (disk_expand_part_tbl(disk, 0)) {
free_part_stats(&disk->part0);
kfree(disk);
return NULL;
}
disk->part_tbl->part[0] = &disk->part0;
/*
* set_capacity() and get_capacity() currently don't use
* the seqcounter to read/update part0->nr_sects. Still init
* the counter as we can read the sectors in the IO submission
* path using sequence counters.
*
* TODO: Ideally set_capacity() and get_capacity() should be
* converted to make use of bd_mutex and sequence counters.
*/
seqcount_init(&disk->part0.nr_sects_seq);
if (hd_ref_init(&disk->part0)) {
hd_free_part(&disk->part0);
kfree(disk);
return NULL;
}
disk->minors = minors;
rand_initialize_disk(disk);
disk_to_dev(disk)->class = &block_class;
disk_to_dev(disk)->type = &disk_type;
device_initialize(disk_to_dev(disk));
}
return disk;
}
EXPORT_SYMBOL(alloc_disk_node);
struct kobject *get_disk(struct gendisk *disk)
{
struct module *owner;
struct kobject *kobj;
if (!disk->fops)
return NULL;
owner = disk->fops->owner;
if (owner && !try_module_get(owner))
return NULL;
kobj = kobject_get(&disk_to_dev(disk)->kobj);
if (kobj == NULL) {
module_put(owner);
return NULL;
}
return kobj;
}
EXPORT_SYMBOL(get_disk);
void put_disk(struct gendisk *disk)
{
if (disk)
kobject_put(&disk_to_dev(disk)->kobj);
}
EXPORT_SYMBOL(put_disk);
static void set_disk_ro_uevent(struct gendisk *gd, int ro)
{
char event[] = "DISK_RO=1";
char *envp[] = { event, NULL };
if (!ro)
event[8] = '0';
kobject_uevent_env(&disk_to_dev(gd)->kobj, KOBJ_CHANGE, envp);
}
void set_device_ro(struct block_device *bdev, int flag)
{
bdev->bd_part->policy = flag;
}
EXPORT_SYMBOL(set_device_ro);
void set_disk_ro(struct gendisk *disk, int flag)
{
struct disk_part_iter piter;
struct hd_struct *part;
if (disk->part0.policy != flag) {
set_disk_ro_uevent(disk, flag);
disk->part0.policy = flag;
}
disk_part_iter_init(&piter, disk, DISK_PITER_INCL_EMPTY);
while ((part = disk_part_iter_next(&piter)))
part->policy = flag;
disk_part_iter_exit(&piter);
}
EXPORT_SYMBOL(set_disk_ro);
int bdev_read_only(struct block_device *bdev)
{
if (!bdev)
return 0;
return bdev->bd_part->policy;
}
EXPORT_SYMBOL(bdev_read_only);
int invalidate_partition(struct gendisk *disk, int partno)
{
int res = 0;
struct block_device *bdev = bdget_disk(disk, partno);
if (bdev) {
fsync_bdev(bdev);
res = __invalidate_device(bdev, true);
bdput(bdev);
}
return res;
}
EXPORT_SYMBOL(invalidate_partition);
/*
* Disk events - monitor disk events like media change and eject request.
*/
struct disk_events {
struct list_head node; /* all disk_event's */
struct gendisk *disk; /* the associated disk */
spinlock_t lock;
struct mutex block_mutex; /* protects blocking */
int block; /* event blocking depth */
unsigned int pending; /* events already sent out */
unsigned int clearing; /* events being cleared */
long poll_msecs; /* interval, -1 for default */
struct delayed_work dwork;
};
static const char *disk_events_strs[] = {
[ilog2(DISK_EVENT_MEDIA_CHANGE)] = "media_change",
[ilog2(DISK_EVENT_EJECT_REQUEST)] = "eject_request",
};
static char *disk_uevents[] = {
[ilog2(DISK_EVENT_MEDIA_CHANGE)] = "DISK_MEDIA_CHANGE=1",
[ilog2(DISK_EVENT_EJECT_REQUEST)] = "DISK_EJECT_REQUEST=1",
};
/* list of all disk_events */
static DEFINE_MUTEX(disk_events_mutex);
static LIST_HEAD(disk_events);
/* disable in-kernel polling by default */
static unsigned long disk_events_dfl_poll_msecs;
static unsigned long disk_events_poll_jiffies(struct gendisk *disk)
{
struct disk_events *ev = disk->ev;
long intv_msecs = 0;
/*
* If device-specific poll interval is set, always use it. If
* the default is being used, poll iff there are events which
* can't be monitored asynchronously.
*/
if (ev->poll_msecs >= 0)
intv_msecs = ev->poll_msecs;
else if (disk->events & ~disk->async_events)
intv_msecs = disk_events_dfl_poll_msecs;
return msecs_to_jiffies(intv_msecs);
}
/**
* disk_block_events - block and flush disk event checking
* @disk: disk to block events for
*
* On return from this function, it is guaranteed that event checking
* isn't in progress and won't happen until unblocked by
* disk_unblock_events(). Events blocking is counted and the actual
* unblocking happens after the matching number of unblocks are done.
*
* Note that this intentionally does not block event checking from
* disk_clear_events().
*
* CONTEXT:
* Might sleep.
*/
void disk_block_events(struct gendisk *disk)
{
struct disk_events *ev = disk->ev;
unsigned long flags;
bool cancel;
if (!ev)
return;
/*
* Outer mutex ensures that the first blocker completes canceling
* the event work before further blockers are allowed to finish.
*/
mutex_lock(&ev->block_mutex);
spin_lock_irqsave(&ev->lock, flags);
cancel = !ev->block++;
spin_unlock_irqrestore(&ev->lock, flags);
if (cancel)
cancel_delayed_work_sync(&disk->ev->dwork);
mutex_unlock(&ev->block_mutex);
}
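/*
 * Editor's illustration, not part of the original file: blocking nests,
 * so a caller brackets any region that must not race with event checking;
 * a minimal sketch:
 *
 * disk_block_events(disk);
 * ... revalidate media; the event work cannot run here ...
 * disk_unblock_events(disk);
 */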
static void __disk_unblock_events(struct gendisk *disk, bool check_now)
{
struct disk_events *ev = disk->ev;
unsigned long intv;
unsigned long flags;
spin_lock_irqsave(&ev->lock, flags);
if (WARN_ON_ONCE(ev->block <= 0))
goto out_unlock;
if (--ev->block)
goto out_unlock;
intv = disk_events_poll_jiffies(disk);
if (check_now)
queue_delayed_work(system_freezable_power_efficient_wq,
&ev->dwork, 0);
else if (intv)
queue_delayed_work(system_freezable_power_efficient_wq,
&ev->dwork, intv);
out_unlock:
spin_unlock_irqrestore(&ev->lock, flags);
}
/**
* disk_unblock_events - unblock disk event checking
* @disk: disk to unblock events for
*
* Undo disk_block_events(). When the block count reaches zero, it
* starts events polling if configured.
*
* CONTEXT:
* Don't care. Safe to call from irq context.
*/
void disk_unblock_events(struct gendisk *disk)
{
if (disk->ev)
__disk_unblock_events(disk, false);
}
/**
* disk_flush_events - schedule immediate event checking and flushing
* @disk: disk to check and flush events for
* @mask: events to flush
*
* Schedule immediate event checking on @disk if not blocked. Events in
* @mask are scheduled to be cleared from the driver. Note that this
* doesn't clear the events from @disk->ev.
*
* CONTEXT:
* If @mask is non-zero must be called with bdev->bd_mutex held.
*/
void disk_flush_events(struct gendisk *disk, unsigned int mask)
{
struct disk_events *ev = disk->ev;
if (!ev)
return;
spin_lock_irq(&ev->lock);
ev->clearing |= mask;
if (!ev->block)
mod_delayed_work(system_freezable_power_efficient_wq,
&ev->dwork, 0);
spin_unlock_irq(&ev->lock);
}
static void disk_check_events(struct disk_events *ev,
			      unsigned int *clearing_ptr);

/**
* disk_clear_events - synchronously check, clear and return pending events
* @disk: disk to fetch and clear events from
* @mask: mask of events to be fetched and cleared
*
* Disk events are synchronously checked and pending events in @mask
* are cleared and returned. This ignores the block count.
*
* CONTEXT:
* Might sleep.
*/
unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask)
{
const struct block_device_operations *bdops = disk->fops;
struct disk_events *ev = disk->ev;
unsigned int pending;
unsigned int clearing = mask;
if (!ev) {
/* for drivers still using the old ->media_changed method */
if ((mask & DISK_EVENT_MEDIA_CHANGE) &&
bdops->media_changed && bdops->media_changed(disk))
return DISK_EVENT_MEDIA_CHANGE;
return 0;
}
disk_block_events(disk);
/*
* store the union of mask and ev->clearing on the stack so that the
* race with disk_flush_events does not cause ambiguity (ev->clearing
* can still be modified even if events are blocked).
*/
spin_lock_irq(&ev->lock);
clearing |= ev->clearing;
ev->clearing = 0;
spin_unlock_irq(&ev->lock);
disk_check_events(ev, &clearing);
/*
* if ev->clearing is not 0, the disk_flush_events got called in the
* middle of this function, so we want to run the workfn without delay.
*/
__disk_unblock_events(disk, ev->clearing ? true : false);
/* then, fetch and clear pending events */
spin_lock_irq(&ev->lock);
pending = ev->pending & mask;
ev->pending &= ~mask;
spin_unlock_irq(&ev->lock);
WARN_ON_ONCE(clearing & mask);
return pending;
}
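/*
 * Hedged usage note (caller assumed, not defined in this file): media
 * revalidation paths such as check_disk_change() typically call
 * disk_clear_events(disk, DISK_EVENT_MEDIA_CHANGE) and rescan the
 * device when the returned mask is non-zero.
 */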
/*
 * This part is split out so that disk_clear_events() can pass in a
 * different pointer for clearing_ptr.
*/
static void disk_events_workfn(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
struct disk_events *ev = container_of(dwork, struct disk_events, dwork);
disk_check_events(ev, &ev->clearing);
}
static void disk_check_events(struct disk_events *ev,
unsigned int *clearing_ptr)
{
struct gendisk *disk = ev->disk;
char *envp[ARRAY_SIZE(disk_uevents) + 1] = { };
unsigned int clearing = *clearing_ptr;
unsigned int events;
unsigned long intv;
int nr_events = 0, i;
/* check events */
events = disk->fops->check_events(disk, clearing);
/* accumulate pending events and schedule next poll if necessary */
spin_lock_irq(&ev->lock);
events &= ~ev->pending;
ev->pending |= events;
*clearing_ptr &= ~clearing;
intv = disk_events_poll_jiffies(disk);
if (!ev->block && intv)
queue_delayed_work(system_freezable_power_efficient_wq,
&ev->dwork, intv);
spin_unlock_irq(&ev->lock);
/*
* Tell userland about new events. Only the events listed in
* @disk->events are reported. Unlisted events are processed the
* same internally but never get reported to userland.
*/
for (i = 0; i < ARRAY_SIZE(disk_uevents); i++)
if (events & disk->events & (1 << i))
envp[nr_events++] = disk_uevents[i];
if (nr_events)
kobject_uevent_env(&disk_to_dev(disk)->kobj, KOBJ_CHANGE, envp);
}
/*
* A disk events enabled device has the following sysfs nodes under
* its /sys/block/X/ directory.
*
* events : list of all supported events
* events_async : list of events which can be detected w/o polling
* events_poll_msecs : polling interval, 0: disable, -1: system default
*/
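/*
 * Hedged illustration (device name and values assumed):
 *
 *	# cat /sys/block/sr0/events
 *	media_change eject_request
 *	# echo 2000 > /sys/block/sr0/events_poll_msecs	(poll every 2s)
 *	# echo -1 > /sys/block/sr0/events_poll_msecs	(back to default)
 */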
static ssize_t __disk_events_show(unsigned int events, char *buf)
{
const char *delim = "";
ssize_t pos = 0;
int i;
for (i = 0; i < ARRAY_SIZE(disk_events_strs); i++)
if (events & (1 << i)) {
pos += sprintf(buf + pos, "%s%s",
delim, disk_events_strs[i]);
delim = " ";
}
if (pos)
pos += sprintf(buf + pos, "\n");
return pos;
}
static ssize_t disk_events_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct gendisk *disk = dev_to_disk(dev);
return __disk_events_show(disk->events, buf);
}
static ssize_t disk_events_async_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct gendisk *disk = dev_to_disk(dev);
return __disk_events_show(disk->async_events, buf);
}
static ssize_t disk_events_poll_msecs_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct gendisk *disk = dev_to_disk(dev);
return sprintf(buf, "%ld\n", disk->ev->poll_msecs);
}
static ssize_t disk_events_poll_msecs_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct gendisk *disk = dev_to_disk(dev);
long intv;
if (!count || !sscanf(buf, "%ld", &intv))
return -EINVAL;
if (intv < 0 && intv != -1)
return -EINVAL;
disk_block_events(disk);
disk->ev->poll_msecs = intv;
__disk_unblock_events(disk, true);
return count;
}
static const DEVICE_ATTR(events, S_IRUGO, disk_events_show, NULL);
static const DEVICE_ATTR(events_async, S_IRUGO, disk_events_async_show, NULL);
static const DEVICE_ATTR(events_poll_msecs, S_IRUGO|S_IWUSR,
disk_events_poll_msecs_show,
disk_events_poll_msecs_store);
static const struct attribute *disk_events_attrs[] = {
&dev_attr_events.attr,
&dev_attr_events_async.attr,
&dev_attr_events_poll_msecs.attr,
NULL,
};
/*
* The default polling interval can be specified by the kernel
* parameter block.events_dfl_poll_msecs which defaults to 0
* (disable). This can also be modified runtime by writing to
* /sys/module/block/events_dfl_poll_msecs.
*/
static int disk_events_set_dfl_poll_msecs(const char *val,
const struct kernel_param *kp)
{
struct disk_events *ev;
int ret;
ret = param_set_ulong(val, kp);
if (ret < 0)
return ret;
mutex_lock(&disk_events_mutex);
list_for_each_entry(ev, &disk_events, node)
disk_flush_events(ev->disk, 0);
mutex_unlock(&disk_events_mutex);
return 0;
}
static const struct kernel_param_ops disk_events_dfl_poll_msecs_param_ops = {
.set = disk_events_set_dfl_poll_msecs,
.get = param_get_ulong,
};
#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "block."
module_param_cb(events_dfl_poll_msecs, &disk_events_dfl_poll_msecs_param_ops,
&disk_events_dfl_poll_msecs, 0644);
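/*
 * Hedged usage illustration (sysfs path assumed):
 *
 *	block.events_dfl_poll_msecs=2000	on the kernel command line, or
 *	# echo 2000 > /sys/module/block/parameters/events_dfl_poll_msecs
 *
 * A runtime write goes through disk_events_set_dfl_poll_msecs() above,
 * which reschedules polling on every registered disk via
 * disk_flush_events().
 */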
/*
* disk_{alloc|add|del|release}_events - initialize and destroy disk_events.
*/
static void disk_alloc_events(struct gendisk *disk)
{
struct disk_events *ev;
if (!disk->fops->check_events)
return;
ev = kzalloc(sizeof(*ev), GFP_KERNEL);
if (!ev) {
pr_warn("%s: failed to initialize events\n", disk->disk_name);
return;
}
INIT_LIST_HEAD(&ev->node);
ev->disk = disk;
spin_lock_init(&ev->lock);
mutex_init(&ev->block_mutex);
ev->block = 1;
ev->poll_msecs = -1;
INIT_DELAYED_WORK(&ev->dwork, disk_events_workfn);
disk->ev = ev;
}
static void disk_add_events(struct gendisk *disk)
{
if (!disk->ev)
return;
/* FIXME: error handling */
if (sysfs_create_files(&disk_to_dev(disk)->kobj, disk_events_attrs) < 0)
pr_warn("%s: failed to create sysfs files for events\n",
disk->disk_name);
mutex_lock(&disk_events_mutex);
list_add_tail(&disk->ev->node, &disk_events);
mutex_unlock(&disk_events_mutex);
/*
* Block count is initialized to 1 and the following initial
* unblock kicks it into action.
*/
__disk_unblock_events(disk, true);
}
static void disk_del_events(struct gendisk *disk)
{
if (!disk->ev)
return;
disk_block_events(disk);
mutex_lock(&disk_events_mutex);
list_del_init(&disk->ev->node);
mutex_unlock(&disk_events_mutex);
sysfs_remove_files(&disk_to_dev(disk)->kobj, disk_events_attrs);
}
static void disk_release_events(struct gendisk *disk)
{
/* the block count should be 1 from disk_del_events() */
WARN_ON_ONCE(disk->ev && disk->ev->block != 1);
kfree(disk->ev);
}
| ./CrossVul/dataset_final_sorted/CWE-416/c/good_5333_0 |
crossvul-cpp_data_good_5282_1 | /*
+----------------------------------------------------------------------+
| PHP Version 5 |
+----------------------------------------------------------------------+
| Copyright (c) 1997-2016 The PHP Group |
+----------------------------------------------------------------------+
| This source file is subject to version 3.01 of the PHP license, |
| that is bundled with this package in the file LICENSE, and is |
| available through the world-wide-web at the following url: |
| http://www.php.net/license/3_01.txt |
| If you did not receive a copy of the PHP license and are unable to |
| obtain it through the world-wide-web, please send a note to |
| license@php.net so we can mail you a copy immediately. |
+----------------------------------------------------------------------+
| Author: Andrei Zmievski <andrei@php.net> |
+----------------------------------------------------------------------+
*/
/* $Id$ */
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include "php.h"
#if HAVE_WDDX
#include "ext/xml/expat_compat.h"
#include "php_wddx.h"
#include "php_wddx_api.h"
#define PHP_XML_INTERNAL
#include "ext/xml/php_xml.h"
#include "ext/standard/php_incomplete_class.h"
#include "ext/standard/base64.h"
#include "ext/standard/info.h"
#include "ext/standard/php_smart_str.h"
#include "ext/standard/html.h"
#include "ext/standard/php_string.h"
#include "ext/date/php_date.h"
#include "zend_globals.h"
#define WDDX_BUF_LEN 256
#define PHP_CLASS_NAME_VAR "php_class_name"
#define EL_ARRAY "array"
#define EL_BINARY "binary"
#define EL_BOOLEAN "boolean"
#define EL_CHAR "char"
#define EL_CHAR_CODE "code"
#define EL_NULL "null"
#define EL_NUMBER "number"
#define EL_PACKET "wddxPacket"
#define EL_STRING "string"
#define EL_STRUCT "struct"
#define EL_VALUE "value"
#define EL_VAR "var"
#define EL_NAME "name"
#define EL_VERSION "version"
#define EL_RECORDSET "recordset"
#define EL_FIELD "field"
#define EL_DATETIME "dateTime"
#define php_wddx_deserialize(a,b) \
php_wddx_deserialize_ex((a)->value.str.val, (a)->value.str.len, (b))
#define SET_STACK_VARNAME \
if (stack->varname) { \
ent.varname = estrdup(stack->varname); \
efree(stack->varname); \
stack->varname = NULL; \
} else \
ent.varname = NULL; \
static int le_wddx;
typedef struct {
zval *data;
enum {
ST_ARRAY,
ST_BOOLEAN,
ST_NULL,
ST_NUMBER,
ST_STRING,
ST_BINARY,
ST_STRUCT,
ST_RECORDSET,
ST_FIELD,
ST_DATETIME
} type;
char *varname;
} st_entry;
typedef struct {
int top, max;
char *varname;
zend_bool done;
void **elements;
} wddx_stack;
static void php_wddx_process_data(void *user_data, const XML_Char *s, int len);
/* {{{ arginfo */
ZEND_BEGIN_ARG_INFO_EX(arginfo_wddx_serialize_value, 0, 0, 1)
ZEND_ARG_INFO(0, var)
ZEND_ARG_INFO(0, comment)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_wddx_serialize_vars, 0, 0, 1)
ZEND_ARG_VARIADIC_INFO(0, var_names)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_wddx_serialize_start, 0, 0, 0)
ZEND_ARG_INFO(0, comment)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_wddx_packet_end, 0, 0, 1)
ZEND_ARG_INFO(0, packet_id)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_wddx_add_vars, 0, 0, 2)
ZEND_ARG_INFO(0, packet_id)
ZEND_ARG_VARIADIC_INFO(0, var_names)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_wddx_deserialize, 0, 0, 1)
ZEND_ARG_INFO(0, packet)
ZEND_END_ARG_INFO()
/* }}} */
/* {{{ wddx_functions[]
*/
const zend_function_entry wddx_functions[] = {
PHP_FE(wddx_serialize_value, arginfo_wddx_serialize_value)
PHP_FE(wddx_serialize_vars, arginfo_wddx_serialize_vars)
PHP_FE(wddx_packet_start, arginfo_wddx_serialize_start)
PHP_FE(wddx_packet_end, arginfo_wddx_packet_end)
PHP_FE(wddx_add_vars, arginfo_wddx_add_vars)
PHP_FE(wddx_deserialize, arginfo_wddx_deserialize)
PHP_FE_END
};
/* }}} */
PHP_MINIT_FUNCTION(wddx);
PHP_MINFO_FUNCTION(wddx);
/* {{{ dynamically loadable module stuff */
#ifdef COMPILE_DL_WDDX
ZEND_GET_MODULE(wddx)
#endif /* COMPILE_DL_WDDX */
/* }}} */
/* {{{ wddx_module_entry
*/
zend_module_entry wddx_module_entry = {
STANDARD_MODULE_HEADER,
"wddx",
wddx_functions,
PHP_MINIT(wddx),
NULL,
NULL,
NULL,
PHP_MINFO(wddx),
NO_VERSION_YET,
STANDARD_MODULE_PROPERTIES
};
/* }}} */
/* {{{ wddx_stack_init
*/
static int wddx_stack_init(wddx_stack *stack)
{
stack->top = 0;
stack->elements = (void **) safe_emalloc(sizeof(void **), STACK_BLOCK_SIZE, 0);
stack->max = STACK_BLOCK_SIZE;
stack->varname = NULL;
stack->done = 0;
return SUCCESS;
}
/* }}} */
/* {{{ wddx_stack_push
*/
static int wddx_stack_push(wddx_stack *stack, void *element, int size)
{
if (stack->top >= stack->max) { /* we need to allocate more memory */
stack->elements = (void **) erealloc(stack->elements,
(sizeof(void **) * (stack->max += STACK_BLOCK_SIZE)));
}
stack->elements[stack->top] = (void *) emalloc(size);
memcpy(stack->elements[stack->top], element, size);
return stack->top++;
}
/* }}} */
/* {{{ wddx_stack_top
*/
static int wddx_stack_top(wddx_stack *stack, void **element)
{
if (stack->top > 0) {
*element = stack->elements[stack->top - 1];
return SUCCESS;
} else {
*element = NULL;
return FAILURE;
}
}
/* }}} */
/* {{{ wddx_stack_is_empty
*/
static int wddx_stack_is_empty(wddx_stack *stack)
{
if (stack->top == 0) {
return 1;
} else {
return 0;
}
}
/* }}} */
/* {{{ wddx_stack_destroy
*/
static int wddx_stack_destroy(wddx_stack *stack)
{
register int i;
if (stack->elements) {
for (i = 0; i < stack->top; i++) {
if (((st_entry *)stack->elements[i])->data
&& ((st_entry *)stack->elements[i])->type != ST_FIELD) {
zval_ptr_dtor(&((st_entry *)stack->elements[i])->data);
}
if (((st_entry *)stack->elements[i])->varname) {
efree(((st_entry *)stack->elements[i])->varname);
}
efree(stack->elements[i]);
}
efree(stack->elements);
}
return SUCCESS;
}
/* }}} */
/* {{{ release_wddx_packet_rsrc
*/
static void release_wddx_packet_rsrc(zend_rsrc_list_entry *rsrc TSRMLS_DC)
{
smart_str *str = (smart_str *)rsrc->ptr;
smart_str_free(str);
efree(str);
}
/* }}} */
#include "ext/session/php_session.h"
#if HAVE_PHP_SESSION && !defined(COMPILE_DL_SESSION)
/* {{{ PS_SERIALIZER_ENCODE_FUNC
*/
PS_SERIALIZER_ENCODE_FUNC(wddx)
{
wddx_packet *packet;
PS_ENCODE_VARS;
packet = php_wddx_constructor();
php_wddx_packet_start(packet, NULL, 0);
php_wddx_add_chunk_static(packet, WDDX_STRUCT_S);
PS_ENCODE_LOOP(
php_wddx_serialize_var(packet, *struc, key, key_length TSRMLS_CC);
);
php_wddx_add_chunk_static(packet, WDDX_STRUCT_E);
php_wddx_packet_end(packet);
*newstr = php_wddx_gather(packet);
php_wddx_destructor(packet);
if (newlen) {
*newlen = strlen(*newstr);
}
return SUCCESS;
}
/* }}} */
/* {{{ PS_SERIALIZER_DECODE_FUNC
*/
PS_SERIALIZER_DECODE_FUNC(wddx)
{
zval *retval;
zval **ent;
char *key;
uint key_length;
char tmp[128];
ulong idx;
int hash_type;
int ret;
if (vallen == 0) {
return SUCCESS;
}
MAKE_STD_ZVAL(retval);
if ((ret = php_wddx_deserialize_ex((char *)val, vallen, retval)) == SUCCESS) {
if (Z_TYPE_P(retval) != IS_ARRAY) {
zval_ptr_dtor(&retval);
return FAILURE;
}
for (zend_hash_internal_pointer_reset(Z_ARRVAL_P(retval));
zend_hash_get_current_data(Z_ARRVAL_P(retval), (void **) &ent) == SUCCESS;
zend_hash_move_forward(Z_ARRVAL_P(retval))) {
hash_type = zend_hash_get_current_key_ex(Z_ARRVAL_P(retval), &key, &key_length, &idx, 0, NULL);
switch (hash_type) {
case HASH_KEY_IS_LONG:
key_length = slprintf(tmp, sizeof(tmp), "%ld", idx) + 1;
key = tmp;
/* fallthru */
case HASH_KEY_IS_STRING:
php_set_session_var(key, key_length-1, *ent, NULL TSRMLS_CC);
PS_ADD_VAR(key);
}
}
}
zval_ptr_dtor(&retval);
return ret;
}
/* }}} */
#endif
/* {{{ PHP_MINIT_FUNCTION
*/
PHP_MINIT_FUNCTION(wddx)
{
le_wddx = zend_register_list_destructors_ex(release_wddx_packet_rsrc, NULL, "wddx", module_number);
#if HAVE_PHP_SESSION && !defined(COMPILE_DL_SESSION)
php_session_register_serializer("wddx",
PS_SERIALIZER_ENCODE_NAME(wddx),
PS_SERIALIZER_DECODE_NAME(wddx));
#endif
return SUCCESS;
}
/* }}} */
/* {{{ PHP_MINFO_FUNCTION
*/
PHP_MINFO_FUNCTION(wddx)
{
php_info_print_table_start();
#if HAVE_PHP_SESSION && !defined(COMPILE_DL_SESSION)
php_info_print_table_header(2, "WDDX Support", "enabled" );
php_info_print_table_row(2, "WDDX Session Serializer", "enabled" );
#else
php_info_print_table_row(2, "WDDX Support", "enabled" );
#endif
php_info_print_table_end();
}
/* }}} */
/* {{{ php_wddx_packet_start
*/
void php_wddx_packet_start(wddx_packet *packet, char *comment, int comment_len)
{
php_wddx_add_chunk_static(packet, WDDX_PACKET_S);
if (comment) {
char *escaped;
size_t escaped_len;
TSRMLS_FETCH();
escaped = php_escape_html_entities(
comment, comment_len, &escaped_len, 0, ENT_QUOTES, NULL TSRMLS_CC);
php_wddx_add_chunk_static(packet, WDDX_HEADER_S);
php_wddx_add_chunk_static(packet, WDDX_COMMENT_S);
php_wddx_add_chunk_ex(packet, escaped, escaped_len);
php_wddx_add_chunk_static(packet, WDDX_COMMENT_E);
php_wddx_add_chunk_static(packet, WDDX_HEADER_E);
str_efree(escaped);
} else {
php_wddx_add_chunk_static(packet, WDDX_HEADER);
}
php_wddx_add_chunk_static(packet, WDDX_DATA_S);
}
/* }}} */
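/* Hedged illustration of the framing produced above; the exact markup
 * comes from the WDDX_* macros in php_wddx_api.h (assumed here):
 *
 *   <wddxPacket version='1.0'><header><comment>...</comment></header>
 *   <data> ... serialized values ... </data></wddxPacket>
 *
 * with php_wddx_packet_end() emitting the closing </data></wddxPacket>.
 */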
/* {{{ php_wddx_packet_end
*/
void php_wddx_packet_end(wddx_packet *packet)
{
php_wddx_add_chunk_static(packet, WDDX_DATA_E);
php_wddx_add_chunk_static(packet, WDDX_PACKET_E);
}
/* }}} */
#define FLUSH_BUF() \
if (l > 0) { \
php_wddx_add_chunk_ex(packet, buf, l); \
l = 0; \
}
/* {{{ php_wddx_serialize_string
*/
static void php_wddx_serialize_string(wddx_packet *packet, zval *var TSRMLS_DC)
{
php_wddx_add_chunk_static(packet, WDDX_STRING_S);
if (Z_STRLEN_P(var) > 0) {
char *buf;
size_t buf_len;
buf = php_escape_html_entities(Z_STRVAL_P(var), Z_STRLEN_P(var), &buf_len, 0, ENT_QUOTES, NULL TSRMLS_CC);
php_wddx_add_chunk_ex(packet, buf, buf_len);
str_efree(buf);
}
php_wddx_add_chunk_static(packet, WDDX_STRING_E);
}
/* }}} */
/* {{{ php_wddx_serialize_number
*/
static void php_wddx_serialize_number(wddx_packet *packet, zval *var)
{
char tmp_buf[WDDX_BUF_LEN];
zval tmp;
tmp = *var;
zval_copy_ctor(&tmp);
convert_to_string(&tmp);
snprintf(tmp_buf, sizeof(tmp_buf), WDDX_NUMBER, Z_STRVAL(tmp));
zval_dtor(&tmp);
php_wddx_add_chunk(packet, tmp_buf);
}
/* }}} */
/* {{{ php_wddx_serialize_boolean
*/
static void php_wddx_serialize_boolean(wddx_packet *packet, zval *var)
{
php_wddx_add_chunk(packet, Z_LVAL_P(var) ? WDDX_BOOLEAN_TRUE : WDDX_BOOLEAN_FALSE);
}
/* }}} */
/* {{{ php_wddx_serialize_unset
*/
static void php_wddx_serialize_unset(wddx_packet *packet)
{
php_wddx_add_chunk_static(packet, WDDX_NULL);
}
/* }}} */
/* {{{ php_wddx_serialize_object
*/
static void php_wddx_serialize_object(wddx_packet *packet, zval *obj)
{
/* OBJECTS_FIXME */
zval **ent, *fname, **varname;
zval *retval = NULL;
const char *key;
ulong idx;
char tmp_buf[WDDX_BUF_LEN];
HashTable *objhash, *sleephash;
TSRMLS_FETCH();
MAKE_STD_ZVAL(fname);
ZVAL_STRING(fname, "__sleep", 1);
/*
	 * We try to call the __sleep() method on the object. It is supposed
	 * to return an array of property names to be serialized.
*/
if (call_user_function_ex(CG(function_table), &obj, fname, &retval, 0, 0, 1, NULL TSRMLS_CC) == SUCCESS) {
if (retval && (sleephash = HASH_OF(retval))) {
PHP_CLASS_ATTRIBUTES;
PHP_SET_CLASS_ATTRIBUTES(obj);
php_wddx_add_chunk_static(packet, WDDX_STRUCT_S);
snprintf(tmp_buf, WDDX_BUF_LEN, WDDX_VAR_S, PHP_CLASS_NAME_VAR);
php_wddx_add_chunk(packet, tmp_buf);
php_wddx_add_chunk_static(packet, WDDX_STRING_S);
php_wddx_add_chunk_ex(packet, class_name, name_len);
php_wddx_add_chunk_static(packet, WDDX_STRING_E);
php_wddx_add_chunk_static(packet, WDDX_VAR_E);
PHP_CLEANUP_CLASS_ATTRIBUTES();
objhash = HASH_OF(obj);
for (zend_hash_internal_pointer_reset(sleephash);
zend_hash_get_current_data(sleephash, (void **)&varname) == SUCCESS;
zend_hash_move_forward(sleephash)) {
if (Z_TYPE_PP(varname) != IS_STRING) {
php_error_docref(NULL TSRMLS_CC, E_NOTICE, "__sleep should return an array only containing the names of instance-variables to serialize.");
continue;
}
if (zend_hash_find(objhash, Z_STRVAL_PP(varname), Z_STRLEN_PP(varname)+1, (void **)&ent) == SUCCESS) {
php_wddx_serialize_var(packet, *ent, Z_STRVAL_PP(varname), Z_STRLEN_PP(varname) TSRMLS_CC);
}
}
php_wddx_add_chunk_static(packet, WDDX_STRUCT_E);
}
} else {
uint key_len;
PHP_CLASS_ATTRIBUTES;
PHP_SET_CLASS_ATTRIBUTES(obj);
php_wddx_add_chunk_static(packet, WDDX_STRUCT_S);
snprintf(tmp_buf, WDDX_BUF_LEN, WDDX_VAR_S, PHP_CLASS_NAME_VAR);
php_wddx_add_chunk(packet, tmp_buf);
php_wddx_add_chunk_static(packet, WDDX_STRING_S);
php_wddx_add_chunk_ex(packet, class_name, name_len);
php_wddx_add_chunk_static(packet, WDDX_STRING_E);
php_wddx_add_chunk_static(packet, WDDX_VAR_E);
PHP_CLEANUP_CLASS_ATTRIBUTES();
objhash = HASH_OF(obj);
for (zend_hash_internal_pointer_reset(objhash);
zend_hash_get_current_data(objhash, (void**)&ent) == SUCCESS;
zend_hash_move_forward(objhash)) {
if (*ent == obj) {
continue;
}
if (zend_hash_get_current_key_ex(objhash, &key, &key_len, &idx, 0, NULL) == HASH_KEY_IS_STRING) {
const char *class_name, *prop_name;
zend_unmangle_property_name(key, key_len-1, &class_name, &prop_name);
php_wddx_serialize_var(packet, *ent, prop_name, strlen(prop_name)+1 TSRMLS_CC);
} else {
key_len = slprintf(tmp_buf, sizeof(tmp_buf), "%ld", idx);
php_wddx_serialize_var(packet, *ent, tmp_buf, key_len TSRMLS_CC);
}
}
php_wddx_add_chunk_static(packet, WDDX_STRUCT_E);
}
zval_dtor(fname);
FREE_ZVAL(fname);
if (retval) {
zval_ptr_dtor(&retval);
}
}
/* }}} */
/* {{{ php_wddx_serialize_array
*/
static void php_wddx_serialize_array(wddx_packet *packet, zval *arr)
{
zval **ent;
char *key;
uint key_len;
int is_struct = 0, ent_type;
ulong idx;
HashTable *target_hash;
char tmp_buf[WDDX_BUF_LEN];
ulong ind = 0;
int type;
TSRMLS_FETCH();
target_hash = HASH_OF(arr);
for (zend_hash_internal_pointer_reset(target_hash);
zend_hash_get_current_data(target_hash, (void**)&ent) == SUCCESS;
zend_hash_move_forward(target_hash)) {
type = zend_hash_get_current_key(target_hash, &key, &idx, 0);
if (type == HASH_KEY_IS_STRING) {
is_struct = 1;
break;
}
if (idx != ind) {
is_struct = 1;
break;
}
ind++;
}
if (is_struct) {
php_wddx_add_chunk_static(packet, WDDX_STRUCT_S);
} else {
snprintf(tmp_buf, sizeof(tmp_buf), WDDX_ARRAY_S, zend_hash_num_elements(target_hash));
php_wddx_add_chunk(packet, tmp_buf);
}
for (zend_hash_internal_pointer_reset(target_hash);
zend_hash_get_current_data(target_hash, (void**)&ent) == SUCCESS;
zend_hash_move_forward(target_hash)) {
if (*ent == arr) {
continue;
}
if (is_struct) {
ent_type = zend_hash_get_current_key_ex(target_hash, &key, &key_len, &idx, 0, NULL);
if (ent_type == HASH_KEY_IS_STRING) {
php_wddx_serialize_var(packet, *ent, key, key_len TSRMLS_CC);
} else {
key_len = slprintf(tmp_buf, sizeof(tmp_buf), "%ld", idx);
php_wddx_serialize_var(packet, *ent, tmp_buf, key_len TSRMLS_CC);
}
} else {
php_wddx_serialize_var(packet, *ent, NULL, 0 TSRMLS_CC);
}
}
if (is_struct) {
php_wddx_add_chunk_static(packet, WDDX_STRUCT_E);
} else {
php_wddx_add_chunk_static(packet, WDDX_ARRAY_E);
}
}
/* }}} */
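/* Hedged example of the struct-vs-array decision above (output markup
 * assumed from the WDDX_* macros): array(1, 2) has contiguous numeric
 * keys from 0 and serializes as
 *   <array length='2'><number>1</number><number>2</number></array>
 * while array('a' => 1) contains a string key and serializes as
 *   <struct><var name='a'><number>1</number></var></struct>
 */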
/* {{{ php_wddx_serialize_var
*/
void php_wddx_serialize_var(wddx_packet *packet, zval *var, char *name, int name_len TSRMLS_DC)
{
HashTable *ht;
if (name) {
size_t name_esc_len;
char *tmp_buf, *name_esc;
name_esc = php_escape_html_entities(name, name_len, &name_esc_len, 0, ENT_QUOTES, NULL TSRMLS_CC);
tmp_buf = emalloc(name_esc_len + sizeof(WDDX_VAR_S));
snprintf(tmp_buf, name_esc_len + sizeof(WDDX_VAR_S), WDDX_VAR_S, name_esc);
php_wddx_add_chunk(packet, tmp_buf);
efree(tmp_buf);
str_efree(name_esc);
}
switch(Z_TYPE_P(var)) {
case IS_STRING:
php_wddx_serialize_string(packet, var TSRMLS_CC);
break;
case IS_LONG:
case IS_DOUBLE:
php_wddx_serialize_number(packet, var);
break;
case IS_BOOL:
php_wddx_serialize_boolean(packet, var);
break;
case IS_NULL:
php_wddx_serialize_unset(packet);
break;
case IS_ARRAY:
ht = Z_ARRVAL_P(var);
if (ht->nApplyCount > 1) {
php_error_docref(NULL TSRMLS_CC, E_RECOVERABLE_ERROR, "WDDX doesn't support circular references");
return;
}
ht->nApplyCount++;
php_wddx_serialize_array(packet, var);
ht->nApplyCount--;
break;
case IS_OBJECT:
ht = Z_OBJPROP_P(var);
if (ht->nApplyCount > 1) {
php_error_docref(NULL TSRMLS_CC, E_RECOVERABLE_ERROR, "WDDX doesn't support circular references");
return;
}
ht->nApplyCount++;
php_wddx_serialize_object(packet, var);
ht->nApplyCount--;
break;
}
if (name) {
php_wddx_add_chunk_static(packet, WDDX_VAR_E);
}
}
/* }}} */
/* {{{ php_wddx_add_var
*/
static void php_wddx_add_var(wddx_packet *packet, zval *name_var)
{
zval **val;
HashTable *target_hash;
TSRMLS_FETCH();
if (Z_TYPE_P(name_var) == IS_STRING) {
if (!EG(active_symbol_table)) {
zend_rebuild_symbol_table(TSRMLS_C);
}
if (zend_hash_find(EG(active_symbol_table), Z_STRVAL_P(name_var),
Z_STRLEN_P(name_var)+1, (void**)&val) != FAILURE) {
php_wddx_serialize_var(packet, *val, Z_STRVAL_P(name_var), Z_STRLEN_P(name_var) TSRMLS_CC);
}
} else if (Z_TYPE_P(name_var) == IS_ARRAY || Z_TYPE_P(name_var) == IS_OBJECT) {
int is_array = Z_TYPE_P(name_var) == IS_ARRAY;
target_hash = HASH_OF(name_var);
if (is_array && target_hash->nApplyCount > 1) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "recursion detected");
return;
}
zend_hash_internal_pointer_reset(target_hash);
while(zend_hash_get_current_data(target_hash, (void**)&val) == SUCCESS) {
if (is_array) {
target_hash->nApplyCount++;
}
php_wddx_add_var(packet, *val);
if (is_array) {
target_hash->nApplyCount--;
}
zend_hash_move_forward(target_hash);
}
}
}
/* }}} */
/* {{{ php_wddx_push_element
*/
static void php_wddx_push_element(void *user_data, const XML_Char *name, const XML_Char **atts)
{
st_entry ent;
wddx_stack *stack = (wddx_stack *)user_data;
if (!strcmp(name, EL_PACKET)) {
int i;
if (atts) for (i=0; atts[i]; i++) {
if (!strcmp(atts[i], EL_VERSION)) {
/* nothing for now */
}
}
} else if (!strcmp(name, EL_STRING)) {
ent.type = ST_STRING;
SET_STACK_VARNAME;
ALLOC_ZVAL(ent.data);
INIT_PZVAL(ent.data);
Z_TYPE_P(ent.data) = IS_STRING;
Z_STRVAL_P(ent.data) = STR_EMPTY_ALLOC();
Z_STRLEN_P(ent.data) = 0;
wddx_stack_push((wddx_stack *)stack, &ent, sizeof(st_entry));
} else if (!strcmp(name, EL_BINARY)) {
ent.type = ST_BINARY;
SET_STACK_VARNAME;
ALLOC_ZVAL(ent.data);
INIT_PZVAL(ent.data);
Z_TYPE_P(ent.data) = IS_STRING;
Z_STRVAL_P(ent.data) = STR_EMPTY_ALLOC();
Z_STRLEN_P(ent.data) = 0;
wddx_stack_push((wddx_stack *)stack, &ent, sizeof(st_entry));
} else if (!strcmp(name, EL_CHAR)) {
int i;
if (atts) for (i = 0; atts[i]; i++) {
if (!strcmp(atts[i], EL_CHAR_CODE) && atts[++i] && atts[i][0]) {
char tmp_buf[2];
snprintf(tmp_buf, sizeof(tmp_buf), "%c", (char)strtol(atts[i], NULL, 16));
php_wddx_process_data(user_data, tmp_buf, strlen(tmp_buf));
break;
}
}
} else if (!strcmp(name, EL_NUMBER)) {
ent.type = ST_NUMBER;
SET_STACK_VARNAME;
ALLOC_ZVAL(ent.data);
INIT_PZVAL(ent.data);
Z_TYPE_P(ent.data) = IS_LONG;
Z_LVAL_P(ent.data) = 0;
wddx_stack_push((wddx_stack *)stack, &ent, sizeof(st_entry));
} else if (!strcmp(name, EL_BOOLEAN)) {
int i;
if (atts) for (i = 0; atts[i]; i++) {
if (!strcmp(atts[i], EL_VALUE) && atts[++i] && atts[i][0]) {
ent.type = ST_BOOLEAN;
SET_STACK_VARNAME;
ALLOC_ZVAL(ent.data);
INIT_PZVAL(ent.data);
Z_TYPE_P(ent.data) = IS_BOOL;
wddx_stack_push((wddx_stack *)stack, &ent, sizeof(st_entry));
php_wddx_process_data(user_data, atts[i], strlen(atts[i]));
break;
}
}
} else if (!strcmp(name, EL_NULL)) {
ent.type = ST_NULL;
SET_STACK_VARNAME;
ALLOC_ZVAL(ent.data);
INIT_PZVAL(ent.data);
ZVAL_NULL(ent.data);
wddx_stack_push((wddx_stack *)stack, &ent, sizeof(st_entry));
} else if (!strcmp(name, EL_ARRAY)) {
ent.type = ST_ARRAY;
SET_STACK_VARNAME;
ALLOC_ZVAL(ent.data);
array_init(ent.data);
INIT_PZVAL(ent.data);
wddx_stack_push((wddx_stack *)stack, &ent, sizeof(st_entry));
} else if (!strcmp(name, EL_STRUCT)) {
ent.type = ST_STRUCT;
SET_STACK_VARNAME;
ALLOC_ZVAL(ent.data);
array_init(ent.data);
INIT_PZVAL(ent.data);
wddx_stack_push((wddx_stack *)stack, &ent, sizeof(st_entry));
} else if (!strcmp(name, EL_VAR)) {
int i;
if (atts) for (i = 0; atts[i]; i++) {
if (!strcmp(atts[i], EL_NAME) && atts[++i] && atts[i][0]) {
if (stack->varname) efree(stack->varname);
stack->varname = estrdup(atts[i]);
break;
}
}
} else if (!strcmp(name, EL_RECORDSET)) {
int i;
ent.type = ST_RECORDSET;
SET_STACK_VARNAME;
MAKE_STD_ZVAL(ent.data);
array_init(ent.data);
if (atts) for (i = 0; atts[i]; i++) {
if (!strcmp(atts[i], "fieldNames") && atts[++i] && atts[i][0]) {
zval *tmp;
char *key;
char *p1, *p2, *endp;
endp = (char *)atts[i] + strlen(atts[i]);
p1 = (char *)atts[i];
while ((p2 = php_memnstr(p1, ",", sizeof(",")-1, endp)) != NULL) {
key = estrndup(p1, p2 - p1);
MAKE_STD_ZVAL(tmp);
array_init(tmp);
add_assoc_zval_ex(ent.data, key, p2 - p1 + 1, tmp);
p1 = p2 + sizeof(",")-1;
efree(key);
}
if (p1 <= endp) {
MAKE_STD_ZVAL(tmp);
array_init(tmp);
add_assoc_zval_ex(ent.data, p1, endp - p1 + 1, tmp);
}
break;
}
}
wddx_stack_push((wddx_stack *)stack, &ent, sizeof(st_entry));
} else if (!strcmp(name, EL_FIELD)) {
int i;
st_entry ent;
ent.type = ST_FIELD;
ent.varname = NULL;
ent.data = NULL;
if (atts) for (i = 0; atts[i]; i++) {
if (!strcmp(atts[i], EL_NAME) && atts[++i] && atts[i][0]) {
st_entry *recordset;
zval **field;
if (wddx_stack_top(stack, (void**)&recordset) == SUCCESS &&
recordset->type == ST_RECORDSET &&
zend_hash_find(Z_ARRVAL_P(recordset->data), (char*)atts[i], strlen(atts[i])+1, (void**)&field) == SUCCESS) {
ent.data = *field;
}
break;
}
}
wddx_stack_push((wddx_stack *)stack, &ent, sizeof(st_entry));
} else if (!strcmp(name, EL_DATETIME)) {
ent.type = ST_DATETIME;
SET_STACK_VARNAME;
ALLOC_ZVAL(ent.data);
INIT_PZVAL(ent.data);
Z_TYPE_P(ent.data) = IS_LONG;
wddx_stack_push((wddx_stack *)stack, &ent, sizeof(st_entry));
}
}
/* }}} */
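/* Hedged example of the recordset handling above: an element such as
 *   <recordset rowCount='2' fieldNames='NAME,AGE'>
 * is turned into ent.data = array('NAME' => array(), 'AGE' => array()),
 * and each subsequent <field name='...'> appends its values to the
 * matching sub-array.
 */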
/* {{{ php_wddx_pop_element
*/
static void php_wddx_pop_element(void *user_data, const XML_Char *name)
{
st_entry *ent1, *ent2;
wddx_stack *stack = (wddx_stack *)user_data;
HashTable *target_hash;
zend_class_entry **pce;
zval *obj;
zval *tmp;
TSRMLS_FETCH();
/* OBJECTS_FIXME */
if (stack->top == 0) {
return;
}
if (!strcmp(name, EL_STRING) || !strcmp(name, EL_NUMBER) ||
!strcmp(name, EL_BOOLEAN) || !strcmp(name, EL_NULL) ||
!strcmp(name, EL_ARRAY) || !strcmp(name, EL_STRUCT) ||
!strcmp(name, EL_RECORDSET) || !strcmp(name, EL_BINARY) ||
!strcmp(name, EL_DATETIME)) {
wddx_stack_top(stack, (void**)&ent1);
if (!ent1->data) {
if (stack->top > 1) {
stack->top--;
efree(ent1);
} else {
stack->done = 1;
}
return;
}
if (!strcmp(name, EL_BINARY)) {
int new_len=0;
unsigned char *new_str;
new_str = php_base64_decode(Z_STRVAL_P(ent1->data), Z_STRLEN_P(ent1->data), &new_len);
STR_FREE(Z_STRVAL_P(ent1->data));
if (new_str) {
Z_STRVAL_P(ent1->data) = new_str;
Z_STRLEN_P(ent1->data) = new_len;
} else {
ZVAL_EMPTY_STRING(ent1->data);
}
}
/* Call __wakeup() method on the object. */
if (Z_TYPE_P(ent1->data) == IS_OBJECT) {
zval *fname, *retval = NULL;
MAKE_STD_ZVAL(fname);
ZVAL_STRING(fname, "__wakeup", 1);
call_user_function_ex(NULL, &ent1->data, fname, &retval, 0, 0, 0, NULL TSRMLS_CC);
zval_dtor(fname);
FREE_ZVAL(fname);
if (retval) {
zval_ptr_dtor(&retval);
}
}
if (stack->top > 1) {
stack->top--;
wddx_stack_top(stack, (void**)&ent2);
/* if non-existent field */
if (ent2->data == NULL) {
zval_ptr_dtor(&ent1->data);
efree(ent1);
return;
}
if (Z_TYPE_P(ent2->data) == IS_ARRAY || Z_TYPE_P(ent2->data) == IS_OBJECT) {
target_hash = HASH_OF(ent2->data);
if (ent1->varname) {
if (!strcmp(ent1->varname, PHP_CLASS_NAME_VAR) &&
Z_TYPE_P(ent1->data) == IS_STRING && Z_STRLEN_P(ent1->data) &&
ent2->type == ST_STRUCT && Z_TYPE_P(ent2->data) == IS_ARRAY) {
zend_bool incomplete_class = 0;
zend_str_tolower(Z_STRVAL_P(ent1->data), Z_STRLEN_P(ent1->data));
if (zend_hash_find(EG(class_table), Z_STRVAL_P(ent1->data),
Z_STRLEN_P(ent1->data)+1, (void **) &pce)==FAILURE) {
incomplete_class = 1;
pce = &PHP_IC_ENTRY;
}
/* Initialize target object */
MAKE_STD_ZVAL(obj);
object_init_ex(obj, *pce);
/* Merge current hashtable with object's default properties */
zend_hash_merge(Z_OBJPROP_P(obj),
Z_ARRVAL_P(ent2->data),
(void (*)(void *)) zval_add_ref,
(void *) &tmp, sizeof(zval *), 0);
if (incomplete_class) {
php_store_class_name(obj, Z_STRVAL_P(ent1->data), Z_STRLEN_P(ent1->data));
}
/* Clean up old array entry */
zval_ptr_dtor(&ent2->data);
/* Set stack entry to point to the newly created object */
ent2->data = obj;
/* Clean up class name var entry */
zval_ptr_dtor(&ent1->data);
} else if (Z_TYPE_P(ent2->data) == IS_OBJECT) {
zend_class_entry *old_scope = EG(scope);
EG(scope) = Z_OBJCE_P(ent2->data);
Z_DELREF_P(ent1->data);
add_property_zval(ent2->data, ent1->varname, ent1->data);
EG(scope) = old_scope;
} else {
zend_symtable_update(target_hash, ent1->varname, strlen(ent1->varname)+1, &ent1->data, sizeof(zval *), NULL);
}
efree(ent1->varname);
} else {
zend_hash_next_index_insert(target_hash, &ent1->data, sizeof(zval *), NULL);
}
}
efree(ent1);
} else {
stack->done = 1;
}
} else if (!strcmp(name, EL_VAR) && stack->varname) {
efree(stack->varname);
stack->varname = NULL;
} else if (!strcmp(name, EL_FIELD)) {
st_entry *ent;
wddx_stack_top(stack, (void **)&ent);
efree(ent);
stack->top--;
}
}
/* }}} */
/* {{{ php_wddx_process_data
*/
static void php_wddx_process_data(void *user_data, const XML_Char *s, int len)
{
st_entry *ent;
wddx_stack *stack = (wddx_stack *)user_data;
TSRMLS_FETCH();
if (!wddx_stack_is_empty(stack) && !stack->done) {
wddx_stack_top(stack, (void**)&ent);
switch (ent->type) {
case ST_STRING:
if (Z_STRLEN_P(ent->data) == 0) {
STR_FREE(Z_STRVAL_P(ent->data));
Z_STRVAL_P(ent->data) = estrndup(s, len);
Z_STRLEN_P(ent->data) = len;
} else {
Z_STRVAL_P(ent->data) = erealloc(Z_STRVAL_P(ent->data), Z_STRLEN_P(ent->data) + len + 1);
memcpy(Z_STRVAL_P(ent->data) + Z_STRLEN_P(ent->data), s, len);
Z_STRLEN_P(ent->data) += len;
Z_STRVAL_P(ent->data)[Z_STRLEN_P(ent->data)] = '\0';
}
break;
case ST_BINARY:
if (Z_STRLEN_P(ent->data) == 0) {
STR_FREE(Z_STRVAL_P(ent->data));
Z_STRVAL_P(ent->data) = estrndup(s, len + 1);
} else {
Z_STRVAL_P(ent->data) = erealloc(Z_STRVAL_P(ent->data), Z_STRLEN_P(ent->data) + len + 1);
memcpy(Z_STRVAL_P(ent->data) + Z_STRLEN_P(ent->data), s, len);
}
Z_STRLEN_P(ent->data) += len;
Z_STRVAL_P(ent->data)[Z_STRLEN_P(ent->data)] = '\0';
break;
case ST_NUMBER:
Z_TYPE_P(ent->data) = IS_STRING;
Z_STRLEN_P(ent->data) = len;
Z_STRVAL_P(ent->data) = estrndup(s, len);
convert_scalar_to_number(ent->data TSRMLS_CC);
break;
case ST_BOOLEAN:
if(!ent->data) {
break;
}
if (!strcmp(s, "true")) {
Z_LVAL_P(ent->data) = 1;
} else if (!strcmp(s, "false")) {
Z_LVAL_P(ent->data) = 0;
} else {
zval_ptr_dtor(&ent->data);
if (ent->varname) {
efree(ent->varname);
ent->varname = NULL;
}
ent->data = NULL;
}
break;
case ST_DATETIME: {
char *tmp;
if (Z_TYPE_P(ent->data) == IS_STRING) {
tmp = safe_emalloc(Z_STRLEN_P(ent->data), 1, (size_t)len + 1);
memcpy(tmp, Z_STRVAL_P(ent->data), Z_STRLEN_P(ent->data));
memcpy(tmp + Z_STRLEN_P(ent->data), s, len);
len += Z_STRLEN_P(ent->data);
efree(Z_STRVAL_P(ent->data));
Z_TYPE_P(ent->data) = IS_LONG;
} else {
tmp = emalloc(len + 1);
memcpy(tmp, s, len);
}
tmp[len] = '\0';
Z_LVAL_P(ent->data) = php_parse_date(tmp, NULL);
/* date out of range < 1969 or > 2038 */
if (Z_LVAL_P(ent->data) == -1) {
ZVAL_STRINGL(ent->data, tmp, len, 0);
} else {
efree(tmp);
}
}
break;
default:
break;
}
}
}
/* }}} */
/* {{{ php_wddx_deserialize_ex
*/
int php_wddx_deserialize_ex(char *value, int vallen, zval *return_value)
{
wddx_stack stack;
XML_Parser parser;
st_entry *ent;
int retval;
wddx_stack_init(&stack);
parser = XML_ParserCreate("UTF-8");
XML_SetUserData(parser, &stack);
XML_SetElementHandler(parser, php_wddx_push_element, php_wddx_pop_element);
XML_SetCharacterDataHandler(parser, php_wddx_process_data);
XML_Parse(parser, value, vallen, 1);
XML_ParserFree(parser);
if (stack.top == 1) {
wddx_stack_top(&stack, (void**)&ent);
if(ent->data == NULL) {
retval = FAILURE;
} else {
*return_value = *(ent->data);
zval_copy_ctor(return_value);
retval = SUCCESS;
}
} else {
retval = FAILURE;
}
wddx_stack_destroy(&stack);
return retval;
}
/* }}} */
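/* Hedged usage sketch for the API above (caller, packet_str and
 * packet_len are assumed, mirroring the session decoder earlier in
 * this file): */
#if 0
	zval *rv;
	MAKE_STD_ZVAL(rv);
	if (php_wddx_deserialize_ex(packet_str, packet_len, rv) == SUCCESS) {
		/* rv now holds the root value of the packet */
	}
	zval_ptr_dtor(&rv);
#endif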
/* {{{ proto string wddx_serialize_value(mixed var [, string comment])
Creates a new packet and serializes the given value */
PHP_FUNCTION(wddx_serialize_value)
{
zval *var;
char *comment = NULL;
int comment_len = 0;
wddx_packet *packet;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "z|s", &var, &comment, &comment_len) == FAILURE) {
return;
}
packet = php_wddx_constructor();
php_wddx_packet_start(packet, comment, comment_len);
php_wddx_serialize_var(packet, var, NULL, 0 TSRMLS_CC);
php_wddx_packet_end(packet);
ZVAL_STRINGL(return_value, packet->c, packet->len, 1);
smart_str_free(packet);
efree(packet);
}
/* }}} */
/* {{{ proto string wddx_serialize_vars(mixed var_name [, mixed ...])
Creates a new packet and serializes given variables into a struct */
PHP_FUNCTION(wddx_serialize_vars)
{
int num_args, i;
wddx_packet *packet;
zval ***args = NULL;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "+", &args, &num_args) == FAILURE) {
return;
}
packet = php_wddx_constructor();
php_wddx_packet_start(packet, NULL, 0);
php_wddx_add_chunk_static(packet, WDDX_STRUCT_S);
for (i=0; i<num_args; i++) {
if (Z_TYPE_PP(args[i]) != IS_ARRAY && Z_TYPE_PP(args[i]) != IS_OBJECT) {
convert_to_string_ex(args[i]);
}
php_wddx_add_var(packet, *args[i]);
}
php_wddx_add_chunk_static(packet, WDDX_STRUCT_E);
php_wddx_packet_end(packet);
efree(args);
ZVAL_STRINGL(return_value, packet->c, packet->len, 1);
smart_str_free(packet);
efree(packet);
}
/* }}} */
/* {{{ php_wddx_constructor
*/
wddx_packet *php_wddx_constructor(void)
{
smart_str *packet;
packet = (smart_str *)emalloc(sizeof(smart_str));
packet->c = NULL;
return packet;
}
/* }}} */
/* {{{ php_wddx_destructor
*/
void php_wddx_destructor(wddx_packet *packet)
{
smart_str_free(packet);
efree(packet);
}
/* }}} */
/* {{{ proto resource wddx_packet_start([string comment])
Starts a WDDX packet with optional comment and returns the packet id */
PHP_FUNCTION(wddx_packet_start)
{
char *comment = NULL;
int comment_len = 0;
wddx_packet *packet;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|s", &comment, &comment_len) == FAILURE) {
return;
}
packet = php_wddx_constructor();
php_wddx_packet_start(packet, comment, comment_len);
php_wddx_add_chunk_static(packet, WDDX_STRUCT_S);
ZEND_REGISTER_RESOURCE(return_value, packet, le_wddx);
}
/* }}} */
/* {{{ proto string wddx_packet_end(resource packet_id)
Ends specified WDDX packet and returns the string containing the packet */
PHP_FUNCTION(wddx_packet_end)
{
zval *packet_id;
wddx_packet *packet = NULL;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "r", &packet_id) == FAILURE) {
return;
}
ZEND_FETCH_RESOURCE(packet, wddx_packet *, &packet_id, -1, "WDDX packet ID", le_wddx);
php_wddx_add_chunk_static(packet, WDDX_STRUCT_E);
php_wddx_packet_end(packet);
ZVAL_STRINGL(return_value, packet->c, packet->len, 1);
zend_list_delete(Z_LVAL_P(packet_id));
}
/* }}} */
/* {{{ proto int wddx_add_vars(resource packet_id, mixed var_names [, mixed ...])
Serializes given variables and adds them to packet given by packet_id */
PHP_FUNCTION(wddx_add_vars)
{
int num_args, i;
zval ***args = NULL;
zval *packet_id;
wddx_packet *packet = NULL;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "r+", &packet_id, &args, &num_args) == FAILURE) {
return;
}
if (!ZEND_FETCH_RESOURCE_NO_RETURN(packet, wddx_packet *, &packet_id, -1, "WDDX packet ID", le_wddx)) {
efree(args);
RETURN_FALSE;
}
if (!packet) {
efree(args);
RETURN_FALSE;
}
for (i=0; i<num_args; i++) {
if (Z_TYPE_PP(args[i]) != IS_ARRAY && Z_TYPE_PP(args[i]) != IS_OBJECT) {
convert_to_string_ex(args[i]);
}
php_wddx_add_var(packet, (*args[i]));
}
efree(args);
RETURN_TRUE;
}
/* }}} */
/* {{{ proto mixed wddx_deserialize(mixed packet)
Deserializes given packet and returns a PHP value */
PHP_FUNCTION(wddx_deserialize)
{
zval *packet;
char *payload;
int payload_len;
php_stream *stream = NULL;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "z", &packet) == FAILURE) {
return;
}
if (Z_TYPE_P(packet) == IS_STRING) {
payload = Z_STRVAL_P(packet);
payload_len = Z_STRLEN_P(packet);
} else if (Z_TYPE_P(packet) == IS_RESOURCE) {
php_stream_from_zval(stream, &packet);
if (stream) {
payload_len = php_stream_copy_to_mem(stream, &payload, PHP_STREAM_COPY_ALL, 0);
}
} else {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Expecting parameter 1 to be a string or a stream");
return;
}
if (payload_len == 0) {
return;
}
php_wddx_deserialize_ex(payload, payload_len, return_value);
if (stream) {
pefree(payload, 0);
}
}
/* }}} */
#endif /* HAVE_WDDX */
/*
* Local variables:
* tab-width: 4
* c-basic-offset: 4
* End:
* vim600: sw=4 ts=4 fdm=marker
* vim<600: sw=4 ts=4
*/
| ./CrossVul/dataset_final_sorted/CWE-416/c/good_5282_1 |
crossvul-cpp_data_bad_5021_8 | /*
* RAW sockets for IPv6
* Linux INET6 implementation
*
* Authors:
* Pedro Roque <roque@di.fc.ul.pt>
*
* Adapted from linux/net/ipv4/raw.c
*
* Fixes:
* Hideaki YOSHIFUJI : sin6_scope_id support
* YOSHIFUJI,H.@USAGI : raw checksum (RFC2292(bis) compliance)
* Kazunori MIYAZAWA @USAGI: change process style to use ip6_append_data
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/slab.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/icmpv6.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>
#include <linux/skbuff.h>
#include <linux/compat.h>
#include <linux/uaccess.h>
#include <asm/ioctls.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ipv6.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/addrconf.h>
#include <net/transp_v6.h>
#include <net/udp.h>
#include <net/inet_common.h>
#include <net/tcp_states.h>
#if IS_ENABLED(CONFIG_IPV6_MIP6)
#include <net/mip6.h>
#endif
#include <linux/mroute6.h>
#include <net/raw.h>
#include <net/rawv6.h>
#include <net/xfrm.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/export.h>
#define ICMPV6_HDRLEN 4 /* ICMPv6 header, RFC 4443 Section 2.1 */
static struct raw_hashinfo raw_v6_hashinfo = {
.lock = __RW_LOCK_UNLOCKED(raw_v6_hashinfo.lock),
};
static struct sock *__raw_v6_lookup(struct net *net, struct sock *sk,
unsigned short num, const struct in6_addr *loc_addr,
const struct in6_addr *rmt_addr, int dif)
{
bool is_multicast = ipv6_addr_is_multicast(loc_addr);
sk_for_each_from(sk)
if (inet_sk(sk)->inet_num == num) {
if (!net_eq(sock_net(sk), net))
continue;
if (!ipv6_addr_any(&sk->sk_v6_daddr) &&
!ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr))
continue;
if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)
continue;
if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr)) {
if (ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr))
goto found;
if (is_multicast &&
inet6_mc_check(sk, loc_addr, rmt_addr))
goto found;
continue;
}
goto found;
}
sk = NULL;
found:
return sk;
}
/*
* 0 - deliver
* 1 - block
*/
static int icmpv6_filter(const struct sock *sk, const struct sk_buff *skb)
{
struct icmp6hdr _hdr;
const struct icmp6hdr *hdr;
/* We require only the four bytes of the ICMPv6 header, not any
* additional bytes of message body in "struct icmp6hdr".
*/
hdr = skb_header_pointer(skb, skb_transport_offset(skb),
ICMPV6_HDRLEN, &_hdr);
if (hdr) {
const __u32 *data = &raw6_sk(sk)->filter.data[0];
unsigned int type = hdr->icmp6_type;
return (data[type >> 5] & (1U << (type & 31))) != 0;
}
return 1;
}
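/*
 * Hedged worked example: for ICMPV6_ECHO_REQUEST (type 128),
 * type >> 5 == 4 selects data[4] and 1U << (128 & 31) is bit 0, so the
 * packet is blocked iff bit 0 of data[4] is set in the socket's filter.
 */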
#if IS_ENABLED(CONFIG_IPV6_MIP6)
typedef int mh_filter_t(struct sock *sock, struct sk_buff *skb);
static mh_filter_t __rcu *mh_filter __read_mostly;
int rawv6_mh_filter_register(mh_filter_t filter)
{
rcu_assign_pointer(mh_filter, filter);
return 0;
}
EXPORT_SYMBOL(rawv6_mh_filter_register);
int rawv6_mh_filter_unregister(mh_filter_t filter)
{
RCU_INIT_POINTER(mh_filter, NULL);
synchronize_rcu();
return 0;
}
EXPORT_SYMBOL(rawv6_mh_filter_unregister);
#endif
/*
* demultiplex raw sockets.
* (should consider queueing the skb in the sock receive_queue
* without calling rawv6.c)
*
* Caller owns SKB so we must make clones.
*/
static bool ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
{
const struct in6_addr *saddr;
const struct in6_addr *daddr;
struct sock *sk;
bool delivered = false;
__u8 hash;
struct net *net;
saddr = &ipv6_hdr(skb)->saddr;
daddr = saddr + 1;
hash = nexthdr & (RAW_HTABLE_SIZE - 1);
read_lock(&raw_v6_hashinfo.lock);
sk = sk_head(&raw_v6_hashinfo.ht[hash]);
if (!sk)
goto out;
net = dev_net(skb->dev);
sk = __raw_v6_lookup(net, sk, nexthdr, daddr, saddr, inet6_iif(skb));
while (sk) {
int filtered;
delivered = true;
switch (nexthdr) {
case IPPROTO_ICMPV6:
filtered = icmpv6_filter(sk, skb);
break;
#if IS_ENABLED(CONFIG_IPV6_MIP6)
case IPPROTO_MH:
{
			/* XXX: To validate MH only once per packet, the
			 * check is placed here, although it really belongs
			 * after the xfrm policy check. The xfrm policy check
			 * instead lives in rawv6_rcv() because it is
			 * required for each socket.
			 */
mh_filter_t *filter;
filter = rcu_dereference(mh_filter);
filtered = filter ? (*filter)(sk, skb) : 0;
break;
}
#endif
default:
filtered = 0;
break;
}
if (filtered < 0)
break;
if (filtered == 0) {
struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
/* Not releasing hash table! */
if (clone) {
nf_reset(clone);
rawv6_rcv(sk, clone);
}
}
sk = __raw_v6_lookup(net, sk_next(sk), nexthdr, daddr, saddr,
inet6_iif(skb));
}
out:
read_unlock(&raw_v6_hashinfo.lock);
return delivered;
}
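/*
 * Hedged note: RAW_HTABLE_SIZE is assumed to be 256 (one slot per
 * possible nexthdr value), making "nexthdr & (RAW_HTABLE_SIZE - 1)"
 * an identity mapping, so every protocol gets its own hash chain.
 */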
bool raw6_local_deliver(struct sk_buff *skb, int nexthdr)
{
struct sock *raw_sk;
raw_sk = sk_head(&raw_v6_hashinfo.ht[nexthdr & (RAW_HTABLE_SIZE - 1)]);
if (raw_sk && !ipv6_raw_deliver(skb, nexthdr))
raw_sk = NULL;
return raw_sk != NULL;
}
/* This cleans up af_inet6 a bit. -DaveM */
static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
struct inet_sock *inet = inet_sk(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
struct sockaddr_in6 *addr = (struct sockaddr_in6 *) uaddr;
__be32 v4addr = 0;
int addr_type;
int err;
if (addr_len < SIN6_LEN_RFC2133)
return -EINVAL;
if (addr->sin6_family != AF_INET6)
return -EINVAL;
addr_type = ipv6_addr_type(&addr->sin6_addr);
/* Raw sockets are IPv6 only */
if (addr_type == IPV6_ADDR_MAPPED)
return -EADDRNOTAVAIL;
lock_sock(sk);
err = -EINVAL;
if (sk->sk_state != TCP_CLOSE)
goto out;
rcu_read_lock();
/* Check if the address belongs to the host. */
if (addr_type != IPV6_ADDR_ANY) {
struct net_device *dev = NULL;
if (__ipv6_addr_needs_scope_id(addr_type)) {
if (addr_len >= sizeof(struct sockaddr_in6) &&
addr->sin6_scope_id) {
/* Override any existing binding, if another
* one is supplied by user.
*/
sk->sk_bound_dev_if = addr->sin6_scope_id;
}
/* Binding to link-local address requires an interface */
if (!sk->sk_bound_dev_if)
goto out_unlock;
err = -ENODEV;
dev = dev_get_by_index_rcu(sock_net(sk),
sk->sk_bound_dev_if);
if (!dev)
goto out_unlock;
}
		/* The ipv4 addr of the socket is invalid. Only the
		 * unspecified and mapped addresses have a v4 equivalent.
		 */
v4addr = LOOPBACK4_IPV6;
if (!(addr_type & IPV6_ADDR_MULTICAST) &&
!sock_net(sk)->ipv6.sysctl.ip_nonlocal_bind) {
err = -EADDRNOTAVAIL;
if (!ipv6_chk_addr(sock_net(sk), &addr->sin6_addr,
dev, 0)) {
goto out_unlock;
}
}
}
inet->inet_rcv_saddr = inet->inet_saddr = v4addr;
sk->sk_v6_rcv_saddr = addr->sin6_addr;
if (!(addr_type & IPV6_ADDR_MULTICAST))
np->saddr = addr->sin6_addr;
err = 0;
out_unlock:
rcu_read_unlock();
out:
release_sock(sk);
return err;
}
static void rawv6_err(struct sock *sk, struct sk_buff *skb,
struct inet6_skb_parm *opt,
u8 type, u8 code, int offset, __be32 info)
{
struct inet_sock *inet = inet_sk(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
int err;
int harderr;
/* Report error on raw socket, if:
1. User requested recverr.
	   2. Socket is connected (otherwise the error indication
	      is useless without recverr and the error is hard).
*/
if (!np->recverr && sk->sk_state != TCP_ESTABLISHED)
return;
harderr = icmpv6_err_convert(type, code, &err);
if (type == ICMPV6_PKT_TOOBIG) {
ip6_sk_update_pmtu(skb, sk, info);
harderr = (np->pmtudisc == IPV6_PMTUDISC_DO);
}
if (type == NDISC_REDIRECT) {
ip6_sk_redirect(skb, sk);
return;
}
if (np->recverr) {
u8 *payload = skb->data;
if (!inet->hdrincl)
payload += offset;
ipv6_icmp_error(sk, skb, err, 0, ntohl(info), payload);
}
if (np->recverr || harderr) {
sk->sk_err = err;
sk->sk_error_report(sk);
}
}
void raw6_icmp_error(struct sk_buff *skb, int nexthdr,
u8 type, u8 code, int inner_offset, __be32 info)
{
struct sock *sk;
int hash;
const struct in6_addr *saddr, *daddr;
struct net *net;
hash = nexthdr & (RAW_HTABLE_SIZE - 1);
read_lock(&raw_v6_hashinfo.lock);
sk = sk_head(&raw_v6_hashinfo.ht[hash]);
if (sk) {
/* Note: ipv6_hdr(skb) != skb->data */
const struct ipv6hdr *ip6h = (const struct ipv6hdr *)skb->data;
saddr = &ip6h->saddr;
daddr = &ip6h->daddr;
net = dev_net(skb->dev);
while ((sk = __raw_v6_lookup(net, sk, nexthdr, saddr, daddr,
inet6_iif(skb)))) {
rawv6_err(sk, skb, NULL, type, code,
inner_offset, info);
sk = sk_next(sk);
}
}
read_unlock(&raw_v6_hashinfo.lock);
}
static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
skb_checksum_complete(skb)) {
atomic_inc(&sk->sk_drops);
kfree_skb(skb);
return NET_RX_DROP;
}
/* Charge it to the socket. */
skb_dst_drop(skb);
if (sock_queue_rcv_skb(sk, skb) < 0) {
kfree_skb(skb);
return NET_RX_DROP;
}
return 0;
}
/*
* This is next to useless...
* if we demultiplex in network layer we don't need the extra call
* just to queue the skb...
 *	maybe we could have the network layer decide, based on a hint,
 *	whether it should call raw_rcv for demultiplexing
*/
int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
{
struct inet_sock *inet = inet_sk(sk);
struct raw6_sock *rp = raw6_sk(sk);
if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
atomic_inc(&sk->sk_drops);
kfree_skb(skb);
return NET_RX_DROP;
}
if (!rp->checksum)
skb->ip_summed = CHECKSUM_UNNECESSARY;
if (skb->ip_summed == CHECKSUM_COMPLETE) {
skb_postpull_rcsum(skb, skb_network_header(skb),
skb_network_header_len(skb));
if (!csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr,
skb->len, inet->inet_num, skb->csum))
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
if (!skb_csum_unnecessary(skb))
skb->csum = ~csum_unfold(csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr,
skb->len,
inet->inet_num, 0));
if (inet->hdrincl) {
if (skb_checksum_complete(skb)) {
atomic_inc(&sk->sk_drops);
kfree_skb(skb);
return NET_RX_DROP;
}
}
rawv6_rcv_skb(sk, skb);
return 0;
}
/*
* This should be easy, if there is something there
* we return it, otherwise we block.
*/
static int rawv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
int noblock, int flags, int *addr_len)
{
struct ipv6_pinfo *np = inet6_sk(sk);
DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
struct sk_buff *skb;
size_t copied;
int err;
if (flags & MSG_OOB)
return -EOPNOTSUPP;
if (flags & MSG_ERRQUEUE)
return ipv6_recv_error(sk, msg, len, addr_len);
if (np->rxpmtu && np->rxopt.bits.rxpmtu)
return ipv6_recv_rxpmtu(sk, msg, len, addr_len);
skb = skb_recv_datagram(sk, flags, noblock, &err);
if (!skb)
goto out;
copied = skb->len;
if (copied > len) {
copied = len;
msg->msg_flags |= MSG_TRUNC;
}
if (skb_csum_unnecessary(skb)) {
err = skb_copy_datagram_msg(skb, 0, msg, copied);
} else if (msg->msg_flags&MSG_TRUNC) {
if (__skb_checksum_complete(skb))
goto csum_copy_err;
err = skb_copy_datagram_msg(skb, 0, msg, copied);
} else {
err = skb_copy_and_csum_datagram_msg(skb, 0, msg);
if (err == -EINVAL)
goto csum_copy_err;
}
if (err)
goto out_free;
/* Copy the address. */
if (sin6) {
sin6->sin6_family = AF_INET6;
sin6->sin6_port = 0;
sin6->sin6_addr = ipv6_hdr(skb)->saddr;
sin6->sin6_flowinfo = 0;
sin6->sin6_scope_id = ipv6_iface_scope_id(&sin6->sin6_addr,
inet6_iif(skb));
*addr_len = sizeof(*sin6);
}
sock_recv_ts_and_drops(msg, sk, skb);
if (np->rxopt.all)
ip6_datagram_recv_ctl(sk, msg, skb);
err = copied;
if (flags & MSG_TRUNC)
err = skb->len;
out_free:
skb_free_datagram(sk, skb);
out:
return err;
csum_copy_err:
skb_kill_datagram(sk, skb, flags);
/* Error for blocking case is chosen to masquerade
as some normal condition.
*/
err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
goto out;
}
static int rawv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
struct raw6_sock *rp)
{
struct sk_buff *skb;
int err = 0;
int offset;
int len;
int total_len;
__wsum tmp_csum;
__sum16 csum;
if (!rp->checksum)
goto send;
skb = skb_peek(&sk->sk_write_queue);
if (!skb)
goto out;
offset = rp->offset;
total_len = inet_sk(sk)->cork.base.length;
if (offset >= total_len - 1) {
err = -EINVAL;
ip6_flush_pending_frames(sk);
goto out;
}
	/* should we also check the HW csum here? -- miyazawa */
if (skb_queue_len(&sk->sk_write_queue) == 1) {
/*
* Only one fragment on the socket.
*/
tmp_csum = skb->csum;
} else {
struct sk_buff *csum_skb = NULL;
tmp_csum = 0;
skb_queue_walk(&sk->sk_write_queue, skb) {
tmp_csum = csum_add(tmp_csum, skb->csum);
if (csum_skb)
continue;
len = skb->len - skb_transport_offset(skb);
if (offset >= len) {
offset -= len;
continue;
}
csum_skb = skb;
}
skb = csum_skb;
}
offset += skb_transport_offset(skb);
BUG_ON(skb_copy_bits(skb, offset, &csum, 2));
/* in case cksum was not initialized */
if (unlikely(csum))
tmp_csum = csum_sub(tmp_csum, csum_unfold(csum));
csum = csum_ipv6_magic(&fl6->saddr, &fl6->daddr,
total_len, fl6->flowi6_proto, tmp_csum);
if (csum == 0 && fl6->flowi6_proto == IPPROTO_UDP)
csum = CSUM_MANGLED_0;
BUG_ON(skb_store_bits(skb, offset, &csum, 2));
send:
err = ip6_push_pending_frames(sk);
out:
return err;
}
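/*
 * Hedged worked example: for an ICMPv6 raw socket the checksum field
 * sits at offset 2 of the ICMPv6 header, so rp->offset is 2 and the
 * code above folds csum_ipv6_magic() over the pseudo-header and writes
 * the 16-bit result back into the queued skb at that offset.
 */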
static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length,
struct flowi6 *fl6, struct dst_entry **dstp,
unsigned int flags)
{
struct ipv6_pinfo *np = inet6_sk(sk);
struct net *net = sock_net(sk);
struct ipv6hdr *iph;
struct sk_buff *skb;
int err;
struct rt6_info *rt = (struct rt6_info *)*dstp;
int hlen = LL_RESERVED_SPACE(rt->dst.dev);
int tlen = rt->dst.dev->needed_tailroom;
if (length > rt->dst.dev->mtu) {
ipv6_local_error(sk, EMSGSIZE, fl6, rt->dst.dev->mtu);
return -EMSGSIZE;
}
if (flags&MSG_PROBE)
goto out;
skb = sock_alloc_send_skb(sk,
length + hlen + tlen + 15,
flags & MSG_DONTWAIT, &err);
if (!skb)
goto error;
skb_reserve(skb, hlen);
skb->protocol = htons(ETH_P_IPV6);
skb->priority = sk->sk_priority;
skb->mark = sk->sk_mark;
skb_dst_set(skb, &rt->dst);
*dstp = NULL;
skb_put(skb, length);
skb_reset_network_header(skb);
iph = ipv6_hdr(skb);
skb->ip_summed = CHECKSUM_NONE;
skb->transport_header = skb->network_header;
err = memcpy_from_msg(iph, msg, length);
if (err)
goto error_fault;
IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk, skb,
NULL, rt->dst.dev, dst_output);
if (err > 0)
err = net_xmit_errno(err);
if (err)
goto error;
out:
return 0;
error_fault:
err = -EFAULT;
kfree_skb(skb);
error:
IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
if (err == -ENOBUFS && !np->recverr)
err = 0;
return err;
}
struct raw6_frag_vec {
struct msghdr *msg;
int hlen;
char c[4];
};
static int rawv6_probe_proto_opt(struct raw6_frag_vec *rfv, struct flowi6 *fl6)
{
int err = 0;
switch (fl6->flowi6_proto) {
case IPPROTO_ICMPV6:
rfv->hlen = 2;
err = memcpy_from_msg(rfv->c, rfv->msg, rfv->hlen);
if (!err) {
fl6->fl6_icmp_type = rfv->c[0];
fl6->fl6_icmp_code = rfv->c[1];
}
break;
case IPPROTO_MH:
rfv->hlen = 4;
err = memcpy_from_msg(rfv->c, rfv->msg, rfv->hlen);
if (!err)
fl6->fl6_mh_type = rfv->c[2];
}
return err;
}
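/*
 * Hedged worked example: when a ping utility writes an ICMPv6 echo
 * request to a raw socket, the first two payload bytes are type and
 * code, so the peek above yields fl6_icmp_type == 128 and
 * fl6_icmp_code == 0 before the payload is actually appended.
 */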
static int raw6_getfrag(void *from, char *to, int offset, int len, int odd,
struct sk_buff *skb)
{
struct raw6_frag_vec *rfv = from;
if (offset < rfv->hlen) {
int copy = min(rfv->hlen - offset, len);
if (skb->ip_summed == CHECKSUM_PARTIAL)
memcpy(to, rfv->c + offset, copy);
else
skb->csum = csum_block_add(
skb->csum,
csum_partial_copy_nocheck(rfv->c + offset,
to, copy, 0),
odd);
odd = 0;
offset += copy;
to += copy;
len -= copy;
if (!len)
return 0;
}
offset -= rfv->hlen;
return ip_generic_getfrag(rfv->msg, to, offset, len, odd, skb);
}
static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
struct ipv6_txoptions opt_space;
DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
struct in6_addr *daddr, *final_p, final;
struct inet_sock *inet = inet_sk(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
struct raw6_sock *rp = raw6_sk(sk);
struct ipv6_txoptions *opt = NULL;
struct ip6_flowlabel *flowlabel = NULL;
struct dst_entry *dst = NULL;
struct raw6_frag_vec rfv;
struct flowi6 fl6;
int addr_len = msg->msg_namelen;
int hlimit = -1;
int tclass = -1;
int dontfrag = -1;
u16 proto;
int err;
	/* Rough check on arithmetic overflow;
	   a better check is made in ip6_append_data().
	 */
if (len > INT_MAX)
return -EMSGSIZE;
/* Mirror BSD error message compatibility */
if (msg->msg_flags & MSG_OOB)
return -EOPNOTSUPP;
/*
* Get and verify the address.
*/
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_mark = sk->sk_mark;
if (sin6) {
if (addr_len < SIN6_LEN_RFC2133)
return -EINVAL;
if (sin6->sin6_family && sin6->sin6_family != AF_INET6)
return -EAFNOSUPPORT;
/* port is the proto value [0..255] carried in nexthdr */
proto = ntohs(sin6->sin6_port);
if (!proto)
proto = inet->inet_num;
else if (proto != inet->inet_num)
return -EINVAL;
if (proto > 255)
return -EINVAL;
daddr = &sin6->sin6_addr;
if (np->sndflow) {
fl6.flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK;
if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
if (!flowlabel)
return -EINVAL;
}
}
/*
* Otherwise it will be difficult to maintain
* sk->sk_dst_cache.
*/
if (sk->sk_state == TCP_ESTABLISHED &&
ipv6_addr_equal(daddr, &sk->sk_v6_daddr))
daddr = &sk->sk_v6_daddr;
if (addr_len >= sizeof(struct sockaddr_in6) &&
sin6->sin6_scope_id &&
__ipv6_addr_needs_scope_id(__ipv6_addr_type(daddr)))
fl6.flowi6_oif = sin6->sin6_scope_id;
} else {
if (sk->sk_state != TCP_ESTABLISHED)
return -EDESTADDRREQ;
proto = inet->inet_num;
daddr = &sk->sk_v6_daddr;
fl6.flowlabel = np->flow_label;
}
if (fl6.flowi6_oif == 0)
fl6.flowi6_oif = sk->sk_bound_dev_if;
if (msg->msg_controllen) {
opt = &opt_space;
memset(opt, 0, sizeof(struct ipv6_txoptions));
opt->tot_len = sizeof(struct ipv6_txoptions);
err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
&hlimit, &tclass, &dontfrag);
if (err < 0) {
fl6_sock_release(flowlabel);
return err;
}
if ((fl6.flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) {
flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
if (!flowlabel)
return -EINVAL;
}
if (!(opt->opt_nflen|opt->opt_flen))
opt = NULL;
}
if (!opt)
opt = np->opt;
if (flowlabel)
opt = fl6_merge_options(&opt_space, flowlabel, opt);
opt = ipv6_fixup_options(&opt_space, opt);
fl6.flowi6_proto = proto;
rfv.msg = msg;
rfv.hlen = 0;
err = rawv6_probe_proto_opt(&rfv, &fl6);
if (err)
goto out;
if (!ipv6_addr_any(daddr))
fl6.daddr = *daddr;
else
fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr))
fl6.saddr = np->saddr;
final_p = fl6_update_dst(&fl6, opt, &final);
if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
fl6.flowi6_oif = np->mcast_oif;
else if (!fl6.flowi6_oif)
fl6.flowi6_oif = np->ucast_oif;
security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
if (inet->hdrincl)
fl6.flowi6_flags |= FLOWI_FLAG_KNOWN_NH;
dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
if (IS_ERR(dst)) {
err = PTR_ERR(dst);
goto out;
}
if (hlimit < 0)
hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
if (tclass < 0)
tclass = np->tclass;
if (dontfrag < 0)
dontfrag = np->dontfrag;
if (msg->msg_flags&MSG_CONFIRM)
goto do_confirm;
back_from_confirm:
if (inet->hdrincl)
err = rawv6_send_hdrinc(sk, msg, len, &fl6, &dst, msg->msg_flags);
else {
lock_sock(sk);
err = ip6_append_data(sk, raw6_getfrag, &rfv,
len, 0, hlimit, tclass, opt, &fl6, (struct rt6_info *)dst,
msg->msg_flags, dontfrag);
if (err)
ip6_flush_pending_frames(sk);
else if (!(msg->msg_flags & MSG_MORE))
err = rawv6_push_pending_frames(sk, &fl6, rp);
release_sock(sk);
}
done:
dst_release(dst);
out:
fl6_sock_release(flowlabel);
return err < 0 ? err : len;
do_confirm:
dst_confirm(dst);
if (!(msg->msg_flags & MSG_PROBE) || len)
goto back_from_confirm;
err = 0;
goto done;
}
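/* Userspace sketch (illustrative): the ICMPv6 filter copied in below is
* normally built with the ICMP6_FILTER_* macros from <netinet/icmp6.h>:
*
*   struct icmp6_filter f;
*   ICMP6_FILTER_SETBLOCKALL(&f);
*   ICMP6_FILTER_SETPASS(ICMP6_ECHO_REPLY, &f);
*   setsockopt(fd, SOL_ICMPV6, ICMPV6_FILTER, &f, sizeof(f));
*/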
static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
char __user *optval, int optlen)
{
switch (optname) {
case ICMPV6_FILTER:
if (optlen > sizeof(struct icmp6_filter))
optlen = sizeof(struct icmp6_filter);
if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
return -EFAULT;
return 0;
default:
return -ENOPROTOOPT;
}
return 0;
}
static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
int len;
switch (optname) {
case ICMPV6_FILTER:
if (get_user(len, optlen))
return -EFAULT;
if (len < 0)
return -EINVAL;
if (len > sizeof(struct icmp6_filter))
len = sizeof(struct icmp6_filter);
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
return -EFAULT;
return 0;
default:
return -ENOPROTOOPT;
}
return 0;
}
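/* Userspace sketch (illustrative): enabling kernel checksumming with the
* checksum field at byte offset 2 of the payload:
*
*   int off = 2;
*   setsockopt(fd, SOL_RAW, IPV6_CHECKSUM, &off, sizeof(off));
*
* A negative value disables it; odd positive offsets are rejected. */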
static int do_rawv6_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, unsigned int optlen)
{
struct raw6_sock *rp = raw6_sk(sk);
int val;
if (get_user(val, (int __user *)optval))
return -EFAULT;
switch (optname) {
case IPV6_CHECKSUM:
if (inet_sk(sk)->inet_num == IPPROTO_ICMPV6 &&
level == IPPROTO_IPV6) {
/*
* RFC 3542 says that the IPV6_CHECKSUM socket
* option at the IPPROTO_IPV6 level is not
* allowed on ICMPv6 sockets.
* If you want to set it, use the IPPROTO_RAW
* level IPV6_CHECKSUM socket option
* (a Linux extension).
*/
return -EINVAL;
}
/* You may get a strange result with a positive odd offset;
RFC 2292bis agrees with this restriction. */
if (val > 0 && (val&1))
return -EINVAL;
if (val < 0) {
rp->checksum = 0;
} else {
rp->checksum = 1;
rp->offset = val;
}
return 0;
default:
return -ENOPROTOOPT;
}
}
static int rawv6_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, unsigned int optlen)
{
switch (level) {
case SOL_RAW:
break;
case SOL_ICMPV6:
if (inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
return -EOPNOTSUPP;
return rawv6_seticmpfilter(sk, level, optname, optval, optlen);
case SOL_IPV6:
if (optname == IPV6_CHECKSUM)
break;
default:
return ipv6_setsockopt(sk, level, optname, optval, optlen);
}
return do_rawv6_setsockopt(sk, level, optname, optval, optlen);
}
#ifdef CONFIG_COMPAT
static int compat_rawv6_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, unsigned int optlen)
{
switch (level) {
case SOL_RAW:
break;
case SOL_ICMPV6:
if (inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
return -EOPNOTSUPP;
return rawv6_seticmpfilter(sk, level, optname, optval, optlen);
case SOL_IPV6:
if (optname == IPV6_CHECKSUM)
break;
default:
return compat_ipv6_setsockopt(sk, level, optname,
optval, optlen);
}
return do_rawv6_setsockopt(sk, level, optname, optval, optlen);
}
#endif
static int do_rawv6_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
struct raw6_sock *rp = raw6_sk(sk);
int val, len;
if (get_user(len, optlen))
return -EFAULT;
switch (optname) {
case IPV6_CHECKSUM:
/*
* We allow getsockopt() for the IPPROTO_IPV6-level
* IPV6_CHECKSUM socket option on ICMPv6 sockets
* since RFC 3542 is silent about it.
*/
if (rp->checksum == 0)
val = -1;
else
val = rp->offset;
break;
default:
return -ENOPROTOOPT;
}
len = min_t(unsigned int, sizeof(int), len);
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval, &val, len))
return -EFAULT;
return 0;
}
static int rawv6_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
switch (level) {
case SOL_RAW:
break;
case SOL_ICMPV6:
if (inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
return -EOPNOTSUPP;
return rawv6_geticmpfilter(sk, level, optname, optval, optlen);
case SOL_IPV6:
if (optname == IPV6_CHECKSUM)
break;
default:
return ipv6_getsockopt(sk, level, optname, optval, optlen);
}
return do_rawv6_getsockopt(sk, level, optname, optval, optlen);
}
#ifdef CONFIG_COMPAT
static int compat_rawv6_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
switch (level) {
case SOL_RAW:
break;
case SOL_ICMPV6:
if (inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
return -EOPNOTSUPP;
return rawv6_geticmpfilter(sk, level, optname, optval, optlen);
case SOL_IPV6:
if (optname == IPV6_CHECKSUM)
break;
default:
return compat_ipv6_getsockopt(sk, level, optname,
optval, optlen);
}
return do_rawv6_getsockopt(sk, level, optname, optval, optlen);
}
#endif
static int rawv6_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
switch (cmd) {
case SIOCOUTQ: {
int amount = sk_wmem_alloc_get(sk);
return put_user(amount, (int __user *)arg);
}
case SIOCINQ: {
struct sk_buff *skb;
int amount = 0;
spin_lock_bh(&sk->sk_receive_queue.lock);
skb = skb_peek(&sk->sk_receive_queue);
if (skb)
amount = skb_tail_pointer(skb) -
skb_transport_header(skb);
spin_unlock_bh(&sk->sk_receive_queue.lock);
return put_user(amount, (int __user *)arg);
}
default:
#ifdef CONFIG_IPV6_MROUTE
return ip6mr_ioctl(sk, cmd, (void __user *)arg);
#else
return -ENOIOCTLCMD;
#endif
}
}
#ifdef CONFIG_COMPAT
static int compat_rawv6_ioctl(struct sock *sk, unsigned int cmd, unsigned long arg)
{
switch (cmd) {
case SIOCOUTQ:
case SIOCINQ:
return -ENOIOCTLCMD;
default:
#ifdef CONFIG_IPV6_MROUTE
return ip6mr_compat_ioctl(sk, cmd, compat_ptr(arg));
#else
return -ENOIOCTLCMD;
#endif
}
}
#endif
static void rawv6_close(struct sock *sk, long timeout)
{
if (inet_sk(sk)->inet_num == IPPROTO_RAW)
ip6_ra_control(sk, -1);
ip6mr_sk_done(sk);
sk_common_release(sk);
}
static void raw6_destroy(struct sock *sk)
{
lock_sock(sk);
ip6_flush_pending_frames(sk);
release_sock(sk);
inet6_destroy_sock(sk);
}
static int rawv6_init_sk(struct sock *sk)
{
struct raw6_sock *rp = raw6_sk(sk);
switch (inet_sk(sk)->inet_num) {
case IPPROTO_ICMPV6:
rp->checksum = 1;
rp->offset = 2;
break;
case IPPROTO_MH:
rp->checksum = 1;
rp->offset = 4;
break;
default:
break;
}
return 0;
}
struct proto rawv6_prot = {
.name = "RAWv6",
.owner = THIS_MODULE,
.close = rawv6_close,
.destroy = raw6_destroy,
.connect = ip6_datagram_connect_v6_only,
.disconnect = udp_disconnect,
.ioctl = rawv6_ioctl,
.init = rawv6_init_sk,
.setsockopt = rawv6_setsockopt,
.getsockopt = rawv6_getsockopt,
.sendmsg = rawv6_sendmsg,
.recvmsg = rawv6_recvmsg,
.bind = rawv6_bind,
.backlog_rcv = rawv6_rcv_skb,
.hash = raw_hash_sk,
.unhash = raw_unhash_sk,
.obj_size = sizeof(struct raw6_sock),
.h.raw_hash = &raw_v6_hashinfo,
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_rawv6_setsockopt,
.compat_getsockopt = compat_rawv6_getsockopt,
.compat_ioctl = compat_rawv6_ioctl,
#endif
};
#ifdef CONFIG_PROC_FS
static int raw6_seq_show(struct seq_file *seq, void *v)
{
if (v == SEQ_START_TOKEN) {
seq_puts(seq, IPV6_SEQ_DGRAM_HEADER);
} else {
struct sock *sp = v;
__u16 srcp = inet_sk(sp)->inet_num;
ip6_dgram_sock_seq_show(seq, v, srcp, 0,
raw_seq_private(seq)->bucket);
}
return 0;
}
static const struct seq_operations raw6_seq_ops = {
.start = raw_seq_start,
.next = raw_seq_next,
.stop = raw_seq_stop,
.show = raw6_seq_show,
};
static int raw6_seq_open(struct inode *inode, struct file *file)
{
return raw_seq_open(inode, file, &raw_v6_hashinfo, &raw6_seq_ops);
}
static const struct file_operations raw6_seq_fops = {
.owner = THIS_MODULE,
.open = raw6_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_net,
};
static int __net_init raw6_init_net(struct net *net)
{
if (!proc_create("raw6", S_IRUGO, net->proc_net, &raw6_seq_fops))
return -ENOMEM;
return 0;
}
static void __net_exit raw6_exit_net(struct net *net)
{
remove_proc_entry("raw6", net->proc_net);
}
static struct pernet_operations raw6_net_ops = {
.init = raw6_init_net,
.exit = raw6_exit_net,
};
int __init raw6_proc_init(void)
{
return register_pernet_subsys(&raw6_net_ops);
}
void raw6_proc_exit(void)
{
unregister_pernet_subsys(&raw6_net_ops);
}
#endif /* CONFIG_PROC_FS */
/* Same as inet6_dgram_ops, sans udp_poll. */
static const struct proto_ops inet6_sockraw_ops = {
.family = PF_INET6,
.owner = THIS_MODULE,
.release = inet6_release,
.bind = inet6_bind,
.connect = inet_dgram_connect, /* ok */
.socketpair = sock_no_socketpair, /* a do-nothing stub */
.accept = sock_no_accept, /* a do-nothing stub */
.getname = inet6_getname,
.poll = datagram_poll, /* ok */
.ioctl = inet6_ioctl, /* must change */
.listen = sock_no_listen, /* ok */
.shutdown = inet_shutdown, /* ok */
.setsockopt = sock_common_setsockopt, /* ok */
.getsockopt = sock_common_getsockopt, /* ok */
.sendmsg = inet_sendmsg, /* ok */
.recvmsg = sock_common_recvmsg, /* ok */
.mmap = sock_no_mmap,
.sendpage = sock_no_sendpage,
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_sock_common_setsockopt,
.compat_getsockopt = compat_sock_common_getsockopt,
#endif
};
static struct inet_protosw rawv6_protosw = {
.type = SOCK_RAW,
.protocol = IPPROTO_IP, /* wildcard */
.prot = &rawv6_prot,
.ops = &inet6_sockraw_ops,
.flags = INET_PROTOSW_REUSE,
};
int __init rawv6_init(void)
{
return inet6_register_protosw(&rawv6_protosw);
}
void rawv6_exit(void)
{
inet6_unregister_protosw(&rawv6_protosw);
}
| ./CrossVul/dataset_final_sorted/CWE-416/c/bad_5021_8 |
crossvul-cpp_data_good_4045_0 | /*
* Apple HTTP Live Streaming demuxer
* Copyright (c) 2010 Martin Storsjo
* Copyright (c) 2013 Anssi Hannula
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* Apple HTTP Live Streaming demuxer
* http://tools.ietf.org/html/draft-pantos-http-live-streaming
*/
#include "libavutil/avstring.h"
#include "libavutil/avassert.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "libavutil/dict.h"
#include "libavutil/time.h"
#include "avformat.h"
#include "internal.h"
#include "avio_internal.h"
#include "url.h"
#include "id3v2.h"
#define INITIAL_BUFFER_SIZE 32768
#define MAX_FIELD_LEN 64
#define MAX_CHARACTERISTICS_LEN 512
#define MPEG_TIME_BASE 90000
#define MPEG_TIME_BASE_Q (AVRational){1, MPEG_TIME_BASE}
/*
* An Apple HTTP stream consists of a playlist with media segment files,
* played sequentially. There may be several playlists with the same
* video content, in different bandwidth variants, that are played in
* parallel (preferably only one bandwidth variant at a time). In this case,
* the user supplies the URL of a main playlist that only lists the variant
* playlists.
*
* If the main playlist doesn't point at any variants, we still create
* one anonymous toplevel variant for it, to maintain the structure.
*/
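/*
* Illustrative example of such a main ("Master") playlist with two
* bandwidth variants (a sketch, not taken from any real stream):
*
*   #EXTM3U
*   #EXT-X-STREAM-INF:BANDWIDTH=1280000
*   low/index.m3u8
*   #EXT-X-STREAM-INF:BANDWIDTH=2560000
*   high/index.m3u8
*/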
enum KeyType {
KEY_NONE,
KEY_AES_128,
KEY_SAMPLE_AES
};
struct segment {
int64_t duration;
int64_t url_offset;
int64_t size;
char *url;
char *key;
enum KeyType key_type;
uint8_t iv[16];
/* associated Media Initialization Section, treated as a segment */
struct segment *init_section;
};
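/* A segment with url_offset/size set corresponds to an EXT-X-BYTERANGE
* entry, e.g. "#EXT-X-BYTERANGE:75232@0" yields size 75232 and
* url_offset 0 (see the parsing below); size -1 means the whole file. */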
struct rendition;
enum PlaylistType {
PLS_TYPE_UNSPECIFIED,
PLS_TYPE_EVENT,
PLS_TYPE_VOD
};
/*
* Each playlist has its own demuxer. If it currently is active,
* it has an open AVIOContext too, and potentially an AVPacket
* containing the next packet from this stream.
*/
struct playlist {
char url[MAX_URL_SIZE];
AVIOContext pb;
uint8_t* read_buffer;
URLContext *input;
AVFormatContext *parent;
int index;
AVFormatContext *ctx;
AVPacket pkt;
int stream_offset;
int finished;
enum PlaylistType type;
int64_t target_duration;
int start_seq_no;
int n_segments;
struct segment **segments;
int needed, cur_needed;
int cur_seq_no;
int64_t cur_seg_offset;
int64_t last_load_time;
/* Currently active Media Initialization Section */
struct segment *cur_init_section;
uint8_t *init_sec_buf;
unsigned int init_sec_buf_size;
unsigned int init_sec_data_len;
unsigned int init_sec_buf_read_offset;
char key_url[MAX_URL_SIZE];
uint8_t key[16];
/* ID3 timestamp handling (elementary audio streams have ID3 timestamps
* (and possibly other ID3 tags) in the beginning of each segment) */
int is_id3_timestamped; /* -1: not yet known */
int64_t id3_mpegts_timestamp; /* in mpegts tb */
int64_t id3_offset; /* in stream original tb */
uint8_t* id3_buf; /* temp buffer for id3 parsing */
unsigned int id3_buf_size;
AVDictionary *id3_initial; /* data from first id3 tag */
int id3_found; /* ID3 tag found at some point */
int id3_changed; /* ID3 tag data has changed at some point */
ID3v2ExtraMeta *id3_deferred_extra; /* stored here until subdemuxer is opened */
int64_t seek_timestamp;
int seek_flags;
int seek_stream_index; /* into subdemuxer stream array */
/* Renditions associated with this playlist, if any.
* Alternative rendition playlists have a single rendition associated
* with them, and variant main Media Playlists may have
* multiple (playlist-less) renditions associated with them. */
int n_renditions;
struct rendition **renditions;
/* Media Initialization Sections (EXT-X-MAP) associated with this
* playlist, if any. */
int n_init_sections;
struct segment **init_sections;
};
/*
* Renditions are e.g. alternative subtitle or audio streams.
* The rendition may either be an external playlist or it may be
* contained in the main Media Playlist of the variant (in which case
* playlist is NULL).
*/
struct rendition {
enum AVMediaType type;
struct playlist *playlist;
char group_id[MAX_FIELD_LEN];
char language[MAX_FIELD_LEN];
char name[MAX_FIELD_LEN];
int disposition;
};
struct variant {
int bandwidth;
/* every variant contains at least the main Media Playlist in index 0 */
int n_playlists;
struct playlist **playlists;
char audio_group[MAX_FIELD_LEN];
char video_group[MAX_FIELD_LEN];
char subtitles_group[MAX_FIELD_LEN];
};
typedef struct HLSContext {
AVClass *class;
int n_variants;
struct variant **variants;
int n_playlists;
struct playlist **playlists;
int n_renditions;
struct rendition **renditions;
int cur_seq_no;
int live_start_index;
int first_packet;
int64_t first_timestamp;
int64_t cur_timestamp;
AVIOInterruptCB *interrupt_callback;
char *user_agent; ///< holds HTTP user agent set as an AVOption to the HTTP protocol context
char *cookies; ///< holds HTTP cookie values set in either the initial response or as an AVOption to the HTTP protocol context
char *headers; ///< holds HTTP headers set as an AVOption to the HTTP protocol context
AVDictionary *avio_opts;
char *allowed_extensions;
int max_reload;
} HLSContext;
static int read_chomp_line(AVIOContext *s, char *buf, int maxlen)
{
int len = ff_get_line(s, buf, maxlen);
while (len > 0 && av_isspace(buf[len - 1]))
buf[--len] = '\0';
return len;
}
static void free_segment_list(struct playlist *pls)
{
int i;
for (i = 0; i < pls->n_segments; i++) {
av_freep(&pls->segments[i]->key);
av_freep(&pls->segments[i]->url);
av_freep(&pls->segments[i]);
}
av_freep(&pls->segments);
pls->n_segments = 0;
}
static void free_init_section_list(struct playlist *pls)
{
int i;
for (i = 0; i < pls->n_init_sections; i++) {
av_freep(&pls->init_sections[i]->url);
av_freep(&pls->init_sections[i]);
}
av_freep(&pls->init_sections);
pls->n_init_sections = 0;
}
static void free_playlist_list(HLSContext *c)
{
int i;
for (i = 0; i < c->n_playlists; i++) {
struct playlist *pls = c->playlists[i];
free_segment_list(pls);
free_init_section_list(pls);
av_freep(&pls->renditions);
av_freep(&pls->id3_buf);
av_dict_free(&pls->id3_initial);
ff_id3v2_free_extra_meta(&pls->id3_deferred_extra);
av_freep(&pls->init_sec_buf);
av_free_packet(&pls->pkt);
av_freep(&pls->pb.buffer);
if (pls->input)
ffurl_close(pls->input);
if (pls->ctx) {
pls->ctx->pb = NULL;
avformat_close_input(&pls->ctx);
}
av_free(pls);
}
av_freep(&c->playlists);
av_freep(&c->cookies);
av_freep(&c->user_agent);
c->n_playlists = 0;
}
static void free_variant_list(HLSContext *c)
{
int i;
for (i = 0; i < c->n_variants; i++) {
struct variant *var = c->variants[i];
av_freep(&var->playlists);
av_free(var);
}
av_freep(&c->variants);
c->n_variants = 0;
}
static void free_rendition_list(HLSContext *c)
{
int i;
for (i = 0; i < c->n_renditions; i++)
av_freep(&c->renditions[i]);
av_freep(&c->renditions);
c->n_renditions = 0;
}
/*
* Used to reset a statically allocated AVPacket to a clean slate,
* containing no data.
*/
static void reset_packet(AVPacket *pkt)
{
av_init_packet(pkt);
pkt->data = NULL;
}
static struct playlist *new_playlist(HLSContext *c, const char *url,
const char *base)
{
struct playlist *pls = av_mallocz(sizeof(struct playlist));
if (!pls)
return NULL;
reset_packet(&pls->pkt);
ff_make_absolute_url(pls->url, sizeof(pls->url), base, url);
pls->seek_timestamp = AV_NOPTS_VALUE;
pls->is_id3_timestamped = -1;
pls->id3_mpegts_timestamp = AV_NOPTS_VALUE;
dynarray_add(&c->playlists, &c->n_playlists, pls);
return pls;
}
struct variant_info {
char bandwidth[20];
/* variant group ids: */
char audio[MAX_FIELD_LEN];
char video[MAX_FIELD_LEN];
char subtitles[MAX_FIELD_LEN];
};
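/* Example attribute list consumed via handle_variant_args() below
* (illustrative): #EXT-X-STREAM-INF:BANDWIDTH=1280000,AUDIO="aud1" */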
static struct variant *new_variant(HLSContext *c, struct variant_info *info,
const char *url, const char *base)
{
struct variant *var;
struct playlist *pls;
pls = new_playlist(c, url, base);
if (!pls)
return NULL;
var = av_mallocz(sizeof(struct variant));
if (!var)
return NULL;
if (info) {
var->bandwidth = atoi(info->bandwidth);
strcpy(var->audio_group, info->audio);
strcpy(var->video_group, info->video);
strcpy(var->subtitles_group, info->subtitles);
}
dynarray_add(&c->variants, &c->n_variants, var);
dynarray_add(&var->playlists, &var->n_playlists, pls);
return var;
}
static void handle_variant_args(struct variant_info *info, const char *key,
int key_len, char **dest, int *dest_len)
{
if (!strncmp(key, "BANDWIDTH=", key_len)) {
*dest = info->bandwidth;
*dest_len = sizeof(info->bandwidth);
} else if (!strncmp(key, "AUDIO=", key_len)) {
*dest = info->audio;
*dest_len = sizeof(info->audio);
} else if (!strncmp(key, "VIDEO=", key_len)) {
*dest = info->video;
*dest_len = sizeof(info->video);
} else if (!strncmp(key, "SUBTITLES=", key_len)) {
*dest = info->subtitles;
*dest_len = sizeof(info->subtitles);
}
}
struct key_info {
char uri[MAX_URL_SIZE];
char method[11];
char iv[35];
};
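/* Example line consumed via handle_key_args() below (illustrative):
* #EXT-X-KEY:METHOD=AES-128,URI="https://example.com/key",
*            IV=0x0123456789abcdef0123456789abcdef */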
static void handle_key_args(struct key_info *info, const char *key,
int key_len, char **dest, int *dest_len)
{
if (!strncmp(key, "METHOD=", key_len)) {
*dest = info->method;
*dest_len = sizeof(info->method);
} else if (!strncmp(key, "URI=", key_len)) {
*dest = info->uri;
*dest_len = sizeof(info->uri);
} else if (!strncmp(key, "IV=", key_len)) {
*dest = info->iv;
*dest_len = sizeof(info->iv);
}
}
struct init_section_info {
char uri[MAX_URL_SIZE];
char byterange[32];
};
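/* Example line consumed via handle_init_section_args() below
* (illustrative): #EXT-X-MAP:URI="init.mp4",BYTERANGE="720@0" */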
static struct segment *new_init_section(struct playlist *pls,
struct init_section_info *info,
const char *url_base)
{
struct segment *sec;
char *ptr;
char tmp_str[MAX_URL_SIZE];
if (!info->uri[0])
return NULL;
sec = av_mallocz(sizeof(*sec));
if (!sec)
return NULL;
ff_make_absolute_url(tmp_str, sizeof(tmp_str), url_base, info->uri);
sec->url = av_strdup(tmp_str);
if (!sec->url) {
av_free(sec);
return NULL;
}
if (info->byterange[0]) {
sec->size = atoi(info->byterange);
ptr = strchr(info->byterange, '@');
if (ptr)
sec->url_offset = atoi(ptr+1);
} else {
/* the entire file is the init section */
sec->size = -1;
}
dynarray_add(&pls->init_sections, &pls->n_init_sections, sec);
return sec;
}
static void handle_init_section_args(struct init_section_info *info, const char *key,
int key_len, char **dest, int *dest_len)
{
if (!strncmp(key, "URI=", key_len)) {
*dest = info->uri;
*dest_len = sizeof(info->uri);
} else if (!strncmp(key, "BYTERANGE=", key_len)) {
*dest = info->byterange;
*dest_len = sizeof(info->byterange);
}
}
struct rendition_info {
char type[16];
char uri[MAX_URL_SIZE];
char group_id[MAX_FIELD_LEN];
char language[MAX_FIELD_LEN];
char assoc_language[MAX_FIELD_LEN];
char name[MAX_FIELD_LEN];
char defaultr[4];
char forced[4];
char characteristics[MAX_CHARACTERISTICS_LEN];
};
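/* Example line consumed via handle_rendition_args() below (illustrative):
* #EXT-X-MEDIA:TYPE=AUDIO,GROUP-ID="aud1",NAME="English",LANGUAGE="en",
*              DEFAULT=YES,URI="audio/en.m3u8" */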
static struct rendition *new_rendition(HLSContext *c, struct rendition_info *info,
const char *url_base)
{
struct rendition *rend;
enum AVMediaType type = AVMEDIA_TYPE_UNKNOWN;
char *characteristic;
char *chr_ptr;
char *saveptr;
if (!strcmp(info->type, "AUDIO"))
type = AVMEDIA_TYPE_AUDIO;
else if (!strcmp(info->type, "VIDEO"))
type = AVMEDIA_TYPE_VIDEO;
else if (!strcmp(info->type, "SUBTITLES"))
type = AVMEDIA_TYPE_SUBTITLE;
else if (!strcmp(info->type, "CLOSED-CAPTIONS"))
/* CLOSED-CAPTIONS is ignored since we do not support CEA-608 CC in
* AVC SEI RBSP anyway */
return NULL;
if (type == AVMEDIA_TYPE_UNKNOWN)
return NULL;
/* URI is mandatory for subtitles as per spec */
if (type == AVMEDIA_TYPE_SUBTITLE && !info->uri[0])
return NULL;
/* TODO: handle subtitles (each segment has to be parsed separately) */
if (type == AVMEDIA_TYPE_SUBTITLE)
return NULL;
rend = av_mallocz(sizeof(struct rendition));
if (!rend)
return NULL;
dynarray_add(&c->renditions, &c->n_renditions, rend);
rend->type = type;
strcpy(rend->group_id, info->group_id);
strcpy(rend->language, info->language);
strcpy(rend->name, info->name);
/* add the playlist if this is an external rendition */
if (info->uri[0]) {
rend->playlist = new_playlist(c, info->uri, url_base);
if (rend->playlist)
dynarray_add(&rend->playlist->renditions,
&rend->playlist->n_renditions, rend);
}
if (info->assoc_language[0]) {
int langlen = strlen(rend->language);
if (langlen < sizeof(rend->language) - 3) {
rend->language[langlen] = ',';
strncpy(rend->language + langlen + 1, info->assoc_language,
sizeof(rend->language) - langlen - 2);
}
}
if (!strcmp(info->defaultr, "YES"))
rend->disposition |= AV_DISPOSITION_DEFAULT;
if (!strcmp(info->forced, "YES"))
rend->disposition |= AV_DISPOSITION_FORCED;
chr_ptr = info->characteristics;
while ((characteristic = av_strtok(chr_ptr, ",", &saveptr))) {
if (!strcmp(characteristic, "public.accessibility.describes-music-and-sound"))
rend->disposition |= AV_DISPOSITION_HEARING_IMPAIRED;
else if (!strcmp(characteristic, "public.accessibility.describes-video"))
rend->disposition |= AV_DISPOSITION_VISUAL_IMPAIRED;
chr_ptr = NULL;
}
return rend;
}
static void handle_rendition_args(struct rendition_info *info, const char *key,
int key_len, char **dest, int *dest_len)
{
if (!strncmp(key, "TYPE=", key_len)) {
*dest = info->type;
*dest_len = sizeof(info->type);
} else if (!strncmp(key, "URI=", key_len)) {
*dest = info->uri;
*dest_len = sizeof(info->uri);
} else if (!strncmp(key, "GROUP-ID=", key_len)) {
*dest = info->group_id;
*dest_len = sizeof(info->group_id);
} else if (!strncmp(key, "LANGUAGE=", key_len)) {
*dest = info->language;
*dest_len = sizeof(info->language);
} else if (!strncmp(key, "ASSOC-LANGUAGE=", key_len)) {
*dest = info->assoc_language;
*dest_len = sizeof(info->assoc_language);
} else if (!strncmp(key, "NAME=", key_len)) {
*dest = info->name;
*dest_len = sizeof(info->name);
} else if (!strncmp(key, "DEFAULT=", key_len)) {
*dest = info->defaultr;
*dest_len = sizeof(info->defaultr);
} else if (!strncmp(key, "FORCED=", key_len)) {
*dest = info->forced;
*dest_len = sizeof(info->forced);
} else if (!strncmp(key, "CHARACTERISTICS=", key_len)) {
*dest = info->characteristics;
*dest_len = sizeof(info->characteristics);
}
/*
* ignored:
* - AUTOSELECT: client may autoselect based on e.g. system language
* - INSTREAM-ID: EIA-608 closed caption number ("CC1".."CC4")
*/
}
/* used by parse_playlist to allocate a new variant+playlist when the
* playlist is detected to be a Media Playlist (not Master Playlist)
* and we have no parent Master Playlist (parsing of which would have
* allocated the variant and playlist already)
* *pls == NULL => Master Playlist or parentless Media Playlist
* *pls != NULL => parented Media Playlist, playlist+variant allocated */
static int ensure_playlist(HLSContext *c, struct playlist **pls, const char *url)
{
if (*pls)
return 0;
if (!new_variant(c, NULL, url, NULL))
return AVERROR(ENOMEM);
*pls = c->playlists[c->n_playlists - 1];
return 0;
}
static int open_in(HLSContext *c, AVIOContext **in, const char *url)
{
AVDictionary *tmp = NULL;
int ret;
av_dict_copy(&tmp, c->avio_opts, 0);
ret = avio_open2(in, url, AVIO_FLAG_READ, c->interrupt_callback, &tmp);
av_dict_free(&tmp);
return ret;
}
static int url_connect(struct playlist *pls, AVDictionary *opts, AVDictionary *opts2)
{
AVDictionary *tmp = NULL;
int ret;
av_dict_copy(&tmp, opts, 0);
av_dict_copy(&tmp, opts2, 0);
if ((ret = ffurl_connect(pls->input, &tmp)) < 0) {
ffurl_close(pls->input);
pls->input = NULL;
}
av_dict_free(&tmp);
return ret;
}
static void update_options(char **dest, const char *name, void *src)
{
av_freep(dest);
av_opt_get(src, name, 0, (uint8_t**)dest);
if (*dest && !strlen(*dest))
av_freep(dest);
}
static int open_url(HLSContext *c, URLContext **uc, const char *url, AVDictionary *opts)
{
AVDictionary *tmp = NULL;
int ret;
const char *proto_name = avio_find_protocol_name(url);
if (!proto_name)
return AVERROR_INVALIDDATA;
// only http(s) & file are allowed
if (av_strstart(proto_name, "file", NULL)) {
if (strcmp(c->allowed_extensions, "ALL") && !av_match_ext(url, c->allowed_extensions)) {
av_log(c, AV_LOG_ERROR,
"Filename extension of \'%s\' is not a common multimedia extension, blocked for security reasons.\n"
"If you wish to override this adjust allowed_extensions, you can set it to \'ALL\' to allow all\n",
url);
return AVERROR_INVALIDDATA;
}
} else if (av_strstart(proto_name, "http", NULL)) {
;
} else
return AVERROR_INVALIDDATA;
if (!strncmp(proto_name, url, strlen(proto_name)) && url[strlen(proto_name)] == ':')
;
else if (strcmp(proto_name, "file") || !strncmp(url, "file,", 5))
return AVERROR_INVALIDDATA;
av_dict_copy(&tmp, c->avio_opts, 0);
av_dict_copy(&tmp, opts, 0);
ret = ffurl_open(uc, url, AVIO_FLAG_READ, c->interrupt_callback, &tmp);
if (ret >= 0) {
// update cookies on http response with setcookies.
URLContext *u = *uc;
update_options(&c->cookies, "cookies", u->priv_data);
av_dict_set(&opts, "cookies", c->cookies, 0);
}
av_dict_free(&tmp);
return ret;
}
static int parse_playlist(HLSContext *c, const char *url,
struct playlist *pls, AVIOContext *in)
{
int ret = 0, is_segment = 0, is_variant = 0;
int64_t duration = 0;
enum KeyType key_type = KEY_NONE;
uint8_t iv[16] = "";
int has_iv = 0;
char key[MAX_URL_SIZE] = "";
char line[MAX_URL_SIZE];
const char *ptr;
int close_in = 0;
int64_t seg_offset = 0;
int64_t seg_size = -1;
uint8_t *new_url = NULL;
struct variant_info variant_info;
char tmp_str[MAX_URL_SIZE];
struct segment *cur_init_section = NULL;
if (!in) {
#if 1
AVDictionary *opts = NULL;
close_in = 1;
/* Some HLS servers don't like being sent the range header */
av_dict_set(&opts, "seekable", "0", 0);
// broker prior HTTP options that should be consistent across requests
av_dict_set(&opts, "user-agent", c->user_agent, 0);
av_dict_set(&opts, "cookies", c->cookies, 0);
av_dict_set(&opts, "headers", c->headers, 0);
ret = avio_open2(&in, url, AVIO_FLAG_READ,
c->interrupt_callback, &opts);
av_dict_free(&opts);
if (ret < 0)
return ret;
#else
ret = open_in(c, &in, url);
if (ret < 0)
return ret;
close_in = 1;
#endif
}
if (av_opt_get(in, "location", AV_OPT_SEARCH_CHILDREN, &new_url) >= 0)
url = new_url;
read_chomp_line(in, line, sizeof(line));
if (strcmp(line, "#EXTM3U")) {
ret = AVERROR_INVALIDDATA;
goto fail;
}
if (pls) {
free_segment_list(pls);
pls->finished = 0;
pls->type = PLS_TYPE_UNSPECIFIED;
}
while (!avio_feof(in)) {
read_chomp_line(in, line, sizeof(line));
if (av_strstart(line, "#EXT-X-STREAM-INF:", &ptr)) {
is_variant = 1;
memset(&variant_info, 0, sizeof(variant_info));
ff_parse_key_value(ptr, (ff_parse_key_val_cb) handle_variant_args,
&variant_info);
} else if (av_strstart(line, "#EXT-X-KEY:", &ptr)) {
struct key_info info = {{0}};
ff_parse_key_value(ptr, (ff_parse_key_val_cb) handle_key_args,
&info);
key_type = KEY_NONE;
has_iv = 0;
if (!strcmp(info.method, "AES-128"))
key_type = KEY_AES_128;
if (!strcmp(info.method, "SAMPLE-AES"))
key_type = KEY_SAMPLE_AES;
if (!strncmp(info.iv, "0x", 2) || !strncmp(info.iv, "0X", 2)) {
ff_hex_to_data(iv, info.iv + 2);
has_iv = 1;
}
av_strlcpy(key, info.uri, sizeof(key));
} else if (av_strstart(line, "#EXT-X-MEDIA:", &ptr)) {
struct rendition_info info = {{0}};
ff_parse_key_value(ptr, (ff_parse_key_val_cb) handle_rendition_args,
&info);
new_rendition(c, &info, url);
} else if (av_strstart(line, "#EXT-X-TARGETDURATION:", &ptr)) {
ret = ensure_playlist(c, &pls, url);
if (ret < 0)
goto fail;
pls->target_duration = atoi(ptr) * AV_TIME_BASE;
} else if (av_strstart(line, "#EXT-X-MEDIA-SEQUENCE:", &ptr)) {
ret = ensure_playlist(c, &pls, url);
if (ret < 0)
goto fail;
pls->start_seq_no = atoi(ptr);
} else if (av_strstart(line, "#EXT-X-PLAYLIST-TYPE:", &ptr)) {
ret = ensure_playlist(c, &pls, url);
if (ret < 0)
goto fail;
if (!strcmp(ptr, "EVENT"))
pls->type = PLS_TYPE_EVENT;
else if (!strcmp(ptr, "VOD"))
pls->type = PLS_TYPE_VOD;
} else if (av_strstart(line, "#EXT-X-MAP:", &ptr)) {
struct init_section_info info = {{0}};
ret = ensure_playlist(c, &pls, url);
if (ret < 0)
goto fail;
ff_parse_key_value(ptr, (ff_parse_key_val_cb) handle_init_section_args,
&info);
cur_init_section = new_init_section(pls, &info, url);
} else if (av_strstart(line, "#EXT-X-ENDLIST", &ptr)) {
if (pls)
pls->finished = 1;
} else if (av_strstart(line, "#EXTINF:", &ptr)) {
is_segment = 1;
duration = atof(ptr) * AV_TIME_BASE;
} else if (av_strstart(line, "#EXT-X-BYTERANGE:", &ptr)) {
seg_size = atoi(ptr);
ptr = strchr(ptr, '@');
if (ptr)
seg_offset = atoi(ptr+1);
} else if (av_strstart(line, "#", NULL)) {
continue;
} else if (line[0]) {
if (is_variant) {
if (!new_variant(c, &variant_info, line, url)) {
ret = AVERROR(ENOMEM);
goto fail;
}
is_variant = 0;
}
if (is_segment) {
struct segment *seg;
if (!pls) {
if (!new_variant(c, 0, url, NULL)) {
ret = AVERROR(ENOMEM);
goto fail;
}
pls = c->playlists[c->n_playlists - 1];
}
seg = av_malloc(sizeof(struct segment));
if (!seg) {
ret = AVERROR(ENOMEM);
goto fail;
}
if (has_iv) {
memcpy(seg->iv, iv, sizeof(iv));
} else {
int seq = pls->start_seq_no + pls->n_segments;
memset(seg->iv, 0, sizeof(seg->iv));
AV_WB32(seg->iv + 12, seq);
}
if (key_type != KEY_NONE) {
ff_make_absolute_url(tmp_str, sizeof(tmp_str), url, key);
seg->key = av_strdup(tmp_str);
if (!seg->key) {
av_free(seg);
ret = AVERROR(ENOMEM);
goto fail;
}
} else {
seg->key = NULL;
}
ff_make_absolute_url(tmp_str, sizeof(tmp_str), url, line);
seg->url = av_strdup(tmp_str);
if (!seg->url) {
av_free(seg->key);
av_free(seg);
ret = AVERROR(ENOMEM);
goto fail;
}
if (duration < 0.001 * AV_TIME_BASE) {
duration = 0.001 * AV_TIME_BASE;
}
seg->duration = duration;
seg->key_type = key_type;
dynarray_add(&pls->segments, &pls->n_segments, seg);
is_segment = 0;
seg->size = seg_size;
if (seg_size >= 0) {
seg->url_offset = seg_offset;
seg_offset += seg_size;
seg_size = -1;
} else {
seg->url_offset = 0;
seg_offset = 0;
}
seg->init_section = cur_init_section;
}
}
}
if (pls)
pls->last_load_time = av_gettime_relative();
fail:
av_free(new_url);
if (close_in)
avio_close(in);
return ret;
}
static struct segment *current_segment(struct playlist *pls)
{
return pls->segments[pls->cur_seq_no - pls->start_seq_no];
}
enum ReadFromURLMode {
READ_NORMAL,
READ_COMPLETE,
};
/* read from URLContext, limiting read to current segment */
static int read_from_url(struct playlist *pls, struct segment *seg,
uint8_t *buf, int buf_size,
enum ReadFromURLMode mode)
{
int ret;
/* limit read if the segment was only a part of a file */
if (seg->size >= 0)
buf_size = FFMIN(buf_size, seg->size - pls->cur_seg_offset);
if (mode == READ_COMPLETE)
ret = ffurl_read_complete(pls->input, buf, buf_size);
else
ret = ffurl_read(pls->input, buf, buf_size);
if (ret > 0)
pls->cur_seg_offset += ret;
return ret;
}
/* Parse the raw ID3 data and pass contents to caller */
static void parse_id3(AVFormatContext *s, AVIOContext *pb,
AVDictionary **metadata, int64_t *dts,
ID3v2ExtraMetaAPIC **apic, ID3v2ExtraMeta **extra_meta)
{
static const char id3_priv_owner_ts[] = "com.apple.streaming.transportStreamTimestamp";
ID3v2ExtraMeta *meta;
ff_id3v2_read_dict(pb, metadata, ID3v2_DEFAULT_MAGIC, extra_meta);
for (meta = *extra_meta; meta; meta = meta->next) {
if (!strcmp(meta->tag, "PRIV")) {
ID3v2ExtraMetaPRIV *priv = meta->data;
if (priv->datasize == 8 && !strcmp(priv->owner, id3_priv_owner_ts)) {
/* 33-bit MPEG timestamp */
int64_t ts = AV_RB64(priv->data);
av_log(s, AV_LOG_DEBUG, "HLS ID3 audio timestamp %"PRId64"\n", ts);
if ((ts & ~((1ULL << 33) - 1)) == 0)
*dts = ts;
else
av_log(s, AV_LOG_ERROR, "Invalid HLS ID3 audio timestamp %"PRId64"\n", ts);
}
} else if (!strcmp(meta->tag, "APIC") && apic)
*apic = meta->data;
}
}
/* Check if the ID3 metadata contents have changed */
static int id3_has_changed_values(struct playlist *pls, AVDictionary *metadata,
ID3v2ExtraMetaAPIC *apic)
{
AVDictionaryEntry *entry = NULL;
AVDictionaryEntry *oldentry;
/* check that no keys have changed values */
while ((entry = av_dict_get(metadata, "", entry, AV_DICT_IGNORE_SUFFIX))) {
oldentry = av_dict_get(pls->id3_initial, entry->key, NULL, AV_DICT_MATCH_CASE);
if (!oldentry || strcmp(oldentry->value, entry->value) != 0)
return 1;
}
/* check if apic appeared */
if (apic && (pls->ctx->nb_streams != 2 || !pls->ctx->streams[1]->attached_pic.data))
return 1;
if (apic) {
int size = pls->ctx->streams[1]->attached_pic.size;
if (size != apic->buf->size - AV_INPUT_BUFFER_PADDING_SIZE)
return 1;
if (memcmp(apic->buf->data, pls->ctx->streams[1]->attached_pic.data, size) != 0)
return 1;
}
return 0;
}
/* Parse ID3 data and handle the found data */
static void handle_id3(AVIOContext *pb, struct playlist *pls)
{
AVDictionary *metadata = NULL;
ID3v2ExtraMetaAPIC *apic = NULL;
ID3v2ExtraMeta *extra_meta = NULL;
int64_t timestamp = AV_NOPTS_VALUE;
parse_id3(pls->ctx, pb, &metadata, ×tamp, &apic, &extra_meta);
if (timestamp != AV_NOPTS_VALUE) {
pls->id3_mpegts_timestamp = timestamp;
pls->id3_offset = 0;
}
if (!pls->id3_found) {
/* initial ID3 tags */
av_assert0(!pls->id3_deferred_extra);
pls->id3_found = 1;
/* get picture attachment and set text metadata */
if (pls->ctx->nb_streams)
ff_id3v2_parse_apic(pls->ctx, &extra_meta);
else
/* demuxer not yet opened, defer picture attachment */
pls->id3_deferred_extra = extra_meta;
av_dict_copy(&pls->ctx->metadata, metadata, 0);
pls->id3_initial = metadata;
} else {
if (!pls->id3_changed && id3_has_changed_values(pls, metadata, apic)) {
avpriv_report_missing_feature(pls->ctx, "Changing ID3 metadata in HLS audio elementary stream");
pls->id3_changed = 1;
}
av_dict_free(&metadata);
}
if (!pls->id3_deferred_extra)
ff_id3v2_free_extra_meta(&extra_meta);
}
/* Intercept and handle ID3 tags between URLContext and AVIOContext */
static void intercept_id3(struct playlist *pls, uint8_t *buf,
int buf_size, int *len)
{
/* Intercept ID3 tags; we do not want to pass them to the raw
* demuxer at each segment switch. */
int bytes;
int id3_buf_pos = 0;
int fill_buf = 0;
struct segment *seg = current_segment(pls);
/* gather all the id3 tags */
while (1) {
/* see if we can retrieve enough data for ID3 header */
if (*len < ID3v2_HEADER_SIZE && buf_size >= ID3v2_HEADER_SIZE) {
bytes = read_from_url(pls, seg, buf + *len, ID3v2_HEADER_SIZE - *len, READ_COMPLETE);
if (bytes > 0) {
if (bytes == ID3v2_HEADER_SIZE - *len)
/* no EOF yet, so fill the caller's buffer again after
* we have stripped the ID3 tags */
fill_buf = 1;
*len += bytes;
} else if (*len <= 0) {
/* error/EOF */
*len = bytes;
fill_buf = 0;
}
}
if (*len < ID3v2_HEADER_SIZE)
break;
if (ff_id3v2_match(buf, ID3v2_DEFAULT_MAGIC)) {
int64_t maxsize = seg->size >= 0 ? seg->size : 1024*1024;
int taglen = ff_id3v2_tag_len(buf);
int tag_got_bytes = FFMIN(taglen, *len);
int remaining = taglen - tag_got_bytes;
if (taglen > maxsize) {
av_log(pls->ctx, AV_LOG_ERROR, "Too large HLS ID3 tag (%d > %"PRId64" bytes)\n",
taglen, maxsize);
break;
}
/*
* Copy the id3 tag to our temporary id3 buffer.
* We could read a small id3 tag directly without memcpy, but
* we would still need to copy the large tags, and handling
* both of those cases together with the possibility for multiple
* tags would make the handling a bit complex.
*/
pls->id3_buf = av_fast_realloc(pls->id3_buf, &pls->id3_buf_size, id3_buf_pos + taglen);
if (!pls->id3_buf)
break;
memcpy(pls->id3_buf + id3_buf_pos, buf, tag_got_bytes);
id3_buf_pos += tag_got_bytes;
/* strip the intercepted bytes */
*len -= tag_got_bytes;
memmove(buf, buf + tag_got_bytes, *len);
av_log(pls->ctx, AV_LOG_DEBUG, "Stripped %d HLS ID3 bytes\n", tag_got_bytes);
if (remaining > 0) {
/* read the rest of the tag in */
if (read_from_url(pls, seg, pls->id3_buf + id3_buf_pos, remaining, READ_COMPLETE) != remaining)
break;
id3_buf_pos += remaining;
av_log(pls->ctx, AV_LOG_DEBUG, "Stripped additional %d HLS ID3 bytes\n", remaining);
}
} else {
/* no more ID3 tags */
break;
}
}
/* re-fill buffer for the caller unless EOF */
if (*len >= 0 && (fill_buf || *len == 0)) {
bytes = read_from_url(pls, seg, buf + *len, buf_size - *len, READ_NORMAL);
/* ignore error if we already had some data */
if (bytes >= 0)
*len += bytes;
else if (*len == 0)
*len = bytes;
}
if (pls->id3_buf) {
/* Now parse all the ID3 tags */
AVIOContext id3ioctx;
ffio_init_context(&id3ioctx, pls->id3_buf, id3_buf_pos, 0, NULL, NULL, NULL, NULL);
handle_id3(&id3ioctx, pls);
}
if (pls->is_id3_timestamped == -1)
pls->is_id3_timestamped = (pls->id3_mpegts_timestamp != AV_NOPTS_VALUE);
}
static int open_input(HLSContext *c, struct playlist *pls, struct segment *seg)
{
AVDictionary *opts = NULL;
int ret;
// broker prior HTTP options that should be consistent across requests
av_dict_set(&opts, "user-agent", c->user_agent, 0);
av_dict_set(&opts, "cookies", c->cookies, 0);
av_dict_set(&opts, "headers", c->headers, 0);
av_dict_set(&opts, "seekable", "0", 0);
if (seg->size >= 0) {
/* try to restrict the HTTP request to the part we want
* (if this is in fact an HTTP request) */
av_dict_set_int(&opts, "offset", seg->url_offset, 0);
av_dict_set_int(&opts, "end_offset", seg->url_offset + seg->size, 0);
}
av_log(pls->parent, AV_LOG_VERBOSE, "HLS request for url '%s', offset %"PRId64", playlist %d\n",
seg->url, seg->url_offset, pls->index);
if (seg->key_type == KEY_NONE) {
ret = open_url(pls->parent->priv_data, &pls->input, seg->url, opts);
} else if (seg->key_type == KEY_AES_128) {
// HLSContext *c = var->parent->priv_data;
char iv[33], key[33], url[MAX_URL_SIZE];
if (strcmp(seg->key, pls->key_url)) {
URLContext *uc;
if (open_url(pls->parent->priv_data, &uc, seg->key, opts) == 0) {
if (ffurl_read_complete(uc, pls->key, sizeof(pls->key))
!= sizeof(pls->key)) {
av_log(NULL, AV_LOG_ERROR, "Unable to read key file %s\n",
seg->key);
}
ffurl_close(uc);
} else {
av_log(NULL, AV_LOG_ERROR, "Unable to open key file %s\n",
seg->key);
}
av_strlcpy(pls->key_url, seg->key, sizeof(pls->key_url));
}
ff_data_to_hex(iv, seg->iv, sizeof(seg->iv), 0);
ff_data_to_hex(key, pls->key, sizeof(pls->key), 0);
iv[32] = key[32] = '\0';
if (strstr(seg->url, "://"))
snprintf(url, sizeof(url), "crypto+%s", seg->url);
else
snprintf(url, sizeof(url), "crypto:%s", seg->url);
if ((ret = ffurl_alloc(&pls->input, url, AVIO_FLAG_READ,
&pls->parent->interrupt_callback)) < 0)
goto cleanup;
av_opt_set(pls->input->priv_data, "key", key, 0);
av_opt_set(pls->input->priv_data, "iv", iv, 0);
if ((ret = url_connect(pls, c->avio_opts, opts)) < 0) {
goto cleanup;
}
ret = 0;
} else if (seg->key_type == KEY_SAMPLE_AES) {
av_log(pls->parent, AV_LOG_ERROR,
"SAMPLE-AES encryption is not supported yet\n");
ret = AVERROR_PATCHWELCOME;
}
else
ret = AVERROR(ENOSYS);
/* Seek to the requested position. If this was an HTTP request, the offset
* should already be where we want it, but this allows e.g. local testing
* without an HTTP server. */
if (ret == 0 && seg->key_type == KEY_NONE && seg->url_offset) {
int seekret = ffurl_seek(pls->input, seg->url_offset, SEEK_SET);
if (seekret < 0) {
av_log(pls->parent, AV_LOG_ERROR, "Unable to seek to offset %"PRId64" of HLS segment '%s'\n", seg->url_offset, seg->url);
ret = seekret;
ffurl_close(pls->input);
pls->input = NULL;
}
}
cleanup:
av_dict_free(&opts);
pls->cur_seg_offset = 0;
return ret;
}
static int update_init_section(struct playlist *pls, struct segment *seg)
{
static const int max_init_section_size = 1024*1024;
HLSContext *c = pls->parent->priv_data;
int64_t sec_size;
int64_t urlsize;
int ret;
if (seg->init_section == pls->cur_init_section)
return 0;
pls->cur_init_section = NULL;
if (!seg->init_section)
return 0;
/* this will clobber the playlist's URLContext state, so it should
* only be called between segments */
ret = open_input(c, pls, seg->init_section);
if (ret < 0) {
av_log(pls->parent, AV_LOG_WARNING,
"Failed to open an initialization section in playlist %d\n",
pls->index);
return ret;
}
if (seg->init_section->size >= 0)
sec_size = seg->init_section->size;
else if ((urlsize = ffurl_size(pls->input)) >= 0)
sec_size = urlsize;
else
sec_size = max_init_section_size;
av_log(pls->parent, AV_LOG_DEBUG,
"Downloading an initialization section of size %"PRId64"\n",
sec_size);
sec_size = FFMIN(sec_size, max_init_section_size);
av_fast_malloc(&pls->init_sec_buf, &pls->init_sec_buf_size, sec_size);
ret = read_from_url(pls, seg->init_section, pls->init_sec_buf,
pls->init_sec_buf_size, READ_COMPLETE);
ffurl_close(pls->input);
pls->input = NULL;
if (ret < 0)
return ret;
pls->cur_init_section = seg->init_section;
pls->init_sec_data_len = ret;
pls->init_sec_buf_read_offset = 0;
/* spec says audio elementary streams do not have media initialization
* sections, so there should be no ID3 timestamps */
pls->is_id3_timestamped = 0;
return 0;
}
static int64_t default_reload_interval(struct playlist *pls)
{
return pls->n_segments > 0 ?
pls->segments[pls->n_segments - 1]->duration :
pls->target_duration;
}
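/* This roughly matches the draft's reload guidance: a live playlist is
* first re-fetched after about one (last) segment duration, and
* read_data() below halves the interval when a reload produced no new
* segments. */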
static int read_data(void *opaque, uint8_t *buf, int buf_size)
{
struct playlist *v = opaque;
HLSContext *c = v->parent->priv_data;
int ret, i;
int just_opened = 0;
int reload_count = 0;
restart:
if (!v->needed)
return AVERROR_EOF;
if (!v->input) {
int64_t reload_interval;
struct segment *seg;
/* Check that the playlist is still needed before opening a new
* segment. */
if (v->ctx && v->ctx->nb_streams &&
v->parent->nb_streams >= v->stream_offset + v->ctx->nb_streams) {
v->needed = 0;
for (i = v->stream_offset; i < v->stream_offset + v->ctx->nb_streams;
i++) {
if (v->parent->streams[i]->discard < AVDISCARD_ALL)
v->needed = 1;
}
}
if (!v->needed) {
av_log(v->parent, AV_LOG_INFO, "No longer receiving playlist %d\n",
v->index);
return AVERROR_EOF;
}
/* If this is a live stream and the reload interval has elapsed since
* the last playlist reload, reload the playlists now. */
reload_interval = default_reload_interval(v);
reload:
reload_count++;
if (reload_count > c->max_reload)
return AVERROR_EOF;
if (!v->finished &&
av_gettime_relative() - v->last_load_time >= reload_interval) {
if ((ret = parse_playlist(c, v->url, v, NULL)) < 0) {
av_log(v->parent, AV_LOG_WARNING, "Failed to reload playlist %d\n",
v->index);
return ret;
}
/* If we need to reload the playlist again below (if
* there's still no more segments), switch to a reload
* interval of half the target duration. */
reload_interval = v->target_duration / 2;
}
if (v->cur_seq_no < v->start_seq_no) {
av_log(NULL, AV_LOG_WARNING,
"skipping %d segments ahead, expired from playlists\n",
v->start_seq_no - v->cur_seq_no);
v->cur_seq_no = v->start_seq_no;
}
if (v->cur_seq_no >= v->start_seq_no + v->n_segments) {
if (v->finished)
return AVERROR_EOF;
while (av_gettime_relative() - v->last_load_time < reload_interval) {
if (ff_check_interrupt(c->interrupt_callback))
return AVERROR_EXIT;
av_usleep(100*1000);
}
/* Enough time has elapsed since the last reload */
goto reload;
}
seg = current_segment(v);
/* load/update Media Initialization Section, if any */
ret = update_init_section(v, seg);
if (ret)
return ret;
ret = open_input(c, v, seg);
if (ret < 0) {
if (ff_check_interrupt(c->interrupt_callback))
return AVERROR_EXIT;
av_log(v->parent, AV_LOG_WARNING, "Failed to open segment of playlist %d\n",
v->index);
v->cur_seq_no += 1;
goto reload;
}
just_opened = 1;
}
if (v->init_sec_buf_read_offset < v->init_sec_data_len) {
/* Push init section out first before first actual segment */
int copy_size = FFMIN(v->init_sec_data_len - v->init_sec_buf_read_offset, buf_size);
memcpy(buf, v->init_sec_buf, copy_size);
v->init_sec_buf_read_offset += copy_size;
return copy_size;
}
ret = read_from_url(v, current_segment(v), buf, buf_size, READ_NORMAL);
if (ret > 0) {
if (just_opened && v->is_id3_timestamped != 0) {
/* Intercept ID3 tags here; elementary audio streams are required
* to convey timestamps using them at the beginning of each segment. */
intercept_id3(v, buf, buf_size, &ret);
}
return ret;
}
ffurl_close(v->input);
v->input = NULL;
v->cur_seq_no++;
c->cur_seq_no = v->cur_seq_no;
goto restart;
}
static int playlist_in_multiple_variants(HLSContext *c, struct playlist *pls)
{
int variant_count = 0;
int i, j;
for (i = 0; i < c->n_variants && variant_count < 2; i++) {
struct variant *v = c->variants[i];
for (j = 0; j < v->n_playlists; j++) {
if (v->playlists[j] == pls) {
variant_count++;
break;
}
}
}
return variant_count >= 2;
}
static void add_renditions_to_variant(HLSContext *c, struct variant *var,
enum AVMediaType type, const char *group_id)
{
int i;
for (i = 0; i < c->n_renditions; i++) {
struct rendition *rend = c->renditions[i];
if (rend->type == type && !strcmp(rend->group_id, group_id)) {
if (rend->playlist)
/* rendition is an external playlist
* => add the playlist to the variant */
dynarray_add(&var->playlists, &var->n_playlists, rend->playlist);
else
/* rendition is part of the variant main Media Playlist
* => add the rendition to the main Media Playlist */
dynarray_add(&var->playlists[0]->renditions,
&var->playlists[0]->n_renditions,
rend);
}
}
}
static void add_metadata_from_renditions(AVFormatContext *s, struct playlist *pls,
enum AVMediaType type)
{
int rend_idx = 0;
int i;
for (i = 0; i < pls->ctx->nb_streams; i++) {
AVStream *st = s->streams[pls->stream_offset + i];
if (st->codec->codec_type != type)
continue;
for (; rend_idx < pls->n_renditions; rend_idx++) {
struct rendition *rend = pls->renditions[rend_idx];
if (rend->type != type)
continue;
if (rend->language[0])
av_dict_set(&st->metadata, "language", rend->language, 0);
if (rend->name[0])
av_dict_set(&st->metadata, "comment", rend->name, 0);
st->disposition |= rend->disposition;
}
if (rend_idx >= pls->n_renditions)
break;
}
}
/* if timestamp was in valid range: returns 1 and sets seq_no
* if not: returns 0 and sets seq_no to closest segment */
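/* Worked example: with start_seq_no 5 and two segments of 10s each,
* a timestamp of 15s falls inside the second segment, so seq_no is set
* to 6 and 1 is returned; a timestamp of 25s is out of range, so seq_no
* is clamped to the last segment (6) and 0 is returned. */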
static int find_timestamp_in_playlist(HLSContext *c, struct playlist *pls,
int64_t timestamp, int *seq_no)
{
int i;
int64_t pos = c->first_timestamp == AV_NOPTS_VALUE ?
0 : c->first_timestamp;
if (timestamp < pos) {
*seq_no = pls->start_seq_no;
return 0;
}
for (i = 0; i < pls->n_segments; i++) {
int64_t diff = pos + pls->segments[i]->duration - timestamp;
if (diff > 0) {
*seq_no = pls->start_seq_no + i;
return 1;
}
pos += pls->segments[i]->duration;
}
*seq_no = pls->start_seq_no + pls->n_segments - 1;
return 0;
}
static int select_cur_seq_no(HLSContext *c, struct playlist *pls)
{
int seq_no;
if (!pls->finished && !c->first_packet &&
av_gettime_relative() - pls->last_load_time >= default_reload_interval(pls))
/* reload the playlist since it was suspended */
parse_playlist(c, pls->url, pls, NULL);
/* If playback is already in progress (we are just selecting a new
* playlist) and this is a complete file, find the matching segment
* by counting durations. */
if (pls->finished && c->cur_timestamp != AV_NOPTS_VALUE) {
find_timestamp_in_playlist(c, pls, c->cur_timestamp, &seq_no);
return seq_no;
}
if (!pls->finished) {
if (!c->first_packet && /* we are doing a segment selection during playback */
c->cur_seq_no >= pls->start_seq_no &&
c->cur_seq_no < pls->start_seq_no + pls->n_segments)
/* While spec 3.4.3 says that we cannot assume anything about the
* content at the same sequence number on different playlists,
* in practice this seems to work and doing it otherwise would
* require us to download a segment to inspect its timestamps. */
return c->cur_seq_no;
/* If this is a live stream, start live_start_index segments from the
* start or end */
if (c->live_start_index < 0)
return pls->start_seq_no + FFMAX(pls->n_segments + c->live_start_index, 0);
else
return pls->start_seq_no + FFMIN(c->live_start_index, pls->n_segments - 1);
}
/* Otherwise just start on the first segment. */
return pls->start_seq_no;
}
static int save_avio_options(AVFormatContext *s)
{
HLSContext *c = s->priv_data;
const char *opts[] = { "headers", "user_agent", "user-agent", "cookies", NULL }, **opt = opts;
uint8_t *buf;
int ret = 0;
while (*opt) {
if (av_opt_get(s->pb, *opt, AV_OPT_SEARCH_CHILDREN, &buf) >= 0) {
ret = av_dict_set(&c->avio_opts, *opt, buf,
AV_DICT_DONT_STRDUP_VAL);
if (ret < 0)
return ret;
}
opt++;
}
return ret;
}
static int hls_read_header(AVFormatContext *s)
{
URLContext *u = (s->flags & AVFMT_FLAG_CUSTOM_IO) ? NULL : s->pb->opaque;
HLSContext *c = s->priv_data;
int ret = 0, i, j, stream_offset = 0;
c->interrupt_callback = &s->interrupt_callback;
c->first_packet = 1;
c->first_timestamp = AV_NOPTS_VALUE;
c->cur_timestamp = AV_NOPTS_VALUE;
// if the URL context is good, read important options we must broker later
if (u && u->prot->priv_data_class) {
// get the previous user agent & set back to null if string size is zero
update_options(&c->user_agent, "user-agent", u->priv_data);
// get the previous cookies & set back to null if string size is zero
update_options(&c->cookies, "cookies", u->priv_data);
// get the previous headers & set back to null if string size is zero
update_options(&c->headers, "headers", u->priv_data);
}
if ((ret = parse_playlist(c, s->filename, NULL, s->pb)) < 0)
goto fail;
if ((ret = save_avio_options(s)) < 0)
goto fail;
/* Some HLS servers don't like being sent the range header */
av_dict_set(&c->avio_opts, "seekable", "0", 0);
if (c->n_variants == 0) {
av_log(NULL, AV_LOG_WARNING, "Empty playlist\n");
ret = AVERROR_EOF;
goto fail;
}
/* If the playlist only contained playlists (Master Playlist),
* parse each individual playlist. */
if (c->n_playlists > 1 || c->playlists[0]->n_segments == 0) {
for (i = 0; i < c->n_playlists; i++) {
struct playlist *pls = c->playlists[i];
if ((ret = parse_playlist(c, pls->url, pls, NULL)) < 0)
goto fail;
}
}
if (c->variants[0]->playlists[0]->n_segments == 0) {
av_log(NULL, AV_LOG_WARNING, "Empty playlist\n");
ret = AVERROR_EOF;
goto fail;
}
/* If this isn't a live stream, calculate the total duration of the
* stream. */
if (c->variants[0]->playlists[0]->finished) {
int64_t duration = 0;
for (i = 0; i < c->variants[0]->playlists[0]->n_segments; i++)
duration += c->variants[0]->playlists[0]->segments[i]->duration;
s->duration = duration;
}
/* Associate renditions with variants */
for (i = 0; i < c->n_variants; i++) {
struct variant *var = c->variants[i];
if (var->audio_group[0])
add_renditions_to_variant(c, var, AVMEDIA_TYPE_AUDIO, var->audio_group);
if (var->video_group[0])
add_renditions_to_variant(c, var, AVMEDIA_TYPE_VIDEO, var->video_group);
if (var->subtitles_group[0])
add_renditions_to_variant(c, var, AVMEDIA_TYPE_SUBTITLE, var->subtitles_group);
}
/* Open the demuxer for each playlist */
for (i = 0; i < c->n_playlists; i++) {
struct playlist *pls = c->playlists[i];
AVInputFormat *in_fmt = NULL;
if (!(pls->ctx = avformat_alloc_context())) {
ret = AVERROR(ENOMEM);
goto fail;
}
if (pls->n_segments == 0)
continue;
pls->index = i;
pls->needed = 1;
pls->parent = s;
pls->cur_seq_no = select_cur_seq_no(c, pls);
pls->read_buffer = av_malloc(INITIAL_BUFFER_SIZE);
if (!pls->read_buffer){
ret = AVERROR(ENOMEM);
avformat_free_context(pls->ctx);
pls->ctx = NULL;
goto fail;
}
ffio_init_context(&pls->pb, pls->read_buffer, INITIAL_BUFFER_SIZE, 0, pls,
read_data, NULL, NULL);
pls->pb.seekable = 0;
ret = av_probe_input_buffer(&pls->pb, &in_fmt, pls->segments[0]->url,
NULL, 0, 0);
if (ret < 0) {
/* Free the ctx - it isn't initialized properly at this point,
* so avformat_close_input shouldn't be called. If
* avformat_open_input fails below, it frees and zeros the
* context, so it doesn't need any special treatment like this. */
av_log(s, AV_LOG_ERROR, "Error when loading first segment '%s'\n", pls->segments[0]->url);
avformat_free_context(pls->ctx);
pls->ctx = NULL;
goto fail;
}
pls->ctx->pb = &pls->pb;
pls->stream_offset = stream_offset;
if ((ret = ff_copy_whitelists(pls->ctx, s)) < 0)
goto fail;
ret = avformat_open_input(&pls->ctx, pls->segments[0]->url, in_fmt, NULL);
if (ret < 0)
goto fail;
if (pls->id3_deferred_extra && pls->ctx->nb_streams == 1) {
ff_id3v2_parse_apic(pls->ctx, &pls->id3_deferred_extra);
avformat_queue_attached_pictures(pls->ctx);
ff_id3v2_free_extra_meta(&pls->id3_deferred_extra);
pls->id3_deferred_extra = NULL;
}
pls->ctx->ctx_flags &= ~AVFMTCTX_NOHEADER;
ret = avformat_find_stream_info(pls->ctx, NULL);
if (ret < 0)
goto fail;
if (pls->is_id3_timestamped == -1)
av_log(s, AV_LOG_WARNING, "No expected HTTP requests have been made\n");
/* Create new AVStreams for each stream in this playlist */
for (j = 0; j < pls->ctx->nb_streams; j++) {
AVStream *st = avformat_new_stream(s, NULL);
AVStream *ist = pls->ctx->streams[j];
if (!st) {
ret = AVERROR(ENOMEM);
goto fail;
}
st->id = i;
avcodec_copy_context(st->codec, pls->ctx->streams[j]->codec);
if (pls->is_id3_timestamped) /* custom timestamps via id3 */
avpriv_set_pts_info(st, 33, 1, MPEG_TIME_BASE);
else
avpriv_set_pts_info(st, ist->pts_wrap_bits, ist->time_base.num, ist->time_base.den);
}
add_metadata_from_renditions(s, pls, AVMEDIA_TYPE_AUDIO);
add_metadata_from_renditions(s, pls, AVMEDIA_TYPE_VIDEO);
add_metadata_from_renditions(s, pls, AVMEDIA_TYPE_SUBTITLE);
stream_offset += pls->ctx->nb_streams;
}
/* Create a program for each variant */
for (i = 0; i < c->n_variants; i++) {
struct variant *v = c->variants[i];
AVProgram *program;
program = av_new_program(s, i);
if (!program)
goto fail;
av_dict_set_int(&program->metadata, "variant_bitrate", v->bandwidth, 0);
for (j = 0; j < v->n_playlists; j++) {
struct playlist *pls = v->playlists[j];
int is_shared = playlist_in_multiple_variants(c, pls);
int k;
for (k = 0; k < pls->ctx->nb_streams; k++) {
struct AVStream *st = s->streams[pls->stream_offset + k];
ff_program_add_stream_index(s, i, pls->stream_offset + k);
/* Set variant_bitrate for streams unique to this variant */
if (!is_shared && v->bandwidth)
av_dict_set_int(&st->metadata, "variant_bitrate", v->bandwidth, 0);
}
}
}
return 0;
fail:
free_playlist_list(c);
free_variant_list(c);
free_rendition_list(c);
return ret;
}
static int recheck_discard_flags(AVFormatContext *s, int first)
{
HLSContext *c = s->priv_data;
int i, changed = 0;
/* Check if any new streams are needed */
for (i = 0; i < c->n_playlists; i++)
c->playlists[i]->cur_needed = 0;
for (i = 0; i < s->nb_streams; i++) {
AVStream *st = s->streams[i];
struct playlist *pls = c->playlists[s->streams[i]->id];
if (st->discard < AVDISCARD_ALL)
pls->cur_needed = 1;
}
for (i = 0; i < c->n_playlists; i++) {
struct playlist *pls = c->playlists[i];
if (pls->cur_needed && !pls->needed) {
pls->needed = 1;
changed = 1;
pls->cur_seq_no = select_cur_seq_no(c, pls);
pls->pb.eof_reached = 0;
if (c->cur_timestamp != AV_NOPTS_VALUE) {
/* catch up */
pls->seek_timestamp = c->cur_timestamp;
pls->seek_flags = AVSEEK_FLAG_ANY;
pls->seek_stream_index = -1;
}
av_log(s, AV_LOG_INFO, "Now receiving playlist %d, segment %d\n", i, pls->cur_seq_no);
} else if (first && !pls->cur_needed && pls->needed) {
if (pls->input)
ffurl_close(pls->input);
pls->input = NULL;
pls->needed = 0;
changed = 1;
av_log(s, AV_LOG_INFO, "No longer receiving playlist %d\n", i);
}
}
return changed;
}
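/*
 * Illustrative sketch (editor's example, not part of the original
 * source): a caller steers which playlists stay open by toggling the
 * per-stream discard state before the next read; recheck_discard_flags()
 * above picks the change up on the following hls_read_packet() call.
 * The stream indices here are assumed for the example.
 *
 *     AVFormatContext *s = ...;                    // opened HLS input
 *     s->streams[2]->discard = AVDISCARD_ALL;      // stop its playlist
 *     s->streams[0]->discard = AVDISCARD_DEFAULT;  // keep this one
 */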
static void fill_timing_for_id3_timestamped_stream(struct playlist *pls)
{
if (pls->id3_offset >= 0) {
pls->pkt.dts = pls->id3_mpegts_timestamp +
av_rescale_q(pls->id3_offset,
pls->ctx->streams[pls->pkt.stream_index]->time_base,
MPEG_TIME_BASE_Q);
if (pls->pkt.duration)
pls->id3_offset += pls->pkt.duration;
else
pls->id3_offset = -1;
} else {
        /* There have been packets with unknown duration
         * since the last id3 tag; this should not normally happen. */
pls->pkt.dts = AV_NOPTS_VALUE;
}
if (pls->pkt.duration)
pls->pkt.duration = av_rescale_q(pls->pkt.duration,
pls->ctx->streams[pls->pkt.stream_index]->time_base,
MPEG_TIME_BASE_Q);
pls->pkt.pts = AV_NOPTS_VALUE;
}
static AVRational get_timebase(struct playlist *pls)
{
if (pls->is_id3_timestamped)
return MPEG_TIME_BASE_Q;
return pls->ctx->streams[pls->pkt.stream_index]->time_base;
}
static int compare_ts_with_wrapdetect(int64_t ts_a, struct playlist *pls_a,
int64_t ts_b, struct playlist *pls_b)
{
int64_t scaled_ts_a = av_rescale_q(ts_a, get_timebase(pls_a), MPEG_TIME_BASE_Q);
int64_t scaled_ts_b = av_rescale_q(ts_b, get_timebase(pls_b), MPEG_TIME_BASE_Q);
return av_compare_mod(scaled_ts_a, scaled_ts_b, 1LL << 33);
}
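/*
 * Worked example (added for clarity): MPEG-TS timestamps wrap modulo
 * 2^33, so a plain signed comparison misorders packets across the wrap
 * point; av_compare_mod() compares distance modulo the wrap instead.
 * With ts_a = 2^33 - 100 and ts_b = 50 (already wrapped), a < b fails
 * numerically, but modulo 2^33 the distance from a to b is only 150
 * ticks, so a is correctly reported as the earlier timestamp.
 */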
static int hls_read_packet(AVFormatContext *s, AVPacket *pkt)
{
HLSContext *c = s->priv_data;
int ret, i, minplaylist = -1;
recheck_discard_flags(s, c->first_packet);
c->first_packet = 0;
for (i = 0; i < c->n_playlists; i++) {
struct playlist *pls = c->playlists[i];
/* Make sure we've got one buffered packet from each open playlist
* stream */
if (pls->needed && !pls->pkt.data) {
while (1) {
int64_t ts_diff;
AVRational tb;
ret = av_read_frame(pls->ctx, &pls->pkt);
if (ret < 0) {
if (!avio_feof(&pls->pb) && ret != AVERROR_EOF)
return ret;
reset_packet(&pls->pkt);
break;
} else {
/* stream_index check prevents matching picture attachments etc. */
if (pls->is_id3_timestamped && pls->pkt.stream_index == 0) {
/* audio elementary streams are id3 timestamped */
fill_timing_for_id3_timestamped_stream(pls);
}
if (c->first_timestamp == AV_NOPTS_VALUE &&
pls->pkt.dts != AV_NOPTS_VALUE)
c->first_timestamp = av_rescale_q(pls->pkt.dts,
get_timebase(pls), AV_TIME_BASE_Q);
}
if (pls->seek_timestamp == AV_NOPTS_VALUE)
break;
if (pls->seek_stream_index < 0 ||
pls->seek_stream_index == pls->pkt.stream_index) {
if (pls->pkt.dts == AV_NOPTS_VALUE) {
pls->seek_timestamp = AV_NOPTS_VALUE;
break;
}
tb = get_timebase(pls);
ts_diff = av_rescale_rnd(pls->pkt.dts, AV_TIME_BASE,
tb.den, AV_ROUND_DOWN) -
pls->seek_timestamp;
if (ts_diff >= 0 && (pls->seek_flags & AVSEEK_FLAG_ANY ||
pls->pkt.flags & AV_PKT_FLAG_KEY)) {
pls->seek_timestamp = AV_NOPTS_VALUE;
break;
}
}
av_free_packet(&pls->pkt);
reset_packet(&pls->pkt);
}
}
/* Check if this stream has the packet with the lowest dts */
if (pls->pkt.data) {
struct playlist *minpls = minplaylist < 0 ?
NULL : c->playlists[minplaylist];
if (minplaylist < 0) {
minplaylist = i;
} else {
int64_t dts = pls->pkt.dts;
int64_t mindts = minpls->pkt.dts;
if (dts == AV_NOPTS_VALUE ||
(mindts != AV_NOPTS_VALUE && compare_ts_with_wrapdetect(dts, pls, mindts, minpls) < 0))
minplaylist = i;
}
}
}
/* If we got a packet, return it */
if (minplaylist >= 0) {
struct playlist *pls = c->playlists[minplaylist];
*pkt = pls->pkt;
pkt->stream_index += pls->stream_offset;
reset_packet(&c->playlists[minplaylist]->pkt);
if (pkt->dts != AV_NOPTS_VALUE)
c->cur_timestamp = av_rescale_q(pkt->dts,
pls->ctx->streams[pls->pkt.stream_index]->time_base,
AV_TIME_BASE_Q);
return 0;
}
return AVERROR_EOF;
}
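/*
 * Illustrative note (added): the loop above always returns the packet
 * with the lowest wrap-corrected dts across all buffered playlists,
 * which interleaves the open renditions roughly in presentation order.
 * E.g. with playlist A holding dts=900 and playlist B holding dts=450
 * (same timebase), B's packet is returned first and B buffers its next
 * packet on the following call.
 */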
static int hls_close(AVFormatContext *s)
{
HLSContext *c = s->priv_data;
free_playlist_list(c);
free_variant_list(c);
free_rendition_list(c);
av_dict_free(&c->avio_opts);
return 0;
}
static int hls_read_seek(AVFormatContext *s, int stream_index,
int64_t timestamp, int flags)
{
HLSContext *c = s->priv_data;
struct playlist *seek_pls = NULL;
int i, seq_no;
int64_t first_timestamp, seek_timestamp, duration;
if ((flags & AVSEEK_FLAG_BYTE) ||
!(c->variants[0]->playlists[0]->finished || c->variants[0]->playlists[0]->type == PLS_TYPE_EVENT))
return AVERROR(ENOSYS);
first_timestamp = c->first_timestamp == AV_NOPTS_VALUE ?
0 : c->first_timestamp;
seek_timestamp = av_rescale_rnd(timestamp, AV_TIME_BASE,
s->streams[stream_index]->time_base.den,
flags & AVSEEK_FLAG_BACKWARD ?
AV_ROUND_DOWN : AV_ROUND_UP);
duration = s->duration == AV_NOPTS_VALUE ?
0 : s->duration;
if (0 < duration && duration < seek_timestamp - first_timestamp)
return AVERROR(EIO);
/* find the playlist with the specified stream */
for (i = 0; i < c->n_playlists; i++) {
struct playlist *pls = c->playlists[i];
if (stream_index >= pls->stream_offset &&
stream_index - pls->stream_offset < pls->ctx->nb_streams) {
seek_pls = pls;
break;
}
}
/* check if the timestamp is valid for the playlist with the
* specified stream index */
if (!seek_pls || !find_timestamp_in_playlist(c, seek_pls, seek_timestamp, &seq_no))
return AVERROR(EIO);
/* set segment now so we do not need to search again below */
seek_pls->cur_seq_no = seq_no;
seek_pls->seek_stream_index = stream_index - seek_pls->stream_offset;
for (i = 0; i < c->n_playlists; i++) {
/* Reset reading */
struct playlist *pls = c->playlists[i];
if (pls->input) {
ffurl_close(pls->input);
pls->input = NULL;
}
av_free_packet(&pls->pkt);
reset_packet(&pls->pkt);
pls->pb.eof_reached = 0;
/* Clear any buffered data */
pls->pb.buf_end = pls->pb.buf_ptr = pls->pb.buffer;
/* Reset the pos, to let the mpegts demuxer know we've seeked. */
pls->pb.pos = 0;
/* Flush the packet queue of the subdemuxer. */
ff_read_frame_flush(pls->ctx);
pls->seek_timestamp = seek_timestamp;
pls->seek_flags = flags;
if (pls != seek_pls) {
/* set closest segment seq_no for playlists not handled above */
find_timestamp_in_playlist(c, pls, seek_timestamp, &pls->cur_seq_no);
/* seek the playlist to the given position without taking
* keyframes into account since this playlist does not have the
* specified stream where we should look for the keyframes */
pls->seek_stream_index = -1;
pls->seek_flags |= AVSEEK_FLAG_ANY;
}
}
c->cur_timestamp = seek_timestamp;
return 0;
}
static int hls_probe(AVProbeData *p)
{
    /* Require #EXTM3U at the start, and at least one of the tags below
     * somewhere after it for a proper match. */
if (strncmp(p->buf, "#EXTM3U", 7))
return 0;
if (strstr(p->buf, "#EXT-X-STREAM-INF:") ||
strstr(p->buf, "#EXT-X-TARGETDURATION:") ||
strstr(p->buf, "#EXT-X-MEDIA-SEQUENCE:"))
return AVPROBE_SCORE_MAX;
return 0;
}
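/*
 * Example input (added for illustration): the smallest media playlist
 * this probe accepts with full score looks like
 *
 *     #EXTM3U
 *     #EXT-X-TARGETDURATION:10
 *     #EXTINF:9.009,
 *     segment0.ts
 *
 * A plain M3U file containing only #EXTM3U scores 0 here and is left
 * to other demuxers.
 */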
#define OFFSET(x) offsetof(HLSContext, x)
#define FLAGS AV_OPT_FLAG_DECODING_PARAM
static const AVOption hls_options[] = {
{"live_start_index", "segment index to start live streams at (negative values are from the end)",
OFFSET(live_start_index), AV_OPT_TYPE_INT, {.i64 = -3}, INT_MIN, INT_MAX, FLAGS},
{"allowed_extensions", "List of file extensions that hls is allowed to access",
OFFSET(allowed_extensions), AV_OPT_TYPE_STRING,
{.str = "3gp,aac,avi,flac,mkv,m3u8,m4a,m4s,m4v,mpg,mov,mp2,mp3,mp4,mpeg,mpegts,ogg,ogv,oga,ts,vob,wav"},
INT_MIN, INT_MAX, FLAGS},
{"max_reload", "Maximum number of times a insufficient list is attempted to be reloaded",
OFFSET(max_reload), AV_OPT_TYPE_INT, {.i64 = 1000}, 0, INT_MAX, FLAGS},
{NULL}
};
static const AVClass hls_class = {
.class_name = "hls,applehttp",
.item_name = av_default_item_name,
.option = hls_options,
.version = LIBAVUTIL_VERSION_INT,
};
AVInputFormat ff_hls_demuxer = {
.name = "hls,applehttp",
.long_name = NULL_IF_CONFIG_SMALL("Apple HTTP Live Streaming"),
.priv_class = &hls_class,
.priv_data_size = sizeof(HLSContext),
.read_probe = hls_probe,
.read_header = hls_read_header,
.read_packet = hls_read_packet,
.read_close = hls_close,
.read_seek = hls_read_seek,
};
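/*
 * Minimal usage sketch (editor's illustration, not part of the original
 * file), assuming a libavformat build of this era that registers the
 * demuxer; error handling is abbreviated.
 *
 *     #include <libavformat/avformat.h>
 *
 *     int play(const char *url)   // e.g. an .m3u8 URL
 *     {
 *         AVFormatContext *s = NULL;
 *         AVPacket pkt;
 *         int ret;
 *         av_register_all();
 *         if ((ret = avformat_open_input(&s, url, NULL, NULL)) < 0)
 *             return ret;
 *         if ((ret = avformat_find_stream_info(s, NULL)) < 0)
 *             goto end;
 *         while (av_read_frame(s, &pkt) >= 0) {
 *             // consume pkt.data / pkt.size here
 *             av_free_packet(&pkt);  // matches this file's API era
 *         }
 *     end:
 *         avformat_close_input(&s);
 *         return ret < 0 ? ret : 0;
 *     }
 */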
| ./CrossVul/dataset_final_sorted/CWE-416/c/good_4045_0 |
crossvul-cpp_data_bad_832_0 | // SPDX-License-Identifier: GPL-2.0+
/*
* ipmi_si.c
*
* The interface to the IPMI driver for the system interfaces (KCS, SMIC,
* BT).
*
* Author: MontaVista Software, Inc.
* Corey Minyard <minyard@mvista.com>
* source@mvista.com
*
* Copyright 2002 MontaVista Software Inc.
* Copyright 2006 IBM Corp., Christian Krafft <krafft@de.ibm.com>
*/
/*
* This file holds the "policy" for the interface to the SMI state
* machine. It does the configuration, handles timers and interrupts,
* and drives the real SMI state machine.
*/
#define pr_fmt(fmt) "ipmi_si: " fmt
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <asm/irq.h>
#include <linux/interrupt.h>
#include <linux/rcupdate.h>
#include <linux/ipmi.h>
#include <linux/ipmi_smi.h>
#include "ipmi_si.h"
#include <linux/string.h>
#include <linux/ctype.h>
/* Measure times between events in the driver. */
#undef DEBUG_TIMING
/* Call every 10 ms. */
#define SI_TIMEOUT_TIME_USEC 10000
#define SI_USEC_PER_JIFFY (1000000/HZ)
#define SI_TIMEOUT_JIFFIES (SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY)
#define SI_SHORT_TIMEOUT_USEC 250 /* .25ms when the SM requests a
                                     short timeout */
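/*
 * Worked example (added): with HZ == 250, SI_USEC_PER_JIFFY is
 * 1000000/250 = 4000, so SI_TIMEOUT_JIFFIES is 10000/4000 = 2 jiffies
 * (integer division), i.e. the timer fires roughly every 8 ms rather
 * than exactly every 10 ms.
 */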
enum si_intf_state {
SI_NORMAL,
SI_GETTING_FLAGS,
SI_GETTING_EVENTS,
SI_CLEARING_FLAGS,
SI_GETTING_MESSAGES,
SI_CHECKING_ENABLES,
SI_SETTING_ENABLES
/* FIXME - add watchdog stuff. */
};
/* Some BT-specific defines we need here. */
#define IPMI_BT_INTMASK_REG 2
#define IPMI_BT_INTMASK_CLEAR_IRQ_BIT 2
#define IPMI_BT_INTMASK_ENABLE_IRQ_BIT 1
static const char * const si_to_str[] = { "invalid", "kcs", "smic", "bt" };
static int initialized;
/*
* Indexes into stats[] in smi_info below.
*/
enum si_stat_indexes {
/*
* Number of times the driver requested a timer while an operation
* was in progress.
*/
SI_STAT_short_timeouts = 0,
/*
* Number of times the driver requested a timer while nothing was in
* progress.
*/
SI_STAT_long_timeouts,
/* Number of times the interface was idle while being polled. */
SI_STAT_idles,
/* Number of interrupts the driver handled. */
SI_STAT_interrupts,
	/* Number of times the driver got an ATTN from the hardware. */
SI_STAT_attentions,
/* Number of times the driver requested flags from the hardware. */
SI_STAT_flag_fetches,
/* Number of times the hardware didn't follow the state machine. */
SI_STAT_hosed_count,
/* Number of completed messages. */
SI_STAT_complete_transactions,
/* Number of IPMI events received from the hardware. */
SI_STAT_events,
/* Number of watchdog pretimeouts. */
SI_STAT_watchdog_pretimeouts,
/* Number of asynchronous messages received. */
SI_STAT_incoming_messages,
/* This *must* remain last, add new values above this. */
SI_NUM_STATS
};
struct smi_info {
int si_num;
struct ipmi_smi *intf;
struct si_sm_data *si_sm;
const struct si_sm_handlers *handlers;
spinlock_t si_lock;
struct ipmi_smi_msg *waiting_msg;
struct ipmi_smi_msg *curr_msg;
enum si_intf_state si_state;
/*
* Used to handle the various types of I/O that can occur with
* IPMI
*/
struct si_sm_io io;
/*
* Per-OEM handler, called from handle_flags(). Returns 1
* when handle_flags() needs to be re-run or 0 indicating it
* set si_state itself.
*/
int (*oem_data_avail_handler)(struct smi_info *smi_info);
/*
* Flags from the last GET_MSG_FLAGS command, used when an ATTN
* is set to hold the flags until we are done handling everything
* from the flags.
*/
#define RECEIVE_MSG_AVAIL 0x01
#define EVENT_MSG_BUFFER_FULL 0x02
#define WDT_PRE_TIMEOUT_INT 0x08
#define OEM0_DATA_AVAIL 0x20
#define OEM1_DATA_AVAIL 0x40
#define OEM2_DATA_AVAIL 0x80
#define OEM_DATA_AVAIL (OEM0_DATA_AVAIL | \
OEM1_DATA_AVAIL | \
OEM2_DATA_AVAIL)
unsigned char msg_flags;
/* Does the BMC have an event buffer? */
bool has_event_buffer;
/*
* If set to true, this will request events the next time the
* state machine is idle.
*/
atomic_t req_events;
/*
* If true, run the state machine to completion on every send
* call. Generally used after a panic to make sure stuff goes
* out.
*/
bool run_to_completion;
/* The timer for this si. */
struct timer_list si_timer;
/* This flag is set, if the timer can be set */
bool timer_can_start;
/* This flag is set, if the timer is running (timer_pending() isn't enough) */
bool timer_running;
/* The time (in jiffies) the last timeout occurred at. */
unsigned long last_timeout_jiffies;
/* Are we waiting for the events, pretimeouts, received msgs? */
atomic_t need_watch;
/*
* The driver will disable interrupts when it gets into a
* situation where it cannot handle messages due to lack of
* memory. Once that situation clears up, it will re-enable
* interrupts.
*/
bool interrupt_disabled;
/*
* Does the BMC support events?
*/
bool supports_event_msg_buff;
/*
	 * Can we clear the receive irq bit in the global enables?
	 * There are currently two forms of brokenness: some systems
	 * cannot clear the bit (which is technically within the spec
	 * but a bad idea) and some systems have the bit forced to
	 * zero even though interrupts work (which is clearly outside
	 * the spec). The next bool tells which form of brokenness is
	 * present.
*/
bool cannot_disable_irq;
/*
* Some systems are broken and cannot set the irq enable
* bit, even if they support interrupts.
*/
bool irq_enable_broken;
/*
* Did we get an attention that we did not handle?
*/
bool got_attn;
/* From the get device id response... */
struct ipmi_device_id device_id;
/* Default driver model device. */
struct platform_device *pdev;
/* Have we added the device group to the device? */
bool dev_group_added;
/* Have we added the platform device? */
bool pdev_registered;
/* Counters and things for the proc filesystem. */
atomic_t stats[SI_NUM_STATS];
struct task_struct *thread;
struct list_head link;
};
#define smi_inc_stat(smi, stat) \
atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
#define smi_get_stat(smi, stat) \
((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
#define IPMI_MAX_INTFS 4
static int force_kipmid[IPMI_MAX_INTFS];
static int num_force_kipmid;
static unsigned int kipmid_max_busy_us[IPMI_MAX_INTFS];
static int num_max_busy_us;
static bool unload_when_empty = true;
static int try_smi_init(struct smi_info *smi);
static void cleanup_one_si(struct smi_info *smi_info);
static void cleanup_ipmi_si(void);
#ifdef DEBUG_TIMING
void debug_timestamp(char *msg)
{
struct timespec64 t;
ktime_get_ts64(&t);
pr_debug("**%s: %lld.%9.9ld\n", msg, (long long) t.tv_sec, t.tv_nsec);
}
#else
#define debug_timestamp(x)
#endif
static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list);
static int register_xaction_notifier(struct notifier_block *nb)
{
return atomic_notifier_chain_register(&xaction_notifier_list, nb);
}
static void deliver_recv_msg(struct smi_info *smi_info,
struct ipmi_smi_msg *msg)
{
/* Deliver the message to the upper layer. */
ipmi_smi_msg_received(smi_info->intf, msg);
}
static void return_hosed_msg(struct smi_info *smi_info, int cCode)
{
struct ipmi_smi_msg *msg = smi_info->curr_msg;
if (cCode < 0 || cCode > IPMI_ERR_UNSPECIFIED)
cCode = IPMI_ERR_UNSPECIFIED;
/* else use it as is */
/* Make it a response */
msg->rsp[0] = msg->data[0] | 4;
msg->rsp[1] = msg->data[1];
msg->rsp[2] = cCode;
msg->rsp_size = 3;
smi_info->curr_msg = NULL;
deliver_recv_msg(smi_info, msg);
}
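/*
 * Worked example (added): in IPMI messages data[0] carries the netfn
 * in its upper six bits, and a response netfn is the request netfn
 * plus one. Since the netfn is stored shifted left by two, OR-ing in 4
 * adds one to it: an app request (netfn 0x06, stored as 0x18) becomes
 * the app response 0x1c (netfn 0x07), followed by the echoed command
 * byte and the completion code.
 */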
static enum si_sm_result start_next_msg(struct smi_info *smi_info)
{
int rv;
if (!smi_info->waiting_msg) {
smi_info->curr_msg = NULL;
rv = SI_SM_IDLE;
} else {
int err;
smi_info->curr_msg = smi_info->waiting_msg;
smi_info->waiting_msg = NULL;
debug_timestamp("Start2");
err = atomic_notifier_call_chain(&xaction_notifier_list,
0, smi_info);
if (err & NOTIFY_STOP_MASK) {
rv = SI_SM_CALL_WITHOUT_DELAY;
goto out;
}
err = smi_info->handlers->start_transaction(
smi_info->si_sm,
smi_info->curr_msg->data,
smi_info->curr_msg->data_size);
if (err)
return_hosed_msg(smi_info, err);
rv = SI_SM_CALL_WITHOUT_DELAY;
}
out:
return rv;
}
static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
{
if (!smi_info->timer_can_start)
return;
smi_info->last_timeout_jiffies = jiffies;
mod_timer(&smi_info->si_timer, new_val);
smi_info->timer_running = true;
}
/*
* Start a new message and (re)start the timer and thread.
*/
static void start_new_msg(struct smi_info *smi_info, unsigned char *msg,
unsigned int size)
{
smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
if (smi_info->thread)
wake_up_process(smi_info->thread);
smi_info->handlers->start_transaction(smi_info->si_sm, msg, size);
}
static void start_check_enables(struct smi_info *smi_info)
{
unsigned char msg[2];
msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
start_new_msg(smi_info, msg, 2);
smi_info->si_state = SI_CHECKING_ENABLES;
}
static void start_clear_flags(struct smi_info *smi_info)
{
unsigned char msg[3];
/* Make sure the watchdog pre-timeout flag is not set at startup. */
msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
msg[2] = WDT_PRE_TIMEOUT_INT;
start_new_msg(smi_info, msg, 3);
smi_info->si_state = SI_CLEARING_FLAGS;
}
static void start_getting_msg_queue(struct smi_info *smi_info)
{
smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
smi_info->curr_msg->data_size = 2;
start_new_msg(smi_info, smi_info->curr_msg->data,
smi_info->curr_msg->data_size);
smi_info->si_state = SI_GETTING_MESSAGES;
}
static void start_getting_events(struct smi_info *smi_info)
{
smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
smi_info->curr_msg->data_size = 2;
start_new_msg(smi_info, smi_info->curr_msg->data,
smi_info->curr_msg->data_size);
smi_info->si_state = SI_GETTING_EVENTS;
}
/*
 * When we have a situation where we run out of memory and cannot
* allocate messages, we just leave them in the BMC and run the system
* polled until we can allocate some memory. Once we have some
* memory, we will re-enable the interrupt.
*
* Note that we cannot just use disable_irq(), since the interrupt may
* be shared.
*/
static inline bool disable_si_irq(struct smi_info *smi_info)
{
if ((smi_info->io.irq) && (!smi_info->interrupt_disabled)) {
smi_info->interrupt_disabled = true;
start_check_enables(smi_info);
return true;
}
return false;
}
static inline bool enable_si_irq(struct smi_info *smi_info)
{
if ((smi_info->io.irq) && (smi_info->interrupt_disabled)) {
smi_info->interrupt_disabled = false;
start_check_enables(smi_info);
return true;
}
return false;
}
/*
* Allocate a message. If unable to allocate, start the interrupt
* disable process and return NULL. If able to allocate but
* interrupts are disabled, free the message and return NULL after
* starting the interrupt enable process.
*/
static struct ipmi_smi_msg *alloc_msg_handle_irq(struct smi_info *smi_info)
{
struct ipmi_smi_msg *msg;
msg = ipmi_alloc_smi_msg();
if (!msg) {
if (!disable_si_irq(smi_info))
smi_info->si_state = SI_NORMAL;
} else if (enable_si_irq(smi_info)) {
ipmi_free_smi_msg(msg);
msg = NULL;
}
return msg;
}
static void handle_flags(struct smi_info *smi_info)
{
retry:
if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) {
/* Watchdog pre-timeout */
smi_inc_stat(smi_info, watchdog_pretimeouts);
start_clear_flags(smi_info);
smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
ipmi_smi_watchdog_pretimeout(smi_info->intf);
} else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
/* Messages available. */
smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
if (!smi_info->curr_msg)
return;
start_getting_msg_queue(smi_info);
} else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) {
/* Events available. */
smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
if (!smi_info->curr_msg)
return;
start_getting_events(smi_info);
} else if (smi_info->msg_flags & OEM_DATA_AVAIL &&
smi_info->oem_data_avail_handler) {
if (smi_info->oem_data_avail_handler(smi_info))
goto retry;
} else
smi_info->si_state = SI_NORMAL;
}
/*
* Global enables we care about.
*/
#define GLOBAL_ENABLES_MASK (IPMI_BMC_EVT_MSG_BUFF | IPMI_BMC_RCV_MSG_INTR | \
IPMI_BMC_EVT_MSG_INTR)
static u8 current_global_enables(struct smi_info *smi_info, u8 base,
bool *irq_on)
{
u8 enables = 0;
if (smi_info->supports_event_msg_buff)
enables |= IPMI_BMC_EVT_MSG_BUFF;
if (((smi_info->io.irq && !smi_info->interrupt_disabled) ||
smi_info->cannot_disable_irq) &&
!smi_info->irq_enable_broken)
enables |= IPMI_BMC_RCV_MSG_INTR;
if (smi_info->supports_event_msg_buff &&
smi_info->io.irq && !smi_info->interrupt_disabled &&
!smi_info->irq_enable_broken)
enables |= IPMI_BMC_EVT_MSG_INTR;
*irq_on = enables & (IPMI_BMC_EVT_MSG_INTR | IPMI_BMC_RCV_MSG_INTR);
return enables;
}
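/*
 * Illustrative example (added): on a system with an event message
 * buffer, a working irq and no brokenness flags, this returns
 * IPMI_BMC_EVT_MSG_BUFF | IPMI_BMC_RCV_MSG_INTR | IPMI_BMC_EVT_MSG_INTR
 * and sets *irq_on; if the driver has disabled the irq (out of
 * memory), only IPMI_BMC_EVT_MSG_BUFF remains and *irq_on is false.
 */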
static void check_bt_irq(struct smi_info *smi_info, bool irq_on)
{
u8 irqstate = smi_info->io.inputb(&smi_info->io, IPMI_BT_INTMASK_REG);
irqstate &= IPMI_BT_INTMASK_ENABLE_IRQ_BIT;
if ((bool)irqstate == irq_on)
return;
if (irq_on)
smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
else
smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG, 0);
}
static void handle_transaction_done(struct smi_info *smi_info)
{
struct ipmi_smi_msg *msg;
debug_timestamp("Done");
switch (smi_info->si_state) {
case SI_NORMAL:
if (!smi_info->curr_msg)
break;
smi_info->curr_msg->rsp_size
= smi_info->handlers->get_result(
smi_info->si_sm,
smi_info->curr_msg->rsp,
IPMI_MAX_MSG_LENGTH);
/*
		 * Do this here because deliver_recv_msg() releases the
* lock, and a new message can be put in during the
* time the lock is released.
*/
msg = smi_info->curr_msg;
smi_info->curr_msg = NULL;
deliver_recv_msg(smi_info, msg);
break;
case SI_GETTING_FLAGS:
{
unsigned char msg[4];
unsigned int len;
/* We got the flags from the SMI, now handle them. */
len = smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
if (msg[2] != 0) {
/* Error fetching flags, just give up for now. */
smi_info->si_state = SI_NORMAL;
} else if (len < 4) {
/*
* Hmm, no flags. That's technically illegal, but
* don't use uninitialized data.
*/
smi_info->si_state = SI_NORMAL;
} else {
smi_info->msg_flags = msg[3];
handle_flags(smi_info);
}
break;
}
case SI_CLEARING_FLAGS:
{
unsigned char msg[3];
/* We cleared the flags. */
smi_info->handlers->get_result(smi_info->si_sm, msg, 3);
if (msg[2] != 0) {
/* Error clearing flags */
dev_warn(smi_info->io.dev,
"Error clearing flags: %2.2x\n", msg[2]);
}
smi_info->si_state = SI_NORMAL;
break;
}
case SI_GETTING_EVENTS:
{
smi_info->curr_msg->rsp_size
= smi_info->handlers->get_result(
smi_info->si_sm,
smi_info->curr_msg->rsp,
IPMI_MAX_MSG_LENGTH);
/*
		 * Do this here because deliver_recv_msg() releases the
* lock, and a new message can be put in during the
* time the lock is released.
*/
msg = smi_info->curr_msg;
smi_info->curr_msg = NULL;
if (msg->rsp[2] != 0) {
/* Error getting event, probably done. */
msg->done(msg);
/* Take off the event flag. */
smi_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL;
handle_flags(smi_info);
} else {
smi_inc_stat(smi_info, events);
/*
* Do this before we deliver the message
* because delivering the message releases the
* lock and something else can mess with the
* state.
*/
handle_flags(smi_info);
deliver_recv_msg(smi_info, msg);
}
break;
}
case SI_GETTING_MESSAGES:
{
smi_info->curr_msg->rsp_size
= smi_info->handlers->get_result(
smi_info->si_sm,
smi_info->curr_msg->rsp,
IPMI_MAX_MSG_LENGTH);
/*
		 * Do this here because deliver_recv_msg() releases the
* lock, and a new message can be put in during the
* time the lock is released.
*/
msg = smi_info->curr_msg;
smi_info->curr_msg = NULL;
if (msg->rsp[2] != 0) {
/* Error getting event, probably done. */
msg->done(msg);
/* Take off the msg flag. */
smi_info->msg_flags &= ~RECEIVE_MSG_AVAIL;
handle_flags(smi_info);
} else {
smi_inc_stat(smi_info, incoming_messages);
/*
* Do this before we deliver the message
* because delivering the message releases the
* lock and something else can mess with the
* state.
*/
handle_flags(smi_info);
deliver_recv_msg(smi_info, msg);
}
break;
}
case SI_CHECKING_ENABLES:
{
unsigned char msg[4];
u8 enables;
bool irq_on;
/* We got the flags from the SMI, now handle them. */
smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
if (msg[2] != 0) {
dev_warn(smi_info->io.dev,
"Couldn't get irq info: %x.\n", msg[2]);
dev_warn(smi_info->io.dev,
"Maybe ok, but ipmi might run very slowly.\n");
smi_info->si_state = SI_NORMAL;
break;
}
enables = current_global_enables(smi_info, 0, &irq_on);
if (smi_info->io.si_type == SI_BT)
/* BT has its own interrupt enable bit. */
check_bt_irq(smi_info, irq_on);
if (enables != (msg[3] & GLOBAL_ENABLES_MASK)) {
/* Enables are not correct, fix them. */
msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
msg[2] = enables | (msg[3] & ~GLOBAL_ENABLES_MASK);
smi_info->handlers->start_transaction(
smi_info->si_sm, msg, 3);
smi_info->si_state = SI_SETTING_ENABLES;
} else if (smi_info->supports_event_msg_buff) {
smi_info->curr_msg = ipmi_alloc_smi_msg();
if (!smi_info->curr_msg) {
smi_info->si_state = SI_NORMAL;
break;
}
start_getting_events(smi_info);
} else {
smi_info->si_state = SI_NORMAL;
}
break;
}
case SI_SETTING_ENABLES:
{
unsigned char msg[4];
smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
if (msg[2] != 0)
dev_warn(smi_info->io.dev,
"Could not set the global enables: 0x%x.\n",
msg[2]);
if (smi_info->supports_event_msg_buff) {
smi_info->curr_msg = ipmi_alloc_smi_msg();
if (!smi_info->curr_msg) {
smi_info->si_state = SI_NORMAL;
break;
}
start_getting_events(smi_info);
} else {
smi_info->si_state = SI_NORMAL;
}
break;
}
}
}
/*
* Called on timeouts and events. Timeouts should pass the elapsed
* time, interrupts should pass in zero. Must be called with
* si_lock held and interrupts disabled.
*/
static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
int time)
{
enum si_sm_result si_sm_result;
restart:
/*
* There used to be a loop here that waited a little while
* (around 25us) before giving up. That turned out to be
* pointless, the minimum delays I was seeing were in the 300us
* range, which is far too long to wait in an interrupt. So
* we just run until the state machine tells us something
* happened or it needs a delay.
*/
si_sm_result = smi_info->handlers->event(smi_info->si_sm, time);
time = 0;
while (si_sm_result == SI_SM_CALL_WITHOUT_DELAY)
si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
if (si_sm_result == SI_SM_TRANSACTION_COMPLETE) {
smi_inc_stat(smi_info, complete_transactions);
handle_transaction_done(smi_info);
goto restart;
} else if (si_sm_result == SI_SM_HOSED) {
smi_inc_stat(smi_info, hosed_count);
/*
		 * Do this before return_hosed_msg(), because that
		 * releases the lock.
*/
smi_info->si_state = SI_NORMAL;
if (smi_info->curr_msg != NULL) {
/*
* If we were handling a user message, format
* a response to send to the upper layer to
* tell it about the error.
*/
return_hosed_msg(smi_info, IPMI_ERR_UNSPECIFIED);
}
goto restart;
}
/*
* We prefer handling attn over new messages. But don't do
* this if there is not yet an upper layer to handle anything.
*/
if (si_sm_result == SI_SM_ATTN || smi_info->got_attn) {
unsigned char msg[2];
if (smi_info->si_state != SI_NORMAL) {
/*
* We got an ATTN, but we are doing something else.
* Handle the ATTN later.
*/
smi_info->got_attn = true;
} else {
smi_info->got_attn = false;
smi_inc_stat(smi_info, attentions);
/*
			 * Got an attn, send down a get message flags to see
* what's causing it. It would be better to handle
* this in the upper layer, but due to the way
* interrupts work with the SMI, that's not really
* possible.
*/
msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
msg[1] = IPMI_GET_MSG_FLAGS_CMD;
start_new_msg(smi_info, msg, 2);
smi_info->si_state = SI_GETTING_FLAGS;
goto restart;
}
}
/* If we are currently idle, try to start the next message. */
if (si_sm_result == SI_SM_IDLE) {
smi_inc_stat(smi_info, idles);
si_sm_result = start_next_msg(smi_info);
if (si_sm_result != SI_SM_IDLE)
goto restart;
}
if ((si_sm_result == SI_SM_IDLE)
&& (atomic_read(&smi_info->req_events))) {
/*
* We are idle and the upper layer requested that I fetch
* events, so do so.
*/
atomic_set(&smi_info->req_events, 0);
/*
* Take this opportunity to check the interrupt and
* message enable state for the BMC. The BMC can be
		 * asynchronously reset, and may thus get interrupts
		 * and messages disabled.
*/
if (smi_info->supports_event_msg_buff || smi_info->io.irq) {
start_check_enables(smi_info);
} else {
smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
if (!smi_info->curr_msg)
goto out;
start_getting_events(smi_info);
}
goto restart;
}
if (si_sm_result == SI_SM_IDLE && smi_info->timer_running) {
		/* It's ok if this fails; the timer will just go off. */
if (del_timer(&smi_info->si_timer))
smi_info->timer_running = false;
}
out:
return si_sm_result;
}
static void check_start_timer_thread(struct smi_info *smi_info)
{
if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL) {
smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
if (smi_info->thread)
wake_up_process(smi_info->thread);
start_next_msg(smi_info);
smi_event_handler(smi_info, 0);
}
}
static void flush_messages(void *send_info)
{
struct smi_info *smi_info = send_info;
enum si_sm_result result;
/*
* Currently, this function is called only in run-to-completion
* mode. This means we are single-threaded, no need for locks.
*/
result = smi_event_handler(smi_info, 0);
while (result != SI_SM_IDLE) {
udelay(SI_SHORT_TIMEOUT_USEC);
result = smi_event_handler(smi_info, SI_SHORT_TIMEOUT_USEC);
}
}
static void sender(void *send_info,
struct ipmi_smi_msg *msg)
{
struct smi_info *smi_info = send_info;
unsigned long flags;
debug_timestamp("Enqueue");
if (smi_info->run_to_completion) {
/*
* If we are running to completion, start it. Upper
* layer will call flush_messages to clear it out.
*/
smi_info->waiting_msg = msg;
return;
}
spin_lock_irqsave(&smi_info->si_lock, flags);
/*
* The following two lines don't need to be under the lock for
* the lock's sake, but they do need SMP memory barriers to
* avoid getting things out of order. We are already claiming
* the lock, anyway, so just do it under the lock to avoid the
* ordering problem.
*/
BUG_ON(smi_info->waiting_msg);
smi_info->waiting_msg = msg;
check_start_timer_thread(smi_info);
spin_unlock_irqrestore(&smi_info->si_lock, flags);
}
static void set_run_to_completion(void *send_info, bool i_run_to_completion)
{
struct smi_info *smi_info = send_info;
smi_info->run_to_completion = i_run_to_completion;
if (i_run_to_completion)
flush_messages(smi_info);
}
/*
* Use -1 in the nsec value of the busy waiting timespec to tell that
* we are spinning in kipmid looking for something and not delaying
* between checks
*/
static inline void ipmi_si_set_not_busy(struct timespec64 *ts)
{
ts->tv_nsec = -1;
}
static inline int ipmi_si_is_busy(struct timespec64 *ts)
{
return ts->tv_nsec != -1;
}
static inline int ipmi_thread_busy_wait(enum si_sm_result smi_result,
const struct smi_info *smi_info,
struct timespec64 *busy_until)
{
unsigned int max_busy_us = 0;
if (smi_info->si_num < num_max_busy_us)
max_busy_us = kipmid_max_busy_us[smi_info->si_num];
if (max_busy_us == 0 || smi_result != SI_SM_CALL_WITH_DELAY)
ipmi_si_set_not_busy(busy_until);
else if (!ipmi_si_is_busy(busy_until)) {
ktime_get_ts64(busy_until);
timespec64_add_ns(busy_until, max_busy_us*NSEC_PER_USEC);
} else {
struct timespec64 now;
ktime_get_ts64(&now);
if (unlikely(timespec64_compare(&now, busy_until) > 0)) {
ipmi_si_set_not_busy(busy_until);
return 0;
}
}
return 1;
}
/*
* A busy-waiting loop for speeding up IPMI operation.
*
* Lousy hardware makes this hard. This is only enabled for systems
* that are not BT and do not have interrupts. It starts spinning
* when an operation is complete or until max_busy tells it to stop
 * (if that is enabled). See the paragraph on kipmid_max_busy_us in
* Documentation/IPMI.txt for details.
*/
static int ipmi_thread(void *data)
{
struct smi_info *smi_info = data;
unsigned long flags;
enum si_sm_result smi_result;
struct timespec64 busy_until;
ipmi_si_set_not_busy(&busy_until);
set_user_nice(current, MAX_NICE);
while (!kthread_should_stop()) {
int busy_wait;
spin_lock_irqsave(&(smi_info->si_lock), flags);
smi_result = smi_event_handler(smi_info, 0);
/*
* If the driver is doing something, there is a possible
		 * race with the timer. If the timer handler sees idle,
* and the thread here sees something else, the timer
* handler won't restart the timer even though it is
* required. So start it here if necessary.
*/
if (smi_result != SI_SM_IDLE && !smi_info->timer_running)
smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
spin_unlock_irqrestore(&(smi_info->si_lock), flags);
busy_wait = ipmi_thread_busy_wait(smi_result, smi_info,
&busy_until);
if (smi_result == SI_SM_CALL_WITHOUT_DELAY)
; /* do nothing */
else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait)
schedule();
else if (smi_result == SI_SM_IDLE) {
if (atomic_read(&smi_info->need_watch)) {
schedule_timeout_interruptible(100);
} else {
/* Wait to be woken up when we are needed. */
__set_current_state(TASK_INTERRUPTIBLE);
schedule();
}
} else
schedule_timeout_interruptible(1);
}
return 0;
}
static void poll(void *send_info)
{
struct smi_info *smi_info = send_info;
unsigned long flags = 0;
bool run_to_completion = smi_info->run_to_completion;
/*
* Make sure there is some delay in the poll loop so we can
* drive time forward and timeout things.
*/
udelay(10);
if (!run_to_completion)
spin_lock_irqsave(&smi_info->si_lock, flags);
smi_event_handler(smi_info, 10);
if (!run_to_completion)
spin_unlock_irqrestore(&smi_info->si_lock, flags);
}
static void request_events(void *send_info)
{
struct smi_info *smi_info = send_info;
if (!smi_info->has_event_buffer)
return;
atomic_set(&smi_info->req_events, 1);
}
static void set_need_watch(void *send_info, bool enable)
{
struct smi_info *smi_info = send_info;
unsigned long flags;
atomic_set(&smi_info->need_watch, enable);
spin_lock_irqsave(&smi_info->si_lock, flags);
check_start_timer_thread(smi_info);
spin_unlock_irqrestore(&smi_info->si_lock, flags);
}
static void smi_timeout(struct timer_list *t)
{
struct smi_info *smi_info = from_timer(smi_info, t, si_timer);
enum si_sm_result smi_result;
unsigned long flags;
unsigned long jiffies_now;
long time_diff;
long timeout;
spin_lock_irqsave(&(smi_info->si_lock), flags);
debug_timestamp("Timer");
jiffies_now = jiffies;
time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
* SI_USEC_PER_JIFFY);
smi_result = smi_event_handler(smi_info, time_diff);
if ((smi_info->io.irq) && (!smi_info->interrupt_disabled)) {
/* Running with interrupts, only do long timeouts. */
timeout = jiffies + SI_TIMEOUT_JIFFIES;
smi_inc_stat(smi_info, long_timeouts);
goto do_mod_timer;
}
/*
* If the state machine asks for a short delay, then shorten
* the timer timeout.
*/
if (smi_result == SI_SM_CALL_WITH_DELAY) {
smi_inc_stat(smi_info, short_timeouts);
timeout = jiffies + 1;
} else {
smi_inc_stat(smi_info, long_timeouts);
timeout = jiffies + SI_TIMEOUT_JIFFIES;
}
do_mod_timer:
if (smi_result != SI_SM_IDLE)
smi_mod_timer(smi_info, timeout);
else
smi_info->timer_running = false;
spin_unlock_irqrestore(&(smi_info->si_lock), flags);
}
irqreturn_t ipmi_si_irq_handler(int irq, void *data)
{
struct smi_info *smi_info = data;
unsigned long flags;
if (smi_info->io.si_type == SI_BT)
/* We need to clear the IRQ flag for the BT interface. */
smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
IPMI_BT_INTMASK_CLEAR_IRQ_BIT
| IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
spin_lock_irqsave(&(smi_info->si_lock), flags);
smi_inc_stat(smi_info, interrupts);
debug_timestamp("Interrupt");
smi_event_handler(smi_info, 0);
spin_unlock_irqrestore(&(smi_info->si_lock), flags);
return IRQ_HANDLED;
}
static int smi_start_processing(void *send_info,
struct ipmi_smi *intf)
{
struct smi_info *new_smi = send_info;
int enable = 0;
new_smi->intf = intf;
/* Set up the timer that drives the interface. */
timer_setup(&new_smi->si_timer, smi_timeout, 0);
new_smi->timer_can_start = true;
smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES);
/* Try to claim any interrupts. */
if (new_smi->io.irq_setup) {
new_smi->io.irq_handler_data = new_smi;
new_smi->io.irq_setup(&new_smi->io);
}
/*
* Check if the user forcefully enabled the daemon.
*/
if (new_smi->si_num < num_force_kipmid)
enable = force_kipmid[new_smi->si_num];
/*
* The BT interface is efficient enough to not need a thread,
* and there is no need for a thread if we have interrupts.
*/
else if ((new_smi->io.si_type != SI_BT) && (!new_smi->io.irq))
enable = 1;
if (enable) {
new_smi->thread = kthread_run(ipmi_thread, new_smi,
"kipmi%d", new_smi->si_num);
if (IS_ERR(new_smi->thread)) {
dev_notice(new_smi->io.dev, "Could not start"
" kernel thread due to error %ld, only using"
" timers to drive the interface\n",
PTR_ERR(new_smi->thread));
new_smi->thread = NULL;
}
}
return 0;
}
static int get_smi_info(void *send_info, struct ipmi_smi_info *data)
{
struct smi_info *smi = send_info;
data->addr_src = smi->io.addr_source;
data->dev = smi->io.dev;
data->addr_info = smi->io.addr_info;
get_device(smi->io.dev);
return 0;
}
static void set_maintenance_mode(void *send_info, bool enable)
{
struct smi_info *smi_info = send_info;
if (!enable)
atomic_set(&smi_info->req_events, 0);
}
static void shutdown_smi(void *send_info);
static const struct ipmi_smi_handlers handlers = {
.owner = THIS_MODULE,
.start_processing = smi_start_processing,
.shutdown = shutdown_smi,
.get_smi_info = get_smi_info,
.sender = sender,
.request_events = request_events,
.set_need_watch = set_need_watch,
.set_maintenance_mode = set_maintenance_mode,
.set_run_to_completion = set_run_to_completion,
.flush_messages = flush_messages,
.poll = poll,
};
static LIST_HEAD(smi_infos);
static DEFINE_MUTEX(smi_infos_lock);
static int smi_num; /* Used to sequence the SMIs */
static const char * const addr_space_to_str[] = { "i/o", "mem" };
module_param_array(force_kipmid, int, &num_force_kipmid, 0);
MODULE_PARM_DESC(force_kipmid, "Force the kipmi daemon to be enabled (1) or"
" disabled(0). Normally the IPMI driver auto-detects"
" this, but the value may be overridden by this parm.");
module_param(unload_when_empty, bool, 0);
MODULE_PARM_DESC(unload_when_empty, "Unload the module if no interfaces are"
" specified or found, default is 1. Setting to 0"
" is useful for hot add of devices using hotmod.");
module_param_array(kipmid_max_busy_us, uint, &num_max_busy_us, 0644);
MODULE_PARM_DESC(kipmid_max_busy_us,
"Max time (in microseconds) to busy-wait for IPMI data before"
" sleeping. 0 (default) means to wait forever. Set to 100-500"
" if kipmid is using up a lot of CPU time.");
void ipmi_irq_finish_setup(struct si_sm_io *io)
{
if (io->si_type == SI_BT)
/* Enable the interrupt in the BT interface. */
io->outputb(io, IPMI_BT_INTMASK_REG,
IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
}
void ipmi_irq_start_cleanup(struct si_sm_io *io)
{
if (io->si_type == SI_BT)
/* Disable the interrupt in the BT interface. */
io->outputb(io, IPMI_BT_INTMASK_REG, 0);
}
static void std_irq_cleanup(struct si_sm_io *io)
{
ipmi_irq_start_cleanup(io);
free_irq(io->irq, io->irq_handler_data);
}
int ipmi_std_irq_setup(struct si_sm_io *io)
{
int rv;
if (!io->irq)
return 0;
rv = request_irq(io->irq,
ipmi_si_irq_handler,
IRQF_SHARED,
DEVICE_NAME,
io->irq_handler_data);
if (rv) {
dev_warn(io->dev, "%s unable to claim interrupt %d,"
" running polled\n",
DEVICE_NAME, io->irq);
io->irq = 0;
} else {
io->irq_cleanup = std_irq_cleanup;
ipmi_irq_finish_setup(io);
dev_info(io->dev, "Using irq %d\n", io->irq);
}
return rv;
}
static int wait_for_msg_done(struct smi_info *smi_info)
{
enum si_sm_result smi_result;
smi_result = smi_info->handlers->event(smi_info->si_sm, 0);
for (;;) {
if (smi_result == SI_SM_CALL_WITH_DELAY ||
smi_result == SI_SM_CALL_WITH_TICK_DELAY) {
schedule_timeout_uninterruptible(1);
smi_result = smi_info->handlers->event(
smi_info->si_sm, jiffies_to_usecs(1));
} else if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
smi_result = smi_info->handlers->event(
smi_info->si_sm, 0);
} else
break;
}
if (smi_result == SI_SM_HOSED)
/*
* We couldn't get the state machine to run, so whatever's at
* the port is probably not an IPMI SMI interface.
*/
return -ENODEV;
return 0;
}
static int try_get_dev_id(struct smi_info *smi_info)
{
unsigned char msg[2];
unsigned char *resp;
unsigned long resp_len;
int rv = 0;
resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
if (!resp)
return -ENOMEM;
/*
* Do a Get Device ID command, since it comes back with some
* useful info.
*/
msg[0] = IPMI_NETFN_APP_REQUEST << 2;
msg[1] = IPMI_GET_DEVICE_ID_CMD;
smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
rv = wait_for_msg_done(smi_info);
if (rv)
goto out;
resp_len = smi_info->handlers->get_result(smi_info->si_sm,
resp, IPMI_MAX_MSG_LENGTH);
/* Check and record info from the get device id, in case we need it. */
rv = ipmi_demangle_device_id(resp[0] >> 2, resp[1],
resp + 2, resp_len - 2, &smi_info->device_id);
out:
kfree(resp);
return rv;
}
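/*
 * Byte-level sketch (added for illustration): the transaction above
 * sends the two-byte request { 0x18, 0x01 } (app netfn, Get Device ID)
 * and expects a response beginning { 0x1c, 0x01, 0x00, ... } -- netfn,
 * echoed command, completion code -- with ipmi_demangle_device_id()
 * parsing the payload starting at the completion code (resp + 2).
 */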
static int get_global_enables(struct smi_info *smi_info, u8 *enables)
{
unsigned char msg[3];
unsigned char *resp;
unsigned long resp_len;
int rv;
resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
if (!resp)
return -ENOMEM;
msg[0] = IPMI_NETFN_APP_REQUEST << 2;
msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
rv = wait_for_msg_done(smi_info);
if (rv) {
dev_warn(smi_info->io.dev,
"Error getting response from get global enables command: %d\n",
rv);
goto out;
}
resp_len = smi_info->handlers->get_result(smi_info->si_sm,
resp, IPMI_MAX_MSG_LENGTH);
if (resp_len < 4 ||
resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD ||
resp[2] != 0) {
dev_warn(smi_info->io.dev,
"Invalid return from get global enables command: %ld %x %x %x\n",
resp_len, resp[0], resp[1], resp[2]);
rv = -EINVAL;
goto out;
} else {
*enables = resp[3];
}
out:
kfree(resp);
return rv;
}
/*
* Returns 1 if it gets an error from the command.
*/
static int set_global_enables(struct smi_info *smi_info, u8 enables)
{
unsigned char msg[3];
unsigned char *resp;
unsigned long resp_len;
int rv;
resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
if (!resp)
return -ENOMEM;
msg[0] = IPMI_NETFN_APP_REQUEST << 2;
msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
msg[2] = enables;
smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
rv = wait_for_msg_done(smi_info);
if (rv) {
dev_warn(smi_info->io.dev,
"Error getting response from set global enables command: %d\n",
rv);
goto out;
}
resp_len = smi_info->handlers->get_result(smi_info->si_sm,
resp, IPMI_MAX_MSG_LENGTH);
if (resp_len < 3 ||
resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) {
dev_warn(smi_info->io.dev,
"Invalid return from set global enables command: %ld %x %x\n",
resp_len, resp[0], resp[1]);
rv = -EINVAL;
goto out;
}
if (resp[2] != 0)
rv = 1;
out:
kfree(resp);
return rv;
}
/*
* Some BMCs do not support clearing the receive irq bit in the global
* enables (even if they don't support interrupts on the BMC). Check
* for this and handle it properly.
*/
static void check_clr_rcv_irq(struct smi_info *smi_info)
{
u8 enables = 0;
int rv;
rv = get_global_enables(smi_info, &enables);
if (!rv) {
if ((enables & IPMI_BMC_RCV_MSG_INTR) == 0)
/* Already clear, should work ok. */
return;
enables &= ~IPMI_BMC_RCV_MSG_INTR;
rv = set_global_enables(smi_info, enables);
}
if (rv < 0) {
dev_err(smi_info->io.dev,
"Cannot check clearing the rcv irq: %d\n", rv);
return;
}
if (rv) {
/*
		 * An error when clearing the receive irq bit means
		 * clearing the bit is not supported.
*/
dev_warn(smi_info->io.dev,
"The BMC does not support clearing the recv irq bit, compensating, but the BMC needs to be fixed.\n");
smi_info->cannot_disable_irq = true;
}
}
/*
* Some BMCs do not support setting the interrupt bits in the global
* enables even if they support interrupts. Clearly bad, but we can
* compensate.
*/
static void check_set_rcv_irq(struct smi_info *smi_info)
{
u8 enables = 0;
int rv;
if (!smi_info->io.irq)
return;
rv = get_global_enables(smi_info, &enables);
if (!rv) {
enables |= IPMI_BMC_RCV_MSG_INTR;
rv = set_global_enables(smi_info, enables);
}
if (rv < 0) {
dev_err(smi_info->io.dev,
"Cannot check setting the rcv irq: %d\n", rv);
return;
}
if (rv) {
/*
		 * An error when setting the receive irq bit means
		 * setting the bit is not supported.
*/
dev_warn(smi_info->io.dev,
"The BMC does not support setting the recv irq bit, compensating, but the BMC needs to be fixed.\n");
smi_info->cannot_disable_irq = true;
smi_info->irq_enable_broken = true;
}
}
static int try_enable_event_buffer(struct smi_info *smi_info)
{
unsigned char msg[3];
unsigned char *resp;
unsigned long resp_len;
int rv = 0;
resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
if (!resp)
return -ENOMEM;
msg[0] = IPMI_NETFN_APP_REQUEST << 2;
msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
rv = wait_for_msg_done(smi_info);
if (rv) {
pr_warn("Error getting response from get global enables command, the event buffer is not enabled\n");
goto out;
}
resp_len = smi_info->handlers->get_result(smi_info->si_sm,
resp, IPMI_MAX_MSG_LENGTH);
if (resp_len < 4 ||
resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD ||
resp[2] != 0) {
pr_warn("Invalid return from get global enables command, cannot enable the event buffer\n");
rv = -EINVAL;
goto out;
}
if (resp[3] & IPMI_BMC_EVT_MSG_BUFF) {
/* buffer is already enabled, nothing to do. */
smi_info->supports_event_msg_buff = true;
goto out;
}
msg[0] = IPMI_NETFN_APP_REQUEST << 2;
msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
msg[2] = resp[3] | IPMI_BMC_EVT_MSG_BUFF;
smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
rv = wait_for_msg_done(smi_info);
if (rv) {
pr_warn("Error getting response from set global, enables command, the event buffer is not enabled\n");
goto out;
}
resp_len = smi_info->handlers->get_result(smi_info->si_sm,
resp, IPMI_MAX_MSG_LENGTH);
if (resp_len < 3 ||
resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) {
pr_warn("Invalid return from get global, enables command, not enable the event buffer\n");
rv = -EINVAL;
goto out;
}
if (resp[2] != 0)
/*
* An error when setting the event buffer bit means
* that the event buffer is not supported.
*/
rv = -ENOENT;
else
smi_info->supports_event_msg_buff = true;
out:
kfree(resp);
return rv;
}
#define IPMI_SI_ATTR(name) \
static ssize_t ipmi_##name##_show(struct device *dev, \
struct device_attribute *attr, \
char *buf) \
{ \
struct smi_info *smi_info = dev_get_drvdata(dev); \
\
return snprintf(buf, 10, "%u\n", smi_get_stat(smi_info, name)); \
} \
static DEVICE_ATTR(name, S_IRUGO, ipmi_##name##_show, NULL)
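/*
 * Expansion sketch (added): IPMI_SI_ATTR(idles) below generates
 * ipmi_idles_show(), which formats smi_get_stat(smi_info, idles) --
 * i.e. stats[SI_STAT_idles] -- into the sysfs buffer, plus a read-only
 * dev_attr_idles used in the attribute array further down.
 */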
static ssize_t ipmi_type_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct smi_info *smi_info = dev_get_drvdata(dev);
return snprintf(buf, 10, "%s\n", si_to_str[smi_info->io.si_type]);
}
static DEVICE_ATTR(type, S_IRUGO, ipmi_type_show, NULL);
static ssize_t ipmi_interrupts_enabled_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct smi_info *smi_info = dev_get_drvdata(dev);
int enabled = smi_info->io.irq && !smi_info->interrupt_disabled;
return snprintf(buf, 10, "%d\n", enabled);
}
static DEVICE_ATTR(interrupts_enabled, S_IRUGO,
ipmi_interrupts_enabled_show, NULL);
IPMI_SI_ATTR(short_timeouts);
IPMI_SI_ATTR(long_timeouts);
IPMI_SI_ATTR(idles);
IPMI_SI_ATTR(interrupts);
IPMI_SI_ATTR(attentions);
IPMI_SI_ATTR(flag_fetches);
IPMI_SI_ATTR(hosed_count);
IPMI_SI_ATTR(complete_transactions);
IPMI_SI_ATTR(events);
IPMI_SI_ATTR(watchdog_pretimeouts);
IPMI_SI_ATTR(incoming_messages);
static ssize_t ipmi_params_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct smi_info *smi_info = dev_get_drvdata(dev);
return snprintf(buf, 200,
"%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n",
si_to_str[smi_info->io.si_type],
addr_space_to_str[smi_info->io.addr_type],
smi_info->io.addr_data,
smi_info->io.regspacing,
smi_info->io.regsize,
smi_info->io.regshift,
smi_info->io.irq,
smi_info->io.slave_addr);
}
static DEVICE_ATTR(params, S_IRUGO, ipmi_params_show, NULL);
static struct attribute *ipmi_si_dev_attrs[] = {
&dev_attr_type.attr,
&dev_attr_interrupts_enabled.attr,
&dev_attr_short_timeouts.attr,
&dev_attr_long_timeouts.attr,
&dev_attr_idles.attr,
&dev_attr_interrupts.attr,
&dev_attr_attentions.attr,
&dev_attr_flag_fetches.attr,
&dev_attr_hosed_count.attr,
&dev_attr_complete_transactions.attr,
&dev_attr_events.attr,
&dev_attr_watchdog_pretimeouts.attr,
&dev_attr_incoming_messages.attr,
&dev_attr_params.attr,
NULL
};
static const struct attribute_group ipmi_si_dev_attr_group = {
.attrs = ipmi_si_dev_attrs,
};
/*
* oem_data_avail_to_receive_msg_avail
* @info - smi_info structure with msg_flags set
*
* Converts flags from OEM_DATA_AVAIL to RECEIVE_MSG_AVAIL
* Returns 1 indicating need to re-run handle_flags().
*/
static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info)
{
smi_info->msg_flags = ((smi_info->msg_flags & ~OEM_DATA_AVAIL) |
RECEIVE_MSG_AVAIL);
return 1;
}
/*
* setup_dell_poweredge_oem_data_handler
* @info - smi_info.device_id must be populated
*
* Systems that match, but have firmware version < 1.40 may assert
* OEM0_DATA_AVAIL on their own, without being told via Set Flags that
* it's safe to do so. Such systems will de-assert OEM1_DATA_AVAIL
* upon receipt of IPMI_GET_MSG_CMD, so we should treat these flags
* as RECEIVE_MSG_AVAIL instead.
*
* As Dell has no plans to release IPMI 1.5 firmware that *ever*
* assert the OEM[012] bits, and if it did, the driver would have to
* change to handle that properly, we don't actually check for the
* firmware version.
* Device ID = 0x20 BMC on PowerEdge 8G servers
* Device Revision = 0x80
* Firmware Revision1 = 0x01 BMC version 1.40
* Firmware Revision2 = 0x40 BCD encoded
* IPMI Version = 0x51 IPMI 1.5
* Manufacturer ID = A2 02 00 Dell IANA
*
* Additionally, PowerEdge systems with IPMI < 1.5 may also assert
 * OEM0_DATA_AVAIL and need to be treated as RECEIVE_MSG_AVAIL.
*
*/
#define DELL_POWEREDGE_8G_BMC_DEVICE_ID 0x20
#define DELL_POWEREDGE_8G_BMC_DEVICE_REV 0x80
#define DELL_POWEREDGE_8G_BMC_IPMI_VERSION 0x51
#define DELL_IANA_MFR_ID 0x0002a2
static void setup_dell_poweredge_oem_data_handler(struct smi_info *smi_info)
{
struct ipmi_device_id *id = &smi_info->device_id;
if (id->manufacturer_id == DELL_IANA_MFR_ID) {
if (id->device_id == DELL_POWEREDGE_8G_BMC_DEVICE_ID &&
id->device_revision == DELL_POWEREDGE_8G_BMC_DEVICE_REV &&
id->ipmi_version == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) {
smi_info->oem_data_avail_handler =
oem_data_avail_to_receive_msg_avail;
} else if (ipmi_version_major(id) < 1 ||
(ipmi_version_major(id) == 1 &&
ipmi_version_minor(id) < 5)) {
smi_info->oem_data_avail_handler =
oem_data_avail_to_receive_msg_avail;
}
}
}
#define CANNOT_RETURN_REQUESTED_LENGTH 0xCA
static void return_hosed_msg_badsize(struct smi_info *smi_info)
{
struct ipmi_smi_msg *msg = smi_info->curr_msg;
/* Make it a response */
msg->rsp[0] = msg->data[0] | 4;
msg->rsp[1] = msg->data[1];
msg->rsp[2] = CANNOT_RETURN_REQUESTED_LENGTH;
msg->rsp_size = 3;
smi_info->curr_msg = NULL;
deliver_recv_msg(smi_info, msg);
}
/*
* dell_poweredge_bt_xaction_handler
* @info - smi_info.device_id must be populated
*
* Dell PowerEdge servers with the BT interface (x6xx and 1750) will
* not respond to a Get SDR command if the length of the data
* requested is exactly 0x3A, which leads to command timeouts and no
* data returned. This intercepts such commands, and causes userspace
* callers to try again with a different-sized buffer, which succeeds.
*/
#define STORAGE_NETFN 0x0A
#define STORAGE_CMD_GET_SDR 0x23
static int dell_poweredge_bt_xaction_handler(struct notifier_block *self,
unsigned long unused,
void *in)
{
struct smi_info *smi_info = in;
unsigned char *data = smi_info->curr_msg->data;
unsigned int size = smi_info->curr_msg->data_size;
if (size >= 8 &&
(data[0]>>2) == STORAGE_NETFN &&
data[1] == STORAGE_CMD_GET_SDR &&
data[7] == 0x3A) {
return_hosed_msg_badsize(smi_info);
return NOTIFY_STOP;
}
return NOTIFY_DONE;
}
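/*
 * Trigger example (added): a Get SDR request that hits this workaround
 * carries data[0] = 0x28 (storage netfn 0x0a << 2), data[1] = 0x23,
 * and data[7] = 0x3a as the requested byte count; the handler answers
 * it locally with completion code 0xca so userspace retries with a
 * different length.
 */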
static struct notifier_block dell_poweredge_bt_xaction_notifier = {
.notifier_call = dell_poweredge_bt_xaction_handler,
};
/*
* setup_dell_poweredge_bt_xaction_handler
* @info - smi_info.device_id must be filled in already
*
 * Registers the Dell PowerEdge BT transaction notifier
 * when we know this system needs it.
*/
static void
setup_dell_poweredge_bt_xaction_handler(struct smi_info *smi_info)
{
struct ipmi_device_id *id = &smi_info->device_id;
if (id->manufacturer_id == DELL_IANA_MFR_ID &&
smi_info->io.si_type == SI_BT)
register_xaction_notifier(&dell_poweredge_bt_xaction_notifier);
}
/*
* setup_oem_data_handler
* @info - smi_info.device_id must be filled in already
*
 * Fills in smi_info.oem_data_avail_handler
* when we know what function to use there.
*/
static void setup_oem_data_handler(struct smi_info *smi_info)
{
setup_dell_poweredge_oem_data_handler(smi_info);
}
static void setup_xaction_handlers(struct smi_info *smi_info)
{
setup_dell_poweredge_bt_xaction_handler(smi_info);
}
static void check_for_broken_irqs(struct smi_info *smi_info)
{
check_clr_rcv_irq(smi_info);
check_set_rcv_irq(smi_info);
}
static inline void stop_timer_and_thread(struct smi_info *smi_info)
{
if (smi_info->thread != NULL) {
kthread_stop(smi_info->thread);
smi_info->thread = NULL;
}
smi_info->timer_can_start = false;
if (smi_info->timer_running)
del_timer_sync(&smi_info->si_timer);
}
static struct smi_info *find_dup_si(struct smi_info *info)
{
struct smi_info *e;
list_for_each_entry(e, &smi_infos, link) {
if (e->io.addr_type != info->io.addr_type)
continue;
if (e->io.addr_data == info->io.addr_data) {
/*
* This is a cheap hack, ACPI doesn't have a defined
* slave address but SMBIOS does. Pick it up from
* any source that has it available.
*/
if (info->io.slave_addr && !e->io.slave_addr)
e->io.slave_addr = info->io.slave_addr;
return e;
}
}
return NULL;
}
int ipmi_si_add_smi(struct si_sm_io *io)
{
int rv = 0;
struct smi_info *new_smi, *dup;
if (!io->io_setup) {
if (io->addr_type == IPMI_IO_ADDR_SPACE) {
io->io_setup = ipmi_si_port_setup;
} else if (io->addr_type == IPMI_MEM_ADDR_SPACE) {
io->io_setup = ipmi_si_mem_setup;
} else {
return -EINVAL;
}
}
new_smi = kzalloc(sizeof(*new_smi), GFP_KERNEL);
if (!new_smi)
return -ENOMEM;
spin_lock_init(&new_smi->si_lock);
new_smi->io = *io;
mutex_lock(&smi_infos_lock);
dup = find_dup_si(new_smi);
if (dup) {
if (new_smi->io.addr_source == SI_ACPI &&
dup->io.addr_source == SI_SMBIOS) {
/* We prefer ACPI over SMBIOS. */
dev_info(dup->io.dev,
"Removing SMBIOS-specified %s state machine in favor of ACPI\n",
si_to_str[new_smi->io.si_type]);
cleanup_one_si(dup);
} else {
dev_info(new_smi->io.dev,
"%s-specified %s state machine: duplicate\n",
ipmi_addr_src_to_str(new_smi->io.addr_source),
si_to_str[new_smi->io.si_type]);
rv = -EBUSY;
kfree(new_smi);
goto out_err;
}
}
pr_info("Adding %s-specified %s state machine\n",
ipmi_addr_src_to_str(new_smi->io.addr_source),
si_to_str[new_smi->io.si_type]);
list_add_tail(&new_smi->link, &smi_infos);
if (initialized)
rv = try_smi_init(new_smi);
out_err:
mutex_unlock(&smi_infos_lock);
return rv;
}
/*
 * Try to start up an interface. Must be called with smi_infos_lock
 * held, primarily to keep smi_num consistent; we only want to do
 * these one at a time.
 */
static int try_smi_init(struct smi_info *new_smi)
{
int rv = 0;
int i;
char *init_name = NULL;
pr_info("Trying %s-specified %s state machine at %s address 0x%lx, slave address 0x%x, irq %d\n",
ipmi_addr_src_to_str(new_smi->io.addr_source),
si_to_str[new_smi->io.si_type],
addr_space_to_str[new_smi->io.addr_type],
new_smi->io.addr_data,
new_smi->io.slave_addr, new_smi->io.irq);
switch (new_smi->io.si_type) {
case SI_KCS:
new_smi->handlers = &kcs_smi_handlers;
break;
case SI_SMIC:
new_smi->handlers = &smic_smi_handlers;
break;
case SI_BT:
new_smi->handlers = &bt_smi_handlers;
break;
default:
/* No support for anything else yet. */
rv = -EIO;
goto out_err;
}
new_smi->si_num = smi_num;
/* Do this early so it's available for logs. */
if (!new_smi->io.dev) {
init_name = kasprintf(GFP_KERNEL, "ipmi_si.%d",
new_smi->si_num);
/*
* If we don't already have a device from something
* else (like PCI), then register a new one.
*/
new_smi->pdev = platform_device_alloc("ipmi_si",
new_smi->si_num);
if (!new_smi->pdev) {
pr_err("Unable to allocate platform device\n");
rv = -ENOMEM;
goto out_err;
}
new_smi->io.dev = &new_smi->pdev->dev;
new_smi->io.dev->driver = &ipmi_platform_driver.driver;
/* Nulled by device_add() */
new_smi->io.dev->init_name = init_name;
}
/* Allocate the state machine's data and initialize it. */
new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
if (!new_smi->si_sm) {
rv = -ENOMEM;
goto out_err;
}
new_smi->io.io_size = new_smi->handlers->init_data(new_smi->si_sm,
&new_smi->io);
/* Now that we know the I/O size, we can set up the I/O. */
rv = new_smi->io.io_setup(&new_smi->io);
if (rv) {
dev_err(new_smi->io.dev, "Could not set up I/O space\n");
goto out_err;
}
/* Do low-level detection first. */
if (new_smi->handlers->detect(new_smi->si_sm)) {
if (new_smi->io.addr_source)
dev_err(new_smi->io.dev,
"Interface detection failed\n");
rv = -ENODEV;
goto out_err;
}
/*
* Attempt a get device id command. If it fails, we probably
* don't have a BMC here.
*/
rv = try_get_dev_id(new_smi);
if (rv) {
if (new_smi->io.addr_source)
dev_err(new_smi->io.dev,
"There appears to be no BMC at this location\n");
goto out_err;
}
setup_oem_data_handler(new_smi);
setup_xaction_handlers(new_smi);
check_for_broken_irqs(new_smi);
new_smi->waiting_msg = NULL;
new_smi->curr_msg = NULL;
atomic_set(&new_smi->req_events, 0);
new_smi->run_to_completion = false;
for (i = 0; i < SI_NUM_STATS; i++)
atomic_set(&new_smi->stats[i], 0);
new_smi->interrupt_disabled = true;
atomic_set(&new_smi->need_watch, 0);
rv = try_enable_event_buffer(new_smi);
if (rv == 0)
new_smi->has_event_buffer = true;
/*
* Start clearing the flags before we enable interrupts or the
* timer to avoid racing with the timer.
*/
start_clear_flags(new_smi);
/*
* IRQ is defined to be set when non-zero. req_events will
* cause a global flags check that will enable interrupts.
*/
if (new_smi->io.irq) {
new_smi->interrupt_disabled = false;
atomic_set(&new_smi->req_events, 1);
}
if (new_smi->pdev && !new_smi->pdev_registered) {
rv = platform_device_add(new_smi->pdev);
if (rv) {
dev_err(new_smi->io.dev,
"Unable to register system interface device: %d\n",
rv);
goto out_err;
}
new_smi->pdev_registered = true;
}
dev_set_drvdata(new_smi->io.dev, new_smi);
rv = device_add_group(new_smi->io.dev, &ipmi_si_dev_attr_group);
if (rv) {
dev_err(new_smi->io.dev,
"Unable to add device attributes: error %d\n",
rv);
goto out_err;
}
new_smi->dev_group_added = true;
rv = ipmi_register_smi(&handlers,
new_smi,
new_smi->io.dev,
new_smi->io.slave_addr);
if (rv) {
dev_err(new_smi->io.dev,
"Unable to register device: error %d\n",
rv);
goto out_err;
}
/* Don't increment till we know we have succeeded. */
smi_num++;
dev_info(new_smi->io.dev, "IPMI %s interface initialized\n",
si_to_str[new_smi->io.si_type]);
WARN_ON(new_smi->io.dev->init_name != NULL);
out_err:
kfree(init_name);
return rv;
}
static int init_ipmi_si(void)
{
struct smi_info *e;
enum ipmi_addr_src type = SI_INVALID;
if (initialized)
return 0;
pr_info("IPMI System Interface driver\n");
/* If the user gave us a device, they presumably want us to use it */
if (!ipmi_si_hardcode_find_bmc())
goto do_scan;
ipmi_si_platform_init();
ipmi_si_pci_init();
ipmi_si_parisc_init();
/* We prefer devices with interrupts, but in the case of a machine
with multiple BMCs we assume that there will be several instances
of a given type, so if we succeed in registering one type, we also
try to register everything else of the same type */
do_scan:
mutex_lock(&smi_infos_lock);
list_for_each_entry(e, &smi_infos, link) {
/* Try to register a device if it has an IRQ and we either
haven't successfully registered a device yet or this
device has the same type as one we successfully registered */
if (e->io.irq && (!type || e->io.addr_source == type)) {
if (!try_smi_init(e)) {
type = e->io.addr_source;
}
}
}
/* type will only have been set if we successfully registered an si */
if (type)
goto skip_fallback_noirq;
/* Fall back to the preferred device */
list_for_each_entry(e, &smi_infos, link) {
if (!e->io.irq && (!type || e->io.addr_source == type)) {
if (!try_smi_init(e)) {
type = e->io.addr_source;
}
}
}
skip_fallback_noirq:
initialized = 1;
mutex_unlock(&smi_infos_lock);
if (type)
return 0;
mutex_lock(&smi_infos_lock);
if (unload_when_empty && list_empty(&smi_infos)) {
mutex_unlock(&smi_infos_lock);
cleanup_ipmi_si();
pr_warn("Unable to find any System Interface(s)\n");
return -ENODEV;
} else {
mutex_unlock(&smi_infos_lock);
return 0;
}
}
module_init(init_ipmi_si);
static void shutdown_smi(void *send_info)
{
struct smi_info *smi_info = send_info;
if (smi_info->dev_group_added) {
device_remove_group(smi_info->io.dev, &ipmi_si_dev_attr_group);
smi_info->dev_group_added = false;
}
if (smi_info->io.dev)
dev_set_drvdata(smi_info->io.dev, NULL);
/*
* Make sure that interrupts, the timer and the thread are
* stopped and will not run again.
*/
smi_info->interrupt_disabled = true;
if (smi_info->io.irq_cleanup) {
smi_info->io.irq_cleanup(&smi_info->io);
smi_info->io.irq_cleanup = NULL;
}
stop_timer_and_thread(smi_info);
/*
 * Wait until we know that we are out of any interrupt
 * handlers that might have been running before we free the
 * interrupt.
 */
synchronize_rcu();
/*
 * Timeouts are stopped; now make sure the interrupts are off
 * in the BMC. Note that timers and CPU interrupts are off,
 * so no need for locks.
 */
while (smi_info->curr_msg || (smi_info->si_state != SI_NORMAL)) {
poll(smi_info);
schedule_timeout_uninterruptible(1);
}
if (smi_info->handlers)
disable_si_irq(smi_info);
while (smi_info->curr_msg || (smi_info->si_state != SI_NORMAL)) {
poll(smi_info);
schedule_timeout_uninterruptible(1);
}
if (smi_info->handlers)
smi_info->handlers->cleanup(smi_info->si_sm);
if (smi_info->io.addr_source_cleanup) {
smi_info->io.addr_source_cleanup(&smi_info->io);
smi_info->io.addr_source_cleanup = NULL;
}
if (smi_info->io.io_cleanup) {
smi_info->io.io_cleanup(&smi_info->io);
smi_info->io.io_cleanup = NULL;
}
kfree(smi_info->si_sm);
smi_info->si_sm = NULL;
smi_info->intf = NULL;
}
/*
* Must be called with smi_infos_lock held, to serialize the
* smi_info->intf check.
*/
static void cleanup_one_si(struct smi_info *smi_info)
{
if (!smi_info)
return;
list_del(&smi_info->link);
if (smi_info->intf)
ipmi_unregister_smi(smi_info->intf);
if (smi_info->pdev) {
if (smi_info->pdev_registered)
platform_device_unregister(smi_info->pdev);
else
platform_device_put(smi_info->pdev);
}
kfree(smi_info);
}
int ipmi_si_remove_by_dev(struct device *dev)
{
struct smi_info *e;
int rv = -ENOENT;
mutex_lock(&smi_infos_lock);
list_for_each_entry(e, &smi_infos, link) {
if (e->io.dev == dev) {
cleanup_one_si(e);
rv = 0;
break;
}
}
mutex_unlock(&smi_infos_lock);
return rv;
}
void ipmi_si_remove_by_data(int addr_space, enum si_type si_type,
unsigned long addr)
{
/* remove */
struct smi_info *e, *tmp_e;
mutex_lock(&smi_infos_lock);
list_for_each_entry_safe(e, tmp_e, &smi_infos, link) {
if (e->io.addr_type != addr_space)
continue;
if (e->io.si_type != si_type)
continue;
if (e->io.addr_data == addr)
cleanup_one_si(e);
}
mutex_unlock(&smi_infos_lock);
}
static void cleanup_ipmi_si(void)
{
struct smi_info *e, *tmp_e;
if (!initialized)
return;
ipmi_si_pci_shutdown();
ipmi_si_parisc_shutdown();
ipmi_si_platform_shutdown();
mutex_lock(&smi_infos_lock);
list_for_each_entry_safe(e, tmp_e, &smi_infos, link)
cleanup_one_si(e);
mutex_unlock(&smi_infos_lock);
}
module_exit(cleanup_ipmi_si);
MODULE_ALIAS("platform:dmi-ipmi-si");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
MODULE_DESCRIPTION("Interface to the IPMI driver for the KCS, SMIC, and BT"
" system interfaces.");
| ./CrossVul/dataset_final_sorted/CWE-416/c/bad_832_0 |
crossvul-cpp_data_good_320_0 | /************************************************************
* Copyright (c) 1994 by Silicon Graphics Computer Systems, Inc.
*
* Permission to use, copy, modify, and distribute this
* software and its documentation for any purpose and without
* fee is hereby granted, provided that the above copyright
* notice appear in all copies and that both that copyright
* notice and this permission notice appear in supporting
* documentation, and that the name of Silicon Graphics not be
* used in advertising or publicity pertaining to distribution
* of the software without specific prior written permission.
* Silicon Graphics makes no representation about the suitability
* of this software for any purpose. It is provided "as is"
* without any express or implied warranty.
*
* SILICON GRAPHICS DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
* SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
* AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT SHALL SILICON
* GRAPHICS BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
* DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH
* THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
********************************************************/
/*
* Copyright © 2012 Intel Corporation
* Copyright © 2012 Ran Benita <ran234@gmail.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Author: Daniel Stone <daniel@fooishbar.org>
* Ran Benita <ran234@gmail.com>
*/
#include "xkbcomp-priv.h"
#include "ast-build.h"
#include "include.h"
ParseCommon *
AppendStmt(ParseCommon *to, ParseCommon *append)
{
ParseCommon *iter;
if (!to)
return append;
for (iter = to; iter->next; iter = iter->next);
iter->next = append;
return to;
}
static ExprDef *
ExprCreate(enum expr_op_type op, enum expr_value_type type, size_t size)
{
ExprDef *expr = malloc(size);
if (!expr)
return NULL;
expr->common.type = STMT_EXPR;
expr->common.next = NULL;
expr->expr.op = op;
expr->expr.value_type = type;
return expr;
}
#define EXPR_CREATE(type_, name_, op_, value_type_) \
ExprDef *name_ = ExprCreate(op_, value_type_, sizeof(type_)); \
if (!name_) \
return NULL;
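/*
 * Added commentary (not in the original source): a use such as
 * EXPR_CREATE(ExprString, expr, EXPR_VALUE, EXPR_TYPE_STRING)
 * expands to
 *
 *	ExprDef *expr = ExprCreate(EXPR_VALUE, EXPR_TYPE_STRING,
 *	                           sizeof(ExprString));
 *	if (!expr)
 *	    return NULL;
 *
 * i.e. the macro both declares the local variable and makes the
 * enclosing function return NULL on allocation failure, so it can only
 * appear at the top of a constructor that returns a pointer.
 */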
ExprDef *
ExprCreateString(xkb_atom_t str)
{
EXPR_CREATE(ExprString, expr, EXPR_VALUE, EXPR_TYPE_STRING);
expr->string.str = str;
return expr;
}
ExprDef *
ExprCreateInteger(int ival)
{
EXPR_CREATE(ExprInteger, expr, EXPR_VALUE, EXPR_TYPE_INT);
expr->integer.ival = ival;
return expr;
}
ExprDef *
ExprCreateFloat(void)
{
EXPR_CREATE(ExprFloat, expr, EXPR_VALUE, EXPR_TYPE_FLOAT);
return expr;
}
ExprDef *
ExprCreateBoolean(bool set)
{
EXPR_CREATE(ExprBoolean, expr, EXPR_VALUE, EXPR_TYPE_BOOLEAN);
expr->boolean.set = set;
return expr;
}
ExprDef *
ExprCreateKeyName(xkb_atom_t key_name)
{
EXPR_CREATE(ExprKeyName, expr, EXPR_VALUE, EXPR_TYPE_KEYNAME);
expr->key_name.key_name = key_name;
return expr;
}
ExprDef *
ExprCreateIdent(xkb_atom_t ident)
{
EXPR_CREATE(ExprIdent, expr, EXPR_IDENT, EXPR_TYPE_UNKNOWN);
expr->ident.ident = ident;
return expr;
}
ExprDef *
ExprCreateUnary(enum expr_op_type op, enum expr_value_type type,
ExprDef *child)
{
EXPR_CREATE(ExprUnary, expr, op, type);
expr->unary.child = child;
return expr;
}
ExprDef *
ExprCreateBinary(enum expr_op_type op, ExprDef *left, ExprDef *right)
{
EXPR_CREATE(ExprBinary, expr, op, EXPR_TYPE_UNKNOWN);
if (op == EXPR_ASSIGN || left->expr.value_type == EXPR_TYPE_UNKNOWN)
expr->expr.value_type = right->expr.value_type;
else if (left->expr.value_type == right->expr.value_type ||
right->expr.value_type == EXPR_TYPE_UNKNOWN)
expr->expr.value_type = left->expr.value_type;
expr->binary.left = left;
expr->binary.right = right;
return expr;
}
ExprDef *
ExprCreateFieldRef(xkb_atom_t element, xkb_atom_t field)
{
EXPR_CREATE(ExprFieldRef, expr, EXPR_FIELD_REF, EXPR_TYPE_UNKNOWN);
expr->field_ref.element = element;
expr->field_ref.field = field;
return expr;
}
ExprDef *
ExprCreateArrayRef(xkb_atom_t element, xkb_atom_t field, ExprDef *entry)
{
EXPR_CREATE(ExprArrayRef, expr, EXPR_ARRAY_REF, EXPR_TYPE_UNKNOWN);
expr->array_ref.element = element;
expr->array_ref.field = field;
expr->array_ref.entry = entry;
return expr;
}
ExprDef *
ExprCreateAction(xkb_atom_t name, ExprDef *args)
{
EXPR_CREATE(ExprAction, expr, EXPR_ACTION_DECL, EXPR_TYPE_UNKNOWN);
expr->action.name = name;
expr->action.args = args;
return expr;
}
ExprDef *
ExprCreateKeysymList(xkb_keysym_t sym)
{
EXPR_CREATE(ExprKeysymList, expr, EXPR_KEYSYM_LIST, EXPR_TYPE_SYMBOLS);
darray_init(expr->keysym_list.syms);
darray_init(expr->keysym_list.symsMapIndex);
darray_init(expr->keysym_list.symsNumEntries);
darray_append(expr->keysym_list.syms, sym);
darray_append(expr->keysym_list.symsMapIndex, 0);
darray_append(expr->keysym_list.symsNumEntries, 1);
return expr;
}
ExprDef *
ExprCreateMultiKeysymList(ExprDef *expr)
{
unsigned nLevels = darray_size(expr->keysym_list.symsMapIndex);
darray_resize(expr->keysym_list.symsMapIndex, 1);
darray_resize(expr->keysym_list.symsNumEntries, 1);
darray_item(expr->keysym_list.symsMapIndex, 0) = 0;
darray_item(expr->keysym_list.symsNumEntries, 0) = nLevels;
return expr;
}
ExprDef *
ExprAppendKeysymList(ExprDef *expr, xkb_keysym_t sym)
{
unsigned nSyms = darray_size(expr->keysym_list.syms);
darray_append(expr->keysym_list.symsMapIndex, nSyms);
darray_append(expr->keysym_list.symsNumEntries, 1);
darray_append(expr->keysym_list.syms, sym);
return expr;
}
ExprDef *
ExprAppendMultiKeysymList(ExprDef *expr, ExprDef *append)
{
unsigned nSyms = darray_size(expr->keysym_list.syms);
unsigned numEntries = darray_size(append->keysym_list.syms);
darray_append(expr->keysym_list.symsMapIndex, nSyms);
darray_append(expr->keysym_list.symsNumEntries, numEntries);
darray_concat(expr->keysym_list.syms, append->keysym_list.syms);
FreeStmt((ParseCommon *) append);
return expr;
}
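/*
 * Added commentary (not in the original source): the keysym list is kept
 * as three parallel darrays. 'syms' is a flat array of keysyms; for
 * level i, symsMapIndex[i] is the offset of its first keysym in 'syms'
 * and symsNumEntries[i] is how many keysyms that level has. Building
 * { a, b } and then appending c leaves, for example:
 *
 *	syms           = [ a, b, c ]
 *	symsMapIndex   = [ 0, 2 ]
 *	symsNumEntries = [ 2, 1 ]
 */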
KeycodeDef *
KeycodeCreate(xkb_atom_t name, int64_t value)
{
KeycodeDef *def = malloc(sizeof(*def));
if (!def)
return NULL;
def->common.type = STMT_KEYCODE;
def->common.next = NULL;
def->name = name;
def->value = value;
return def;
}
KeyAliasDef *
KeyAliasCreate(xkb_atom_t alias, xkb_atom_t real)
{
KeyAliasDef *def = malloc(sizeof(*def));
if (!def)
return NULL;
def->common.type = STMT_ALIAS;
def->common.next = NULL;
def->alias = alias;
def->real = real;
return def;
}
VModDef *
VModCreate(xkb_atom_t name, ExprDef *value)
{
VModDef *def = malloc(sizeof(*def));
if (!def)
return NULL;
def->common.type = STMT_VMOD;
def->common.next = NULL;
def->name = name;
def->value = value;
return def;
}
VarDef *
VarCreate(ExprDef *name, ExprDef *value)
{
VarDef *def = malloc(sizeof(*def));
if (!def)
return NULL;
def->common.type = STMT_VAR;
def->common.next = NULL;
def->name = name;
def->value = value;
return def;
}
VarDef *
BoolVarCreate(xkb_atom_t ident, bool set)
{
ExprDef *name, *value;
VarDef *def;
if (!(name = ExprCreateIdent(ident))) {
return NULL;
}
if (!(value = ExprCreateBoolean(set))) {
FreeStmt((ParseCommon *) name);
return NULL;
}
if (!(def = VarCreate(name, value))) {
FreeStmt((ParseCommon *) name);
FreeStmt((ParseCommon *) value);
return NULL;
}
return def;
}
InterpDef *
InterpCreate(xkb_keysym_t sym, ExprDef *match)
{
InterpDef *def = malloc(sizeof(*def));
if (!def)
return NULL;
def->common.type = STMT_INTERP;
def->common.next = NULL;
def->sym = sym;
def->match = match;
def->def = NULL;
return def;
}
KeyTypeDef *
KeyTypeCreate(xkb_atom_t name, VarDef *body)
{
KeyTypeDef *def = malloc(sizeof(*def));
if (!def)
return NULL;
def->common.type = STMT_TYPE;
def->common.next = NULL;
def->merge = MERGE_DEFAULT;
def->name = name;
def->body = body;
return def;
}
SymbolsDef *
SymbolsCreate(xkb_atom_t keyName, VarDef *symbols)
{
SymbolsDef *def = malloc(sizeof(*def));
if (!def)
return NULL;
def->common.type = STMT_SYMBOLS;
def->common.next = NULL;
def->merge = MERGE_DEFAULT;
def->keyName = keyName;
def->symbols = symbols;
return def;
}
GroupCompatDef *
GroupCompatCreate(unsigned group, ExprDef *val)
{
GroupCompatDef *def = malloc(sizeof(*def));
if (!def)
return NULL;
def->common.type = STMT_GROUP_COMPAT;
def->common.next = NULL;
def->merge = MERGE_DEFAULT;
def->group = group;
def->def = val;
return def;
}
ModMapDef *
ModMapCreate(xkb_atom_t modifier, ExprDef *keys)
{
ModMapDef *def = malloc(sizeof(*def));
if (!def)
return NULL;
def->common.type = STMT_MODMAP;
def->common.next = NULL;
def->merge = MERGE_DEFAULT;
def->modifier = modifier;
def->keys = keys;
return def;
}
LedMapDef *
LedMapCreate(xkb_atom_t name, VarDef *body)
{
LedMapDef *def = malloc(sizeof(*def));
if (!def)
return NULL;
def->common.type = STMT_LED_MAP;
def->common.next = NULL;
def->merge = MERGE_DEFAULT;
def->name = name;
def->body = body;
return def;
}
LedNameDef *
LedNameCreate(unsigned ndx, ExprDef *name, bool virtual)
{
LedNameDef *def = malloc(sizeof(*def));
if (!def)
return NULL;
def->common.type = STMT_LED_NAME;
def->common.next = NULL;
def->merge = MERGE_DEFAULT;
def->ndx = ndx;
def->name = name;
def->virtual = virtual;
return def;
}
static void
FreeInclude(IncludeStmt *incl);
IncludeStmt *
IncludeCreate(struct xkb_context *ctx, char *str, enum merge_mode merge)
{
IncludeStmt *incl, *first;
char *file, *map, *stmt, *tmp, *extra_data;
char nextop;
incl = first = NULL;
file = map = NULL;
tmp = str;
stmt = strdup_safe(str);
while (tmp && *tmp)
{
if (!ParseIncludeMap(&tmp, &file, &map, &nextop, &extra_data))
goto err;
/*
* Given an RMLVO (here layout) like 'us,,fr', the rules parser
* will give out something like 'pc+us+:2+fr:3+inet(evdev)'.
* We should just skip the ':2' in this case and leave it to the
* appropriate section to deal with the empty group.
*/
if (isempty(file)) {
free(file);
free(map);
free(extra_data);
continue;
}
if (first == NULL) {
first = incl = malloc(sizeof(*first));
} else {
incl->next_incl = malloc(sizeof(*first));
incl = incl->next_incl;
}
if (!incl)
break;
incl->common.type = STMT_INCLUDE;
incl->common.next = NULL;
incl->merge = merge;
incl->stmt = NULL;
incl->file = file;
incl->map = map;
incl->modifier = extra_data;
incl->next_incl = NULL;
if (nextop == '|')
merge = MERGE_AUGMENT;
else
merge = MERGE_OVERRIDE;
}
if (first)
first->stmt = stmt;
else
free(stmt);
return first;
err:
log_err(ctx, "Illegal include statement \"%s\"; Ignored\n", stmt);
FreeInclude(first);
free(stmt);
return NULL;
}
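/*
 * Worked example (added commentary, not in the original source): for the
 * statement "pc+us|fr" the loop above yields three IncludeStmt nodes for
 * "pc", "us" and "fr". The first keeps the caller-supplied merge mode,
 * "us" gets MERGE_OVERRIDE because it followed '+', and "fr" gets
 * MERGE_AUGMENT because it followed '|'. Only the first node owns the
 * copy of the whole statement string (first->stmt).
 */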
XkbFile *
XkbFileCreate(enum xkb_file_type type, char *name, ParseCommon *defs,
enum xkb_map_flags flags)
{
XkbFile *file;
file = calloc(1, sizeof(*file));
if (!file)
return NULL;
XkbEscapeMapName(name);
file->file_type = type;
file->name = name ? name : strdup("(unnamed)");
file->defs = defs;
file->flags = flags;
return file;
}
XkbFile *
XkbFileFromComponents(struct xkb_context *ctx,
const struct xkb_component_names *kkctgs)
{
char *const components[] = {
kkctgs->keycodes, kkctgs->types,
kkctgs->compat, kkctgs->symbols,
};
enum xkb_file_type type;
IncludeStmt *include = NULL;
XkbFile *file = NULL;
ParseCommon *defs = NULL;
for (type = FIRST_KEYMAP_FILE_TYPE; type <= LAST_KEYMAP_FILE_TYPE; type++) {
include = IncludeCreate(ctx, components[type], MERGE_DEFAULT);
if (!include)
goto err;
file = XkbFileCreate(type, NULL, (ParseCommon *) include, 0);
if (!file) {
FreeInclude(include);
goto err;
}
defs = AppendStmt(defs, &file->common);
}
file = XkbFileCreate(FILE_TYPE_KEYMAP, NULL, defs, 0);
if (!file)
goto err;
return file;
err:
FreeXkbFile((XkbFile *) defs);
return NULL;
}
static void
FreeExpr(ExprDef *expr)
{
if (!expr)
return;
switch (expr->expr.op) {
case EXPR_ACTION_LIST:
case EXPR_NEGATE:
case EXPR_UNARY_PLUS:
case EXPR_NOT:
case EXPR_INVERT:
FreeStmt((ParseCommon *) expr->unary.child);
break;
case EXPR_DIVIDE:
case EXPR_ADD:
case EXPR_SUBTRACT:
case EXPR_MULTIPLY:
case EXPR_ASSIGN:
FreeStmt((ParseCommon *) expr->binary.left);
FreeStmt((ParseCommon *) expr->binary.right);
break;
case EXPR_ACTION_DECL:
FreeStmt((ParseCommon *) expr->action.args);
break;
case EXPR_ARRAY_REF:
FreeStmt((ParseCommon *) expr->array_ref.entry);
break;
case EXPR_KEYSYM_LIST:
darray_free(expr->keysym_list.syms);
darray_free(expr->keysym_list.symsMapIndex);
darray_free(expr->keysym_list.symsNumEntries);
break;
default:
break;
}
}
static void
FreeInclude(IncludeStmt *incl)
{
IncludeStmt *next;
while (incl)
{
next = incl->next_incl;
free(incl->file);
free(incl->map);
free(incl->modifier);
free(incl->stmt);
free(incl);
incl = next;
}
}
void
FreeStmt(ParseCommon *stmt)
{
ParseCommon *next;
while (stmt)
{
next = stmt->next;
switch (stmt->type) {
case STMT_INCLUDE:
FreeInclude((IncludeStmt *) stmt);
/* stmt is already free'd here. */
stmt = NULL;
break;
case STMT_EXPR:
FreeExpr((ExprDef *) stmt);
break;
case STMT_VAR:
FreeStmt((ParseCommon *) ((VarDef *) stmt)->name);
FreeStmt((ParseCommon *) ((VarDef *) stmt)->value);
break;
case STMT_TYPE:
FreeStmt((ParseCommon *) ((KeyTypeDef *) stmt)->body);
break;
case STMT_INTERP:
FreeStmt((ParseCommon *) ((InterpDef *) stmt)->match);
FreeStmt((ParseCommon *) ((InterpDef *) stmt)->def);
break;
case STMT_VMOD:
FreeStmt((ParseCommon *) ((VModDef *) stmt)->value);
break;
case STMT_SYMBOLS:
FreeStmt((ParseCommon *) ((SymbolsDef *) stmt)->symbols);
break;
case STMT_MODMAP:
FreeStmt((ParseCommon *) ((ModMapDef *) stmt)->keys);
break;
case STMT_GROUP_COMPAT:
FreeStmt((ParseCommon *) ((GroupCompatDef *) stmt)->def);
break;
case STMT_LED_MAP:
FreeStmt((ParseCommon *) ((LedMapDef *) stmt)->body);
break;
case STMT_LED_NAME:
FreeStmt((ParseCommon *) ((LedNameDef *) stmt)->name);
break;
default:
break;
}
free(stmt);
stmt = next;
}
}
void
FreeXkbFile(XkbFile *file)
{
XkbFile *next;
while (file)
{
next = (XkbFile *) file->common.next;
switch (file->file_type) {
case FILE_TYPE_KEYMAP:
FreeXkbFile((XkbFile *) file->defs);
break;
case FILE_TYPE_TYPES:
case FILE_TYPE_COMPAT:
case FILE_TYPE_SYMBOLS:
case FILE_TYPE_KEYCODES:
case FILE_TYPE_GEOMETRY:
FreeStmt(file->defs);
break;
default:
break;
}
free(file->name);
free(file);
file = next;
}
}
static const char *xkb_file_type_strings[_FILE_TYPE_NUM_ENTRIES] = {
[FILE_TYPE_KEYCODES] = "xkb_keycodes",
[FILE_TYPE_TYPES] = "xkb_types",
[FILE_TYPE_COMPAT] = "xkb_compatibility",
[FILE_TYPE_SYMBOLS] = "xkb_symbols",
[FILE_TYPE_GEOMETRY] = "xkb_geometry",
[FILE_TYPE_KEYMAP] = "xkb_keymap",
[FILE_TYPE_RULES] = "rules",
};
const char *
xkb_file_type_to_string(enum xkb_file_type type)
{
if (type >= _FILE_TYPE_NUM_ENTRIES)
return "unknown";
return xkb_file_type_strings[type];
}
static const char *stmt_type_strings[_STMT_NUM_VALUES] = {
[STMT_UNKNOWN] = "unknown statement",
[STMT_INCLUDE] = "include statement",
[STMT_KEYCODE] = "key name definition",
[STMT_ALIAS] = "key alias definition",
[STMT_EXPR] = "expression",
[STMT_VAR] = "variable definition",
[STMT_TYPE] = "key type definition",
[STMT_INTERP] = "symbol interpretation definition",
[STMT_VMOD] = "virtual modifiers definition",
[STMT_SYMBOLS] = "key symbols definition",
[STMT_MODMAP] = "modifier map declaration",
[STMT_GROUP_COMPAT] = "group declaration",
[STMT_LED_MAP] = "indicator map declaration",
[STMT_LED_NAME] = "indicator name declaration",
};
const char *
stmt_type_to_string(enum stmt_type type)
{
if (type >= _STMT_NUM_VALUES)
return NULL;
return stmt_type_strings[type];
}
static const char *expr_op_type_strings[_EXPR_NUM_VALUES] = {
[EXPR_VALUE] = "literal",
[EXPR_IDENT] = "identifier",
[EXPR_ACTION_DECL] = "action declaration",
[EXPR_FIELD_REF] = "field reference",
[EXPR_ARRAY_REF] = "array reference",
[EXPR_KEYSYM_LIST] = "list of keysyms",
[EXPR_ACTION_LIST] = "list of actions",
[EXPR_ADD] = "addition",
[EXPR_SUBTRACT] = "subtraction",
[EXPR_MULTIPLY] = "multiplication",
[EXPR_DIVIDE] = "division",
[EXPR_ASSIGN] = "assignment",
[EXPR_NOT] = "logical negation",
[EXPR_NEGATE] = "arithmetic negation",
[EXPR_INVERT] = "bitwise inversion",
[EXPR_UNARY_PLUS] = "unary plus",
};
const char *
expr_op_type_to_string(enum expr_op_type type)
{
if (type >= _EXPR_NUM_VALUES)
return NULL;
return expr_op_type_strings[type];
}
static const char *expr_value_type_strings[_EXPR_TYPE_NUM_VALUES] = {
[EXPR_TYPE_UNKNOWN] = "unknown",
[EXPR_TYPE_BOOLEAN] = "boolean",
[EXPR_TYPE_INT] = "int",
[EXPR_TYPE_FLOAT] = "float",
[EXPR_TYPE_STRING] = "string",
[EXPR_TYPE_ACTION] = "action",
[EXPR_TYPE_KEYNAME] = "keyname",
[EXPR_TYPE_SYMBOLS] = "symbols",
};
const char *
expr_value_type_to_string(enum expr_value_type type)
{
if (type >= _EXPR_TYPE_NUM_VALUES)
return NULL;
return expr_value_type_strings[type];
}
| ./CrossVul/dataset_final_sorted/CWE-416/c/good_320_0 |
crossvul-cpp_data_good_4841_1 | /*
* L2TPv3 IP encapsulation support for IPv6
*
* Copyright (c) 2012 Katalix Systems Ltd
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/icmp.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/random.h>
#include <linux/socket.h>
#include <linux/l2tp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/udp.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/inet6_hashtables.h>
#include <net/tcp_states.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include "l2tp_core.h"
struct l2tp_ip6_sock {
/* inet_sock has to be the first member of l2tp_ip6_sock */
struct inet_sock inet;
u32 conn_id;
u32 peer_conn_id;
/* ipv6_pinfo has to be the last member of l2tp_ip6_sock, see
inet6_sk_generic */
struct ipv6_pinfo inet6;
};
static DEFINE_RWLOCK(l2tp_ip6_lock);
static struct hlist_head l2tp_ip6_table;
static struct hlist_head l2tp_ip6_bind_table;
static inline struct l2tp_ip6_sock *l2tp_ip6_sk(const struct sock *sk)
{
return (struct l2tp_ip6_sock *)sk;
}
static struct sock *__l2tp_ip6_bind_lookup(struct net *net,
struct in6_addr *laddr,
int dif, u32 tunnel_id)
{
struct sock *sk;
sk_for_each_bound(sk, &l2tp_ip6_bind_table) {
const struct in6_addr *addr = inet6_rcv_saddr(sk);
struct l2tp_ip6_sock *l2tp = l2tp_ip6_sk(sk);
if (l2tp == NULL)
continue;
if ((l2tp->conn_id == tunnel_id) &&
net_eq(sock_net(sk), net) &&
(!addr || ipv6_addr_equal(addr, laddr)) &&
!(sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif))
goto found;
}
sk = NULL;
found:
return sk;
}
static inline struct sock *l2tp_ip6_bind_lookup(struct net *net,
struct in6_addr *laddr,
int dif, u32 tunnel_id)
{
struct sock *sk = __l2tp_ip6_bind_lookup(net, laddr, dif, tunnel_id);
if (sk)
sock_hold(sk);
return sk;
}
/* When processing receive frames, there are two cases to
* consider. Data frames consist of a non-zero session-id and an
* optional cookie. Control frames consist of a regular L2TP header
* preceded by 32-bits of zeros.
*
* L2TPv3 Session Header Over IP
*
* 0 1 2 3
* 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | Session ID |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | Cookie (optional, maximum 64 bits)...
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*
* L2TPv3 Control Message Header Over IP
*
* 0 1 2 3
* 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | (32 bits of zeros) |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* |T|L|x|x|S|x|x|x|x|x|x|x| Ver | Length |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | Control Connection ID |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | Ns | Nr |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*
* All control frames are passed to userspace.
*/
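/*
 * Illustrative sketch (not part of the original driver): the first
 * 32 bits on the wire are all that is needed to classify a frame,
 * assuming pskb_may_pull(skb, 4) has succeeded:
 *
 *	u32 first_word = ntohl(*(__be32 *)skb->data);
 *	if (first_word == 0)
 *		...pass up as a control frame...
 *	else
 *		...data frame; first_word is the session ID...
 */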
static int l2tp_ip6_recv(struct sk_buff *skb)
{
struct net *net = dev_net(skb->dev);
struct sock *sk;
u32 session_id;
u32 tunnel_id;
unsigned char *ptr, *optr;
struct l2tp_session *session;
struct l2tp_tunnel *tunnel = NULL;
int length;
if (!pskb_may_pull(skb, 4))
goto discard;
/* Point to L2TP header */
optr = ptr = skb->data;
session_id = ntohl(*((__be32 *) ptr));
ptr += 4;
/* RFC3931: L2TP/IP packets have the first 4 bytes containing
* the session_id. If it is 0, the packet is an L2TP control
* frame and the session_id value can be discarded.
*/
if (session_id == 0) {
__skb_pull(skb, 4);
goto pass_up;
}
/* Ok, this is a data packet. Lookup the session. */
session = l2tp_session_find(net, NULL, session_id);
if (session == NULL)
goto discard;
tunnel = session->tunnel;
if (tunnel == NULL)
goto discard;
/* Trace packet contents, if enabled */
if (tunnel->debug & L2TP_MSG_DATA) {
length = min(32u, skb->len);
if (!pskb_may_pull(skb, length))
goto discard;
/* Point to L2TP header */
optr = ptr = skb->data;
ptr += 4;
pr_debug("%s: ip recv\n", tunnel->name);
print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
}
l2tp_recv_common(session, skb, ptr, optr, 0, skb->len,
tunnel->recv_payload_hook);
return 0;
pass_up:
/* Get the tunnel_id from the L2TP header */
if (!pskb_may_pull(skb, 12))
goto discard;
if ((skb->data[0] & 0xc0) != 0xc0)
goto discard;
tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
tunnel = l2tp_tunnel_find(net, tunnel_id);
if (tunnel != NULL)
sk = tunnel->sock;
else {
struct ipv6hdr *iph = ipv6_hdr(skb);
read_lock_bh(&l2tp_ip6_lock);
sk = __l2tp_ip6_bind_lookup(net, &iph->daddr,
0, tunnel_id);
read_unlock_bh(&l2tp_ip6_lock);
}
if (sk == NULL)
goto discard;
sock_hold(sk);
if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
goto discard_put;
nf_reset(skb);
return sk_receive_skb(sk, skb, 1);
discard_put:
sock_put(sk);
discard:
kfree_skb(skb);
return 0;
}
static int l2tp_ip6_open(struct sock *sk)
{
/* Prevent autobind. We don't have ports. */
inet_sk(sk)->inet_num = IPPROTO_L2TP;
write_lock_bh(&l2tp_ip6_lock);
sk_add_node(sk, &l2tp_ip6_table);
write_unlock_bh(&l2tp_ip6_lock);
return 0;
}
static void l2tp_ip6_close(struct sock *sk, long timeout)
{
write_lock_bh(&l2tp_ip6_lock);
hlist_del_init(&sk->sk_bind_node);
sk_del_node_init(sk);
write_unlock_bh(&l2tp_ip6_lock);
sk_common_release(sk);
}
static void l2tp_ip6_destroy_sock(struct sock *sk)
{
struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk);
lock_sock(sk);
ip6_flush_pending_frames(sk);
release_sock(sk);
if (tunnel) {
l2tp_tunnel_closeall(tunnel);
sock_put(sk);
}
inet6_destroy_sock(sk);
}
static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
struct inet_sock *inet = inet_sk(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
struct sockaddr_l2tpip6 *addr = (struct sockaddr_l2tpip6 *) uaddr;
struct net *net = sock_net(sk);
__be32 v4addr = 0;
int addr_type;
int err;
if (addr->l2tp_family != AF_INET6)
return -EINVAL;
if (addr_len < sizeof(*addr))
return -EINVAL;
addr_type = ipv6_addr_type(&addr->l2tp_addr);
/* l2tp_ip6 sockets are IPv6 only */
if (addr_type == IPV6_ADDR_MAPPED)
return -EADDRNOTAVAIL;
/* L2TP is point-point, not multicast */
if (addr_type & IPV6_ADDR_MULTICAST)
return -EADDRNOTAVAIL;
err = -EADDRINUSE;
read_lock_bh(&l2tp_ip6_lock);
if (__l2tp_ip6_bind_lookup(net, &addr->l2tp_addr,
sk->sk_bound_dev_if, addr->l2tp_conn_id))
goto out_in_use;
read_unlock_bh(&l2tp_ip6_lock);
lock_sock(sk);
err = -EINVAL;
if (!sock_flag(sk, SOCK_ZAPPED))
goto out_unlock;
if (sk->sk_state != TCP_CLOSE)
goto out_unlock;
/* Check if the address belongs to the host. */
rcu_read_lock();
if (addr_type != IPV6_ADDR_ANY) {
struct net_device *dev = NULL;
if (addr_type & IPV6_ADDR_LINKLOCAL) {
if (addr_len >= sizeof(struct sockaddr_in6) &&
addr->l2tp_scope_id) {
/* Override any existing binding, if another
* one is supplied by user.
*/
sk->sk_bound_dev_if = addr->l2tp_scope_id;
}
/* Binding to a link-local address requires an
interface */
if (!sk->sk_bound_dev_if)
goto out_unlock_rcu;
err = -ENODEV;
dev = dev_get_by_index_rcu(sock_net(sk),
sk->sk_bound_dev_if);
if (!dev)
goto out_unlock_rcu;
}
/* ipv4 addr of the socket is invalid. Only the
* unspecified and mapped address have a v4 equivalent.
*/
v4addr = LOOPBACK4_IPV6;
err = -EADDRNOTAVAIL;
if (!ipv6_chk_addr(sock_net(sk), &addr->l2tp_addr, dev, 0))
goto out_unlock_rcu;
}
rcu_read_unlock();
inet->inet_rcv_saddr = inet->inet_saddr = v4addr;
sk->sk_v6_rcv_saddr = addr->l2tp_addr;
np->saddr = addr->l2tp_addr;
l2tp_ip6_sk(sk)->conn_id = addr->l2tp_conn_id;
write_lock_bh(&l2tp_ip6_lock);
sk_add_bind_node(sk, &l2tp_ip6_bind_table);
sk_del_node_init(sk);
write_unlock_bh(&l2tp_ip6_lock);
sock_reset_flag(sk, SOCK_ZAPPED);
release_sock(sk);
return 0;
out_unlock_rcu:
rcu_read_unlock();
out_unlock:
release_sock(sk);
return err;
out_in_use:
read_unlock_bh(&l2tp_ip6_lock);
return err;
}
static int l2tp_ip6_connect(struct sock *sk, struct sockaddr *uaddr,
int addr_len)
{
struct sockaddr_l2tpip6 *lsa = (struct sockaddr_l2tpip6 *) uaddr;
struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
struct in6_addr *daddr;
int addr_type;
int rc;
if (sock_flag(sk, SOCK_ZAPPED)) /* Must bind first - autobinding does not work */
return -EINVAL;
if (addr_len < sizeof(*lsa))
return -EINVAL;
if (usin->sin6_family != AF_INET6)
return -EINVAL;
addr_type = ipv6_addr_type(&usin->sin6_addr);
if (addr_type & IPV6_ADDR_MULTICAST)
return -EINVAL;
if (addr_type & IPV6_ADDR_MAPPED) {
daddr = &usin->sin6_addr;
if (ipv4_is_multicast(daddr->s6_addr32[3]))
return -EINVAL;
}
rc = ip6_datagram_connect(sk, uaddr, addr_len);
lock_sock(sk);
l2tp_ip6_sk(sk)->peer_conn_id = lsa->l2tp_conn_id;
write_lock_bh(&l2tp_ip6_lock);
hlist_del_init(&sk->sk_bind_node);
sk_add_bind_node(sk, &l2tp_ip6_bind_table);
write_unlock_bh(&l2tp_ip6_lock);
release_sock(sk);
return rc;
}
static int l2tp_ip6_disconnect(struct sock *sk, int flags)
{
if (sock_flag(sk, SOCK_ZAPPED))
return 0;
return __udp_disconnect(sk, flags);
}
static int l2tp_ip6_getname(struct socket *sock, struct sockaddr *uaddr,
int *uaddr_len, int peer)
{
struct sockaddr_l2tpip6 *lsa = (struct sockaddr_l2tpip6 *)uaddr;
struct sock *sk = sock->sk;
struct ipv6_pinfo *np = inet6_sk(sk);
struct l2tp_ip6_sock *lsk = l2tp_ip6_sk(sk);
lsa->l2tp_family = AF_INET6;
lsa->l2tp_flowinfo = 0;
lsa->l2tp_scope_id = 0;
lsa->l2tp_unused = 0;
if (peer) {
if (!lsk->peer_conn_id)
return -ENOTCONN;
lsa->l2tp_conn_id = lsk->peer_conn_id;
lsa->l2tp_addr = sk->sk_v6_daddr;
if (np->sndflow)
lsa->l2tp_flowinfo = np->flow_label;
} else {
if (ipv6_addr_any(&sk->sk_v6_rcv_saddr))
lsa->l2tp_addr = np->saddr;
else
lsa->l2tp_addr = sk->sk_v6_rcv_saddr;
lsa->l2tp_conn_id = lsk->conn_id;
}
if (ipv6_addr_type(&lsa->l2tp_addr) & IPV6_ADDR_LINKLOCAL)
lsa->l2tp_scope_id = sk->sk_bound_dev_if;
*uaddr_len = sizeof(*lsa);
return 0;
}
static int l2tp_ip6_backlog_recv(struct sock *sk, struct sk_buff *skb)
{
int rc;
/* Charge it to the socket, dropping if the queue is full. */
rc = sock_queue_rcv_skb(sk, skb);
if (rc < 0)
goto drop;
return 0;
drop:
IP_INC_STATS(sock_net(sk), IPSTATS_MIB_INDISCARDS);
kfree_skb(skb);
return -1;
}
static int l2tp_ip6_push_pending_frames(struct sock *sk)
{
struct sk_buff *skb;
__be32 *transhdr = NULL;
int err = 0;
skb = skb_peek(&sk->sk_write_queue);
if (skb == NULL)
goto out;
transhdr = (__be32 *)skb_transport_header(skb);
*transhdr = 0;
err = ip6_push_pending_frames(sk);
out:
return err;
}
/* Userspace will call sendmsg() on the tunnel socket to send L2TP
* control frames.
*/
static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
struct ipv6_txoptions opt_space;
DECLARE_SOCKADDR(struct sockaddr_l2tpip6 *, lsa, msg->msg_name);
struct in6_addr *daddr, *final_p, final;
struct ipv6_pinfo *np = inet6_sk(sk);
struct ipv6_txoptions *opt_to_free = NULL;
struct ipv6_txoptions *opt = NULL;
struct ip6_flowlabel *flowlabel = NULL;
struct dst_entry *dst = NULL;
struct flowi6 fl6;
struct sockcm_cookie sockc_unused = {0};
struct ipcm6_cookie ipc6;
int addr_len = msg->msg_namelen;
int transhdrlen = 4; /* zero session-id */
int ulen = len + transhdrlen;
int err;
/* Rough check on arithmetic overflow;
a better check is made in ip6_append_data().
*/
if (len > INT_MAX)
return -EMSGSIZE;
/* Mirror BSD error message compatibility */
if (msg->msg_flags & MSG_OOB)
return -EOPNOTSUPP;
/*
* Get and verify the address.
*/
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_mark = sk->sk_mark;
ipc6.hlimit = -1;
ipc6.tclass = -1;
ipc6.dontfrag = -1;
if (lsa) {
if (addr_len < SIN6_LEN_RFC2133)
return -EINVAL;
if (lsa->l2tp_family && lsa->l2tp_family != AF_INET6)
return -EAFNOSUPPORT;
daddr = &lsa->l2tp_addr;
if (np->sndflow) {
fl6.flowlabel = lsa->l2tp_flowinfo & IPV6_FLOWINFO_MASK;
if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
if (flowlabel == NULL)
return -EINVAL;
}
}
/*
* Otherwise it will be difficult to maintain
* sk->sk_dst_cache.
*/
if (sk->sk_state == TCP_ESTABLISHED &&
ipv6_addr_equal(daddr, &sk->sk_v6_daddr))
daddr = &sk->sk_v6_daddr;
if (addr_len >= sizeof(struct sockaddr_in6) &&
lsa->l2tp_scope_id &&
ipv6_addr_type(daddr) & IPV6_ADDR_LINKLOCAL)
fl6.flowi6_oif = lsa->l2tp_scope_id;
} else {
if (sk->sk_state != TCP_ESTABLISHED)
return -EDESTADDRREQ;
daddr = &sk->sk_v6_daddr;
fl6.flowlabel = np->flow_label;
}
if (fl6.flowi6_oif == 0)
fl6.flowi6_oif = sk->sk_bound_dev_if;
if (msg->msg_controllen) {
opt = &opt_space;
memset(opt, 0, sizeof(struct ipv6_txoptions));
opt->tot_len = sizeof(struct ipv6_txoptions);
ipc6.opt = opt;
err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, &ipc6,
&sockc_unused);
if (err < 0) {
fl6_sock_release(flowlabel);
return err;
}
if ((fl6.flowlabel & IPV6_FLOWLABEL_MASK) && !flowlabel) {
flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
if (flowlabel == NULL)
return -EINVAL;
}
if (!(opt->opt_nflen|opt->opt_flen))
opt = NULL;
}
if (!opt) {
opt = txopt_get(np);
opt_to_free = opt;
}
if (flowlabel)
opt = fl6_merge_options(&opt_space, flowlabel, opt);
opt = ipv6_fixup_options(&opt_space, opt);
ipc6.opt = opt;
fl6.flowi6_proto = sk->sk_protocol;
if (!ipv6_addr_any(daddr))
fl6.daddr = *daddr;
else
fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr))
fl6.saddr = np->saddr;
final_p = fl6_update_dst(&fl6, opt, &final);
if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
fl6.flowi6_oif = np->mcast_oif;
else if (!fl6.flowi6_oif)
fl6.flowi6_oif = np->ucast_oif;
security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
if (ipc6.tclass < 0)
ipc6.tclass = np->tclass;
fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel);
dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
if (IS_ERR(dst)) {
err = PTR_ERR(dst);
goto out;
}
if (ipc6.hlimit < 0)
ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
if (ipc6.dontfrag < 0)
ipc6.dontfrag = np->dontfrag;
if (msg->msg_flags & MSG_CONFIRM)
goto do_confirm;
back_from_confirm:
lock_sock(sk);
err = ip6_append_data(sk, ip_generic_getfrag, msg,
ulen, transhdrlen, &ipc6,
&fl6, (struct rt6_info *)dst,
msg->msg_flags, &sockc_unused);
if (err)
ip6_flush_pending_frames(sk);
else if (!(msg->msg_flags & MSG_MORE))
err = l2tp_ip6_push_pending_frames(sk);
release_sock(sk);
done:
dst_release(dst);
out:
fl6_sock_release(flowlabel);
txopt_put(opt_to_free);
return err < 0 ? err : len;
do_confirm:
dst_confirm(dst);
if (!(msg->msg_flags & MSG_PROBE) || len)
goto back_from_confirm;
err = 0;
goto done;
}
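/*
 * Hypothetical userspace usage (added commentary; names and values are
 * illustrative, not taken from the original source): a socket that
 * reaches this sendmsg() path could be created and bound with
 *
 *	int fd = socket(AF_INET6, SOCK_DGRAM, IPPROTO_L2TP);
 *	struct sockaddr_l2tpip6 sa = {
 *		.l2tp_family  = AF_INET6,
 *		.l2tp_addr    = in6addr_any,
 *		.l2tp_conn_id = 42,	(example tunnel id)
 *	};
 *	bind(fd, (struct sockaddr *)&sa, sizeof(sa));
 *
 * after which each payload sent is prefixed by the zero session-id word
 * (transhdrlen == 4 above) before it hits the wire.
 */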
static int l2tp_ip6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
int noblock, int flags, int *addr_len)
{
struct ipv6_pinfo *np = inet6_sk(sk);
DECLARE_SOCKADDR(struct sockaddr_l2tpip6 *, lsa, msg->msg_name);
size_t copied = 0;
int err = -EOPNOTSUPP;
struct sk_buff *skb;
if (flags & MSG_OOB)
goto out;
if (addr_len)
*addr_len = sizeof(*lsa);
if (flags & MSG_ERRQUEUE)
return ipv6_recv_error(sk, msg, len, addr_len);
skb = skb_recv_datagram(sk, flags, noblock, &err);
if (!skb)
goto out;
copied = skb->len;
if (len < copied) {
msg->msg_flags |= MSG_TRUNC;
copied = len;
}
err = skb_copy_datagram_msg(skb, 0, msg, copied);
if (err)
goto done;
sock_recv_timestamp(msg, sk, skb);
/* Copy the address. */
if (lsa) {
lsa->l2tp_family = AF_INET6;
lsa->l2tp_unused = 0;
lsa->l2tp_addr = ipv6_hdr(skb)->saddr;
lsa->l2tp_flowinfo = 0;
lsa->l2tp_scope_id = 0;
lsa->l2tp_conn_id = 0;
if (ipv6_addr_type(&lsa->l2tp_addr) & IPV6_ADDR_LINKLOCAL)
lsa->l2tp_scope_id = inet6_iif(skb);
}
if (np->rxopt.all)
ip6_datagram_recv_ctl(sk, msg, skb);
if (flags & MSG_TRUNC)
copied = skb->len;
done:
skb_free_datagram(sk, skb);
out:
return err ? err : copied;
}
static struct proto l2tp_ip6_prot = {
.name = "L2TP/IPv6",
.owner = THIS_MODULE,
.init = l2tp_ip6_open,
.close = l2tp_ip6_close,
.bind = l2tp_ip6_bind,
.connect = l2tp_ip6_connect,
.disconnect = l2tp_ip6_disconnect,
.ioctl = udp_ioctl,
.destroy = l2tp_ip6_destroy_sock,
.setsockopt = ipv6_setsockopt,
.getsockopt = ipv6_getsockopt,
.sendmsg = l2tp_ip6_sendmsg,
.recvmsg = l2tp_ip6_recvmsg,
.backlog_rcv = l2tp_ip6_backlog_recv,
.hash = inet6_hash,
.unhash = inet_unhash,
.obj_size = sizeof(struct l2tp_ip6_sock),
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_ipv6_setsockopt,
.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};
static const struct proto_ops l2tp_ip6_ops = {
.family = PF_INET6,
.owner = THIS_MODULE,
.release = inet6_release,
.bind = inet6_bind,
.connect = inet_dgram_connect,
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.getname = l2tp_ip6_getname,
.poll = datagram_poll,
.ioctl = inet6_ioctl,
.listen = sock_no_listen,
.shutdown = inet_shutdown,
.setsockopt = sock_common_setsockopt,
.getsockopt = sock_common_getsockopt,
.sendmsg = inet_sendmsg,
.recvmsg = sock_common_recvmsg,
.mmap = sock_no_mmap,
.sendpage = sock_no_sendpage,
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_sock_common_setsockopt,
.compat_getsockopt = compat_sock_common_getsockopt,
#endif
};
static struct inet_protosw l2tp_ip6_protosw = {
.type = SOCK_DGRAM,
.protocol = IPPROTO_L2TP,
.prot = &l2tp_ip6_prot,
.ops = &l2tp_ip6_ops,
};
static struct inet6_protocol l2tp_ip6_protocol __read_mostly = {
.handler = l2tp_ip6_recv,
};
static int __init l2tp_ip6_init(void)
{
int err;
pr_info("L2TP IP encapsulation support for IPv6 (L2TPv3)\n");
err = proto_register(&l2tp_ip6_prot, 1);
if (err != 0)
goto out;
err = inet6_add_protocol(&l2tp_ip6_protocol, IPPROTO_L2TP);
if (err)
goto out1;
inet6_register_protosw(&l2tp_ip6_protosw);
return 0;
out1:
proto_unregister(&l2tp_ip6_prot);
out:
return err;
}
static void __exit l2tp_ip6_exit(void)
{
inet6_unregister_protosw(&l2tp_ip6_protosw);
inet6_del_protocol(&l2tp_ip6_protocol, IPPROTO_L2TP);
proto_unregister(&l2tp_ip6_prot);
}
module_init(l2tp_ip6_init);
module_exit(l2tp_ip6_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Chris Elston <celston@katalix.com>");
MODULE_DESCRIPTION("L2TP IP encapsulation for IPv6");
MODULE_VERSION("1.0");
/* Use the value of SOCK_DGRAM (2) directly, because __stringify doesn't like
 * enums
 */
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 2, IPPROTO_L2TP);
MODULE_ALIAS_NET_PF_PROTO(PF_INET6, IPPROTO_L2TP);
| ./CrossVul/dataset_final_sorted/CWE-416/c/good_4841_1 |
crossvul-cpp_data_good_4231_2 | /*
** $Id: ltm.c $
** Tag methods
** See Copyright Notice in lua.h
*/
#define ltm_c
#define LUA_CORE
#include "lprefix.h"
#include <string.h>
#include "lua.h"
#include "ldebug.h"
#include "ldo.h"
#include "lgc.h"
#include "lobject.h"
#include "lstate.h"
#include "lstring.h"
#include "ltable.h"
#include "ltm.h"
#include "lvm.h"
static const char udatatypename[] = "userdata";
LUAI_DDEF const char *const luaT_typenames_[LUA_TOTALTYPES] = {
"no value",
"nil", "boolean", udatatypename, "number",
"string", "table", "function", udatatypename, "thread",
"upvalue", "proto" /* these last cases are used for tests only */
};
void luaT_init (lua_State *L) {
static const char *const luaT_eventname[] = { /* ORDER TM */
"__index", "__newindex",
"__gc", "__mode", "__len", "__eq",
"__add", "__sub", "__mul", "__mod", "__pow",
"__div", "__idiv",
"__band", "__bor", "__bxor", "__shl", "__shr",
"__unm", "__bnot", "__lt", "__le",
"__concat", "__call", "__close"
};
int i;
for (i=0; i<TM_N; i++) {
G(L)->tmname[i] = luaS_new(L, luaT_eventname[i]);
luaC_fix(L, obj2gco(G(L)->tmname[i])); /* never collect these names */
}
}
/*
** function to be used with macro "fasttm": optimized for absence of
** tag methods
*/
const TValue *luaT_gettm (Table *events, TMS event, TString *ename) {
const TValue *tm = luaH_getshortstr(events, ename);
lua_assert(event <= TM_EQ);
if (notm(tm)) { /* no tag method? */
events->flags |= cast_byte(1u<<event); /* cache this fact */
return NULL;
}
else return tm;
}
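/*
** Added commentary (not in the original source): 'events->flags' caches
** which of the first events (up to TM_EQ) are known to be absent, so
** the 'fasttm' macro can skip the table lookup entirely. Schematically:
**
**	events->flags |= cast_byte(1u << event);   (set here on a miss)
**	events->flags & (1u << event)              (tested by 'fasttm')
**
** Writes to the metatable invalidate these bits (see 'invalidateTMcache'),
** so a cached miss never goes stale.
*/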
const TValue *luaT_gettmbyobj (lua_State *L, const TValue *o, TMS event) {
Table *mt;
switch (ttype(o)) {
case LUA_TTABLE:
mt = hvalue(o)->metatable;
break;
case LUA_TUSERDATA:
mt = uvalue(o)->metatable;
break;
default:
mt = G(L)->mt[ttype(o)];
}
return (mt ? luaH_getshortstr(mt, G(L)->tmname[event]) : &G(L)->nilvalue);
}
/*
** Return the name of the type of an object. For tables and userdata
** with metatable, use their '__name' metafield, if present.
*/
const char *luaT_objtypename (lua_State *L, const TValue *o) {
Table *mt;
if ((ttistable(o) && (mt = hvalue(o)->metatable) != NULL) ||
(ttisfulluserdata(o) && (mt = uvalue(o)->metatable) != NULL)) {
const TValue *name = luaH_getshortstr(mt, luaS_new(L, "__name"));
if (ttisstring(name)) /* is '__name' a string? */
return getstr(tsvalue(name)); /* use it as type name */
}
return ttypename(ttype(o)); /* else use standard type name */
}
void luaT_callTM (lua_State *L, const TValue *f, const TValue *p1,
const TValue *p2, const TValue *p3) {
StkId func = L->top;
setobj2s(L, func, f); /* push function (assume EXTRA_STACK) */
setobj2s(L, func + 1, p1); /* 1st argument */
setobj2s(L, func + 2, p2); /* 2nd argument */
setobj2s(L, func + 3, p3); /* 3rd argument */
L->top = func + 4;
/* metamethod may yield only when called from Lua code */
if (isLuacode(L->ci))
luaD_call(L, func, 0);
else
luaD_callnoyield(L, func, 0);
}
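/*
** Stack picture for the call above (added commentary, not in the
** original source); EXTRA_STACK guarantees room for these four slots:
**
**	func+0: f    (the metamethod)
**	func+1: p1
**	func+2: p2
**	func+3: p3   (L->top ends at func+4, one past p3)
**
** luaT_callTMres below is the same shape but pushes only two arguments
** and moves the single result back into 'res' afterwards.
*/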
void luaT_callTMres (lua_State *L, const TValue *f, const TValue *p1,
const TValue *p2, StkId res) {
ptrdiff_t result = savestack(L, res);
StkId func = L->top;
setobj2s(L, func, f); /* push function (assume EXTRA_STACK) */
setobj2s(L, func + 1, p1); /* 1st argument */
setobj2s(L, func + 2, p2); /* 2nd argument */
L->top += 3;
/* metamethod may yield only when called from Lua code */
if (isLuacode(L->ci))
luaD_call(L, func, 1);
else
luaD_callnoyield(L, func, 1);
res = restorestack(L, result);
setobjs2s(L, res, --L->top); /* move result to its place */
}
static int callbinTM (lua_State *L, const TValue *p1, const TValue *p2,
StkId res, TMS event) {
const TValue *tm = luaT_gettmbyobj(L, p1, event); /* try first operand */
if (notm(tm))
tm = luaT_gettmbyobj(L, p2, event); /* try second operand */
if (notm(tm)) return 0;
luaT_callTMres(L, tm, p1, p2, res);
return 1;
}
void luaT_trybinTM (lua_State *L, const TValue *p1, const TValue *p2,
StkId res, TMS event) {
if (!callbinTM(L, p1, p2, res, event)) {
switch (event) {
case TM_BAND: case TM_BOR: case TM_BXOR:
case TM_SHL: case TM_SHR: case TM_BNOT: {
if (ttisnumber(p1) && ttisnumber(p2))
luaG_tointerror(L, p1, p2);
else
luaG_opinterror(L, p1, p2, "perform bitwise operation on");
}
/* calls never return, but to avoid warnings: *//* FALLTHROUGH */
default:
luaG_opinterror(L, p1, p2, "perform arithmetic on");
}
}
}
void luaT_tryconcatTM (lua_State *L) {
StkId top = L->top;
if (!callbinTM(L, s2v(top - 2), s2v(top - 1), top - 2, TM_CONCAT))
luaG_concaterror(L, s2v(top - 2), s2v(top - 1));
}
void luaT_trybinassocTM (lua_State *L, const TValue *p1, const TValue *p2,
int flip, StkId res, TMS event) {
if (flip)
luaT_trybinTM(L, p2, p1, res, event);
else
luaT_trybinTM(L, p1, p2, res, event);
}
void luaT_trybiniTM (lua_State *L, const TValue *p1, lua_Integer i2,
int flip, StkId res, TMS event) {
TValue aux;
setivalue(&aux, i2);
luaT_trybinassocTM(L, p1, &aux, flip, res, event);
}
/*
** Calls an order tag method.
** For lessequal, LUA_COMPAT_LT_LE keeps compatibility with old
** behavior: if there is no '__le', try '__lt', based on l <= r iff
** !(r < l) (assuming a total order). If the metamethod yields during
** this substitution, the continuation has to know about it (to negate
** the result of r<l); bit CIST_LEQ in the call status keeps that
** information.
*/
int luaT_callorderTM (lua_State *L, const TValue *p1, const TValue *p2,
TMS event) {
if (callbinTM(L, p1, p2, L->top, event)) /* try original event */
return !l_isfalse(s2v(L->top));
#if defined(LUA_COMPAT_LT_LE)
else if (event == TM_LE) {
/* try '!(p2 < p1)' for '(p1 <= p2)' */
L->ci->callstatus |= CIST_LEQ; /* mark it is doing 'lt' for 'le' */
if (callbinTM(L, p2, p1, L->top, TM_LT)) {
L->ci->callstatus ^= CIST_LEQ; /* clear mark */
return l_isfalse(s2v(L->top));
}
/* else error will remove this 'ci'; no need to clear mark */
}
#endif
luaG_ordererror(L, p1, p2); /* no metamethod found */
return 0; /* to avoid warnings */
}
int luaT_callorderiTM (lua_State *L, const TValue *p1, int v2,
int flip, int isfloat, TMS event) {
TValue aux; const TValue *p2;
if (isfloat) {
setfltvalue(&aux, cast_num(v2));
}
else
setivalue(&aux, v2);
if (flip) { /* arguments were exchanged? */
p2 = p1; p1 = &aux; /* correct them */
}
else
p2 = &aux;
return luaT_callorderTM(L, p1, p2, event);
}
void luaT_adjustvarargs (lua_State *L, int nfixparams, CallInfo *ci,
const Proto *p) {
int i;
int actual = cast_int(L->top - ci->func) - 1; /* number of arguments */
int nextra = actual - nfixparams; /* number of extra arguments */
ci->u.l.nextraargs = nextra;
luaD_checkstack(L, p->maxstacksize + 1);
/* copy function to the top of the stack */
setobjs2s(L, L->top++, ci->func);
/* move fixed parameters to the top of the stack */
for (i = 1; i <= nfixparams; i++) {
setobjs2s(L, L->top++, ci->func + i);
setnilvalue(s2v(ci->func + i)); /* erase original parameter (for GC) */
}
ci->func += actual + 1;
ci->top += actual + 1;
lua_assert(L->top <= ci->top && ci->top <= L->stack_last);
}
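/*
** Added commentary (not in the original source): for a call f(a, b, c)
** where f has one fixed parameter, the loop above rearranges the stack
** from
**
**	[ f ][ a ][ b ][ c ]             (ci->func at f)
** to
**	[ f ][nil][ b ][ c ][ f'][ a']   (ci->func at f', the copy)
**
** so the extra arguments b and c stay live just below the new function
** slot, which is exactly where luaT_getvarargs reads them back from
** (ci->func - nextra + i).
*/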
void luaT_getvarargs (lua_State *L, CallInfo *ci, StkId where, int wanted) {
int i;
int nextra = ci->u.l.nextraargs;
if (wanted < 0) {
wanted = nextra; /* get all extra arguments available */
checkstackGCp(L, nextra, where); /* ensure stack space */
L->top = where + nextra; /* next instruction will need top */
}
for (i = 0; i < wanted && i < nextra; i++)
setobjs2s(L, where + i, ci->func - nextra + i);
for (; i < wanted; i++) /* complete required results with nil */
setnilvalue(s2v(where + i));
}
| ./CrossVul/dataset_final_sorted/CWE-416/c/good_4231_2 |
crossvul-cpp_data_good_832_0 | // SPDX-License-Identifier: GPL-2.0+
/*
* ipmi_si.c
*
* The interface to the IPMI driver for the system interfaces (KCS, SMIC,
* BT).
*
* Author: MontaVista Software, Inc.
* Corey Minyard <minyard@mvista.com>
* source@mvista.com
*
* Copyright 2002 MontaVista Software Inc.
* Copyright 2006 IBM Corp., Christian Krafft <krafft@de.ibm.com>
*/
/*
* This file holds the "policy" for the interface to the SMI state
* machine. It does the configuration, handles timers and interrupts,
* and drives the real SMI state machine.
*/
#define pr_fmt(fmt) "ipmi_si: " fmt
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <asm/irq.h>
#include <linux/interrupt.h>
#include <linux/rcupdate.h>
#include <linux/ipmi.h>
#include <linux/ipmi_smi.h>
#include "ipmi_si.h"
#include <linux/string.h>
#include <linux/ctype.h>
/* Measure times between events in the driver. */
#undef DEBUG_TIMING
/* Call every 10 ms. */
#define SI_TIMEOUT_TIME_USEC 10000
#define SI_USEC_PER_JIFFY (1000000/HZ)
#define SI_TIMEOUT_JIFFIES (SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY)
#define SI_SHORT_TIMEOUT_USEC 250 /* .25ms when the SM requests a
short timeout */
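/*
 * Worked example (added commentary): with HZ == 250,
 * SI_USEC_PER_JIFFY == 1000000/250 == 4000, so
 * SI_TIMEOUT_JIFFIES == 10000/4000 == 2 jiffies between timer runs.
 */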
enum si_intf_state {
SI_NORMAL,
SI_GETTING_FLAGS,
SI_GETTING_EVENTS,
SI_CLEARING_FLAGS,
SI_GETTING_MESSAGES,
SI_CHECKING_ENABLES,
SI_SETTING_ENABLES
/* FIXME - add watchdog stuff. */
};
/* Some BT-specific defines we need here. */
#define IPMI_BT_INTMASK_REG 2
#define IPMI_BT_INTMASK_CLEAR_IRQ_BIT 2
#define IPMI_BT_INTMASK_ENABLE_IRQ_BIT 1
static const char * const si_to_str[] = { "invalid", "kcs", "smic", "bt" };
static int initialized;
/*
* Indexes into stats[] in smi_info below.
*/
enum si_stat_indexes {
/*
* Number of times the driver requested a timer while an operation
* was in progress.
*/
SI_STAT_short_timeouts = 0,
/*
* Number of times the driver requested a timer while nothing was in
* progress.
*/
SI_STAT_long_timeouts,
/* Number of times the interface was idle while being polled. */
SI_STAT_idles,
/* Number of interrupts the driver handled. */
SI_STAT_interrupts,
/* Number of times the driver got an ATTN from the hardware. */
SI_STAT_attentions,
/* Number of times the driver requested flags from the hardware. */
SI_STAT_flag_fetches,
/* Number of times the hardware didn't follow the state machine. */
SI_STAT_hosed_count,
/* Number of completed messages. */
SI_STAT_complete_transactions,
/* Number of IPMI events received from the hardware. */
SI_STAT_events,
/* Number of watchdog pretimeouts. */
SI_STAT_watchdog_pretimeouts,
/* Number of asynchronous messages received. */
SI_STAT_incoming_messages,
/* This *must* remain last, add new values above this. */
SI_NUM_STATS
};
struct smi_info {
int si_num;
struct ipmi_smi *intf;
struct si_sm_data *si_sm;
const struct si_sm_handlers *handlers;
spinlock_t si_lock;
struct ipmi_smi_msg *waiting_msg;
struct ipmi_smi_msg *curr_msg;
enum si_intf_state si_state;
/*
* Used to handle the various types of I/O that can occur with
* IPMI
*/
struct si_sm_io io;
/*
* Per-OEM handler, called from handle_flags(). Returns 1
* when handle_flags() needs to be re-run or 0 indicating it
* set si_state itself.
*/
int (*oem_data_avail_handler)(struct smi_info *smi_info);
/*
* Flags from the last GET_MSG_FLAGS command, used when an ATTN
* is set to hold the flags until we are done handling everything
* from the flags.
*/
#define RECEIVE_MSG_AVAIL 0x01
#define EVENT_MSG_BUFFER_FULL 0x02
#define WDT_PRE_TIMEOUT_INT 0x08
#define OEM0_DATA_AVAIL 0x20
#define OEM1_DATA_AVAIL 0x40
#define OEM2_DATA_AVAIL 0x80
#define OEM_DATA_AVAIL (OEM0_DATA_AVAIL | \
OEM1_DATA_AVAIL | \
OEM2_DATA_AVAIL)
unsigned char msg_flags;
/* Does the BMC have an event buffer? */
bool has_event_buffer;
/*
* If set to true, this will request events the next time the
* state machine is idle.
*/
atomic_t req_events;
/*
* If true, run the state machine to completion on every send
* call. Generally used after a panic to make sure stuff goes
* out.
*/
bool run_to_completion;
/* The timer for this si. */
struct timer_list si_timer;
/* This flag is set if the timer can be set */
bool timer_can_start;
/* This flag is set if the timer is running (timer_pending() isn't enough) */
bool timer_running;
/* The time (in jiffies) the last timeout occurred at. */
unsigned long last_timeout_jiffies;
/* Are we waiting for the events, pretimeouts, received msgs? */
atomic_t need_watch;
/*
* The driver will disable interrupts when it gets into a
* situation where it cannot handle messages due to lack of
* memory. Once that situation clears up, it will re-enable
* interrupts.
*/
bool interrupt_disabled;
/*
* Does the BMC support events?
*/
bool supports_event_msg_buff;
/*
* Can we disable the global enables receive irq bit? There are
* currently two forms of brokenness: some
* systems cannot disable the bit (which is technically within
* the spec but a bad idea) and some systems have the bit
* forced to zero even though interrupts work (which is
* clearly outside the spec). The next bool tells which form
* of brokenness is present.
*/
bool cannot_disable_irq;
/*
* Some systems are broken and cannot set the irq enable
* bit, even if they support interrupts.
*/
bool irq_enable_broken;
/*
* Did we get an attention that we did not handle?
*/
bool got_attn;
/* From the get device id response... */
struct ipmi_device_id device_id;
/* Default driver model device. */
struct platform_device *pdev;
/* Have we added the device group to the device? */
bool dev_group_added;
/* Have we added the platform device? */
bool pdev_registered;
/* Counters and things for the proc filesystem. */
atomic_t stats[SI_NUM_STATS];
struct task_struct *thread;
struct list_head link;
};
#define smi_inc_stat(smi, stat) \
atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
#define smi_get_stat(smi, stat) \
((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
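#if 0
/*
 * Illustrative sketch only (added for clarity, not part of the
 * driver): how the token-pasting macros above expand.
 */
static unsigned int example_count_attention(struct smi_info *smi)
{
	/* Expands to atomic_inc(&(smi)->stats[SI_STAT_attentions]). */
	smi_inc_stat(smi, attentions);
	/* Expands to (unsigned int) atomic_read(&(smi)->stats[SI_STAT_attentions]). */
	return smi_get_stat(smi, attentions);
}
#endif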
#define IPMI_MAX_INTFS 4
static int force_kipmid[IPMI_MAX_INTFS];
static int num_force_kipmid;
static unsigned int kipmid_max_busy_us[IPMI_MAX_INTFS];
static int num_max_busy_us;
static bool unload_when_empty = true;
static int try_smi_init(struct smi_info *smi);
static void cleanup_one_si(struct smi_info *smi_info);
static void cleanup_ipmi_si(void);
#ifdef DEBUG_TIMING
void debug_timestamp(char *msg)
{
struct timespec64 t;
ktime_get_ts64(&t);
pr_debug("**%s: %lld.%9.9ld\n", msg, (long long) t.tv_sec, t.tv_nsec);
}
#else
#define debug_timestamp(x)
#endif
static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list);
static int register_xaction_notifier(struct notifier_block *nb)
{
return atomic_notifier_chain_register(&xaction_notifier_list, nb);
}
static void deliver_recv_msg(struct smi_info *smi_info,
struct ipmi_smi_msg *msg)
{
/* Deliver the message to the upper layer. */
ipmi_smi_msg_received(smi_info->intf, msg);
}
static void return_hosed_msg(struct smi_info *smi_info, int cCode)
{
struct ipmi_smi_msg *msg = smi_info->curr_msg;
if (cCode < 0 || cCode > IPMI_ERR_UNSPECIFIED)
cCode = IPMI_ERR_UNSPECIFIED;
/* else use it as is */
/* Make it a response */
msg->rsp[0] = msg->data[0] | 4;
msg->rsp[1] = msg->data[1];
msg->rsp[2] = cCode;
msg->rsp_size = 3;
smi_info->curr_msg = NULL;
deliver_recv_msg(smi_info, msg);
}
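/*
 * Worked example (added for clarity, not in the original): for a
 * request with data[0] = IPMI_NETFN_APP_REQUEST << 2 = 0x06 << 2 =
 * 0x18, OR-ing in 4 yields 0x1c, which encodes the odd (response)
 * netfn 0x07 in the upper six bits, so the upper layer sees a
 * properly formed error response for the command it sent.
 */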
static enum si_sm_result start_next_msg(struct smi_info *smi_info)
{
int rv;
if (!smi_info->waiting_msg) {
smi_info->curr_msg = NULL;
rv = SI_SM_IDLE;
} else {
int err;
smi_info->curr_msg = smi_info->waiting_msg;
smi_info->waiting_msg = NULL;
debug_timestamp("Start2");
err = atomic_notifier_call_chain(&xaction_notifier_list,
0, smi_info);
if (err & NOTIFY_STOP_MASK) {
rv = SI_SM_CALL_WITHOUT_DELAY;
goto out;
}
err = smi_info->handlers->start_transaction(
smi_info->si_sm,
smi_info->curr_msg->data,
smi_info->curr_msg->data_size);
if (err)
return_hosed_msg(smi_info, err);
rv = SI_SM_CALL_WITHOUT_DELAY;
}
out:
return rv;
}
static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
{
if (!smi_info->timer_can_start)
return;
smi_info->last_timeout_jiffies = jiffies;
mod_timer(&smi_info->si_timer, new_val);
smi_info->timer_running = true;
}
/*
* Start a new message and (re)start the timer and thread.
*/
static void start_new_msg(struct smi_info *smi_info, unsigned char *msg,
unsigned int size)
{
smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
if (smi_info->thread)
wake_up_process(smi_info->thread);
smi_info->handlers->start_transaction(smi_info->si_sm, msg, size);
}
static void start_check_enables(struct smi_info *smi_info)
{
unsigned char msg[2];
msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
start_new_msg(smi_info, msg, 2);
smi_info->si_state = SI_CHECKING_ENABLES;
}
static void start_clear_flags(struct smi_info *smi_info)
{
unsigned char msg[3];
/* Make sure the watchdog pre-timeout flag is not set at startup. */
msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
msg[2] = WDT_PRE_TIMEOUT_INT;
start_new_msg(smi_info, msg, 3);
smi_info->si_state = SI_CLEARING_FLAGS;
}
static void start_getting_msg_queue(struct smi_info *smi_info)
{
smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
smi_info->curr_msg->data_size = 2;
start_new_msg(smi_info, smi_info->curr_msg->data,
smi_info->curr_msg->data_size);
smi_info->si_state = SI_GETTING_MESSAGES;
}
static void start_getting_events(struct smi_info *smi_info)
{
smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
smi_info->curr_msg->data_size = 2;
start_new_msg(smi_info, smi_info->curr_msg->data,
smi_info->curr_msg->data_size);
smi_info->si_state = SI_GETTING_EVENTS;
}
/*
* When we have a situation where we run out of memory and cannot
* allocate messages, we just leave them in the BMC and run the system
* polled until we can allocate some memory. Once we have some
* memory, we will re-enable the interrupt.
*
* Note that we cannot just use disable_irq(), since the interrupt may
* be shared.
*/
static inline bool disable_si_irq(struct smi_info *smi_info)
{
if ((smi_info->io.irq) && (!smi_info->interrupt_disabled)) {
smi_info->interrupt_disabled = true;
start_check_enables(smi_info);
return true;
}
return false;
}
static inline bool enable_si_irq(struct smi_info *smi_info)
{
if ((smi_info->io.irq) && (smi_info->interrupt_disabled)) {
smi_info->interrupt_disabled = false;
start_check_enables(smi_info);
return true;
}
return false;
}
/*
* Allocate a message. If unable to allocate, start the interrupt
* disable process and return NULL. If able to allocate but
* interrupts are disabled, free the message and return NULL after
* starting the interrupt enable process.
*/
static struct ipmi_smi_msg *alloc_msg_handle_irq(struct smi_info *smi_info)
{
struct ipmi_smi_msg *msg;
msg = ipmi_alloc_smi_msg();
if (!msg) {
if (!disable_si_irq(smi_info))
smi_info->si_state = SI_NORMAL;
} else if (enable_si_irq(smi_info)) {
ipmi_free_smi_msg(msg);
msg = NULL;
}
return msg;
}
static void handle_flags(struct smi_info *smi_info)
{
retry:
if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) {
/* Watchdog pre-timeout */
smi_inc_stat(smi_info, watchdog_pretimeouts);
start_clear_flags(smi_info);
smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
ipmi_smi_watchdog_pretimeout(smi_info->intf);
} else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
/* Messages available. */
smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
if (!smi_info->curr_msg)
return;
start_getting_msg_queue(smi_info);
} else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) {
/* Events available. */
smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
if (!smi_info->curr_msg)
return;
start_getting_events(smi_info);
} else if (smi_info->msg_flags & OEM_DATA_AVAIL &&
smi_info->oem_data_avail_handler) {
if (smi_info->oem_data_avail_handler(smi_info))
goto retry;
} else
smi_info->si_state = SI_NORMAL;
}
/*
* Global enables we care about.
*/
#define GLOBAL_ENABLES_MASK (IPMI_BMC_EVT_MSG_BUFF | IPMI_BMC_RCV_MSG_INTR | \
IPMI_BMC_EVT_MSG_INTR)
static u8 current_global_enables(struct smi_info *smi_info, u8 base,
bool *irq_on)
{
u8 enables = 0;
if (smi_info->supports_event_msg_buff)
enables |= IPMI_BMC_EVT_MSG_BUFF;
if (((smi_info->io.irq && !smi_info->interrupt_disabled) ||
smi_info->cannot_disable_irq) &&
!smi_info->irq_enable_broken)
enables |= IPMI_BMC_RCV_MSG_INTR;
if (smi_info->supports_event_msg_buff &&
smi_info->io.irq && !smi_info->interrupt_disabled &&
!smi_info->irq_enable_broken)
enables |= IPMI_BMC_EVT_MSG_INTR;
*irq_on = enables & (IPMI_BMC_EVT_MSG_INTR | IPMI_BMC_RCV_MSG_INTR);
return enables;
}
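/*
 * Example outcome (hypothetical configuration, added for clarity):
 * with a working irq, interrupts enabled, and an event message
 * buffer, this returns IPMI_BMC_EVT_MSG_BUFF | IPMI_BMC_RCV_MSG_INTR
 * | IPMI_BMC_EVT_MSG_INTR and sets *irq_on; if irq_enable_broken is
 * set, only IPMI_BMC_EVT_MSG_BUFF survives and *irq_on is false.
 */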
static void check_bt_irq(struct smi_info *smi_info, bool irq_on)
{
u8 irqstate = smi_info->io.inputb(&smi_info->io, IPMI_BT_INTMASK_REG);
irqstate &= IPMI_BT_INTMASK_ENABLE_IRQ_BIT;
if ((bool)irqstate == irq_on)
return;
if (irq_on)
smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
else
smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG, 0);
}
static void handle_transaction_done(struct smi_info *smi_info)
{
struct ipmi_smi_msg *msg;
debug_timestamp("Done");
switch (smi_info->si_state) {
case SI_NORMAL:
if (!smi_info->curr_msg)
break;
smi_info->curr_msg->rsp_size
= smi_info->handlers->get_result(
smi_info->si_sm,
smi_info->curr_msg->rsp,
IPMI_MAX_MSG_LENGTH);
/*
* Do this here because deliver_recv_msg() releases the
* lock, and a new message can be put in during the
* time the lock is released.
*/
msg = smi_info->curr_msg;
smi_info->curr_msg = NULL;
deliver_recv_msg(smi_info, msg);
break;
case SI_GETTING_FLAGS:
{
unsigned char msg[4];
unsigned int len;
/* We got the flags from the SMI, now handle them. */
len = smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
if (msg[2] != 0) {
/* Error fetching flags, just give up for now. */
smi_info->si_state = SI_NORMAL;
} else if (len < 4) {
/*
* Hmm, no flags. That's technically illegal, but
* don't use uninitialized data.
*/
smi_info->si_state = SI_NORMAL;
} else {
smi_info->msg_flags = msg[3];
handle_flags(smi_info);
}
break;
}
case SI_CLEARING_FLAGS:
{
unsigned char msg[3];
/* We cleared the flags. */
smi_info->handlers->get_result(smi_info->si_sm, msg, 3);
if (msg[2] != 0) {
/* Error clearing flags */
dev_warn(smi_info->io.dev,
"Error clearing flags: %2.2x\n", msg[2]);
}
smi_info->si_state = SI_NORMAL;
break;
}
case SI_GETTING_EVENTS:
{
smi_info->curr_msg->rsp_size
= smi_info->handlers->get_result(
smi_info->si_sm,
smi_info->curr_msg->rsp,
IPMI_MAX_MSG_LENGTH);
/*
* Do this here because deliver_recv_msg() releases the
* lock, and a new message can be put in during the
* time the lock is released.
*/
msg = smi_info->curr_msg;
smi_info->curr_msg = NULL;
if (msg->rsp[2] != 0) {
/* Error getting event, probably done. */
msg->done(msg);
/* Take off the event flag. */
smi_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL;
handle_flags(smi_info);
} else {
smi_inc_stat(smi_info, events);
/*
* Do this before we deliver the message
* because delivering the message releases the
* lock and something else can mess with the
* state.
*/
handle_flags(smi_info);
deliver_recv_msg(smi_info, msg);
}
break;
}
case SI_GETTING_MESSAGES:
{
smi_info->curr_msg->rsp_size
= smi_info->handlers->get_result(
smi_info->si_sm,
smi_info->curr_msg->rsp,
IPMI_MAX_MSG_LENGTH);
/*
* Do this here because deliver_recv_msg() releases the
* lock, and a new message can be put in during the
* time the lock is released.
*/
msg = smi_info->curr_msg;
smi_info->curr_msg = NULL;
if (msg->rsp[2] != 0) {
/* Error getting event, probably done. */
msg->done(msg);
/* Take off the msg flag. */
smi_info->msg_flags &= ~RECEIVE_MSG_AVAIL;
handle_flags(smi_info);
} else {
smi_inc_stat(smi_info, incoming_messages);
/*
* Do this before we deliver the message
* because delivering the message releases the
* lock and something else can mess with the
* state.
*/
handle_flags(smi_info);
deliver_recv_msg(smi_info, msg);
}
break;
}
case SI_CHECKING_ENABLES:
{
unsigned char msg[4];
u8 enables;
bool irq_on;
/* We got the flags from the SMI, now handle them. */
smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
if (msg[2] != 0) {
dev_warn(smi_info->io.dev,
"Couldn't get irq info: %x.\n", msg[2]);
dev_warn(smi_info->io.dev,
"Maybe ok, but ipmi might run very slowly.\n");
smi_info->si_state = SI_NORMAL;
break;
}
enables = current_global_enables(smi_info, 0, &irq_on);
if (smi_info->io.si_type == SI_BT)
/* BT has its own interrupt enable bit. */
check_bt_irq(smi_info, irq_on);
if (enables != (msg[3] & GLOBAL_ENABLES_MASK)) {
/* Enables are not correct, fix them. */
msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
msg[2] = enables | (msg[3] & ~GLOBAL_ENABLES_MASK);
smi_info->handlers->start_transaction(
smi_info->si_sm, msg, 3);
smi_info->si_state = SI_SETTING_ENABLES;
} else if (smi_info->supports_event_msg_buff) {
smi_info->curr_msg = ipmi_alloc_smi_msg();
if (!smi_info->curr_msg) {
smi_info->si_state = SI_NORMAL;
break;
}
start_getting_events(smi_info);
} else {
smi_info->si_state = SI_NORMAL;
}
break;
}
case SI_SETTING_ENABLES:
{
unsigned char msg[4];
smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
if (msg[2] != 0)
dev_warn(smi_info->io.dev,
"Could not set the global enables: 0x%x.\n",
msg[2]);
if (smi_info->supports_event_msg_buff) {
smi_info->curr_msg = ipmi_alloc_smi_msg();
if (!smi_info->curr_msg) {
smi_info->si_state = SI_NORMAL;
break;
}
start_getting_events(smi_info);
} else {
smi_info->si_state = SI_NORMAL;
}
break;
}
}
}
/*
* Called on timeouts and events. Timeouts should pass the elapsed
* time, interrupts should pass in zero. Must be called with
* si_lock held and interrupts disabled.
*/
static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
int time)
{
enum si_sm_result si_sm_result;
restart:
/*
* There used to be a loop here that waited a little while
* (around 25us) before giving up. That turned out to be
* pointless; the minimum delays I was seeing were in the 300us
* range, which is far too long to wait in an interrupt. So
* we just run until the state machine tells us something
* happened or it needs a delay.
*/
si_sm_result = smi_info->handlers->event(smi_info->si_sm, time);
time = 0;
while (si_sm_result == SI_SM_CALL_WITHOUT_DELAY)
si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
if (si_sm_result == SI_SM_TRANSACTION_COMPLETE) {
smi_inc_stat(smi_info, complete_transactions);
handle_transaction_done(smi_info);
goto restart;
} else if (si_sm_result == SI_SM_HOSED) {
smi_inc_stat(smi_info, hosed_count);
/*
* Do the before return_hosed_msg, because that
* releases the lock.
*/
smi_info->si_state = SI_NORMAL;
if (smi_info->curr_msg != NULL) {
/*
* If we were handling a user message, format
* a response to send to the upper layer to
* tell it about the error.
*/
return_hosed_msg(smi_info, IPMI_ERR_UNSPECIFIED);
}
goto restart;
}
/*
* We prefer handling attn over new messages. But don't do
* this if there is not yet an upper layer to handle anything.
*/
if (si_sm_result == SI_SM_ATTN || smi_info->got_attn) {
unsigned char msg[2];
if (smi_info->si_state != SI_NORMAL) {
/*
* We got an ATTN, but we are doing something else.
* Handle the ATTN later.
*/
smi_info->got_attn = true;
} else {
smi_info->got_attn = false;
smi_inc_stat(smi_info, attentions);
/*
* Got an attn, send down a get message flags to see
* what's causing it. It would be better to handle
* this in the upper layer, but due to the way
* interrupts work with the SMI, that's not really
* possible.
*/
msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
msg[1] = IPMI_GET_MSG_FLAGS_CMD;
start_new_msg(smi_info, msg, 2);
smi_info->si_state = SI_GETTING_FLAGS;
goto restart;
}
}
/* If we are currently idle, try to start the next message. */
if (si_sm_result == SI_SM_IDLE) {
smi_inc_stat(smi_info, idles);
si_sm_result = start_next_msg(smi_info);
if (si_sm_result != SI_SM_IDLE)
goto restart;
}
if ((si_sm_result == SI_SM_IDLE)
&& (atomic_read(&smi_info->req_events))) {
/*
* We are idle and the upper layer requested that I fetch
* events, so do so.
*/
atomic_set(&smi_info->req_events, 0);
/*
* Take this opportunity to check the interrupt and
* message enable state for the BMC. The BMC can be
* asynchronously reset, and may thus get interrupts
* disabled and messages disabled.
*/
if (smi_info->supports_event_msg_buff || smi_info->io.irq) {
start_check_enables(smi_info);
} else {
smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
if (!smi_info->curr_msg)
goto out;
start_getting_events(smi_info);
}
goto restart;
}
if (si_sm_result == SI_SM_IDLE && smi_info->timer_running) {
/* OK if it fails, the timer will just go off. */
if (del_timer(&smi_info->si_timer))
smi_info->timer_running = false;
}
out:
return si_sm_result;
}
static void check_start_timer_thread(struct smi_info *smi_info)
{
if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL) {
smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
if (smi_info->thread)
wake_up_process(smi_info->thread);
start_next_msg(smi_info);
smi_event_handler(smi_info, 0);
}
}
static void flush_messages(void *send_info)
{
struct smi_info *smi_info = send_info;
enum si_sm_result result;
/*
* Currently, this function is called only in run-to-completion
* mode. This means we are single-threaded, no need for locks.
*/
result = smi_event_handler(smi_info, 0);
while (result != SI_SM_IDLE) {
udelay(SI_SHORT_TIMEOUT_USEC);
result = smi_event_handler(smi_info, SI_SHORT_TIMEOUT_USEC);
}
}
static void sender(void *send_info,
struct ipmi_smi_msg *msg)
{
struct smi_info *smi_info = send_info;
unsigned long flags;
debug_timestamp("Enqueue");
if (smi_info->run_to_completion) {
/*
* If we are running to completion, start it. Upper
* layer will call flush_messages to clear it out.
*/
smi_info->waiting_msg = msg;
return;
}
spin_lock_irqsave(&smi_info->si_lock, flags);
/*
* The following two lines don't need to be under the lock for
* the lock's sake, but they do need SMP memory barriers to
* avoid getting things out of order. We are already claiming
* the lock, anyway, so just do it under the lock to avoid the
* ordering problem.
*/
BUG_ON(smi_info->waiting_msg);
smi_info->waiting_msg = msg;
check_start_timer_thread(smi_info);
spin_unlock_irqrestore(&smi_info->si_lock, flags);
}
static void set_run_to_completion(void *send_info, bool i_run_to_completion)
{
struct smi_info *smi_info = send_info;
smi_info->run_to_completion = i_run_to_completion;
if (i_run_to_completion)
flush_messages(smi_info);
}
/*
* Use -1 in the nsec value of the busy waiting timespec to indicate
* that we are spinning in kipmid looking for something and not
* delaying between checks.
*/
static inline void ipmi_si_set_not_busy(struct timespec64 *ts)
{
ts->tv_nsec = -1;
}
static inline int ipmi_si_is_busy(struct timespec64 *ts)
{
return ts->tv_nsec != -1;
}
static inline int ipmi_thread_busy_wait(enum si_sm_result smi_result,
const struct smi_info *smi_info,
struct timespec64 *busy_until)
{
unsigned int max_busy_us = 0;
if (smi_info->si_num < num_max_busy_us)
max_busy_us = kipmid_max_busy_us[smi_info->si_num];
if (max_busy_us == 0 || smi_result != SI_SM_CALL_WITH_DELAY)
ipmi_si_set_not_busy(busy_until);
else if (!ipmi_si_is_busy(busy_until)) {
ktime_get_ts64(busy_until);
timespec64_add_ns(busy_until, max_busy_us*NSEC_PER_USEC);
} else {
struct timespec64 now;
ktime_get_ts64(&now);
if (unlikely(timespec64_compare(&now, busy_until) > 0)) {
ipmi_si_set_not_busy(busy_until);
return 0;
}
}
return 1;
}
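/*
 * Sketch of the sentinel convention above (added for clarity):
 * busy_until.tv_nsec == -1 means "not busy-waiting". With a
 * hypothetical kipmid_max_busy_us[n] = 200, the first
 * SI_SM_CALL_WITH_DELAY stamps busy_until = now + 200us and returns
 * 1 (keep spinning); once ktime_get_ts64() passes that stamp the
 * function resets the sentinel and returns 0, so ipmi_thread()
 * falls through to schedule_timeout_interruptible(1) instead.
 */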
/*
* A busy-waiting loop for speeding up IPMI operation.
*
* Lousy hardware makes this hard. This is only enabled for systems
* that are not BT and do not have interrupts. It starts spinning
* when an operation is complete or until max_busy tells it to stop
* (if that is enabled). See the paragraph on kipmid_max_busy_us in
* Documentation/IPMI.txt for details.
*/
static int ipmi_thread(void *data)
{
struct smi_info *smi_info = data;
unsigned long flags;
enum si_sm_result smi_result;
struct timespec64 busy_until;
ipmi_si_set_not_busy(&busy_until);
set_user_nice(current, MAX_NICE);
while (!kthread_should_stop()) {
int busy_wait;
spin_lock_irqsave(&(smi_info->si_lock), flags);
smi_result = smi_event_handler(smi_info, 0);
/*
* If the driver is doing something, there is a possible
* race with the timer. If the timer handler sees idle,
* and the thread here sees something else, the timer
* handler won't restart the timer even though it is
* required. So start it here if necessary.
*/
if (smi_result != SI_SM_IDLE && !smi_info->timer_running)
smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
spin_unlock_irqrestore(&(smi_info->si_lock), flags);
busy_wait = ipmi_thread_busy_wait(smi_result, smi_info,
&busy_until);
if (smi_result == SI_SM_CALL_WITHOUT_DELAY)
; /* do nothing */
else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait)
schedule();
else if (smi_result == SI_SM_IDLE) {
if (atomic_read(&smi_info->need_watch)) {
schedule_timeout_interruptible(100);
} else {
/* Wait to be woken up when we are needed. */
__set_current_state(TASK_INTERRUPTIBLE);
schedule();
}
} else
schedule_timeout_interruptible(1);
}
return 0;
}
static void poll(void *send_info)
{
struct smi_info *smi_info = send_info;
unsigned long flags = 0;
bool run_to_completion = smi_info->run_to_completion;
/*
* Make sure there is some delay in the poll loop so we can
* drive time forward and timeout things.
*/
udelay(10);
if (!run_to_completion)
spin_lock_irqsave(&smi_info->si_lock, flags);
smi_event_handler(smi_info, 10);
if (!run_to_completion)
spin_unlock_irqrestore(&smi_info->si_lock, flags);
}
static void request_events(void *send_info)
{
struct smi_info *smi_info = send_info;
if (!smi_info->has_event_buffer)
return;
atomic_set(&smi_info->req_events, 1);
}
static void set_need_watch(void *send_info, bool enable)
{
struct smi_info *smi_info = send_info;
unsigned long flags;
atomic_set(&smi_info->need_watch, enable);
spin_lock_irqsave(&smi_info->si_lock, flags);
check_start_timer_thread(smi_info);
spin_unlock_irqrestore(&smi_info->si_lock, flags);
}
static void smi_timeout(struct timer_list *t)
{
struct smi_info *smi_info = from_timer(smi_info, t, si_timer);
enum si_sm_result smi_result;
unsigned long flags;
unsigned long jiffies_now;
long time_diff;
long timeout;
spin_lock_irqsave(&(smi_info->si_lock), flags);
debug_timestamp("Timer");
jiffies_now = jiffies;
time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
* SI_USEC_PER_JIFFY);
smi_result = smi_event_handler(smi_info, time_diff);
if ((smi_info->io.irq) && (!smi_info->interrupt_disabled)) {
/* Running with interrupts, only do long timeouts. */
timeout = jiffies + SI_TIMEOUT_JIFFIES;
smi_inc_stat(smi_info, long_timeouts);
goto do_mod_timer;
}
/*
* If the state machine asks for a short delay, then shorten
* the timer timeout.
*/
if (smi_result == SI_SM_CALL_WITH_DELAY) {
smi_inc_stat(smi_info, short_timeouts);
timeout = jiffies + 1;
} else {
smi_inc_stat(smi_info, long_timeouts);
timeout = jiffies + SI_TIMEOUT_JIFFIES;
}
do_mod_timer:
if (smi_result != SI_SM_IDLE)
smi_mod_timer(smi_info, timeout);
else
smi_info->timer_running = false;
spin_unlock_irqrestore(&(smi_info->si_lock), flags);
}
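/*
 * Worked example (hypothetical HZ=250, added for clarity): if two
 * jiffies have elapsed since last_timeout_jiffies, time_diff = 2 *
 * 4000 = 8000 usecs is passed to smi_event_handler(), advancing the
 * state machine's internal timeouts by the real elapsed time.
 */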
irqreturn_t ipmi_si_irq_handler(int irq, void *data)
{
struct smi_info *smi_info = data;
unsigned long flags;
if (smi_info->io.si_type == SI_BT)
/* We need to clear the IRQ flag for the BT interface. */
smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
IPMI_BT_INTMASK_CLEAR_IRQ_BIT
| IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
spin_lock_irqsave(&(smi_info->si_lock), flags);
smi_inc_stat(smi_info, interrupts);
debug_timestamp("Interrupt");
smi_event_handler(smi_info, 0);
spin_unlock_irqrestore(&(smi_info->si_lock), flags);
return IRQ_HANDLED;
}
static int smi_start_processing(void *send_info,
struct ipmi_smi *intf)
{
struct smi_info *new_smi = send_info;
int enable = 0;
new_smi->intf = intf;
/* Set up the timer that drives the interface. */
timer_setup(&new_smi->si_timer, smi_timeout, 0);
new_smi->timer_can_start = true;
smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES);
/* Try to claim any interrupts. */
if (new_smi->io.irq_setup) {
new_smi->io.irq_handler_data = new_smi;
new_smi->io.irq_setup(&new_smi->io);
}
/*
* Check if the user forcefully enabled the daemon.
*/
if (new_smi->si_num < num_force_kipmid)
enable = force_kipmid[new_smi->si_num];
/*
* The BT interface is efficient enough to not need a thread,
* and there is no need for a thread if we have interrupts.
*/
else if ((new_smi->io.si_type != SI_BT) && (!new_smi->io.irq))
enable = 1;
if (enable) {
new_smi->thread = kthread_run(ipmi_thread, new_smi,
"kipmi%d", new_smi->si_num);
if (IS_ERR(new_smi->thread)) {
dev_notice(new_smi->io.dev, "Could not start"
" kernel thread due to error %ld, only using"
" timers to drive the interface\n",
PTR_ERR(new_smi->thread));
new_smi->thread = NULL;
}
}
return 0;
}
static int get_smi_info(void *send_info, struct ipmi_smi_info *data)
{
struct smi_info *smi = send_info;
data->addr_src = smi->io.addr_source;
data->dev = smi->io.dev;
data->addr_info = smi->io.addr_info;
get_device(smi->io.dev);
return 0;
}
static void set_maintenance_mode(void *send_info, bool enable)
{
struct smi_info *smi_info = send_info;
if (!enable)
atomic_set(&smi_info->req_events, 0);
}
static void shutdown_smi(void *send_info);
static const struct ipmi_smi_handlers handlers = {
.owner = THIS_MODULE,
.start_processing = smi_start_processing,
.shutdown = shutdown_smi,
.get_smi_info = get_smi_info,
.sender = sender,
.request_events = request_events,
.set_need_watch = set_need_watch,
.set_maintenance_mode = set_maintenance_mode,
.set_run_to_completion = set_run_to_completion,
.flush_messages = flush_messages,
.poll = poll,
};
static LIST_HEAD(smi_infos);
static DEFINE_MUTEX(smi_infos_lock);
static int smi_num; /* Used to sequence the SMIs */
static const char * const addr_space_to_str[] = { "i/o", "mem" };
module_param_array(force_kipmid, int, &num_force_kipmid, 0);
MODULE_PARM_DESC(force_kipmid, "Force the kipmi daemon to be enabled (1) or"
" disabled (0). Normally the IPMI driver auto-detects"
" this, but the value may be overridden by this parm.");
module_param(unload_when_empty, bool, 0);
MODULE_PARM_DESC(unload_when_empty, "Unload the module if no interfaces are"
" specified or found, default is 1. Setting to 0"
" is useful for hot add of devices using hotmod.");
module_param_array(kipmid_max_busy_us, uint, &num_max_busy_us, 0644);
MODULE_PARM_DESC(kipmid_max_busy_us,
"Max time (in microseconds) to busy-wait for IPMI data before"
" sleeping. 0 (default) means to wait forever. Set to 100-500"
" if kipmid is using up a lot of CPU time.");
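/*
 * Example invocation (hypothetical values, added for illustration):
 *   modprobe ipmi_si kipmid_max_busy_us=100,250 force_kipmid=1
 * caps kipmid busy-waiting at 100us and 250us on the first two
 * interfaces and forces the kipmid thread on for interface 0.
 */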
void ipmi_irq_finish_setup(struct si_sm_io *io)
{
if (io->si_type == SI_BT)
/* Enable the interrupt in the BT interface. */
io->outputb(io, IPMI_BT_INTMASK_REG,
IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
}
void ipmi_irq_start_cleanup(struct si_sm_io *io)
{
if (io->si_type == SI_BT)
/* Disable the interrupt in the BT interface. */
io->outputb(io, IPMI_BT_INTMASK_REG, 0);
}
static void std_irq_cleanup(struct si_sm_io *io)
{
ipmi_irq_start_cleanup(io);
free_irq(io->irq, io->irq_handler_data);
}
int ipmi_std_irq_setup(struct si_sm_io *io)
{
int rv;
if (!io->irq)
return 0;
rv = request_irq(io->irq,
ipmi_si_irq_handler,
IRQF_SHARED,
DEVICE_NAME,
io->irq_handler_data);
if (rv) {
dev_warn(io->dev, "%s unable to claim interrupt %d,"
" running polled\n",
DEVICE_NAME, io->irq);
io->irq = 0;
} else {
io->irq_cleanup = std_irq_cleanup;
ipmi_irq_finish_setup(io);
dev_info(io->dev, "Using irq %d\n", io->irq);
}
return rv;
}
static int wait_for_msg_done(struct smi_info *smi_info)
{
enum si_sm_result smi_result;
smi_result = smi_info->handlers->event(smi_info->si_sm, 0);
for (;;) {
if (smi_result == SI_SM_CALL_WITH_DELAY ||
smi_result == SI_SM_CALL_WITH_TICK_DELAY) {
schedule_timeout_uninterruptible(1);
smi_result = smi_info->handlers->event(
smi_info->si_sm, jiffies_to_usecs(1));
} else if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
smi_result = smi_info->handlers->event(
smi_info->si_sm, 0);
} else
break;
}
if (smi_result == SI_SM_HOSED)
/*
* We couldn't get the state machine to run, so whatever's at
* the port is probably not an IPMI SMI interface.
*/
return -ENODEV;
return 0;
}
static int try_get_dev_id(struct smi_info *smi_info)
{
unsigned char msg[2];
unsigned char *resp;
unsigned long resp_len;
int rv = 0;
resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
if (!resp)
return -ENOMEM;
/*
* Do a Get Device ID command, since it comes back with some
* useful info.
*/
msg[0] = IPMI_NETFN_APP_REQUEST << 2;
msg[1] = IPMI_GET_DEVICE_ID_CMD;
smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
rv = wait_for_msg_done(smi_info);
if (rv)
goto out;
resp_len = smi_info->handlers->get_result(smi_info->si_sm,
resp, IPMI_MAX_MSG_LENGTH);
/* Check and record info from the get device id, in case we need it. */
rv = ipmi_demangle_device_id(resp[0] >> 2, resp[1],
resp + 2, resp_len - 2, &smi_info->device_id);
out:
kfree(resp);
return rv;
}
static int get_global_enables(struct smi_info *smi_info, u8 *enables)
{
unsigned char msg[3];
unsigned char *resp;
unsigned long resp_len;
int rv;
resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
if (!resp)
return -ENOMEM;
msg[0] = IPMI_NETFN_APP_REQUEST << 2;
msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
rv = wait_for_msg_done(smi_info);
if (rv) {
dev_warn(smi_info->io.dev,
"Error getting response from get global enables command: %d\n",
rv);
goto out;
}
resp_len = smi_info->handlers->get_result(smi_info->si_sm,
resp, IPMI_MAX_MSG_LENGTH);
if (resp_len < 4 ||
resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD ||
resp[2] != 0) {
dev_warn(smi_info->io.dev,
"Invalid return from get global enables command: %ld %x %x %x\n",
resp_len, resp[0], resp[1], resp[2]);
rv = -EINVAL;
goto out;
} else {
*enables = resp[3];
}
out:
kfree(resp);
return rv;
}
/*
* Returns 1 if it gets an error from the command.
*/
static int set_global_enables(struct smi_info *smi_info, u8 enables)
{
unsigned char msg[3];
unsigned char *resp;
unsigned long resp_len;
int rv;
resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
if (!resp)
return -ENOMEM;
msg[0] = IPMI_NETFN_APP_REQUEST << 2;
msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
msg[2] = enables;
smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
rv = wait_for_msg_done(smi_info);
if (rv) {
dev_warn(smi_info->io.dev,
"Error getting response from set global enables command: %d\n",
rv);
goto out;
}
resp_len = smi_info->handlers->get_result(smi_info->si_sm,
resp, IPMI_MAX_MSG_LENGTH);
if (resp_len < 3 ||
resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) {
dev_warn(smi_info->io.dev,
"Invalid return from set global enables command: %ld %x %x\n",
resp_len, resp[0], resp[1]);
rv = -EINVAL;
goto out;
}
if (resp[2] != 0)
rv = 1;
out:
kfree(resp);
return rv;
}
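/*
 * Usage sketch (added for clarity): callers such as
 * check_clr_rcv_irq() below distinguish three outcomes from
 * set_global_enables(): < 0 means the transaction itself failed,
 * 1 means the BMC returned a completion-code error (the bit is
 * unsupported), and 0 means the new enables were accepted.
 */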
/*
* Some BMCs do not support clearing the receive irq bit in the global
* enables (even if they don't support interrupts on the BMC). Check
* for this and handle it properly.
*/
static void check_clr_rcv_irq(struct smi_info *smi_info)
{
u8 enables = 0;
int rv;
rv = get_global_enables(smi_info, &enables);
if (!rv) {
if ((enables & IPMI_BMC_RCV_MSG_INTR) == 0)
/* Already clear, should work ok. */
return;
enables &= ~IPMI_BMC_RCV_MSG_INTR;
rv = set_global_enables(smi_info, enables);
}
if (rv < 0) {
dev_err(smi_info->io.dev,
"Cannot check clearing the rcv irq: %d\n", rv);
return;
}
if (rv) {
/*
* An error when setting the event buffer bit means
* clearing the bit is not supported.
*/
dev_warn(smi_info->io.dev,
"The BMC does not support clearing the recv irq bit, compensating, but the BMC needs to be fixed.\n");
smi_info->cannot_disable_irq = true;
}
}
/*
* Some BMCs do not support setting the interrupt bits in the global
* enables even if they support interrupts. Clearly bad, but we can
* compensate.
*/
static void check_set_rcv_irq(struct smi_info *smi_info)
{
u8 enables = 0;
int rv;
if (!smi_info->io.irq)
return;
rv = get_global_enables(smi_info, &enables);
if (!rv) {
enables |= IPMI_BMC_RCV_MSG_INTR;
rv = set_global_enables(smi_info, enables);
}
if (rv < 0) {
dev_err(smi_info->io.dev,
"Cannot check setting the rcv irq: %d\n", rv);
return;
}
if (rv) {
/*
* An error when setting the event buffer bit means
* setting the bit is not supported.
*/
dev_warn(smi_info->io.dev,
"The BMC does not support setting the recv irq bit, compensating, but the BMC needs to be fixed.\n");
smi_info->cannot_disable_irq = true;
smi_info->irq_enable_broken = true;
}
}
static int try_enable_event_buffer(struct smi_info *smi_info)
{
unsigned char msg[3];
unsigned char *resp;
unsigned long resp_len;
int rv = 0;
resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
if (!resp)
return -ENOMEM;
msg[0] = IPMI_NETFN_APP_REQUEST << 2;
msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
rv = wait_for_msg_done(smi_info);
if (rv) {
pr_warn("Error getting response from get global enables command, the event buffer is not enabled\n");
goto out;
}
resp_len = smi_info->handlers->get_result(smi_info->si_sm,
resp, IPMI_MAX_MSG_LENGTH);
if (resp_len < 4 ||
resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD ||
resp[2] != 0) {
pr_warn("Invalid return from get global enables command, cannot enable the event buffer\n");
rv = -EINVAL;
goto out;
}
if (resp[3] & IPMI_BMC_EVT_MSG_BUFF) {
/* buffer is already enabled, nothing to do. */
smi_info->supports_event_msg_buff = true;
goto out;
}
msg[0] = IPMI_NETFN_APP_REQUEST << 2;
msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
msg[2] = resp[3] | IPMI_BMC_EVT_MSG_BUFF;
smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
rv = wait_for_msg_done(smi_info);
if (rv) {
pr_warn("Error getting response from set global, enables command, the event buffer is not enabled\n");
goto out;
}
resp_len = smi_info->handlers->get_result(smi_info->si_sm,
resp, IPMI_MAX_MSG_LENGTH);
if (resp_len < 3 ||
resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) {
pr_warn("Invalid return from get global, enables command, not enable the event buffer\n");
rv = -EINVAL;
goto out;
}
if (resp[2] != 0)
/*
* An error when setting the event buffer bit means
* that the event buffer is not supported.
*/
rv = -ENOENT;
else
smi_info->supports_event_msg_buff = true;
out:
kfree(resp);
return rv;
}
#define IPMI_SI_ATTR(name) \
static ssize_t ipmi_##name##_show(struct device *dev, \
struct device_attribute *attr, \
char *buf) \
{ \
struct smi_info *smi_info = dev_get_drvdata(dev); \
\
return snprintf(buf, 10, "%u\n", smi_get_stat(smi_info, name)); \
} \
static DEVICE_ATTR(name, S_IRUGO, ipmi_##name##_show, NULL)
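/*
 * Illustrative expansion (added for clarity): IPMI_SI_ATTR(idles)
 * generates ipmi_idles_show(), which prints
 * smi_get_stat(smi_info, idles), plus a read-only dev_attr_idles
 * that is referenced in the attribute array below.
 */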
static ssize_t ipmi_type_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct smi_info *smi_info = dev_get_drvdata(dev);
return snprintf(buf, 10, "%s\n", si_to_str[smi_info->io.si_type]);
}
static DEVICE_ATTR(type, S_IRUGO, ipmi_type_show, NULL);
static ssize_t ipmi_interrupts_enabled_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct smi_info *smi_info = dev_get_drvdata(dev);
int enabled = smi_info->io.irq && !smi_info->interrupt_disabled;
return snprintf(buf, 10, "%d\n", enabled);
}
static DEVICE_ATTR(interrupts_enabled, S_IRUGO,
ipmi_interrupts_enabled_show, NULL);
IPMI_SI_ATTR(short_timeouts);
IPMI_SI_ATTR(long_timeouts);
IPMI_SI_ATTR(idles);
IPMI_SI_ATTR(interrupts);
IPMI_SI_ATTR(attentions);
IPMI_SI_ATTR(flag_fetches);
IPMI_SI_ATTR(hosed_count);
IPMI_SI_ATTR(complete_transactions);
IPMI_SI_ATTR(events);
IPMI_SI_ATTR(watchdog_pretimeouts);
IPMI_SI_ATTR(incoming_messages);
static ssize_t ipmi_params_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct smi_info *smi_info = dev_get_drvdata(dev);
return snprintf(buf, 200,
"%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n",
si_to_str[smi_info->io.si_type],
addr_space_to_str[smi_info->io.addr_type],
smi_info->io.addr_data,
smi_info->io.regspacing,
smi_info->io.regsize,
smi_info->io.regshift,
smi_info->io.irq,
smi_info->io.slave_addr);
}
static DEVICE_ATTR(params, S_IRUGO, ipmi_params_show, NULL);
static struct attribute *ipmi_si_dev_attrs[] = {
&dev_attr_type.attr,
&dev_attr_interrupts_enabled.attr,
&dev_attr_short_timeouts.attr,
&dev_attr_long_timeouts.attr,
&dev_attr_idles.attr,
&dev_attr_interrupts.attr,
&dev_attr_attentions.attr,
&dev_attr_flag_fetches.attr,
&dev_attr_hosed_count.attr,
&dev_attr_complete_transactions.attr,
&dev_attr_events.attr,
&dev_attr_watchdog_pretimeouts.attr,
&dev_attr_incoming_messages.attr,
&dev_attr_params.attr,
NULL
};
static const struct attribute_group ipmi_si_dev_attr_group = {
.attrs = ipmi_si_dev_attrs,
};
/*
* oem_data_avail_to_receive_msg_avail
* @info - smi_info structure with msg_flags set
*
* Converts flags from OEM_DATA_AVAIL to RECEIVE_MSG_AVAIL
* Returns 1 indicating need to re-run handle_flags().
*/
static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info)
{
smi_info->msg_flags = ((smi_info->msg_flags & ~OEM_DATA_AVAIL) |
RECEIVE_MSG_AVAIL);
return 1;
}
/*
* setup_dell_poweredge_oem_data_handler
* @info - smi_info.device_id must be populated
*
* Systems that match, but have firmware version < 1.40 may assert
* OEM0_DATA_AVAIL on their own, without being told via Set Flags that
* it's safe to do so. Such systems will de-assert OEM1_DATA_AVAIL
* upon receipt of IPMI_GET_MSG_CMD, so we should treat these flags
* as RECEIVE_MSG_AVAIL instead.
*
* As Dell has no plans to release IPMI 1.5 firmware that *ever*
* assert the OEM[012] bits, and if it did, the driver would have to
* change to handle that properly, we don't actually check for the
* firmware version.
* Device ID = 0x20 BMC on PowerEdge 8G servers
* Device Revision = 0x80
* Firmware Revision1 = 0x01 BMC version 1.40
* Firmware Revision2 = 0x40 BCD encoded
* IPMI Version = 0x51 IPMI 1.5
* Manufacturer ID = A2 02 00 Dell IANA
*
* Additionally, PowerEdge systems with IPMI < 1.5 may also assert
* OEM0_DATA_AVAIL, which needs to be treated as RECEIVE_MSG_AVAIL.
*
*/
#define DELL_POWEREDGE_8G_BMC_DEVICE_ID 0x20
#define DELL_POWEREDGE_8G_BMC_DEVICE_REV 0x80
#define DELL_POWEREDGE_8G_BMC_IPMI_VERSION 0x51
#define DELL_IANA_MFR_ID 0x0002a2
static void setup_dell_poweredge_oem_data_handler(struct smi_info *smi_info)
{
struct ipmi_device_id *id = &smi_info->device_id;
if (id->manufacturer_id == DELL_IANA_MFR_ID) {
if (id->device_id == DELL_POWEREDGE_8G_BMC_DEVICE_ID &&
id->device_revision == DELL_POWEREDGE_8G_BMC_DEVICE_REV &&
id->ipmi_version == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) {
smi_info->oem_data_avail_handler =
oem_data_avail_to_receive_msg_avail;
} else if (ipmi_version_major(id) < 1 ||
(ipmi_version_major(id) == 1 &&
ipmi_version_minor(id) < 5)) {
smi_info->oem_data_avail_handler =
oem_data_avail_to_receive_msg_avail;
}
}
}
#define CANNOT_RETURN_REQUESTED_LENGTH 0xCA
static void return_hosed_msg_badsize(struct smi_info *smi_info)
{
struct ipmi_smi_msg *msg = smi_info->curr_msg;
/* Make it a response */
msg->rsp[0] = msg->data[0] | 4;
msg->rsp[1] = msg->data[1];
msg->rsp[2] = CANNOT_RETURN_REQUESTED_LENGTH;
msg->rsp_size = 3;
smi_info->curr_msg = NULL;
deliver_recv_msg(smi_info, msg);
}
/*
* dell_poweredge_bt_xaction_handler
* @info - smi_info.device_id must be populated
*
* Dell PowerEdge servers with the BT interface (x6xx and 1750) will
* not respond to a Get SDR command if the length of the data
* requested is exactly 0x3A, which leads to command timeouts and no
* data returned. This intercepts such commands, and causes userspace
* callers to try again with a different-sized buffer, which succeeds.
*/
#define STORAGE_NETFN 0x0A
#define STORAGE_CMD_GET_SDR 0x23
static int dell_poweredge_bt_xaction_handler(struct notifier_block *self,
unsigned long unused,
void *in)
{
struct smi_info *smi_info = in;
unsigned char *data = smi_info->curr_msg->data;
unsigned int size = smi_info->curr_msg->data_size;
if (size >= 8 &&
(data[0]>>2) == STORAGE_NETFN &&
data[1] == STORAGE_CMD_GET_SDR &&
data[7] == 0x3A) {
return_hosed_msg_badsize(smi_info);
return NOTIFY_STOP;
}
return NOTIFY_DONE;
}
static struct notifier_block dell_poweredge_bt_xaction_notifier = {
.notifier_call = dell_poweredge_bt_xaction_handler,
};
/*
* setup_dell_poweredge_bt_xaction_handler
* @info - smi_info.device_id must be filled in already
*
* Registers the Dell PowerEdge BT transaction notifier
* when the device id indicates it is needed.
*/
static void
setup_dell_poweredge_bt_xaction_handler(struct smi_info *smi_info)
{
struct ipmi_device_id *id = &smi_info->device_id;
if (id->manufacturer_id == DELL_IANA_MFR_ID &&
smi_info->io.si_type == SI_BT)
register_xaction_notifier(&dell_poweredge_bt_xaction_notifier);
}
/*
* setup_oem_data_handler
* @info - smi_info.device_id must be filled in already
*
* Fills in smi_info.oem_data_avail_handler
* when we know what function to use there.
*/
static void setup_oem_data_handler(struct smi_info *smi_info)
{
setup_dell_poweredge_oem_data_handler(smi_info);
}
static void setup_xaction_handlers(struct smi_info *smi_info)
{
setup_dell_poweredge_bt_xaction_handler(smi_info);
}
static void check_for_broken_irqs(struct smi_info *smi_info)
{
check_clr_rcv_irq(smi_info);
check_set_rcv_irq(smi_info);
}
static inline void stop_timer_and_thread(struct smi_info *smi_info)
{
if (smi_info->thread != NULL) {
kthread_stop(smi_info->thread);
smi_info->thread = NULL;
}
smi_info->timer_can_start = false;
if (smi_info->timer_running)
del_timer_sync(&smi_info->si_timer);
}
static struct smi_info *find_dup_si(struct smi_info *info)
{
struct smi_info *e;
list_for_each_entry(e, &smi_infos, link) {
if (e->io.addr_type != info->io.addr_type)
continue;
if (e->io.addr_data == info->io.addr_data) {
/*
* This is a cheap hack; ACPI doesn't have a defined
* slave address but SMBIOS does. Pick it up from
* any source that has it available.
*/
if (info->io.slave_addr && !e->io.slave_addr)
e->io.slave_addr = info->io.slave_addr;
return e;
}
}
return NULL;
}
int ipmi_si_add_smi(struct si_sm_io *io)
{
int rv = 0;
struct smi_info *new_smi, *dup;
if (!io->io_setup) {
if (io->addr_type == IPMI_IO_ADDR_SPACE) {
io->io_setup = ipmi_si_port_setup;
} else if (io->addr_type == IPMI_MEM_ADDR_SPACE) {
io->io_setup = ipmi_si_mem_setup;
} else {
return -EINVAL;
}
}
new_smi = kzalloc(sizeof(*new_smi), GFP_KERNEL);
if (!new_smi)
return -ENOMEM;
spin_lock_init(&new_smi->si_lock);
new_smi->io = *io;
mutex_lock(&smi_infos_lock);
dup = find_dup_si(new_smi);
if (dup) {
if (new_smi->io.addr_source == SI_ACPI &&
dup->io.addr_source == SI_SMBIOS) {
/* We prefer ACPI over SMBIOS. */
dev_info(dup->io.dev,
"Removing SMBIOS-specified %s state machine in favor of ACPI\n",
si_to_str[new_smi->io.si_type]);
cleanup_one_si(dup);
} else {
dev_info(new_smi->io.dev,
"%s-specified %s state machine: duplicate\n",
ipmi_addr_src_to_str(new_smi->io.addr_source),
si_to_str[new_smi->io.si_type]);
rv = -EBUSY;
kfree(new_smi);
goto out_err;
}
}
pr_info("Adding %s-specified %s state machine\n",
ipmi_addr_src_to_str(new_smi->io.addr_source),
si_to_str[new_smi->io.si_type]);
list_add_tail(&new_smi->link, &smi_infos);
if (initialized)
rv = try_smi_init(new_smi);
out_err:
mutex_unlock(&smi_infos_lock);
return rv;
}
/*
* Try to start up an interface. Must be called with smi_infos_lock
* held, primarily to keep smi_num consistent; we only want to do these
* one at a time.
*/
static int try_smi_init(struct smi_info *new_smi)
{
int rv = 0;
int i;
char *init_name = NULL;
pr_info("Trying %s-specified %s state machine at %s address 0x%lx, slave address 0x%x, irq %d\n",
ipmi_addr_src_to_str(new_smi->io.addr_source),
si_to_str[new_smi->io.si_type],
addr_space_to_str[new_smi->io.addr_type],
new_smi->io.addr_data,
new_smi->io.slave_addr, new_smi->io.irq);
switch (new_smi->io.si_type) {
case SI_KCS:
new_smi->handlers = &kcs_smi_handlers;
break;
case SI_SMIC:
new_smi->handlers = &smic_smi_handlers;
break;
case SI_BT:
new_smi->handlers = &bt_smi_handlers;
break;
default:
/* No support for anything else yet. */
rv = -EIO;
goto out_err;
}
new_smi->si_num = smi_num;
/* Do this early so it's available for logs. */
if (!new_smi->io.dev) {
init_name = kasprintf(GFP_KERNEL, "ipmi_si.%d",
new_smi->si_num);
/*
* If we don't already have a device from something
* else (like PCI), then register a new one.
*/
new_smi->pdev = platform_device_alloc("ipmi_si",
new_smi->si_num);
if (!new_smi->pdev) {
pr_err("Unable to allocate platform device\n");
rv = -ENOMEM;
goto out_err;
}
new_smi->io.dev = &new_smi->pdev->dev;
new_smi->io.dev->driver = &ipmi_platform_driver.driver;
/* Nulled by device_add() */
new_smi->io.dev->init_name = init_name;
}
/* Allocate the state machine's data and initialize it. */
new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
if (!new_smi->si_sm) {
rv = -ENOMEM;
goto out_err;
}
new_smi->io.io_size = new_smi->handlers->init_data(new_smi->si_sm,
&new_smi->io);
/* Now that we know the I/O size, we can set up the I/O. */
rv = new_smi->io.io_setup(&new_smi->io);
if (rv) {
dev_err(new_smi->io.dev, "Could not set up I/O space\n");
goto out_err;
}
/* Do low-level detection first. */
if (new_smi->handlers->detect(new_smi->si_sm)) {
if (new_smi->io.addr_source)
dev_err(new_smi->io.dev,
"Interface detection failed\n");
rv = -ENODEV;
goto out_err;
}
/*
* Attempt a get device id command. If it fails, we probably
* don't have a BMC here.
*/
rv = try_get_dev_id(new_smi);
if (rv) {
if (new_smi->io.addr_source)
dev_err(new_smi->io.dev,
"There appears to be no BMC at this location\n");
goto out_err;
}
setup_oem_data_handler(new_smi);
setup_xaction_handlers(new_smi);
check_for_broken_irqs(new_smi);
new_smi->waiting_msg = NULL;
new_smi->curr_msg = NULL;
atomic_set(&new_smi->req_events, 0);
new_smi->run_to_completion = false;
for (i = 0; i < SI_NUM_STATS; i++)
atomic_set(&new_smi->stats[i], 0);
new_smi->interrupt_disabled = true;
atomic_set(&new_smi->need_watch, 0);
rv = try_enable_event_buffer(new_smi);
if (rv == 0)
new_smi->has_event_buffer = true;
/*
* Start clearing the flags before we enable interrupts or the
* timer to avoid racing with the timer.
*/
start_clear_flags(new_smi);
/*
* IRQ is defined to be set when non-zero. req_events will
* cause a global flags check that will enable interrupts.
*/
if (new_smi->io.irq) {
new_smi->interrupt_disabled = false;
atomic_set(&new_smi->req_events, 1);
}
if (new_smi->pdev && !new_smi->pdev_registered) {
rv = platform_device_add(new_smi->pdev);
if (rv) {
dev_err(new_smi->io.dev,
"Unable to register system interface device: %d\n",
rv);
goto out_err;
}
new_smi->pdev_registered = true;
}
dev_set_drvdata(new_smi->io.dev, new_smi);
rv = device_add_group(new_smi->io.dev, &ipmi_si_dev_attr_group);
if (rv) {
dev_err(new_smi->io.dev,
"Unable to add device attributes: error %d\n",
rv);
goto out_err;
}
new_smi->dev_group_added = true;
rv = ipmi_register_smi(&handlers,
new_smi,
new_smi->io.dev,
new_smi->io.slave_addr);
if (rv) {
dev_err(new_smi->io.dev,
"Unable to register device: error %d\n",
rv);
goto out_err;
}
/* Don't increment till we know we have succeeded. */
smi_num++;
dev_info(new_smi->io.dev, "IPMI %s interface initialized\n",
si_to_str[new_smi->io.si_type]);
WARN_ON(new_smi->io.dev->init_name != NULL);
out_err:
if (rv && new_smi->io.io_cleanup) {
new_smi->io.io_cleanup(&new_smi->io);
new_smi->io.io_cleanup = NULL;
}
kfree(init_name);
return rv;
}
static int init_ipmi_si(void)
{
struct smi_info *e;
enum ipmi_addr_src type = SI_INVALID;
if (initialized)
return 0;
pr_info("IPMI System Interface driver\n");
/* If the user gave us a device, they presumably want us to use it */
if (!ipmi_si_hardcode_find_bmc())
goto do_scan;
ipmi_si_platform_init();
ipmi_si_pci_init();
ipmi_si_parisc_init();
/* We prefer devices with interrupts, but in the case of a machine
   with multiple BMCs we assume that there will be several instances
   of a given type, so if we succeed in registering a type we also
   try to register everything else of the same type */
do_scan:
mutex_lock(&smi_infos_lock);
list_for_each_entry(e, &smi_infos, link) {
/* Try to register a device if it has an IRQ and we either
haven't successfully registered a device yet or this
device has the same type as one we successfully registered */
if (e->io.irq && (!type || e->io.addr_source == type)) {
if (!try_smi_init(e)) {
type = e->io.addr_source;
}
}
}
/* type will only have been set if we successfully registered an si */
if (type)
goto skip_fallback_noirq;
/* Fall back to the preferred device */
list_for_each_entry(e, &smi_infos, link) {
if (!e->io.irq && (!type || e->io.addr_source == type)) {
if (!try_smi_init(e)) {
type = e->io.addr_source;
}
}
}
skip_fallback_noirq:
initialized = 1;
mutex_unlock(&smi_infos_lock);
if (type)
return 0;
mutex_lock(&smi_infos_lock);
if (unload_when_empty && list_empty(&smi_infos)) {
mutex_unlock(&smi_infos_lock);
cleanup_ipmi_si();
pr_warn("Unable to find any System Interface(s)\n");
return -ENODEV;
} else {
mutex_unlock(&smi_infos_lock);
return 0;
}
}
module_init(init_ipmi_si);
static void shutdown_smi(void *send_info)
{
struct smi_info *smi_info = send_info;
if (smi_info->dev_group_added) {
device_remove_group(smi_info->io.dev, &ipmi_si_dev_attr_group);
smi_info->dev_group_added = false;
}
if (smi_info->io.dev)
dev_set_drvdata(smi_info->io.dev, NULL);
/*
* Make sure that interrupts, the timer and the thread are
* stopped and will not run again.
*/
smi_info->interrupt_disabled = true;
if (smi_info->io.irq_cleanup) {
smi_info->io.irq_cleanup(&smi_info->io);
smi_info->io.irq_cleanup = NULL;
}
stop_timer_and_thread(smi_info);
/*
* Wait until we know that we are out of any interrupt
* handlers that might have been running before we freed the
* interrupt.
*/
synchronize_rcu();
/*
* Timeouts are stopped, now make sure the interrupts are off
* in the BMC. Note that timers and CPU interrupts are off,
* so no need for locks.
*/
while (smi_info->curr_msg || (smi_info->si_state != SI_NORMAL)) {
poll(smi_info);
schedule_timeout_uninterruptible(1);
}
if (smi_info->handlers)
disable_si_irq(smi_info);
while (smi_info->curr_msg || (smi_info->si_state != SI_NORMAL)) {
poll(smi_info);
schedule_timeout_uninterruptible(1);
}
if (smi_info->handlers)
smi_info->handlers->cleanup(smi_info->si_sm);
if (smi_info->io.addr_source_cleanup) {
smi_info->io.addr_source_cleanup(&smi_info->io);
smi_info->io.addr_source_cleanup = NULL;
}
if (smi_info->io.io_cleanup) {
smi_info->io.io_cleanup(&smi_info->io);
smi_info->io.io_cleanup = NULL;
}
kfree(smi_info->si_sm);
smi_info->si_sm = NULL;
smi_info->intf = NULL;
}
/*
* Must be called with smi_infos_lock held, to serialize the
* smi_info->intf check.
*/
static void cleanup_one_si(struct smi_info *smi_info)
{
if (!smi_info)
return;
list_del(&smi_info->link);
if (smi_info->intf)
ipmi_unregister_smi(smi_info->intf);
if (smi_info->pdev) {
if (smi_info->pdev_registered)
platform_device_unregister(smi_info->pdev);
else
platform_device_put(smi_info->pdev);
}
kfree(smi_info);
}
int ipmi_si_remove_by_dev(struct device *dev)
{
struct smi_info *e;
int rv = -ENOENT;
mutex_lock(&smi_infos_lock);
list_for_each_entry(e, &smi_infos, link) {
if (e->io.dev == dev) {
cleanup_one_si(e);
rv = 0;
break;
}
}
mutex_unlock(&smi_infos_lock);
return rv;
}
void ipmi_si_remove_by_data(int addr_space, enum si_type si_type,
unsigned long addr)
{
/* remove */
struct smi_info *e, *tmp_e;
mutex_lock(&smi_infos_lock);
list_for_each_entry_safe(e, tmp_e, &smi_infos, link) {
if (e->io.addr_type != addr_space)
continue;
if (e->io.si_type != si_type)
continue;
if (e->io.addr_data == addr)
cleanup_one_si(e);
}
mutex_unlock(&smi_infos_lock);
}
static void cleanup_ipmi_si(void)
{
struct smi_info *e, *tmp_e;
if (!initialized)
return;
ipmi_si_pci_shutdown();
ipmi_si_parisc_shutdown();
ipmi_si_platform_shutdown();
mutex_lock(&smi_infos_lock);
list_for_each_entry_safe(e, tmp_e, &smi_infos, link)
cleanup_one_si(e);
mutex_unlock(&smi_infos_lock);
}
module_exit(cleanup_ipmi_si);
MODULE_ALIAS("platform:dmi-ipmi-si");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
MODULE_DESCRIPTION("Interface to the IPMI driver for the KCS, SMIC, and BT"
" system interfaces.");
| ./CrossVul/dataset_final_sorted/CWE-416/c/good_832_0 |
crossvul-cpp_data_bad_3013_0 | /*
* linux/mm/oom_kill.c
*
* Copyright (C) 1998,2000 Rik van Riel
* Thanks go out to Claus Fischer for some serious inspiration and
* for goading me into coding this file...
* Copyright (C) 2010 Google, Inc.
* Rewritten by David Rientjes
*
* The routines in this file are used to kill a process when
* we're seriously out of memory. This gets called from __alloc_pages()
* in mm/page_alloc.c when we really run out of memory.
*
* Since we won't call these routines often (on a well-configured
* machine) this file will double as a 'coding guide' and a signpost
* for newbie kernel hackers. It features several pointers to major
* kernel subsystems and hints as to where to find out what things do.
*/
#include <linux/oom.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/task.h>
#include <linux/swap.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/cpuset.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/memcontrol.h>
#include <linux/mempolicy.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <linux/freezer.h>
#include <linux/ftrace.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/init.h>
#include <linux/mmu_notifier.h>
#include <asm/tlb.h>
#include "internal.h"
#include "slab.h"
#define CREATE_TRACE_POINTS
#include <trace/events/oom.h>
int sysctl_panic_on_oom;
int sysctl_oom_kill_allocating_task;
int sysctl_oom_dump_tasks = 1;
DEFINE_MUTEX(oom_lock);
#ifdef CONFIG_NUMA
/**
* has_intersects_mems_allowed() - check task eligibility for kill
* @start: task struct of which task to consider
* @mask: nodemask passed to page allocator for mempolicy ooms
*
* Task eligibility is determined by whether or not a candidate task, @start,
* shares the same mempolicy nodes as current if it is bound by such a policy
* and whether or not it has the same set of allowed cpuset nodes.
*/
static bool has_intersects_mems_allowed(struct task_struct *start,
const nodemask_t *mask)
{
struct task_struct *tsk;
bool ret = false;
rcu_read_lock();
for_each_thread(start, tsk) {
if (mask) {
/*
* If this is a mempolicy constrained oom, tsk's
* cpuset is irrelevant. Only return true if its
* mempolicy intersects current, otherwise it may be
* needlessly killed.
*/
ret = mempolicy_nodemask_intersects(tsk, mask);
} else {
/*
* This is not a mempolicy constrained oom, so only
* check the mems of tsk's cpuset.
*/
ret = cpuset_mems_allowed_intersects(current, tsk);
}
if (ret)
break;
}
rcu_read_unlock();
return ret;
}
#else
static bool has_intersects_mems_allowed(struct task_struct *tsk,
const nodemask_t *mask)
{
return true;
}
#endif /* CONFIG_NUMA */
/*
* The process p may have detached its own ->mm while exiting or through
* use_mm(), but one or more of its subthreads may still have a valid
* pointer. Return p, or any of its subthreads with a valid ->mm, with
* task_lock() held.
*/
struct task_struct *find_lock_task_mm(struct task_struct *p)
{
struct task_struct *t;
rcu_read_lock();
for_each_thread(p, t) {
task_lock(t);
if (likely(t->mm))
goto found;
task_unlock(t);
}
t = NULL;
found:
rcu_read_unlock();
return t;
}
/*
* order == -1 means the oom kill is required by sysrq, otherwise only
* for display purposes.
*/
static inline bool is_sysrq_oom(struct oom_control *oc)
{
return oc->order == -1;
}
static inline bool is_memcg_oom(struct oom_control *oc)
{
return oc->memcg != NULL;
}
/* return true if the task is not adequate as candidate victim task. */
static bool oom_unkillable_task(struct task_struct *p,
struct mem_cgroup *memcg, const nodemask_t *nodemask)
{
if (is_global_init(p))
return true;
if (p->flags & PF_KTHREAD)
return true;
/* When mem_cgroup_out_of_memory() and p is not member of the group */
if (memcg && !task_in_mem_cgroup(p, memcg))
return true;
/* p may not have freeable memory in nodemask */
if (!has_intersects_mems_allowed(p, nodemask))
return true;
return false;
}
/*
* Print out unreclaimable slabs info when unreclaimable slabs amount is greater
* than all user memory (LRU pages)
*/
static bool is_dump_unreclaim_slabs(void)
{
unsigned long nr_lru;
nr_lru = global_node_page_state(NR_ACTIVE_ANON) +
global_node_page_state(NR_INACTIVE_ANON) +
global_node_page_state(NR_ACTIVE_FILE) +
global_node_page_state(NR_INACTIVE_FILE) +
global_node_page_state(NR_ISOLATED_ANON) +
global_node_page_state(NR_ISOLATED_FILE) +
global_node_page_state(NR_UNEVICTABLE);
return (global_node_page_state(NR_SLAB_UNRECLAIMABLE) > nr_lru);
}
/**
* oom_badness - heuristic function to determine which candidate task to kill
* @p: task struct of the task whose badness score we should calculate
* @memcg: task's memory controller, if constrained
* @nodemask: nodemask passed to page allocator for mempolicy ooms
* @totalpages: total present RAM allowed for page allocation
*
* The heuristic for determining which task to kill is made to be as simple and
* predictable as possible. The goal is to return the highest value for the
* task consuming the most memory to avoid subsequent oom failures.
*/
unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
const nodemask_t *nodemask, unsigned long totalpages)
{
long points;
long adj;
if (oom_unkillable_task(p, memcg, nodemask))
return 0;
p = find_lock_task_mm(p);
if (!p)
return 0;
/*
* Do not even consider tasks which are explicitly marked oom
* unkillable, have already been oom reaped, or are in the
* middle of a vfork
*/
adj = (long)p->signal->oom_score_adj;
if (adj == OOM_SCORE_ADJ_MIN ||
test_bit(MMF_OOM_SKIP, &p->mm->flags) ||
in_vfork(p)) {
task_unlock(p);
return 0;
}
/*
* The baseline for the badness score is the proportion of RAM that each
* task's rss, pagetable and swap space use.
*/
points = get_mm_rss(p->mm) + get_mm_counter(p->mm, MM_SWAPENTS) +
mm_pgtables_bytes(p->mm) / PAGE_SIZE;
task_unlock(p);
/*
* Root processes get 3% bonus, just like the __vm_enough_memory()
* implementation used by LSMs.
*/
if (has_capability_noaudit(p, CAP_SYS_ADMIN))
points -= (points * 3) / 100;
/* Normalize to oom_score_adj units */
adj *= totalpages / 1000;
points += adj;
/*
* Never return 0 for an eligible task regardless of the root bonus and
* oom_score_adj (oom_score_adj can't be OOM_SCORE_ADJ_MIN here).
*/
return points > 0 ? points : 1;
}
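/*
 * Illustrative sketch (not part of this file): a minimal userspace model of
 * the oom_badness() arithmetic above, omitting the 3% root bonus. The
 * function and variable names here are hypothetical.
 */
#include <stdio.h>
static long model_badness(long rss_pages, long swap_pages, long pgtable_pages,
long adj, unsigned long totalpages)
{
long points = rss_pages + swap_pages + pgtable_pages;
/* Normalize adj: an oom_score_adj of 1000 is worth all of RAM. */
points += adj * (long)(totalpages / 1000);
return points > 0 ? points : 1;
}
int main(void)
{
/* 1M pages in use, adj 100, 4M total pages -> 1.4M points. */
printf("%ld\n", model_badness(800000, 150000, 50000, 100, 4000000));
return 0;
}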
enum oom_constraint {
CONSTRAINT_NONE,
CONSTRAINT_CPUSET,
CONSTRAINT_MEMORY_POLICY,
CONSTRAINT_MEMCG,
};
/*
* Determine the type of allocation constraint.
*/
static enum oom_constraint constrained_alloc(struct oom_control *oc)
{
struct zone *zone;
struct zoneref *z;
enum zone_type high_zoneidx = gfp_zone(oc->gfp_mask);
bool cpuset_limited = false;
int nid;
if (is_memcg_oom(oc)) {
oc->totalpages = mem_cgroup_get_limit(oc->memcg) ?: 1;
return CONSTRAINT_MEMCG;
}
/* Default to all available memory */
oc->totalpages = totalram_pages + total_swap_pages;
if (!IS_ENABLED(CONFIG_NUMA))
return CONSTRAINT_NONE;
if (!oc->zonelist)
return CONSTRAINT_NONE;
/*
* We reach here only when __GFP_NOFAIL is used, so we should avoid
* killing current. We have to fall back to killing a random task in this
* case. Ideally this would be CONSTRAINT_THISNODE, but there is no way
* to handle that for now.
*/
if (oc->gfp_mask & __GFP_THISNODE)
return CONSTRAINT_NONE;
/*
* This is not a __GFP_THISNODE allocation, so a truncated nodemask in
* the page allocator means a mempolicy is in effect. Cpuset policy
* is enforced in get_page_from_freelist().
*/
if (oc->nodemask &&
!nodes_subset(node_states[N_MEMORY], *oc->nodemask)) {
oc->totalpages = total_swap_pages;
for_each_node_mask(nid, *oc->nodemask)
oc->totalpages += node_spanned_pages(nid);
return CONSTRAINT_MEMORY_POLICY;
}
/* Check whether this allocation failure is caused by cpuset's wall function */
for_each_zone_zonelist_nodemask(zone, z, oc->zonelist,
high_zoneidx, oc->nodemask)
if (!cpuset_zone_allowed(zone, oc->gfp_mask))
cpuset_limited = true;
if (cpuset_limited) {
oc->totalpages = total_swap_pages;
for_each_node_mask(nid, cpuset_current_mems_allowed)
oc->totalpages += node_spanned_pages(nid);
return CONSTRAINT_CPUSET;
}
return CONSTRAINT_NONE;
}
static int oom_evaluate_task(struct task_struct *task, void *arg)
{
struct oom_control *oc = arg;
unsigned long points;
if (oom_unkillable_task(task, NULL, oc->nodemask))
goto next;
/*
* This task already has access to memory reserves and is being killed.
* Don't allow any other task to have access to the reserves unless
* the task has MMF_OOM_SKIP because chances that it would release
* any memory are quite low.
*/
if (!is_sysrq_oom(oc) && tsk_is_oom_victim(task)) {
if (test_bit(MMF_OOM_SKIP, &task->signal->oom_mm->flags))
goto next;
goto abort;
}
/*
* If task is allocating a lot of memory and has been marked to be
* killed first if it triggers an oom, then select it.
*/
if (oom_task_origin(task)) {
points = ULONG_MAX;
goto select;
}
points = oom_badness(task, NULL, oc->nodemask, oc->totalpages);
if (!points || points < oc->chosen_points)
goto next;
/* Prefer thread group leaders for display purposes */
if (points == oc->chosen_points && thread_group_leader(oc->chosen))
goto next;
select:
if (oc->chosen)
put_task_struct(oc->chosen);
get_task_struct(task);
oc->chosen = task;
oc->chosen_points = points;
next:
return 0;
abort:
if (oc->chosen)
put_task_struct(oc->chosen);
oc->chosen = (void *)-1UL;
return 1;
}
/*
* Simple selection loop. We choose the process with the highest number of
* 'points'. In case scan was aborted, oc->chosen is set to -1.
*/
static void select_bad_process(struct oom_control *oc)
{
if (is_memcg_oom(oc))
mem_cgroup_scan_tasks(oc->memcg, oom_evaluate_task, oc);
else {
struct task_struct *p;
rcu_read_lock();
for_each_process(p)
if (oom_evaluate_task(p, oc))
break;
rcu_read_unlock();
}
oc->chosen_points = oc->chosen_points * 1000 / oc->totalpages;
}
/**
* dump_tasks - dump current memory state of all system tasks
* @memcg: current's memory controller, if constrained
* @nodemask: nodemask passed to page allocator for mempolicy ooms
*
* Dumps the current memory state of all eligible tasks. Tasks not in the same
* memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes
* are not shown.
* State information includes task's pid, uid, tgid, vm size, rss,
* pgtables_bytes, swapents, oom_score_adj value, and name.
*/
static void dump_tasks(struct mem_cgroup *memcg, const nodemask_t *nodemask)
{
struct task_struct *p;
struct task_struct *task;
pr_info("[ pid ] uid tgid total_vm rss pgtables_bytes swapents oom_score_adj name\n");
rcu_read_lock();
for_each_process(p) {
if (oom_unkillable_task(p, memcg, nodemask))
continue;
task = find_lock_task_mm(p);
if (!task) {
/*
* This is a kthread or all of p's threads have already
* detached their mm's. There's no need to report
* them; they can't be oom killed anyway.
*/
continue;
}
pr_info("[%5d] %5d %5d %8lu %8lu %8ld %8lu %5hd %s\n",
task->pid, from_kuid(&init_user_ns, task_uid(task)),
task->tgid, task->mm->total_vm, get_mm_rss(task->mm),
mm_pgtables_bytes(task->mm),
get_mm_counter(task->mm, MM_SWAPENTS),
task->signal->oom_score_adj, task->comm);
task_unlock(task);
}
rcu_read_unlock();
}
static void dump_header(struct oom_control *oc, struct task_struct *p)
{
pr_warn("%s invoked oom-killer: gfp_mask=%#x(%pGg), nodemask=%*pbl, order=%d, oom_score_adj=%hd\n",
current->comm, oc->gfp_mask, &oc->gfp_mask,
nodemask_pr_args(oc->nodemask), oc->order,
current->signal->oom_score_adj);
if (!IS_ENABLED(CONFIG_COMPACTION) && oc->order)
pr_warn("COMPACTION is disabled!!!\n");
cpuset_print_current_mems_allowed();
dump_stack();
if (is_memcg_oom(oc))
mem_cgroup_print_oom_info(oc->memcg, p);
else {
show_mem(SHOW_MEM_FILTER_NODES, oc->nodemask);
if (is_dump_unreclaim_slabs())
dump_unreclaimable_slab();
}
if (sysctl_oom_dump_tasks)
dump_tasks(oc->memcg, oc->nodemask);
}
/*
* Number of OOM victims in flight
*/
static atomic_t oom_victims = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(oom_victims_wait);
static bool oom_killer_disabled __read_mostly;
#define K(x) ((x) << (PAGE_SHIFT-10))
/*
* task->mm can be NULL if the task is the exited group leader. So to
* determine whether the task is using a particular mm, we examine all the
* task's threads: if one of those is using this mm then this task was also
* using it.
*/
bool process_shares_mm(struct task_struct *p, struct mm_struct *mm)
{
struct task_struct *t;
for_each_thread(p, t) {
struct mm_struct *t_mm = READ_ONCE(t->mm);
if (t_mm)
return t_mm == mm;
}
return false;
}
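/*
 * Illustrative sketch (not part of this file): the READ_ONCE() snapshot
 * pattern used above, modeled in plain C with a volatile load. The pointer
 * may change concurrently, so it is loaded exactly once into a local, and
 * both the NULL test and the comparison see the same value. Names are
 * hypothetical.
 */
#include <stddef.h>
struct demo_mm { int dummy; };
struct demo_task { struct demo_mm *mm; };
static int demo_task_uses_mm(struct demo_task *t, struct demo_mm *mm)
{
/* One load; no test/use race across two separate reads of t->mm. */
struct demo_mm *snap = *(struct demo_mm * volatile *)&t->mm;
return snap != NULL && snap == mm;
}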
#ifdef CONFIG_MMU
/*
* OOM Reaper kernel thread which tries to reap the memory used by the OOM
* victim (if that is possible) to help the OOM killer to move on.
*/
static struct task_struct *oom_reaper_th;
static DECLARE_WAIT_QUEUE_HEAD(oom_reaper_wait);
static struct task_struct *oom_reaper_list;
static DEFINE_SPINLOCK(oom_reaper_lock);
static bool __oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
{
struct mmu_gather tlb;
struct vm_area_struct *vma;
bool ret = true;
/*
* We have to make sure to not race with the victim exit path
* and cause premature new oom victim selection:
* __oom_reap_task_mm exit_mm
* mmget_not_zero
* mmput
* atomic_dec_and_test
* exit_oom_victim
* [...]
* out_of_memory
* select_bad_process
* # no TIF_MEMDIE task selects new victim
* unmap_page_range # frees some memory
*/
mutex_lock(&oom_lock);
if (!down_read_trylock(&mm->mmap_sem)) {
ret = false;
trace_skip_task_reaping(tsk->pid);
goto unlock_oom;
}
/*
* If the mm has notifiers then we would need to invalidate them around
* unmap_page_range and that is risky because notifiers can sleep and
* what they do is basically nondeterministic. So let's have a short
* sleep to give the oom victim some more time.
* TODO: we really want to get rid of this ugly hack and make sure that
* notifiers cannot block for an unbounded amount of time and add
* mmu_notifier_invalidate_range_{start,end} around unmap_page_range
*/
if (mm_has_notifiers(mm)) {
up_read(&mm->mmap_sem);
schedule_timeout_idle(HZ);
goto unlock_oom;
}
/*
* MMF_OOM_SKIP is set by exit_mmap when the OOM reaper can't
* work on the mm anymore. The check for MMF_OOM_SKIP must run
* under mmap_sem for reading because it serializes against the
* down_write();up_write() cycle in exit_mmap().
*/
if (test_bit(MMF_OOM_SKIP, &mm->flags)) {
up_read(&mm->mmap_sem);
trace_skip_task_reaping(tsk->pid);
goto unlock_oom;
}
trace_start_task_reaping(tsk->pid);
/*
* Tell all users of get_user/copy_from_user etc... that the content
* is no longer stable. No barriers really needed because unmapping
* should imply barriers already and the reader would hit a page fault
* if it stumbled over reaped memory.
*/
set_bit(MMF_UNSTABLE, &mm->flags);
tlb_gather_mmu(&tlb, mm, 0, -1);
for (vma = mm->mmap ; vma; vma = vma->vm_next) {
if (!can_madv_dontneed_vma(vma))
continue;
/*
* Only anonymous pages have a good chance to be dropped
* without additional steps which we cannot afford as we
* are OOM already.
*
* We do not even care about fs backed pages because all
* which are reclaimable have already been reclaimed and
* we do not want to block exit_mmap by keeping mm ref
* count elevated without a good reason.
*/
if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED))
unmap_page_range(&tlb, vma, vma->vm_start, vma->vm_end,
NULL);
}
tlb_finish_mmu(&tlb, 0, -1);
pr_info("oom_reaper: reaped process %d (%s), now anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
task_pid_nr(tsk), tsk->comm,
K(get_mm_counter(mm, MM_ANONPAGES)),
K(get_mm_counter(mm, MM_FILEPAGES)),
K(get_mm_counter(mm, MM_SHMEMPAGES)));
up_read(&mm->mmap_sem);
trace_finish_task_reaping(tsk->pid);
unlock_oom:
mutex_unlock(&oom_lock);
return ret;
}
#define MAX_OOM_REAP_RETRIES 10
static void oom_reap_task(struct task_struct *tsk)
{
int attempts = 0;
struct mm_struct *mm = tsk->signal->oom_mm;
/* Retry the down_read_trylock(mmap_sem) a few times */
while (attempts++ < MAX_OOM_REAP_RETRIES && !__oom_reap_task_mm(tsk, mm))
schedule_timeout_idle(HZ/10);
if (attempts <= MAX_OOM_REAP_RETRIES)
goto done;
pr_info("oom_reaper: unable to reap pid:%d (%s)\n",
task_pid_nr(tsk), tsk->comm);
debug_show_all_locks();
done:
tsk->oom_reaper_list = NULL;
/*
* Hide this mm from the OOM killer because it has either been reaped or
* somebody is stuck and cannot call up_write(mmap_sem).
*/
set_bit(MMF_OOM_SKIP, &mm->flags);
/* Drop a reference taken by wake_oom_reaper */
put_task_struct(tsk);
}
static int oom_reaper(void *unused)
{
while (true) {
struct task_struct *tsk = NULL;
wait_event_freezable(oom_reaper_wait, oom_reaper_list != NULL);
spin_lock(&oom_reaper_lock);
if (oom_reaper_list != NULL) {
tsk = oom_reaper_list;
oom_reaper_list = tsk->oom_reaper_list;
}
spin_unlock(&oom_reaper_lock);
if (tsk)
oom_reap_task(tsk);
}
return 0;
}
static void wake_oom_reaper(struct task_struct *tsk)
{
/* tsk is already queued? */
if (tsk == oom_reaper_list || tsk->oom_reaper_list)
return;
get_task_struct(tsk);
spin_lock(&oom_reaper_lock);
tsk->oom_reaper_list = oom_reaper_list;
oom_reaper_list = tsk;
spin_unlock(&oom_reaper_lock);
trace_wake_reaper(tsk->pid);
wake_up(&oom_reaper_wait);
}
static int __init oom_init(void)
{
oom_reaper_th = kthread_run(oom_reaper, NULL, "oom_reaper");
return 0;
}
subsys_initcall(oom_init)
#else
static inline void wake_oom_reaper(struct task_struct *tsk)
{
}
#endif /* CONFIG_MMU */
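/*
 * Illustrative sketch (not part of this file): a userspace pthreads analog of
 * the oom_reaper queue above - a lock-protected intrusive singly linked list
 * plus a worker that sleeps until a producer links a new entry. All names
 * here are hypothetical.
 */
#include <pthread.h>
#include <stdio.h>
struct demo_work { struct demo_work *next; int id; };
static struct demo_work *demo_list;
static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t demo_wait = PTHREAD_COND_INITIALIZER;
static void demo_wake_worker(struct demo_work *w)
{
pthread_mutex_lock(&demo_lock);
w->next = demo_list; /* push front, like oom_reaper_list */
demo_list = w;
pthread_mutex_unlock(&demo_lock);
pthread_cond_signal(&demo_wait); /* like wake_up(&oom_reaper_wait) */
}
static void *demo_worker(void *unused)
{
(void)unused;
for (;;) {
struct demo_work *w;
pthread_mutex_lock(&demo_lock);
while (!demo_list) /* like wait_event_freezable() */
pthread_cond_wait(&demo_wait, &demo_lock);
w = demo_list;
demo_list = w->next;
pthread_mutex_unlock(&demo_lock);
printf("processing work %d\n", w->id);
}
return NULL;
}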
/**
* mark_oom_victim - mark the given task as OOM victim
* @tsk: task to mark
*
* Has to be called with oom_lock held and never after
* oom has been disabled already.
*
* tsk->mm has to be non-NULL and the caller has to guarantee it is stable
* (either under task_lock or by operating on current).
*/
static void mark_oom_victim(struct task_struct *tsk)
{
struct mm_struct *mm = tsk->mm;
WARN_ON(oom_killer_disabled);
/* OOM killer might race with memcg OOM */
if (test_and_set_tsk_thread_flag(tsk, TIF_MEMDIE))
return;
/* oom_mm is bound to the signal struct life time. */
if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm))
mmgrab(tsk->signal->oom_mm);
/*
* Make sure that the task is woken up from uninterruptible sleep
* if it is frozen, because otherwise the OOM killer would not be able to
* free any memory and would livelock. freezing_slow_path will tell the freezer
* that TIF_MEMDIE tasks should be ignored.
*/
__thaw_task(tsk);
atomic_inc(&oom_victims);
trace_mark_victim(tsk->pid);
}
/**
* exit_oom_victim - note the exit of an OOM victim
*/
void exit_oom_victim(void)
{
clear_thread_flag(TIF_MEMDIE);
if (!atomic_dec_return(&oom_victims))
wake_up_all(&oom_victims_wait);
}
/**
* oom_killer_enable - enable OOM killer
*/
void oom_killer_enable(void)
{
oom_killer_disabled = false;
pr_info("OOM killer enabled.\n");
}
/**
* oom_killer_disable - disable OOM killer
* @timeout: maximum timeout to wait for oom victims in jiffies
*
* Forces all page allocations to fail rather than trigger OOM killer.
* Will block and wait until all OOM victims are killed or the given
* timeout expires.
*
* The function cannot be called when there are runnable user tasks because
* the userspace would see unexpected allocation failures as a result. Any
* new usage of this function should be discussed with MM people.
*
* Returns true if successful and false if the OOM killer cannot be
* disabled.
*/
bool oom_killer_disable(signed long timeout)
{
signed long ret;
/*
* Make sure to not race with an ongoing OOM killer. Check that the
* current is not killed (possibly due to sharing the victim's memory).
*/
if (mutex_lock_killable(&oom_lock))
return false;
oom_killer_disabled = true;
mutex_unlock(&oom_lock);
ret = wait_event_interruptible_timeout(oom_victims_wait,
!atomic_read(&oom_victims), timeout);
if (ret <= 0) {
oom_killer_enable();
return false;
}
pr_info("OOM killer disabled.\n");
return true;
}
static inline bool __task_will_free_mem(struct task_struct *task)
{
struct signal_struct *sig = task->signal;
/*
* A coredumping process may sleep for an extended period in exit_mm(),
* so the oom killer cannot assume that the process will promptly exit
* and release memory.
*/
if (sig->flags & SIGNAL_GROUP_COREDUMP)
return false;
if (sig->flags & SIGNAL_GROUP_EXIT)
return true;
if (thread_group_empty(task) && (task->flags & PF_EXITING))
return true;
return false;
}
/*
* Checks whether the given task is dying or exiting and likely to
* release its address space. This means that all threads and processes
* sharing the same mm have to be killed or exiting.
* Caller has to make sure that task->mm is stable (hold task_lock or
* it operates on the current).
*/
static bool task_will_free_mem(struct task_struct *task)
{
struct mm_struct *mm = task->mm;
struct task_struct *p;
bool ret = true;
/*
* Skip tasks without an mm because they might have already passed
* exit_mm and exit_oom_victim. The oom_reaper could have rescued that
* but do not rely on that for now. We can consider find_lock_task_mm
* in the future.
*/
if (!mm)
return false;
if (!__task_will_free_mem(task))
return false;
/*
* This task has already been drained by the oom reaper so there is
* only a small chance it will free some more
*/
if (test_bit(MMF_OOM_SKIP, &mm->flags))
return false;
if (atomic_read(&mm->mm_users) <= 1)
return true;
/*
* Make sure that all tasks which share the mm with the given task
* are dying as well, so that a) nobody pins its mm and
* b) the task is also reapable by the oom reaper.
*/
rcu_read_lock();
for_each_process(p) {
if (!process_shares_mm(p, mm))
continue;
if (same_thread_group(task, p))
continue;
ret = __task_will_free_mem(p);
if (!ret)
break;
}
rcu_read_unlock();
return ret;
}
static void oom_kill_process(struct oom_control *oc, const char *message)
{
struct task_struct *p = oc->chosen;
unsigned int points = oc->chosen_points;
struct task_struct *victim = p;
struct task_struct *child;
struct task_struct *t;
struct mm_struct *mm;
unsigned int victim_points = 0;
static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL,
DEFAULT_RATELIMIT_BURST);
bool can_oom_reap = true;
/*
* If the task is already exiting, don't alarm the sysadmin or kill
* its children or threads, just give it access to memory reserves
* so it can die quickly.
*/
task_lock(p);
if (task_will_free_mem(p)) {
mark_oom_victim(p);
wake_oom_reaper(p);
task_unlock(p);
put_task_struct(p);
return;
}
task_unlock(p);
if (__ratelimit(&oom_rs))
dump_header(oc, p);
pr_err("%s: Kill process %d (%s) score %u or sacrifice child\n",
message, task_pid_nr(p), p->comm, points);
/*
* If any of p's children has a different mm and is eligible for kill,
* the one with the highest oom_badness() score is sacrificed for its
* parent. This attempts to lose the minimal amount of work done while
* still freeing memory.
*/
read_lock(&tasklist_lock);
for_each_thread(p, t) {
list_for_each_entry(child, &t->children, sibling) {
unsigned int child_points;
if (process_shares_mm(child, p->mm))
continue;
/*
* oom_badness() returns 0 if the thread is unkillable
*/
child_points = oom_badness(child,
oc->memcg, oc->nodemask, oc->totalpages);
if (child_points > victim_points) {
put_task_struct(victim);
victim = child;
victim_points = child_points;
get_task_struct(victim);
}
}
}
read_unlock(&tasklist_lock);
p = find_lock_task_mm(victim);
if (!p) {
put_task_struct(victim);
return;
} else if (victim != p) {
get_task_struct(p);
put_task_struct(victim);
victim = p;
}
/* Get a reference to safely compare mm after task_unlock(victim) */
mm = victim->mm;
mmgrab(mm);
/* Raise event before sending signal: task reaper must see this */
count_vm_event(OOM_KILL);
count_memcg_event_mm(mm, OOM_KILL);
/*
* We should send SIGKILL before granting access to memory reserves
* in order to prevent the OOM victim from depleting the memory
* reserves from the user space under its control.
*/
do_send_sig_info(SIGKILL, SEND_SIG_FORCED, victim, true);
mark_oom_victim(victim);
pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
task_pid_nr(victim), victim->comm, K(victim->mm->total_vm),
K(get_mm_counter(victim->mm, MM_ANONPAGES)),
K(get_mm_counter(victim->mm, MM_FILEPAGES)),
K(get_mm_counter(victim->mm, MM_SHMEMPAGES)));
task_unlock(victim);
/*
* Kill all user processes sharing victim->mm in other thread groups, if
* any. They don't get access to memory reserves, though, to avoid
* depletion of all memory. This prevents mm->mmap_sem livelock when an
* oom killed thread cannot exit because it requires the semaphore and
* it's contended by another thread trying to allocate memory itself.
* That thread will now get access to memory reserves since it has a
* pending fatal signal.
*/
rcu_read_lock();
for_each_process(p) {
if (!process_shares_mm(p, mm))
continue;
if (same_thread_group(p, victim))
continue;
if (is_global_init(p)) {
can_oom_reap = false;
set_bit(MMF_OOM_SKIP, &mm->flags);
pr_info("oom killer %d (%s) has mm pinned by %d (%s)\n",
task_pid_nr(victim), victim->comm,
task_pid_nr(p), p->comm);
continue;
}
/*
* No use_mm() user needs to read from userspace, so we are
* OK to reap it.
*/
if (unlikely(p->flags & PF_KTHREAD))
continue;
do_send_sig_info(SIGKILL, SEND_SIG_FORCED, p, true);
}
rcu_read_unlock();
if (can_oom_reap)
wake_oom_reaper(victim);
mmdrop(mm);
put_task_struct(victim);
}
#undef K
/*
* Determines whether the kernel must panic because of the panic_on_oom sysctl.
*/
static void check_panic_on_oom(struct oom_control *oc,
enum oom_constraint constraint)
{
if (likely(!sysctl_panic_on_oom))
return;
if (sysctl_panic_on_oom != 2) {
/*
* panic_on_oom == 1 only affects CONSTRAINT_NONE, the kernel
* does not panic for cpuset, mempolicy, or memcg allocation
* failures.
*/
if (constraint != CONSTRAINT_NONE)
return;
}
/* Do not panic for oom kills triggered by sysrq */
if (is_sysrq_oom(oc))
return;
dump_header(oc, NULL);
panic("Out of memory: %s panic_on_oom is enabled\n",
sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide");
}
static BLOCKING_NOTIFIER_HEAD(oom_notify_list);
int register_oom_notifier(struct notifier_block *nb)
{
return blocking_notifier_chain_register(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(register_oom_notifier);
int unregister_oom_notifier(struct notifier_block *nb)
{
return blocking_notifier_chain_unregister(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_oom_notifier);
/**
* out_of_memory - kill the "best" process when we run out of memory
* @oc: pointer to struct oom_control
*
* If we run out of memory, we have the choice between either
* killing a random task (bad), letting the system crash (worse)
* OR try to be smart about which process to kill. Note that we
* don't have to be perfect here, we just have to be good.
*/
bool out_of_memory(struct oom_control *oc)
{
unsigned long freed = 0;
enum oom_constraint constraint = CONSTRAINT_NONE;
if (oom_killer_disabled)
return false;
if (!is_memcg_oom(oc)) {
blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
if (freed > 0)
/* Got some memory back in the last second. */
return true;
}
/*
* If current has a pending SIGKILL or is exiting, then automatically
* select it. The goal is to allow it to allocate so that it may
* quickly exit and free its memory.
*/
if (task_will_free_mem(current)) {
mark_oom_victim(current);
wake_oom_reaper(current);
return true;
}
/*
* The OOM killer does not compensate for IO-less reclaim.
* pagefault_out_of_memory lost its gfp context so we have to
* make sure to exclude the 0 mask - all other users should have at least
* ___GFP_DIRECT_RECLAIM to get here.
*/
if (oc->gfp_mask && !(oc->gfp_mask & __GFP_FS))
return true;
/*
* Check if there were limitations on the allocation (only relevant for
* NUMA and memcg) that may require different handling.
*/
constraint = constrained_alloc(oc);
if (constraint != CONSTRAINT_MEMORY_POLICY)
oc->nodemask = NULL;
check_panic_on_oom(oc, constraint);
if (!is_memcg_oom(oc) && sysctl_oom_kill_allocating_task &&
current->mm && !oom_unkillable_task(current, NULL, oc->nodemask) &&
current->signal->oom_score_adj != OOM_SCORE_ADJ_MIN) {
get_task_struct(current);
oc->chosen = current;
oom_kill_process(oc, "Out of memory (oom_kill_allocating_task)");
return true;
}
select_bad_process(oc);
/* Found nothing?!?! Either we hang forever, or we panic. */
if (!oc->chosen && !is_sysrq_oom(oc) && !is_memcg_oom(oc)) {
dump_header(oc, NULL);
panic("Out of memory and no killable processes...\n");
}
if (oc->chosen && oc->chosen != (void *)-1UL) {
oom_kill_process(oc, !is_memcg_oom(oc) ? "Out of memory" :
"Memory cgroup out of memory");
/*
* Give the killed process a good chance to exit before trying
* to allocate memory again.
*/
schedule_timeout_killable(1);
}
return !!oc->chosen;
}
/*
* The pagefault handler calls here because it is out of memory, so kill a
* memory-hogging task. If oom_lock is held by somebody else, a parallel oom
* killing is already in progress so do nothing.
*/
void pagefault_out_of_memory(void)
{
struct oom_control oc = {
.zonelist = NULL,
.nodemask = NULL,
.memcg = NULL,
.gfp_mask = 0,
.order = 0,
};
if (mem_cgroup_oom_synchronize(true))
return;
if (!mutex_trylock(&oom_lock))
return;
out_of_memory(&oc);
mutex_unlock(&oom_lock);
}
| ./CrossVul/dataset_final_sorted/CWE-416/c/bad_3013_0 |
crossvul-cpp_data_bad_903_2 | /*
irc-servers-setup.c : irssi
Copyright (C) 1999-2000 Timo Sirainen
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include "module.h"
#include <irssi/src/core/signals.h>
#include <irssi/src/core/network.h>
#include <irssi/src/core/servers-setup.h>
#include <irssi/src/lib-config/iconfig.h>
#include <irssi/src/core/settings.h>
#include <irssi/src/irc/core/irc-chatnets.h>
#include <irssi/src/irc/core/irc-servers-setup.h>
#include <irssi/src/irc/core/irc-servers.h>
#include <irssi/src/irc/core/sasl.h>
/* Fill in connection information from the server setup record */
static void sig_server_setup_fill_reconn(IRC_SERVER_CONNECT_REC *conn,
IRC_SERVER_SETUP_REC *sserver)
{
if (!IS_IRC_SERVER_CONNECT(conn) ||
!IS_IRC_SERVER_SETUP(sserver))
return;
if (sserver->cmd_queue_speed > 0)
conn->cmd_queue_speed = sserver->cmd_queue_speed;
if (sserver->max_cmds_at_once > 0)
conn->max_cmds_at_once = sserver->max_cmds_at_once;
if (sserver->max_query_chans > 0)
conn->max_query_chans = sserver->max_query_chans;
}
static void sig_server_setup_fill_connect(IRC_SERVER_CONNECT_REC *conn)
{
const char *value;
if (!IS_IRC_SERVER_CONNECT(conn))
return;
value = settings_get_str("alternate_nick");
conn->alternate_nick = (value != NULL && *value != '\0') ?
g_strdup(value) : NULL;
value = settings_get_str("usermode");
conn->usermode = (value != NULL && *value != '\0') ?
g_strdup(value) : NULL;
}
static void sig_server_setup_fill_chatnet(IRC_SERVER_CONNECT_REC *conn,
IRC_CHATNET_REC *ircnet)
{
if (!IS_IRC_SERVER_CONNECT(conn))
return;
g_return_if_fail(IS_IRCNET(ircnet));
if (ircnet->alternate_nick != NULL) {
g_free_and_null(conn->alternate_nick);
conn->alternate_nick = g_strdup(ircnet->alternate_nick);
}
if (ircnet->usermode != NULL) {
g_free_and_null(conn->usermode);
conn->usermode = g_strdup(ircnet->usermode);
}
if (ircnet->max_kicks > 0) conn->max_kicks = ircnet->max_kicks;
if (ircnet->max_msgs > 0) conn->max_msgs = ircnet->max_msgs;
if (ircnet->max_modes > 0) conn->max_modes = ircnet->max_modes;
if (ircnet->max_whois > 0) conn->max_whois = ircnet->max_whois;
if (ircnet->max_cmds_at_once > 0)
conn->max_cmds_at_once = ircnet->max_cmds_at_once;
if (ircnet->cmd_queue_speed > 0)
conn->cmd_queue_speed = ircnet->cmd_queue_speed;
if (ircnet->max_query_chans > 0)
conn->max_query_chans = ircnet->max_query_chans;
/* Validate the SASL parameters filled by sig_chatnet_read() or cmd_network_add */
conn->sasl_mechanism = SASL_MECHANISM_NONE;
conn->sasl_username = NULL;
conn->sasl_password = NULL;
if (ircnet->sasl_mechanism != NULL) {
if (!g_ascii_strcasecmp(ircnet->sasl_mechanism, "plain")) {
/* The PLAIN method needs both the username and the password */
conn->sasl_mechanism = SASL_MECHANISM_PLAIN;
if (ircnet->sasl_username != NULL && *ircnet->sasl_username &&
ircnet->sasl_password != NULL && *ircnet->sasl_password) {
conn->sasl_username = ircnet->sasl_username;
conn->sasl_password = ircnet->sasl_password;
} else
g_warning("The fields sasl_username and sasl_password are either missing or empty");
}
else if (!g_ascii_strcasecmp(ircnet->sasl_mechanism, "external")) {
conn->sasl_mechanism = SASL_MECHANISM_EXTERNAL;
}
else
g_warning("Unsupported SASL mechanism \"%s\" selected", ircnet->sasl_mechanism);
}
}
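/*
 * Illustrative sketch (not part of this file): the case-insensitive SASL
 * mechanism dispatch used above, reduced to a standalone helper. The enum
 * and function names are hypothetical; g_ascii_strcasecmp() and g_warning()
 * are GLib.
 */
#include <glib.h>
enum demo_sasl { DEMO_SASL_NONE, DEMO_SASL_PLAIN, DEMO_SASL_EXTERNAL };
static enum demo_sasl demo_pick_mechanism(const char *name)
{
if (name == NULL)
return DEMO_SASL_NONE;
if (!g_ascii_strcasecmp(name, "plain"))
return DEMO_SASL_PLAIN;
if (!g_ascii_strcasecmp(name, "external"))
return DEMO_SASL_EXTERNAL;
g_warning("Unsupported SASL mechanism \"%s\" selected", name);
return DEMO_SASL_NONE;
}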
static void init_userinfo(void)
{
unsigned int changed;
const char *set, *nick, *user_name, *str;
changed = 0;
/* check if nick/username/realname weren't read from setup */
set = settings_get_str("real_name");
if (set == NULL || *set == '\0') {
str = g_getenv("IRCNAME");
settings_set_str("real_name",
str != NULL ? str : g_get_real_name());
changed |= USER_SETTINGS_REAL_NAME;
}
/* username */
user_name = settings_get_str("user_name");
if (user_name == NULL || *user_name == '\0') {
str = g_getenv("IRCUSER");
settings_set_str("user_name",
str != NULL ? str : g_get_user_name());
user_name = settings_get_str("user_name");
changed |= USER_SETTINGS_USER_NAME;
}
/* nick */
nick = settings_get_str("nick");
if (nick == NULL || *nick == '\0') {
str = g_getenv("IRCNICK");
settings_set_str("nick", str != NULL ? str : user_name);
nick = settings_get_str("nick");
changed |= USER_SETTINGS_NICK;
}
/* host name */
set = settings_get_str("hostname");
if (set == NULL || *set == '\0') {
str = g_getenv("IRCHOST");
if (str != NULL) {
settings_set_str("hostname", str);
changed |= USER_SETTINGS_HOSTNAME;
}
}
signal_emit("irssi init userinfo changed", 1, GUINT_TO_POINTER(changed));
}
static void sig_server_setup_read(IRC_SERVER_SETUP_REC *rec, CONFIG_NODE *node)
{
g_return_if_fail(rec != NULL);
g_return_if_fail(node != NULL);
if (!IS_IRC_SERVER_SETUP(rec))
return;
rec->max_cmds_at_once = config_node_get_int(node, "cmds_max_at_once", 0);
rec->cmd_queue_speed = config_node_get_int(node, "cmd_queue_speed", 0);
rec->max_query_chans = config_node_get_int(node, "max_query_chans", 0);
}
static void sig_server_setup_saved(IRC_SERVER_SETUP_REC *rec,
CONFIG_NODE *node)
{
if (!IS_IRC_SERVER_SETUP(rec))
return;
if (rec->max_cmds_at_once > 0)
iconfig_node_set_int(node, "cmds_max_at_once", rec->max_cmds_at_once);
if (rec->cmd_queue_speed > 0)
iconfig_node_set_int(node, "cmd_queue_speed", rec->cmd_queue_speed);
if (rec->max_query_chans > 0)
iconfig_node_set_int(node, "max_query_chans", rec->max_query_chans);
}
void irc_servers_setup_init(void)
{
settings_add_bool("server", "skip_motd", FALSE);
settings_add_str("server", "alternate_nick", "");
init_userinfo();
signal_add("server setup fill reconn", (SIGNAL_FUNC) sig_server_setup_fill_reconn);
signal_add("server setup fill connect", (SIGNAL_FUNC) sig_server_setup_fill_connect);
signal_add("server setup fill chatnet", (SIGNAL_FUNC) sig_server_setup_fill_chatnet);
signal_add("server setup read", (SIGNAL_FUNC) sig_server_setup_read);
signal_add("server setup saved", (SIGNAL_FUNC) sig_server_setup_saved);
}
void irc_servers_setup_deinit(void)
{
signal_remove("server setup fill reconn", (SIGNAL_FUNC) sig_server_setup_fill_reconn);
signal_remove("server setup fill connect", (SIGNAL_FUNC) sig_server_setup_fill_connect);
signal_remove("server setup fill chatnet", (SIGNAL_FUNC) sig_server_setup_fill_chatnet);
signal_remove("server setup read", (SIGNAL_FUNC) sig_server_setup_read);
signal_remove("server setup saved", (SIGNAL_FUNC) sig_server_setup_saved);
}
| ./CrossVul/dataset_final_sorted/CWE-416/c/bad_903_2 |
crossvul-cpp_data_good_1390_1 | /* libcomps - C alternative to yum.comps library
* Copyright (C) 2013 Jindrich Luza
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
* USA
*/
#include "comps_objmradix.h"
#include "comps_set.h"
#include <stdio.h>
void comps_objmrtree_data_destroy(COMPS_ObjMRTreeData * rtd) {
free(rtd->key);
COMPS_OBJECT_DESTROY(rtd->data);
comps_hslist_destroy(&rtd->subnodes);
free(rtd);
}
inline void comps_objmrtree_data_destroy_v(void * rtd) {
comps_objmrtree_data_destroy((COMPS_ObjMRTreeData*)rtd);
}
static COMPS_ObjMRTreeData * __comps_objmrtree_data_create(char * key,
size_t keylen,
COMPS_Object *data) {
COMPS_ObjMRTreeData * rtd;
if ((rtd = malloc(sizeof(*rtd))) == NULL)
return NULL;
if ((rtd->key = malloc(sizeof(char) * (keylen+1))) == NULL) {
free(rtd);
return NULL;
}
memcpy(rtd->key, key, sizeof(char)*keylen);
rtd->key[keylen] = '\0';
rtd->is_leaf = 1;
rtd->data = COMPS_OBJECT_CREATE(COMPS_ObjList, NULL);
if (data)
comps_objlist_append_x(rtd->data, data);
rtd->subnodes = comps_hslist_create();
comps_hslist_init(rtd->subnodes, NULL,
NULL,
&comps_objmrtree_data_destroy_v);
return rtd;
}
COMPS_ObjMRTreeData * comps_objmrtree_data_create(char *key, COMPS_Object *data){
COMPS_ObjMRTreeData * rtd;
rtd = __comps_objmrtree_data_create(key, strlen(key), data);
return rtd;
}
COMPS_ObjMRTreeData * comps_objmrtree_data_create_n(char * key, unsigned keylen,
void * data) {
COMPS_ObjMRTreeData * rtd;
rtd = __comps_objmrtree_data_create(key, keylen, data);
return rtd;
}
static void comps_objmrtree_create(COMPS_ObjMRTree *rtree, COMPS_Object **args){
(void)args;
rtree->subnodes = comps_hslist_create();
comps_hslist_init(rtree->subnodes, NULL, NULL, &comps_objmrtree_data_destroy_v);
if (rtree->subnodes == NULL) {
COMPS_OBJECT_DESTROY(rtree);
return;
}
rtree->len = 0;
}
void comps_objmrtree_create_u(COMPS_Object * obj, COMPS_Object **args) {
(void)args;
comps_objmrtree_create((COMPS_ObjMRTree*)obj, NULL);
}
static void comps_objmrtree_destroy(COMPS_ObjMRTree * rt) {
comps_hslist_destroy(&(rt->subnodes));
}
void comps_objmrtree_destroy_u(COMPS_Object *obj) {
comps_objmrtree_destroy((COMPS_ObjMRTree*)obj);
}
void comps_objmrtree_values_walk(COMPS_ObjMRTree * rt, void* udata,
void (*walk_f)(void*, void*)) {
COMPS_HSList *tmplist, *tmp_subnodes;
COMPS_HSListItem *it, *it2;
tmplist = comps_hslist_create();
comps_hslist_init(tmplist, NULL, NULL, NULL);
comps_hslist_append(tmplist, rt->subnodes, 0);
while (tmplist->first != NULL) {
it = tmplist->first;
comps_hslist_remove(tmplist, tmplist->first);
tmp_subnodes = (COMPS_HSList*)it->data;
free(it);
for (it = tmp_subnodes->first; it != NULL; it=it->next) {
if (((COMPS_ObjMRTreeData*)it->data)->subnodes->first) {
comps_hslist_append(tmplist,
((COMPS_ObjMRTreeData*)it->data)->subnodes, 0);
}
for (it2 = (COMPS_HSListItem*)((COMPS_ObjMRTreeData*)it->data)->data->first;
it2 != NULL; it2 = it2->next) {
walk_f(udata, it2->data);
}
}
}
comps_hslist_destroy(&tmplist);
}
void comps_objmrtree_copy(COMPS_ObjMRTree *ret, COMPS_ObjMRTree *rt){
COMPS_HSList * to_clone, *tmplist, *new_subnodes;
COMPS_HSListItem *it, *it2;
COMPS_ObjMRTreeData *rtdata;
COMPS_ObjList *new_data_list;
to_clone = comps_hslist_create();
comps_hslist_init(to_clone, NULL, NULL, NULL);
for (it = rt->subnodes->first; it != NULL; it = it->next) {
rtdata = comps_objmrtree_data_create(
((COMPS_ObjMRTreeData*)it->data)->key,
NULL);
new_data_list = (COMPS_ObjList*)
COMPS_OBJECT_COPY(((COMPS_ObjMRTreeData*)it->data)->data);
COMPS_OBJECT_DESTROY(rtdata->data);
comps_hslist_destroy(&rtdata->subnodes);
rtdata->subnodes = ((COMPS_ObjMRTreeData*)it->data)->subnodes;
rtdata->data = new_data_list;
comps_hslist_append(ret->subnodes, rtdata, 0);
comps_hslist_append(to_clone, rtdata, 0);
}
while (to_clone->first) {
it2 = to_clone->first;
tmplist = ((COMPS_ObjMRTreeData*)it2->data)->subnodes;
comps_hslist_remove(to_clone, to_clone->first);
new_subnodes = comps_hslist_create();
comps_hslist_init(new_subnodes, NULL, NULL, &comps_objmrtree_data_destroy_v);
for (it = tmplist->first; it != NULL; it = it->next) {
rtdata = comps_objmrtree_data_create(
((COMPS_ObjMRTreeData*)it->data)->key,
NULL);
new_data_list = (COMPS_ObjList*)
COMPS_OBJECT_COPY(((COMPS_ObjMRTreeData*)it->data)->data);
comps_hslist_destroy(&rtdata->subnodes);
COMPS_OBJECT_DESTROY(rtdata->data);
rtdata->subnodes = ((COMPS_ObjMRTreeData*)it->data)->subnodes;
rtdata->data = new_data_list;
comps_hslist_append(new_subnodes, rtdata, 0);
comps_hslist_append(to_clone, rtdata, 0);
}
((COMPS_ObjMRTreeData*)it2->data)->subnodes = new_subnodes;
free(it2);
}
ret->len = rt->len;
comps_hslist_destroy(&to_clone);
}
COMPS_COPY_u(objmrtree, COMPS_ObjMRTree) /*comps_utils.h macro*/
void comps_objmrtree_copy_shallow(COMPS_ObjMRTree *ret, COMPS_ObjMRTree *rt){
COMPS_HSList * to_clone, *tmplist, *new_subnodes;
COMPS_HSListItem *it, *it2;
COMPS_ObjMRTreeData *rtdata;
COMPS_ObjList *new_data_list;
to_clone = comps_hslist_create();
comps_hslist_init(to_clone, NULL, NULL, NULL);
for (it = rt->subnodes->first; it != NULL; it = it->next) {
rtdata = comps_objmrtree_data_create(
((COMPS_ObjMRTreeData*)it->data)->key,
NULL);
new_data_list = (COMPS_ObjList*)
COMPS_OBJECT_COPY(((COMPS_ObjMRTreeData*)it->data)->data);
COMPS_OBJECT_DESTROY(rtdata->data);
comps_hslist_destroy(&rtdata->subnodes);
rtdata->subnodes = ((COMPS_ObjMRTreeData*)it->data)->subnodes;
rtdata->data = new_data_list;
comps_hslist_append(ret->subnodes, rtdata, 0);
comps_hslist_append(to_clone, rtdata, 0);
}
while (to_clone->first) {
it2 = to_clone->first;
tmplist = ((COMPS_ObjMRTreeData*)it2->data)->subnodes;
comps_hslist_remove(to_clone, to_clone->first);
new_subnodes = comps_hslist_create();
comps_hslist_init(new_subnodes, NULL, NULL, &comps_objmrtree_data_destroy_v);
for (it = tmplist->first; it != NULL; it = it->next) {
rtdata = comps_objmrtree_data_create(
((COMPS_ObjMRTreeData*)it->data)->key,
NULL);
new_data_list = ((COMPS_ObjMRTreeData*)it->data)->data;
comps_hslist_destroy(&rtdata->subnodes);
COMPS_OBJECT_DESTROY(rtdata->data);
rtdata->subnodes = ((COMPS_ObjMRTreeData*)it->data)->subnodes;
rtdata->data = new_data_list;
comps_hslist_append(new_subnodes, rtdata, 0);
comps_hslist_append(to_clone, rtdata, 0);
}
((COMPS_ObjMRTreeData*)it2->data)->subnodes = new_subnodes;
free(it2);
}
ret->len = rt->len;
comps_hslist_destroy(&to_clone);
}
COMPS_ObjMRTree * comps_objmrtree_clone(COMPS_ObjMRTree * rt) {
COMPS_HSList * to_clone, *tmplist, *new_subnodes;
COMPS_ObjMRTree * ret;
COMPS_HSListItem *it, *it2;
COMPS_ObjMRTreeData *rtdata;
COMPS_ObjList *new_data_list;
to_clone = comps_hslist_create();
comps_hslist_init(to_clone, NULL, NULL, NULL);
ret = COMPS_OBJECT_CREATE(COMPS_ObjMRTree, NULL);
for (it = rt->subnodes->first; it != NULL; it = it->next) {
rtdata = comps_objmrtree_data_create(
((COMPS_ObjMRTreeData*)it->data)->key,
NULL);
new_data_list = (COMPS_ObjList*)
COMPS_OBJECT_COPY(((COMPS_ObjMRTreeData*)it->data)->data);
COMPS_OBJECT_DESTROY(rtdata->data);
comps_hslist_destroy(&rtdata->subnodes);
rtdata->subnodes = ((COMPS_ObjMRTreeData*)it->data)->subnodes;
rtdata->data = new_data_list;
comps_hslist_append(ret->subnodes, rtdata, 0);
comps_hslist_append(to_clone, rtdata, 0);
}
while (to_clone->first) {
it2 = to_clone->first;
tmplist = ((COMPS_ObjMRTreeData*)it2->data)->subnodes;
comps_hslist_remove(to_clone, to_clone->first);
new_subnodes = comps_hslist_create();
comps_hslist_init(new_subnodes, NULL, NULL, &comps_objmrtree_data_destroy_v);
for (it = tmplist->first; it != NULL; it = it->next) {
rtdata = comps_objmrtree_data_create(
((COMPS_ObjMRTreeData*)it->data)->key,
NULL);
new_data_list = (COMPS_ObjList*)
COMPS_OBJECT_COPY(((COMPS_ObjMRTreeData*)it->data)->data);
comps_hslist_destroy(&rtdata->subnodes);
COMPS_OBJECT_DESTROY(rtdata->data);
rtdata->subnodes = ((COMPS_ObjMRTreeData*)it->data)->subnodes;
rtdata->data = new_data_list;
comps_hslist_append(new_subnodes, rtdata, 0);
comps_hslist_append(to_clone, rtdata, 0);
}
((COMPS_ObjMRTreeData*)it2->data)->subnodes = new_subnodes;
free(it2);
}
ret->len = rt->len;
comps_hslist_destroy(&to_clone);
return ret;
}
void comps_objmrtree_unite(COMPS_ObjMRTree *rt1, COMPS_ObjMRTree *rt2) {
COMPS_HSList *tmplist, *tmp_subnodes;
COMPS_HSListItem *it;
COMPS_ObjListIt *it2;
struct Pair {
COMPS_HSList * subnodes;
char * key;
} *pair, *parent_pair;
pair = malloc(sizeof(struct Pair));
pair->subnodes = rt2->subnodes;
pair->key = NULL;
tmplist = comps_hslist_create();
comps_hslist_init(tmplist, NULL, NULL, &free);
comps_hslist_append(tmplist, pair, 0);
while (tmplist->first != NULL) {
it = tmplist->first;
comps_hslist_remove(tmplist, tmplist->first);
tmp_subnodes = ((struct Pair*)it->data)->subnodes;
parent_pair = (struct Pair*) it->data;
free(it);
for (it = tmp_subnodes->first; it != NULL; it=it->next) {
pair = malloc(sizeof(struct Pair));
pair->subnodes = ((COMPS_ObjMRTreeData*)it->data)->subnodes;
if (parent_pair->key != NULL) {
pair->key =
malloc(sizeof(char)
* (strlen(((COMPS_ObjMRTreeData*)it->data)->key)
+ strlen(parent_pair->key) + 1));
memcpy(pair->key, parent_pair->key,
sizeof(char) * strlen(parent_pair->key));
memcpy(pair->key+strlen(parent_pair->key),
((COMPS_ObjMRTreeData*)it->data)->key,
sizeof(char)*(strlen(((COMPS_ObjMRTreeData*)it->data)->key)+1));
} else {
pair->key = malloc(sizeof(char)*
(strlen(((COMPS_ObjMRTreeData*)it->data)->key) +
1));
memcpy(pair->key, ((COMPS_ObjMRTreeData*)it->data)->key,
sizeof(char)*(strlen(((COMPS_ObjMRTreeData*)it->data)->key)+1));
}
/* current node has data */
if (((COMPS_ObjMRTreeData*)it->data)->data->first != NULL) {
for (it2 = ((COMPS_ObjMRTreeData*)it->data)->data->first;
it2 != NULL; it2 = it2->next) {
comps_objmrtree_set(rt1, pair->key, it2->comps_obj);
}
if (((COMPS_ObjMRTreeData*)it->data)->subnodes->first) {
comps_hslist_append(tmplist, pair, 0);
} else {
free(pair->key);
free(pair);
}
/* current node has no data */
} else {
if (((COMPS_ObjMRTreeData*)it->data)->subnodes->first) {
comps_hslist_append(tmplist, pair, 0);
} else {
free(pair->key);
free(pair);
}
}
}
free(parent_pair->key);
free(parent_pair);
}
comps_hslist_destroy(&tmplist);
}
void comps_objmrtree_set_x(COMPS_ObjMRTree *rt, char *key, COMPS_Object *data) {
__comps_objmrtree_set(rt, key, strlen(key), data);
}
void comps_objmrtree_set(COMPS_ObjMRTree *rt, char *key, COMPS_Object *data) {
__comps_objmrtree_set(rt, key, strlen(key), comps_object_incref(data));
}
void __comps_objmrtree_set(COMPS_ObjMRTree *rt, char *key,
size_t len, COMPS_Object *ndata) {
static COMPS_HSListItem *it;
COMPS_HSList *subnodes;
COMPS_ObjMRTreeData *rtd;
static COMPS_ObjMRTreeData *rtdata;
size_t _len, offset=0;
unsigned x, found = 0;
char ended;
if (rt->subnodes == NULL)
return;
subnodes = rt->subnodes;
while (offset != len)
{
found = 0;
for (it = subnodes->first; it != NULL; it = it->next) {
if (((COMPS_ObjMRTreeData*)it->data)->key[0] == key[offset]) {
found = 1;
break;
}
}
if (!found) { // not found in subnodes; create new subnode
rtd = comps_objmrtree_data_create(key+offset, ndata);
comps_hslist_append(subnodes, rtd, 0);
rt->len++;
return;
} else {
rtdata = (COMPS_ObjMRTreeData*)it->data;
ended = 0;
for (x=1; ;x++) {
if (rtdata->key[x] == 0) ended += 1;
if (x == len - offset) ended += 2;
if (ended != 0) break;
if (key[offset+x] != rtdata->key[x]) break;
}
if (ended == 3) { //keys equals; append new data
comps_objlist_append_x(rtdata->data, ndata);
rt->len++;
return;
} else if (ended == 2) { //global key ends first; make global leaf
comps_hslist_remove(subnodes, it);
it->next = NULL;
rtd = comps_objmrtree_data_create(key+offset, ndata);
comps_hslist_append(subnodes, rtd, 0);
((COMPS_ObjMRTreeData*)subnodes->last->data)->subnodes->last = it;
((COMPS_ObjMRTreeData*)subnodes->last->data)->subnodes->first = it;
_len = strlen(key + offset);
memmove(rtdata->key,rtdata->key + _len,
strlen(rtdata->key) - _len);
rtdata->key[strlen(rtdata->key) - _len] = 0;
rtdata->key = realloc(rtdata->key,
sizeof(char)* (strlen(rtdata->key)+1));
rt->len++;
return;
} else if (ended == 1) { //local key ends first; go deeper
subnodes = rtdata->subnodes;
offset += x;
} else { /* keys differ */
COMPS_ObjList *tmpdata = rtdata->data;
COMPS_HSList *tmphslist = rtdata->subnodes;
rtdata->subnodes = comps_hslist_create();
comps_hslist_init(rtdata->subnodes, NULL, NULL,
&comps_objmrtree_data_destroy_v);
int cmpret = strcmp(key+offset+x, rtdata->key+x);
rtdata->data = COMPS_OBJECT_CREATE(COMPS_ObjList, NULL);
if (cmpret > 0) {
rtd = comps_objmrtree_data_create(rtdata->key+x,
(COMPS_Object*)tmpdata);
comps_hslist_destroy(&rtd->subnodes);
rtd->subnodes = tmphslist;
comps_hslist_append(rtdata->subnodes, rtd, 0);
rtd = comps_objmrtree_data_create(key+offset+x,
(COMPS_Object*)ndata);
comps_hslist_append(rtdata->subnodes, rtd, 0);
} else {
rtd = comps_objmrtree_data_create(key+offset+x,
(COMPS_Object*)ndata);
comps_hslist_append(rtdata->subnodes, rtd, 0);
rtd = comps_objmrtree_data_create(rtdata->key+x,
(COMPS_Object*)tmpdata);
comps_hslist_destroy(&rtd->subnodes);
rtd->subnodes = tmphslist;
comps_hslist_append(rtdata->subnodes, rtd, 0);
}
rtdata->key = realloc(rtdata->key, sizeof(char)*(x+1));
rtdata->key[x] = 0;
rt->len++;
return;
}
}
}
}
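/*
 * Illustrative sketch (not part of this file): the "ended" prefix-comparison
 * state used above, extracted into a standalone helper (names hypothetical).
 * It assumes the first characters already matched, mirroring the caller, and
 * returns 3 when the node key and the remaining lookup key match exactly,
 * 2 when the lookup key is a proper prefix of the node key (make a new leaf),
 * 1 when the node key is a proper prefix of the lookup key (descend), and
 * 0 when the keys diverge (split the node).
 */
#include <stddef.h>
static char demo_prefix_state(const char *node_key, const char *key,
size_t keylen) {
size_t x;
char ended;
for (x = 1; ; x++) {
ended = 0;
if (node_key[x] == 0) ended += 1;
if (x == keylen) ended += 2;
if (ended != 0) return ended;
if (key[x] != node_key[x]) return 0;
}
}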
void comps_objmrtree_set_n(COMPS_ObjMRTree *rt, char *key,
size_t len, void *ndata) {
__comps_objmrtree_set(rt, key, len, ndata);
}
COMPS_ObjList * comps_objmrtree_get(COMPS_ObjMRTree * rt, const char * key) {
COMPS_HSList * subnodes;
COMPS_HSListItem * it = NULL;
COMPS_ObjMRTreeData * rtdata;
unsigned int offset, len, x;
char found, ended;
len = strlen(key);
offset = 0;
subnodes = rt->subnodes;
while (offset != len) {
found = 0;
for (it = subnodes->first; it != NULL; it=it->next) {
if (((COMPS_ObjMRTreeData*)it->data)->key[0] == key[offset]) {
found = 1;
break;
}
}
if (!found)
return NULL;
rtdata = (COMPS_ObjMRTreeData*)it->data;
for (x=1; ;x++) {
ended=0;
if (rtdata->key[x] == 0) ended += 1;
if (x == len - offset) ended += 2;
if (ended != 0) break;
if (key[offset+x] != rtdata->key[x]) break;
}
if (ended == 3) return (COMPS_ObjList*)
comps_object_incref((COMPS_Object*)rtdata->data);
else if (ended == 1) offset+=x;
else return NULL;
subnodes = ((COMPS_ObjMRTreeData*)it->data)->subnodes;
}
if (it)
return ((COMPS_ObjMRTreeData*)it->data)->data;
else return NULL;
}
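/*
 * Illustrative sketch (not part of this file): typical set/get usage of this
 * multi-value radix tree, using the functions defined in this unit.
 * comps_str() is assumed to be the libcomps string constructor; error
 * handling and the initial references of the created objects are omitted
 * for brevity.
 */
static void demo_objmrtree_usage(void) {
COMPS_ObjMRTree *rt = COMPS_OBJECT_CREATE(COMPS_ObjMRTree, NULL);
COMPS_ObjList *hits;
comps_objmrtree_set(rt, (char*)"alpha", (COMPS_Object*)comps_str("one"));
comps_objmrtree_set(rt, (char*)"alpha", (COMPS_Object*)comps_str("two"));
hits = comps_objmrtree_get(rt, "alpha"); /* list holding both values */
COMPS_OBJECT_DESTROY(hits);
COMPS_OBJECT_DESTROY(rt);
}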
void comps_objmrtree_unset(COMPS_ObjMRTree * rt, const char * key) {
COMPS_HSList * subnodes;
COMPS_HSListItem * it;
COMPS_ObjMRTreeData * rtdata;
unsigned int offset, len, x;
char found, ended;
COMPS_HSList * path;
struct Relation {
COMPS_HSList * parent_nodes;
COMPS_HSListItem * child_it;
} *relation;
path = comps_hslist_create();
comps_hslist_init(path, NULL, NULL, &free);
len = strlen(key);
offset = 0;
subnodes = rt->subnodes;
while (offset != len) {
found = 0;
for (it = subnodes->first; it != NULL; it=it->next) {
if (((COMPS_ObjMRTreeData*)it->data)->key[0] == key[offset]) {
found = 1;
break;
}
}
if (!found) {
comps_hslist_destroy(&path);
return;
}
rtdata = (COMPS_ObjMRTreeData*)it->data;
for (x=1; ;x++) {
ended=0;
if (rtdata->key[x] == 0) ended += 1;
if (x == len - offset) ended += 2;
if (ended != 0) break;
if (key[offset+x] != rtdata->key[x]) break;
}
if (ended == 3) {
/* remove node from tree only if there's no descendant*/
if (rtdata->subnodes->last == NULL) {
comps_hslist_remove(subnodes, it);
rt->len -= rtdata->data->len;
comps_objmrtree_data_destroy(rtdata);
free(it);
}
else {
rt->len -= rtdata->data->len;
comps_objlist_clear(rtdata->data);
rtdata->is_leaf = 0;
}
if (path->last == NULL) {
comps_hslist_destroy(&path);
return;
}
rtdata = (COMPS_ObjMRTreeData*)
((struct Relation*)path->last->data)->child_it->data;
/* recursively remove all predecessors of the deleted node that have no children */
while (rtdata->subnodes->last == NULL) {
//printf("removing '%s'\n", rtdata->key);
comps_objmrtree_data_destroy(rtdata);
comps_hslist_remove(
((struct Relation*)path->last->data)->parent_nodes,
((struct Relation*)path->last->data)->child_it);
free(((struct Relation*)path->last->data)->child_it);
it = path->last;
comps_hslist_remove(path, path->last);
free(it);
rtdata = (COMPS_ObjMRTreeData*)
((struct Relation*)path->last->data)->child_it->data;
}
comps_hslist_destroy(&path);
return;
}
else if (ended == 1) offset+=x;
else {
comps_hslist_destroy(&path);
return;
}
if ((relation = malloc(sizeof(struct Relation))) == NULL) {
comps_hslist_destroy(&path);
return;
}
subnodes = ((COMPS_ObjMRTreeData*)it->data)->subnodes;
relation->parent_nodes = subnodes;
relation->child_it = it;
comps_hslist_append(path, (void*)relation, 0);
}
comps_hslist_destroy(&path);
return;
}
inline void comps_objmrtree_pair_destroy_v(void * pair) {
free(((COMPS_ObjMRTreePair *)pair)->key);
free(pair);
}
inline COMPS_HSList* __comps_objmrtree_all(COMPS_ObjMRTree * rt, char keyvalpair) {
COMPS_HSList *to_process, *ret;
COMPS_HSListItem *hsit, *oldit;
size_t x;
struct Pair {
char *key;
void *data;
COMPS_HSList *subnodes;
} *pair, *current_pair = NULL;
COMPS_ObjMRTreePair *rtpair;
to_process = comps_hslist_create();
comps_hslist_init(to_process, NULL, NULL, &free);
ret = comps_hslist_create();
if (keyvalpair == 0)
comps_hslist_init(ret, NULL, NULL, &free);
else if (keyvalpair == 1)
comps_hslist_init(ret, NULL, NULL, NULL);
else
comps_hslist_init(ret, NULL, NULL, &comps_objmrtree_pair_destroy_v);
for (hsit = rt->subnodes->first; hsit != NULL; hsit = hsit->next) {
pair = malloc(sizeof(struct Pair));
pair->key = __comps_strcpy(((COMPS_ObjMRTreeData*)hsit->data)->key);
pair->data = ((COMPS_ObjMRTreeData*)hsit->data)->data;
pair->subnodes = ((COMPS_ObjMRTreeData*)hsit->data)->subnodes;
comps_hslist_append(to_process, pair, 0);
}
while (to_process->first) {
current_pair = to_process->first->data;
oldit = to_process->first;
comps_hslist_remove(to_process, to_process->first);
if (current_pair->data) {
if (keyvalpair == 0) {
comps_hslist_append(ret, __comps_strcpy(current_pair->key), 0);
} else if (keyvalpair == 1) {
comps_hslist_append(ret, current_pair->data, 0);
} else {
rtpair = malloc(sizeof(COMPS_ObjMRTreePair));
rtpair->key = __comps_strcpy(current_pair->key);
rtpair->data = current_pair->data;
comps_hslist_append(ret, rtpair, 0);
}
}
for (hsit = current_pair->subnodes->first, x = 0;
hsit != NULL; hsit = hsit->next, x++) {
pair = malloc(sizeof(struct Pair));
pair->key = __comps_strcat(current_pair->key,
((COMPS_ObjMRTreeData*)hsit->data)->key);
pair->data = ((COMPS_ObjMRTreeData*)hsit->data)->data;
pair->subnodes = ((COMPS_ObjMRTreeData*)hsit->data)->subnodes;
comps_hslist_insert_at(to_process, x, pair, 0);
}
free(current_pair->key);
free(current_pair);
free(oldit);
}
comps_hslist_destroy(&to_process);
return ret;
}
COMPS_HSList* comps_objmrtree_keys(COMPS_ObjMRTree * rt) {
return __comps_objmrtree_all(rt, 0);
}
COMPS_HSList* comps_objmrtree_values(COMPS_ObjMRTree * rt) {
return __comps_objmrtree_all(rt, 1);
}
COMPS_HSList* comps_objmrtree_pairs(COMPS_ObjMRTree * rt) {
return __comps_objmrtree_all(rt, 2);
}
void comps_objmrtree_clear(COMPS_ObjMRTree * rt) {
COMPS_HSListItem *it, *oldit;
if (rt == NULL) return;
if (rt->subnodes == NULL) return;
oldit = rt->subnodes->first;
it = (oldit)?oldit->next:NULL;
for (;it != NULL; it=it->next) {
if (rt->subnodes->data_destructor != NULL)
rt->subnodes->data_destructor(oldit->data);
free(oldit);
oldit = it;
}
if (oldit) {
if (rt->subnodes->data_destructor != NULL)
rt->subnodes->data_destructor(oldit->data);
free(oldit);
}
}
char comps_objmrtree_paircmp(void *obj1, void *obj2) {
if (strcmp(((COMPS_ObjMRTreePair*)obj1)->key,
((COMPS_ObjMRTreePair*)obj2)->key) != 0)
return 0;
return comps_object_cmp((COMPS_Object*)((COMPS_ObjMRTreePair*)obj1)->data,
(COMPS_Object*)((COMPS_ObjMRTreePair*)obj2)->data);
}
signed char comps_objmrtree_cmp(COMPS_ObjMRTree *ort1, COMPS_ObjMRTree *ort2) {
COMPS_HSList *values1, *values2;
COMPS_HSListItem *it;
COMPS_Set *set1, *set2;
signed char ret;
values1 = comps_objmrtree_pairs(ort1);
values2 = comps_objmrtree_pairs(ort2);
set1 = comps_set_create();
comps_set_init(set1, NULL, NULL, NULL, &comps_objmrtree_paircmp);
set2 = comps_set_create();
comps_set_init(set2, NULL, NULL, NULL, &comps_objmrtree_paircmp);
for (it = values1->first; it != NULL; it = it->next) {
comps_set_add(set1, it->data);
}
for (it = values2->first; it != NULL; it = it->next) {
comps_set_add(set2, it->data);
}
ret = comps_set_cmp(set1, set2);
comps_set_destroy(&set1);
comps_set_destroy(&set2);
//printf("objmrtree cmp %d\n", !ret);
comps_hslist_destroy(&values1);
comps_hslist_destroy(&values2);
return !ret;
}
COMPS_CMP_u(objmrtree, COMPS_ObjMRTree)
COMPS_ObjectInfo COMPS_ObjMRTree_ObjInfo = {
.obj_size = sizeof(COMPS_ObjMRTree),
.constructor = &comps_objmrtree_create_u,
.destructor = &comps_objmrtree_destroy_u,
.copy = &comps_objmrtree_copy_u,
.obj_cmp = &comps_objmrtree_cmp_u
};
| ./CrossVul/dataset_final_sorted/CWE-416/c/good_1390_1 |
crossvul-cpp_data_good_3994_0 | // SPDX-License-Identifier: GPL-2.0
/*
* message.c - synchronous message handling
*
* Released under the GPLv2 only.
*/
#include <linux/acpi.h>
#include <linux/pci.h> /* for scatterlist macros */
#include <linux/usb.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/timer.h>
#include <linux/ctype.h>
#include <linux/nls.h>
#include <linux/device.h>
#include <linux/scatterlist.h>
#include <linux/usb/cdc.h>
#include <linux/usb/quirks.h>
#include <linux/usb/hcd.h> /* for usbcore internals */
#include <linux/usb/of.h>
#include <asm/byteorder.h>
#include "usb.h"
static void cancel_async_set_config(struct usb_device *udev);
struct api_context {
struct completion done;
int status;
};
static void usb_api_blocking_completion(struct urb *urb)
{
struct api_context *ctx = urb->context;
ctx->status = urb->status;
complete(&ctx->done);
}
/*
* Starts urb and waits for completion or timeout. Note that this call
* is NOT interruptible. Many device driver i/o requests should be
* interruptible and therefore these drivers should implement their
* own interruptible routines.
*/
static int usb_start_wait_urb(struct urb *urb, int timeout, int *actual_length)
{
struct api_context ctx;
unsigned long expire;
int retval;
init_completion(&ctx.done);
urb->context = &ctx;
urb->actual_length = 0;
retval = usb_submit_urb(urb, GFP_NOIO);
if (unlikely(retval))
goto out;
expire = timeout ? msecs_to_jiffies(timeout) : MAX_SCHEDULE_TIMEOUT;
if (!wait_for_completion_timeout(&ctx.done, expire)) {
usb_kill_urb(urb);
retval = (ctx.status == -ENOENT ? -ETIMEDOUT : ctx.status);
dev_dbg(&urb->dev->dev,
"%s timed out on ep%d%s len=%u/%u\n",
current->comm,
usb_endpoint_num(&urb->ep->desc),
usb_urb_dir_in(urb) ? "in" : "out",
urb->actual_length,
urb->transfer_buffer_length);
} else
retval = ctx.status;
out:
if (actual_length)
*actual_length = urb->actual_length;
usb_free_urb(urb);
return retval;
}
/*-------------------------------------------------------------------*/
/* returns status (negative) or length (positive) */
static int usb_internal_control_msg(struct usb_device *usb_dev,
unsigned int pipe,
struct usb_ctrlrequest *cmd,
void *data, int len, int timeout)
{
struct urb *urb;
int retv;
int length;
urb = usb_alloc_urb(0, GFP_NOIO);
if (!urb)
return -ENOMEM;
usb_fill_control_urb(urb, usb_dev, pipe, (unsigned char *)cmd, data,
len, usb_api_blocking_completion, NULL);
retv = usb_start_wait_urb(urb, timeout, &length);
if (retv < 0)
return retv;
else
return length;
}
/**
* usb_control_msg - Builds a control urb, sends it off and waits for completion
* @dev: pointer to the usb device to send the message to
* @pipe: endpoint "pipe" to send the message to
* @request: USB message request value
* @requesttype: USB message request type value
* @value: USB message value
* @index: USB message index value
* @data: pointer to the data to send
* @size: length in bytes of the data to send
* @timeout: time in msecs to wait for the message to complete before timing
* out (if 0 the wait is forever)
*
* Context: !in_interrupt ()
*
* This function sends a simple control message to a specified endpoint and
* waits for the message to complete, or timeout.
*
* Don't use this function from within an interrupt context. If you need
* an asynchronous message, or need to send a message from within interrupt
* context, use usb_submit_urb(). If a thread in your driver uses this call,
* make sure your disconnect() method can wait for it to complete. Since you
* don't have a handle on the URB used, you can't cancel the request.
*
* Return: If successful, the number of bytes transferred. Otherwise, a negative
* error number.
*/
int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
__u8 requesttype, __u16 value, __u16 index, void *data,
__u16 size, int timeout)
{
struct usb_ctrlrequest *dr;
int ret;
dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO);
if (!dr)
return -ENOMEM;
dr->bRequestType = requesttype;
dr->bRequest = request;
dr->wValue = cpu_to_le16(value);
dr->wIndex = cpu_to_le16(index);
dr->wLength = cpu_to_le16(size);
ret = usb_internal_control_msg(dev, pipe, dr, data, size, timeout);
/* Linger a bit, prior to the next control message. */
if (dev->quirks & USB_QUIRK_DELAY_CTRL_MSG)
msleep(200);
kfree(dr);
return ret;
}
EXPORT_SYMBOL_GPL(usb_control_msg);
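/*
 * Example (illustrative sketch): a typical blocking control read built
 * on usb_control_msg(). The helper name and the vendor bRequest value
 * 0x01 are hypothetical; the data buffer must be kmalloc()ed
 * (DMA-capable), never stack memory.
 */
#if 0
static int example_vendor_read_status(struct usb_device *udev, u8 *buf)
{
	int ret;

	/* device-to-host, vendor-specific, device recipient, on ep0 */
	ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
			      0x01, /* hypothetical vendor bRequest */
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, 0, buf, 8, USB_CTRL_GET_TIMEOUT);
	if (ret < 0)
		return ret;		/* negative errno */
	return ret == 8 ? 0 : -EIO;	/* ret is the byte count */
}
#endif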
/**
* usb_interrupt_msg - Builds an interrupt urb, sends it off and waits for completion
* @usb_dev: pointer to the usb device to send the message to
* @pipe: endpoint "pipe" to send the message to
* @data: pointer to the data to send
* @len: length in bytes of the data to send
* @actual_length: pointer to a location to put the actual length transferred
* in bytes
* @timeout: time in msecs to wait for the message to complete before
* timing out (if 0 the wait is forever)
*
* Context: !in_interrupt ()
*
* This function sends a simple interrupt message to a specified endpoint and
* waits for the message to complete, or timeout.
*
* Don't use this function from within an interrupt context. If you need
* an asynchronous message, or need to send a message from within interrupt
 * context, use usb_submit_urb(). If a thread in your driver uses this call,
* make sure your disconnect() method can wait for it to complete. Since you
* don't have a handle on the URB used, you can't cancel the request.
*
* Return:
* If successful, 0. Otherwise a negative error number. The number of actual
* bytes transferred will be stored in the @actual_length parameter.
*/
int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
void *data, int len, int *actual_length, int timeout)
{
return usb_bulk_msg(usb_dev, pipe, data, len, actual_length, timeout);
}
EXPORT_SYMBOL_GPL(usb_interrupt_msg);
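/*
 * Example (illustrative sketch): a blocking interrupt-IN read using
 * usb_interrupt_msg(). The helper name is hypothetical; @buf must be
 * kmalloc()ed for DMA, and the call may block up to the timeout.
 */
#if 0
static int example_read_int_report(struct usb_device *udev, int epnum,
				   void *buf, int len)
{
	int actual = 0;
	int ret = usb_interrupt_msg(udev, usb_rcvintpipe(udev, epnum),
				    buf, len, &actual, 1000 /* ms */);

	return ret ? ret : actual;	/* bytes received on success */
}
#endif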
/**
* usb_bulk_msg - Builds a bulk urb, sends it off and waits for completion
* @usb_dev: pointer to the usb device to send the message to
* @pipe: endpoint "pipe" to send the message to
* @data: pointer to the data to send
* @len: length in bytes of the data to send
* @actual_length: pointer to a location to put the actual length transferred
* in bytes
* @timeout: time in msecs to wait for the message to complete before
* timing out (if 0 the wait is forever)
*
* Context: !in_interrupt ()
*
* This function sends a simple bulk message to a specified endpoint
* and waits for the message to complete, or timeout.
*
* Don't use this function from within an interrupt context. If you need
* an asynchronous message, or need to send a message from within interrupt
 * context, use usb_submit_urb(). If a thread in your driver uses this call,
* make sure your disconnect() method can wait for it to complete. Since you
* don't have a handle on the URB used, you can't cancel the request.
*
 * Because usb_interrupt_msg() is only a thin wrapper around this routine
 * and there is no USBDEVFS_INTERRUPT ioctl, callers also use this routine
 * to submit URBs for interrupt endpoints. We will take the liberty of
 * creating an interrupt URB (with the default interval) if the target is
 * an interrupt endpoint.
*
* Return:
* If successful, 0. Otherwise a negative error number. The number of actual
* bytes transferred will be stored in the @actual_length parameter.
*
*/
int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
void *data, int len, int *actual_length, int timeout)
{
struct urb *urb;
struct usb_host_endpoint *ep;
ep = usb_pipe_endpoint(usb_dev, pipe);
if (!ep || len < 0)
return -EINVAL;
urb = usb_alloc_urb(0, GFP_KERNEL);
if (!urb)
return -ENOMEM;
if ((ep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
USB_ENDPOINT_XFER_INT) {
pipe = (pipe & ~(3 << 30)) | (PIPE_INTERRUPT << 30);
usb_fill_int_urb(urb, usb_dev, pipe, data, len,
usb_api_blocking_completion, NULL,
ep->desc.bInterval);
} else
usb_fill_bulk_urb(urb, usb_dev, pipe, data, len,
usb_api_blocking_completion, NULL);
return usb_start_wait_urb(urb, timeout, actual_length);
}
EXPORT_SYMBOL_GPL(usb_bulk_msg);
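/*
 * Example (illustrative sketch): a blocking bulk-IN read. The helper
 * name is hypothetical; as above, @buf must be DMA-capable memory and
 * the caller must be able to sleep.
 */
#if 0
static int example_bulk_read(struct usb_device *udev, int epnum,
			     void *buf, int len, int *actual)
{
	return usb_bulk_msg(udev, usb_rcvbulkpipe(udev, epnum),
			    buf, len, actual, 5000 /* ms */);
}
#endif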
/*-------------------------------------------------------------------*/
static void sg_clean(struct usb_sg_request *io)
{
if (io->urbs) {
while (io->entries--)
usb_free_urb(io->urbs[io->entries]);
kfree(io->urbs);
io->urbs = NULL;
}
io->dev = NULL;
}
static void sg_complete(struct urb *urb)
{
unsigned long flags;
struct usb_sg_request *io = urb->context;
int status = urb->status;
spin_lock_irqsave(&io->lock, flags);
/* In 2.5 we require hcds' endpoint queues not to progress after fault
* reports, until the completion callback (this!) returns. That lets
* device driver code (like this routine) unlink queued urbs first,
* if it needs to, since the HC won't work on them at all. So it's
* not possible for page N+1 to overwrite page N, and so on.
*
* That's only for "hard" faults; "soft" faults (unlinks) sometimes
* complete before the HCD can get requests away from hardware,
* though never during cleanup after a hard fault.
*/
if (io->status
&& (io->status != -ECONNRESET
|| status != -ECONNRESET)
&& urb->actual_length) {
dev_err(io->dev->bus->controller,
"dev %s ep%d%s scatterlist error %d/%d\n",
io->dev->devpath,
usb_endpoint_num(&urb->ep->desc),
usb_urb_dir_in(urb) ? "in" : "out",
status, io->status);
/* BUG (); */
}
if (io->status == 0 && status && status != -ECONNRESET) {
int i, found, retval;
io->status = status;
/* the previous urbs, and this one, completed already.
* unlink pending urbs so they won't rx/tx bad data.
* careful: unlink can sometimes be synchronous...
*/
spin_unlock_irqrestore(&io->lock, flags);
for (i = 0, found = 0; i < io->entries; i++) {
if (!io->urbs[i])
continue;
if (found) {
usb_block_urb(io->urbs[i]);
retval = usb_unlink_urb(io->urbs[i]);
if (retval != -EINPROGRESS &&
retval != -ENODEV &&
retval != -EBUSY &&
retval != -EIDRM)
dev_err(&io->dev->dev,
"%s, unlink --> %d\n",
__func__, retval);
} else if (urb == io->urbs[i])
found = 1;
}
spin_lock_irqsave(&io->lock, flags);
}
/* on the last completion, signal usb_sg_wait() */
io->bytes += urb->actual_length;
io->count--;
if (!io->count)
complete(&io->complete);
spin_unlock_irqrestore(&io->lock, flags);
}
/**
* usb_sg_init - initializes scatterlist-based bulk/interrupt I/O request
 * @io: request block being initialized. Until usb_sg_wait() returns,
 *	treat this as a pointer to an opaque block of memory.
* @dev: the usb device that will send or receive the data
* @pipe: endpoint "pipe" used to transfer the data
* @period: polling rate for interrupt endpoints, in frames or
* (for high speed endpoints) microframes; ignored for bulk
* @sg: scatterlist entries
* @nents: how many entries in the scatterlist
* @length: how many bytes to send from the scatterlist, or zero to
* send every byte identified in the list.
 * @mem_flags: GFP_* flags affecting memory allocations in this call
*
* This initializes a scatter/gather request, allocating resources such as
* I/O mappings and urb memory (except maybe memory used by USB controller
* drivers).
*
* The request must be issued using usb_sg_wait(), which waits for the I/O to
* complete (or to be canceled) and then cleans up all resources allocated by
* usb_sg_init().
*
* The request may be canceled with usb_sg_cancel(), either before or after
* usb_sg_wait() is called.
*
* Return: Zero for success, else a negative errno value.
*/
int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev,
unsigned pipe, unsigned period, struct scatterlist *sg,
int nents, size_t length, gfp_t mem_flags)
{
int i;
int urb_flags;
int use_sg;
if (!io || !dev || !sg
|| usb_pipecontrol(pipe)
|| usb_pipeisoc(pipe)
|| nents <= 0)
return -EINVAL;
spin_lock_init(&io->lock);
io->dev = dev;
io->pipe = pipe;
if (dev->bus->sg_tablesize > 0) {
use_sg = true;
io->entries = 1;
} else {
use_sg = false;
io->entries = nents;
}
/* initialize all the urbs we'll use */
io->urbs = kmalloc_array(io->entries, sizeof(*io->urbs), mem_flags);
if (!io->urbs)
goto nomem;
urb_flags = URB_NO_INTERRUPT;
if (usb_pipein(pipe))
urb_flags |= URB_SHORT_NOT_OK;
for_each_sg(sg, sg, io->entries, i) {
struct urb *urb;
unsigned len;
urb = usb_alloc_urb(0, mem_flags);
if (!urb) {
io->entries = i;
goto nomem;
}
io->urbs[i] = urb;
urb->dev = NULL;
urb->pipe = pipe;
urb->interval = period;
urb->transfer_flags = urb_flags;
urb->complete = sg_complete;
urb->context = io;
urb->sg = sg;
if (use_sg) {
/* There is no single transfer buffer */
urb->transfer_buffer = NULL;
urb->num_sgs = nents;
/* A length of zero means transfer the whole sg list */
len = length;
if (len == 0) {
struct scatterlist *sg2;
int j;
for_each_sg(sg, sg2, nents, j)
len += sg2->length;
}
} else {
/*
* Some systems can't use DMA; they use PIO instead.
* For their sakes, transfer_buffer is set whenever
* possible.
*/
if (!PageHighMem(sg_page(sg)))
urb->transfer_buffer = sg_virt(sg);
else
urb->transfer_buffer = NULL;
len = sg->length;
if (length) {
len = min_t(size_t, len, length);
length -= len;
if (length == 0)
io->entries = i + 1;
}
}
urb->transfer_buffer_length = len;
}
io->urbs[--i]->transfer_flags &= ~URB_NO_INTERRUPT;
/* transaction state */
io->count = io->entries;
io->status = 0;
io->bytes = 0;
init_completion(&io->complete);
return 0;
nomem:
sg_clean(io);
return -ENOMEM;
}
EXPORT_SYMBOL_GPL(usb_sg_init);
/**
* usb_sg_wait - synchronously execute scatter/gather request
* @io: request block handle, as initialized with usb_sg_init().
 *	Some fields become accessible when this call returns.
* Context: !in_interrupt ()
*
* This function blocks until the specified I/O operation completes. It
* leverages the grouping of the related I/O requests to get good transfer
* rates, by queueing the requests. At higher speeds, such queuing can
* significantly improve USB throughput.
*
* There are three kinds of completion for this function.
*
* (1) success, where io->status is zero. The number of io->bytes
* transferred is as requested.
* (2) error, where io->status is a negative errno value. The number
* of io->bytes transferred before the error is usually less
* than requested, and can be nonzero.
* (3) cancellation, a type of error with status -ECONNRESET that
* is initiated by usb_sg_cancel().
*
* When this function returns, all memory allocated through usb_sg_init() or
* this call will have been freed. The request block parameter may still be
* passed to usb_sg_cancel(), or it may be freed. It could also be
* reinitialized and then reused.
*
* Data Transfer Rates:
*
* Bulk transfers are valid for full or high speed endpoints.
* The best full speed data rate is 19 packets of 64 bytes each
* per frame, or 1216 bytes per millisecond.
* The best high speed data rate is 13 packets of 512 bytes each
* per microframe, or 52 KBytes per millisecond.
*
* The reason to use interrupt transfers through this API would most likely
* be to reserve high speed bandwidth, where up to 24 KBytes per millisecond
* could be transferred. That capability is less useful for low or full
* speed interrupt endpoints, which allow at most one packet per millisecond,
* of at most 8 or 64 bytes (respectively).
*
* It is not necessary to call this function to reserve bandwidth for devices
* under an xHCI host controller, as the bandwidth is reserved when the
* configuration or interface alt setting is selected.
*/
void usb_sg_wait(struct usb_sg_request *io)
{
int i;
int entries = io->entries;
/* queue the urbs. */
spin_lock_irq(&io->lock);
i = 0;
while (i < entries && !io->status) {
int retval;
io->urbs[i]->dev = io->dev;
spin_unlock_irq(&io->lock);
retval = usb_submit_urb(io->urbs[i], GFP_NOIO);
switch (retval) {
		/* maybe retrying will recover */
case -ENXIO: /* hc didn't queue this one */
case -EAGAIN:
case -ENOMEM:
retval = 0;
yield();
break;
/* no error? continue immediately.
*
* NOTE: to work better with UHCI (4K I/O buffer may
* need 3K of TDs) it may be good to limit how many
* URBs are queued at once; N milliseconds?
*/
case 0:
++i;
cpu_relax();
break;
/* fail any uncompleted urbs */
default:
io->urbs[i]->status = retval;
dev_dbg(&io->dev->dev, "%s, submit --> %d\n",
__func__, retval);
usb_sg_cancel(io);
}
spin_lock_irq(&io->lock);
if (retval && (io->status == 0 || io->status == -ECONNRESET))
io->status = retval;
}
io->count -= entries - i;
if (io->count == 0)
complete(&io->complete);
spin_unlock_irq(&io->lock);
/* OK, yes, this could be packaged as non-blocking.
* So could the submit loop above ... but it's easier to
* solve neither problem than to solve both!
*/
wait_for_completion(&io->complete);
sg_clean(io);
}
EXPORT_SYMBOL_GPL(usb_sg_wait);
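/*
 * Example (illustrative sketch): the usb_sg_init()/usb_sg_wait() pairing
 * for a single-buffer bulk-OUT transfer. The helper name is hypothetical;
 * @buf must come from kmalloc(). Period is 0 because the pipe is bulk.
 */
#if 0
static int example_sg_bulk_out(struct usb_device *udev, int epnum,
			       void *buf, size_t len)
{
	struct usb_sg_request io;
	struct scatterlist sg;
	int ret;

	sg_init_one(&sg, buf, len);
	ret = usb_sg_init(&io, udev, usb_sndbulkpipe(udev, epnum),
			  0, &sg, 1, len, GFP_KERNEL);
	if (ret)
		return ret;

	usb_sg_wait(&io);	/* blocks; resources are freed on return */
	return io.status;	/* 0, negative errno, or -ECONNRESET */
}
#endif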
/**
* usb_sg_cancel - stop scatter/gather i/o issued by usb_sg_wait()
* @io: request block, initialized with usb_sg_init()
*
* This stops a request after it has been started by usb_sg_wait().
 * It can also prevent a request initialized by usb_sg_init() from starting,
* so that call just frees resources allocated to the request.
*/
void usb_sg_cancel(struct usb_sg_request *io)
{
unsigned long flags;
int i, retval;
spin_lock_irqsave(&io->lock, flags);
if (io->status || io->count == 0) {
spin_unlock_irqrestore(&io->lock, flags);
return;
}
/* shut everything down */
io->status = -ECONNRESET;
io->count++; /* Keep the request alive until we're done */
spin_unlock_irqrestore(&io->lock, flags);
for (i = io->entries - 1; i >= 0; --i) {
usb_block_urb(io->urbs[i]);
retval = usb_unlink_urb(io->urbs[i]);
if (retval != -EINPROGRESS
&& retval != -ENODEV
&& retval != -EBUSY
&& retval != -EIDRM)
dev_warn(&io->dev->dev, "%s, unlink --> %d\n",
__func__, retval);
}
spin_lock_irqsave(&io->lock, flags);
io->count--;
if (!io->count)
complete(&io->complete);
spin_unlock_irqrestore(&io->lock, flags);
}
EXPORT_SYMBOL_GPL(usb_sg_cancel);
/*-------------------------------------------------------------------*/
/**
* usb_get_descriptor - issues a generic GET_DESCRIPTOR request
* @dev: the device whose descriptor is being retrieved
* @type: the descriptor type (USB_DT_*)
* @index: the number of the descriptor
* @buf: where to put the descriptor
* @size: how big is "buf"?
* Context: !in_interrupt ()
*
* Gets a USB descriptor. Convenience functions exist to simplify
* getting some types of descriptors. Use
* usb_get_string() or usb_string() for USB_DT_STRING.
* Device (USB_DT_DEVICE) and configuration descriptors (USB_DT_CONFIG)
* are part of the device structure.
* In addition to a number of USB-standard descriptors, some
* devices also use class-specific or vendor-specific descriptors.
*
* This call is synchronous, and may not be used in an interrupt context.
*
* Return: The number of bytes received on success, or else the status code
* returned by the underlying usb_control_msg() call.
*/
int usb_get_descriptor(struct usb_device *dev, unsigned char type,
unsigned char index, void *buf, int size)
{
int i;
int result;
	memset(buf, 0, size);	/* only parse data the device actually sent */
for (i = 0; i < 3; ++i) {
/* retry on length 0 or error; some devices are flakey */
result = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
USB_REQ_GET_DESCRIPTOR, USB_DIR_IN,
(type << 8) + index, 0, buf, size,
USB_CTRL_GET_TIMEOUT);
if (result <= 0 && result != -ETIMEDOUT)
continue;
if (result > 1 && ((u8 *)buf)[1] != type) {
result = -ENODATA;
continue;
}
break;
}
return result;
}
EXPORT_SYMBOL_GPL(usb_get_descriptor);
/**
* usb_get_string - gets a string descriptor
* @dev: the device whose string descriptor is being retrieved
* @langid: code for language chosen (from string descriptor zero)
* @index: the number of the descriptor
* @buf: where to put the string
* @size: how big is "buf"?
* Context: !in_interrupt ()
*
* Retrieves a string, encoded using UTF-16LE (Unicode, 16 bits per character,
* in little-endian byte order).
* The usb_string() function will often be a convenient way to turn
* these strings into kernel-printable form.
*
* Strings may be referenced in device, configuration, interface, or other
* descriptors, and could also be used in vendor-specific ways.
*
* This call is synchronous, and may not be used in an interrupt context.
*
* Return: The number of bytes received on success, or else the status code
* returned by the underlying usb_control_msg() call.
*/
static int usb_get_string(struct usb_device *dev, unsigned short langid,
unsigned char index, void *buf, int size)
{
int i;
int result;
for (i = 0; i < 3; ++i) {
/* retry on length 0 or stall; some devices are flakey */
result = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
USB_REQ_GET_DESCRIPTOR, USB_DIR_IN,
(USB_DT_STRING << 8) + index, langid, buf, size,
USB_CTRL_GET_TIMEOUT);
if (result == 0 || result == -EPIPE)
continue;
if (result > 1 && ((u8 *) buf)[1] != USB_DT_STRING) {
result = -ENODATA;
continue;
}
break;
}
return result;
}
static void usb_try_string_workarounds(unsigned char *buf, int *length)
{
int newlength, oldlength = *length;
for (newlength = 2; newlength + 1 < oldlength; newlength += 2)
if (!isprint(buf[newlength]) || buf[newlength + 1])
break;
if (newlength > 2) {
buf[0] = newlength;
*length = newlength;
}
}
static int usb_string_sub(struct usb_device *dev, unsigned int langid,
unsigned int index, unsigned char *buf)
{
int rc;
/* Try to read the string descriptor by asking for the maximum
* possible number of bytes */
if (dev->quirks & USB_QUIRK_STRING_FETCH_255)
rc = -EIO;
else
rc = usb_get_string(dev, langid, index, buf, 255);
/* If that failed try to read the descriptor length, then
* ask for just that many bytes */
if (rc < 2) {
rc = usb_get_string(dev, langid, index, buf, 2);
if (rc == 2)
rc = usb_get_string(dev, langid, index, buf, buf[0]);
}
if (rc >= 2) {
if (!buf[0] && !buf[1])
usb_try_string_workarounds(buf, &rc);
/* There might be extra junk at the end of the descriptor */
if (buf[0] < rc)
rc = buf[0];
rc = rc - (rc & 1); /* force a multiple of two */
}
if (rc < 2)
rc = (rc < 0 ? rc : -EINVAL);
return rc;
}
static int usb_get_langid(struct usb_device *dev, unsigned char *tbuf)
{
int err;
if (dev->have_langid)
return 0;
if (dev->string_langid < 0)
return -EPIPE;
err = usb_string_sub(dev, 0, 0, tbuf);
	/* If the string was reported but is malformed, default to English
* (0x0409) */
if (err == -ENODATA || (err > 0 && err < 4)) {
dev->string_langid = 0x0409;
dev->have_langid = 1;
dev_err(&dev->dev,
"language id specifier not provided by device, defaulting to English\n");
return 0;
}
/* In case of all other errors, we assume the device is not able to
* deal with strings at all. Set string_langid to -1 in order to
	 * prevent any strings from being retrieved from the device */
if (err < 0) {
dev_info(&dev->dev, "string descriptor 0 read error: %d\n",
err);
dev->string_langid = -1;
return -EPIPE;
}
/* always use the first langid listed */
dev->string_langid = tbuf[2] | (tbuf[3] << 8);
dev->have_langid = 1;
dev_dbg(&dev->dev, "default language 0x%04x\n",
dev->string_langid);
return 0;
}
/**
* usb_string - returns UTF-8 version of a string descriptor
* @dev: the device whose string descriptor is being retrieved
* @index: the number of the descriptor
* @buf: where to put the string
* @size: how big is "buf"?
* Context: !in_interrupt ()
*
* This converts the UTF-16LE encoded strings returned by devices, from
* usb_get_string_descriptor(), to null-terminated UTF-8 encoded ones
* that are more usable in most kernel contexts. Note that this function
* chooses strings in the first language supported by the device.
*
* This call is synchronous, and may not be used in an interrupt context.
*
* Return: length of the string (>= 0) or usb_control_msg status (< 0).
*/
int usb_string(struct usb_device *dev, int index, char *buf, size_t size)
{
unsigned char *tbuf;
int err;
if (dev->state == USB_STATE_SUSPENDED)
return -EHOSTUNREACH;
if (size <= 0 || !buf)
return -EINVAL;
buf[0] = 0;
if (index <= 0 || index >= 256)
return -EINVAL;
tbuf = kmalloc(256, GFP_NOIO);
if (!tbuf)
return -ENOMEM;
err = usb_get_langid(dev, tbuf);
if (err < 0)
goto errout;
err = usb_string_sub(dev, dev->string_langid, index, tbuf);
if (err < 0)
goto errout;
size--; /* leave room for trailing NULL char in output buffer */
err = utf16s_to_utf8s((wchar_t *) &tbuf[2], (err - 2) / 2,
UTF16_LITTLE_ENDIAN, buf, size);
buf[err] = 0;
if (tbuf[1] != USB_DT_STRING)
dev_dbg(&dev->dev,
"wrong descriptor type %02x for string %d (\"%s\")\n",
tbuf[1], index, buf);
errout:
kfree(tbuf);
return err;
}
EXPORT_SYMBOL_GPL(usb_string);
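/*
 * Example (illustrative sketch): turning the iProduct string descriptor
 * into UTF-8 with usb_string(). The helper name is hypothetical; an
 * iProduct of 0 means the device has no product string.
 */
#if 0
static void example_log_product_string(struct usb_device *udev)
{
	char product[64];

	if (udev->descriptor.iProduct &&
	    usb_string(udev, udev->descriptor.iProduct,
		       product, sizeof(product)) > 0)
		dev_info(&udev->dev, "product: %s\n", product);
}
#endif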
/* one UTF-8-encoded 16-bit character has at most three bytes */
#define MAX_USB_STRING_SIZE (127 * 3 + 1)
/**
* usb_cache_string - read a string descriptor and cache it for later use
* @udev: the device whose string descriptor is being read
* @index: the descriptor index
*
* Return: A pointer to a kmalloc'ed buffer containing the descriptor string,
* or %NULL if the index is 0 or the string could not be read.
*/
char *usb_cache_string(struct usb_device *udev, int index)
{
char *buf;
char *smallbuf = NULL;
int len;
if (index <= 0)
return NULL;
buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
if (buf) {
len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
if (len > 0) {
smallbuf = kmalloc(++len, GFP_NOIO);
if (!smallbuf)
return buf;
memcpy(smallbuf, buf, len);
}
kfree(buf);
}
return smallbuf;
}
/*
* usb_get_device_descriptor - (re)reads the device descriptor (usbcore)
* @dev: the device whose device descriptor is being updated
* @size: how much of the descriptor to read
* Context: !in_interrupt ()
*
* Updates the copy of the device descriptor stored in the device structure,
* which dedicates space for this purpose.
*
* Not exported, only for use by the core. If drivers really want to read
* the device descriptor directly, they can call usb_get_descriptor() with
* type = USB_DT_DEVICE and index = 0.
*
* This call is synchronous, and may not be used in an interrupt context.
*
* Return: The number of bytes received on success, or else the status code
* returned by the underlying usb_control_msg() call.
*/
int usb_get_device_descriptor(struct usb_device *dev, unsigned int size)
{
struct usb_device_descriptor *desc;
int ret;
if (size > sizeof(*desc))
return -EINVAL;
desc = kmalloc(sizeof(*desc), GFP_NOIO);
if (!desc)
return -ENOMEM;
ret = usb_get_descriptor(dev, USB_DT_DEVICE, 0, desc, size);
if (ret >= 0)
memcpy(&dev->descriptor, desc, size);
kfree(desc);
return ret;
}
/*
* usb_set_isoch_delay - informs the device of the packet transmit delay
 * @dev: the device to be informed of the delay
* Context: !in_interrupt()
*
* Since this is an optional request, we don't bother if it fails.
*/
int usb_set_isoch_delay(struct usb_device *dev)
{
/* skip hub devices */
if (dev->descriptor.bDeviceClass == USB_CLASS_HUB)
return 0;
/* skip non-SS/non-SSP devices */
if (dev->speed < USB_SPEED_SUPER)
return 0;
return usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
USB_REQ_SET_ISOCH_DELAY,
USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
dev->hub_delay, 0, NULL, 0,
USB_CTRL_SET_TIMEOUT);
}
/**
* usb_get_status - issues a GET_STATUS call
* @dev: the device whose status is being checked
* @recip: USB_RECIP_*; for device, interface, or endpoint
* @type: USB_STATUS_TYPE_*; for standard or PTM status types
* @target: zero (for device), else interface or endpoint number
* @data: pointer to two bytes of bitmap data
* Context: !in_interrupt ()
*
* Returns device, interface, or endpoint status. Normally only of
* interest to see if the device is self powered, or has enabled the
* remote wakeup facility; or whether a bulk or interrupt endpoint
* is halted ("stalled").
*
* Bits in these status bitmaps are set using the SET_FEATURE request,
* and cleared using the CLEAR_FEATURE request. The usb_clear_halt()
* function should be used to clear halt ("stall") status.
*
* This call is synchronous, and may not be used in an interrupt context.
*
* Returns 0 and the status value in *@data (in host byte order) on success,
* or else the status code from the underlying usb_control_msg() call.
*/
int usb_get_status(struct usb_device *dev, int recip, int type, int target,
void *data)
{
int ret;
void *status;
int length;
switch (type) {
case USB_STATUS_TYPE_STANDARD:
length = 2;
break;
case USB_STATUS_TYPE_PTM:
if (recip != USB_RECIP_DEVICE)
return -EINVAL;
length = 4;
break;
default:
return -EINVAL;
}
status = kmalloc(length, GFP_KERNEL);
if (!status)
return -ENOMEM;
ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
USB_REQ_GET_STATUS, USB_DIR_IN | recip, USB_STATUS_TYPE_STANDARD,
target, status, length, USB_CTRL_GET_TIMEOUT);
switch (ret) {
case 4:
if (type != USB_STATUS_TYPE_PTM) {
ret = -EIO;
break;
}
*(u32 *) data = le32_to_cpu(*(__le32 *) status);
ret = 0;
break;
case 2:
if (type != USB_STATUS_TYPE_STANDARD) {
ret = -EIO;
break;
}
*(u16 *) data = le16_to_cpu(*(__le16 *) status);
ret = 0;
break;
default:
ret = -EIO;
}
kfree(status);
return ret;
}
EXPORT_SYMBOL_GPL(usb_get_status);
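/*
 * Example (illustrative sketch): querying the standard endpoint status
 * bitmap to see whether an endpoint is halted. The helper name is
 * hypothetical; bit 0 of the standard endpoint status is the halt flag.
 */
#if 0
static int example_endpoint_is_halted(struct usb_device *udev, int pipe)
{
	u16 status = 0;
	int epaddr = usb_pipeendpoint(pipe) |
		     (usb_pipein(pipe) ? USB_DIR_IN : 0);
	int ret = usb_get_status(udev, USB_RECIP_ENDPOINT,
				 USB_STATUS_TYPE_STANDARD, epaddr, &status);

	return ret ? ret : !!(status & (1 << USB_ENDPOINT_HALT));
}
#endif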
/**
* usb_clear_halt - tells device to clear endpoint halt/stall condition
* @dev: device whose endpoint is halted
* @pipe: endpoint "pipe" being cleared
* Context: !in_interrupt ()
*
* This is used to clear halt conditions for bulk and interrupt endpoints,
* as reported by URB completion status. Endpoints that are halted are
* sometimes referred to as being "stalled". Such endpoints are unable
* to transmit or receive data until the halt status is cleared. Any URBs
* queued for such an endpoint should normally be unlinked by the driver
* before clearing the halt condition, as described in sections 5.7.5
* and 5.8.5 of the USB 2.0 spec.
*
* Note that control and isochronous endpoints don't halt, although control
* endpoints report "protocol stall" (for unsupported requests) using the
* same status code used to report a true stall.
*
* This call is synchronous, and may not be used in an interrupt context.
*
* Return: Zero on success, or else the status code returned by the
* underlying usb_control_msg() call.
*/
int usb_clear_halt(struct usb_device *dev, int pipe)
{
int result;
int endp = usb_pipeendpoint(pipe);
if (usb_pipein(pipe))
endp |= USB_DIR_IN;
/* we don't care if it wasn't halted first. in fact some devices
* (like some ibmcam model 1 units) seem to expect hosts to make
* this request for iso endpoints, which can't halt!
*/
result = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
USB_REQ_CLEAR_FEATURE, USB_RECIP_ENDPOINT,
USB_ENDPOINT_HALT, endp, NULL, 0,
USB_CTRL_SET_TIMEOUT);
/* don't un-halt or force to DATA0 except on success */
if (result < 0)
return result;
/* NOTE: seems like Microsoft and Apple don't bother verifying
* the clear "took", so some devices could lock up if you check...
* such as the Hagiwara FlashGate DUAL. So we won't bother.
*
* NOTE: make sure the logic here doesn't diverge much from
* the copy in usb-storage, for as long as we need two copies.
*/
usb_reset_endpoint(dev, endp);
return 0;
}
EXPORT_SYMBOL_GPL(usb_clear_halt);
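/*
 * Example (illustrative sketch): recovering a bulk-IN endpoint after a
 * URB completed with -EPIPE. The helper name is hypothetical; any urbs
 * still queued for the endpoint should be unlinked before this call.
 */
#if 0
static int example_recover_stalled_bulk_in(struct usb_device *udev, int epnum)
{
	return usb_clear_halt(udev, usb_rcvbulkpipe(udev, epnum));
}
#endif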
static int create_intf_ep_devs(struct usb_interface *intf)
{
struct usb_device *udev = interface_to_usbdev(intf);
struct usb_host_interface *alt = intf->cur_altsetting;
int i;
if (intf->ep_devs_created || intf->unregistering)
return 0;
for (i = 0; i < alt->desc.bNumEndpoints; ++i)
(void) usb_create_ep_devs(&intf->dev, &alt->endpoint[i], udev);
intf->ep_devs_created = 1;
return 0;
}
static void remove_intf_ep_devs(struct usb_interface *intf)
{
struct usb_host_interface *alt = intf->cur_altsetting;
int i;
if (!intf->ep_devs_created)
return;
for (i = 0; i < alt->desc.bNumEndpoints; ++i)
usb_remove_ep_devs(&alt->endpoint[i]);
intf->ep_devs_created = 0;
}
/**
* usb_disable_endpoint -- Disable an endpoint by address
* @dev: the device whose endpoint is being disabled
* @epaddr: the endpoint's address. Endpoint number for output,
* endpoint number + USB_DIR_IN for input
* @reset_hardware: flag to erase any endpoint state stored in the
* controller hardware
*
* Disables the endpoint for URB submission and nukes all pending URBs.
* If @reset_hardware is set then also deallocates hcd/hardware state
* for the endpoint.
*/
void usb_disable_endpoint(struct usb_device *dev, unsigned int epaddr,
bool reset_hardware)
{
unsigned int epnum = epaddr & USB_ENDPOINT_NUMBER_MASK;
struct usb_host_endpoint *ep;
if (!dev)
return;
if (usb_endpoint_out(epaddr)) {
ep = dev->ep_out[epnum];
if (reset_hardware)
dev->ep_out[epnum] = NULL;
} else {
ep = dev->ep_in[epnum];
if (reset_hardware)
dev->ep_in[epnum] = NULL;
}
if (ep) {
ep->enabled = 0;
usb_hcd_flush_endpoint(dev, ep);
if (reset_hardware)
usb_hcd_disable_endpoint(dev, ep);
}
}
/**
* usb_reset_endpoint - Reset an endpoint's state.
* @dev: the device whose endpoint is to be reset
* @epaddr: the endpoint's address. Endpoint number for output,
* endpoint number + USB_DIR_IN for input
*
* Resets any host-side endpoint state such as the toggle bit,
* sequence number or current window.
*/
void usb_reset_endpoint(struct usb_device *dev, unsigned int epaddr)
{
unsigned int epnum = epaddr & USB_ENDPOINT_NUMBER_MASK;
struct usb_host_endpoint *ep;
if (usb_endpoint_out(epaddr))
ep = dev->ep_out[epnum];
else
ep = dev->ep_in[epnum];
if (ep)
usb_hcd_reset_endpoint(dev, ep);
}
EXPORT_SYMBOL_GPL(usb_reset_endpoint);
/**
* usb_disable_interface -- Disable all endpoints for an interface
* @dev: the device whose interface is being disabled
* @intf: pointer to the interface descriptor
* @reset_hardware: flag to erase any endpoint state stored in the
* controller hardware
*
* Disables all the endpoints for the interface's current altsetting.
*/
void usb_disable_interface(struct usb_device *dev, struct usb_interface *intf,
bool reset_hardware)
{
struct usb_host_interface *alt = intf->cur_altsetting;
int i;
for (i = 0; i < alt->desc.bNumEndpoints; ++i) {
usb_disable_endpoint(dev,
alt->endpoint[i].desc.bEndpointAddress,
reset_hardware);
}
}
/**
* usb_disable_device - Disable all the endpoints for a USB device
* @dev: the device whose endpoints are being disabled
* @skip_ep0: 0 to disable endpoint 0, 1 to skip it.
*
* Disables all the device's endpoints, potentially including endpoint 0.
* Deallocates hcd/hardware state for the endpoints (nuking all or most
 * pending urbs) and usbcore state for the interfaces, so that
 * usb_set_configuration() must be called before any interfaces can be used.
*/
void usb_disable_device(struct usb_device *dev, int skip_ep0)
{
int i;
struct usb_hcd *hcd = bus_to_hcd(dev->bus);
/* getting rid of interfaces will disconnect
* any drivers bound to them (a key side effect)
*/
if (dev->actconfig) {
/*
* FIXME: In order to avoid self-deadlock involving the
* bandwidth_mutex, we have to mark all the interfaces
* before unregistering any of them.
*/
for (i = 0; i < dev->actconfig->desc.bNumInterfaces; i++)
dev->actconfig->interface[i]->unregistering = 1;
for (i = 0; i < dev->actconfig->desc.bNumInterfaces; i++) {
struct usb_interface *interface;
/* remove this interface if it has been registered */
interface = dev->actconfig->interface[i];
if (!device_is_registered(&interface->dev))
continue;
dev_dbg(&dev->dev, "unregistering interface %s\n",
dev_name(&interface->dev));
remove_intf_ep_devs(interface);
device_del(&interface->dev);
}
/* Now that the interfaces are unbound, nobody should
* try to access them.
*/
for (i = 0; i < dev->actconfig->desc.bNumInterfaces; i++) {
put_device(&dev->actconfig->interface[i]->dev);
dev->actconfig->interface[i] = NULL;
}
usb_disable_usb2_hardware_lpm(dev);
usb_unlocked_disable_lpm(dev);
usb_disable_ltm(dev);
dev->actconfig = NULL;
if (dev->state == USB_STATE_CONFIGURED)
usb_set_device_state(dev, USB_STATE_ADDRESS);
}
dev_dbg(&dev->dev, "%s nuking %s URBs\n", __func__,
skip_ep0 ? "non-ep0" : "all");
if (hcd->driver->check_bandwidth) {
/* First pass: Cancel URBs, leave endpoint pointers intact. */
for (i = skip_ep0; i < 16; ++i) {
usb_disable_endpoint(dev, i, false);
usb_disable_endpoint(dev, i + USB_DIR_IN, false);
}
/* Remove endpoints from the host controller internal state */
mutex_lock(hcd->bandwidth_mutex);
usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL);
mutex_unlock(hcd->bandwidth_mutex);
/* Second pass: remove endpoint pointers */
}
for (i = skip_ep0; i < 16; ++i) {
usb_disable_endpoint(dev, i, true);
usb_disable_endpoint(dev, i + USB_DIR_IN, true);
}
}
/**
* usb_enable_endpoint - Enable an endpoint for USB communications
* @dev: the device whose interface is being enabled
* @ep: the endpoint
* @reset_ep: flag to reset the endpoint state
*
* Resets the endpoint state if asked, and sets dev->ep_{in,out} pointers.
* For control endpoints, both the input and output sides are handled.
*/
void usb_enable_endpoint(struct usb_device *dev, struct usb_host_endpoint *ep,
bool reset_ep)
{
int epnum = usb_endpoint_num(&ep->desc);
int is_out = usb_endpoint_dir_out(&ep->desc);
int is_control = usb_endpoint_xfer_control(&ep->desc);
if (reset_ep)
usb_hcd_reset_endpoint(dev, ep);
if (is_out || is_control)
dev->ep_out[epnum] = ep;
if (!is_out || is_control)
dev->ep_in[epnum] = ep;
ep->enabled = 1;
}
/**
* usb_enable_interface - Enable all the endpoints for an interface
* @dev: the device whose interface is being enabled
* @intf: pointer to the interface descriptor
* @reset_eps: flag to reset the endpoints' state
*
* Enables all the endpoints for the interface's current altsetting.
*/
void usb_enable_interface(struct usb_device *dev,
struct usb_interface *intf, bool reset_eps)
{
struct usb_host_interface *alt = intf->cur_altsetting;
int i;
for (i = 0; i < alt->desc.bNumEndpoints; ++i)
usb_enable_endpoint(dev, &alt->endpoint[i], reset_eps);
}
/**
* usb_set_interface - Makes a particular alternate setting be current
* @dev: the device whose interface is being updated
* @interface: the interface being updated
* @alternate: the setting being chosen.
* Context: !in_interrupt ()
*
* This is used to enable data transfers on interfaces that may not
* be enabled by default. Not all devices support such configurability.
* Only the driver bound to an interface may change its setting.
*
* Within any given configuration, each interface may have several
* alternative settings. These are often used to control levels of
* bandwidth consumption. For example, the default setting for a high
* speed interrupt endpoint may not send more than 64 bytes per microframe,
* while interrupt transfers of up to 3KBytes per microframe are legal.
* Also, isochronous endpoints may never be part of an
* interface's default setting. To access such bandwidth, alternate
* interface settings must be made current.
*
* Note that in the Linux USB subsystem, bandwidth associated with
* an endpoint in a given alternate setting is not reserved until an URB
* is submitted that needs that bandwidth. Some other operating systems
* allocate bandwidth early, when a configuration is chosen.
*
* xHCI reserves bandwidth and configures the alternate setting in
* usb_hcd_alloc_bandwidth(). If it fails the original interface altsetting
* may be disabled. Drivers cannot rely on any particular alternate
* setting being in effect after a failure.
*
* This call is synchronous, and may not be used in an interrupt context.
* Also, drivers must not change altsettings while urbs are scheduled for
* endpoints in that interface; all such urbs must first be completed
* (perhaps forced by unlinking).
*
* Return: Zero on success, or else the status code returned by the
* underlying usb_control_msg() call.
*/
int usb_set_interface(struct usb_device *dev, int interface, int alternate)
{
struct usb_interface *iface;
struct usb_host_interface *alt;
struct usb_hcd *hcd = bus_to_hcd(dev->bus);
int i, ret, manual = 0;
unsigned int epaddr;
unsigned int pipe;
if (dev->state == USB_STATE_SUSPENDED)
return -EHOSTUNREACH;
iface = usb_ifnum_to_if(dev, interface);
if (!iface) {
dev_dbg(&dev->dev, "selecting invalid interface %d\n",
interface);
return -EINVAL;
}
if (iface->unregistering)
return -ENODEV;
alt = usb_altnum_to_altsetting(iface, alternate);
if (!alt) {
dev_warn(&dev->dev, "selecting invalid altsetting %d\n",
alternate);
return -EINVAL;
}
/*
* usb3 hosts configure the interface in usb_hcd_alloc_bandwidth,
* including freeing dropped endpoint ring buffers.
* Make sure the interface endpoints are flushed before that
*/
usb_disable_interface(dev, iface, false);
/* Make sure we have enough bandwidth for this alternate interface.
* Remove the current alt setting and add the new alt setting.
*/
mutex_lock(hcd->bandwidth_mutex);
/* Disable LPM, and re-enable it once the new alt setting is installed,
* so that the xHCI driver can recalculate the U1/U2 timeouts.
*/
if (usb_disable_lpm(dev)) {
dev_err(&iface->dev, "%s Failed to disable LPM\n", __func__);
mutex_unlock(hcd->bandwidth_mutex);
return -ENOMEM;
}
/* Changing alt-setting also frees any allocated streams */
for (i = 0; i < iface->cur_altsetting->desc.bNumEndpoints; i++)
iface->cur_altsetting->endpoint[i].streams = 0;
ret = usb_hcd_alloc_bandwidth(dev, NULL, iface->cur_altsetting, alt);
if (ret < 0) {
dev_info(&dev->dev, "Not enough bandwidth for altsetting %d\n",
alternate);
usb_enable_lpm(dev);
mutex_unlock(hcd->bandwidth_mutex);
return ret;
}
if (dev->quirks & USB_QUIRK_NO_SET_INTF)
ret = -EPIPE;
else
ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
USB_REQ_SET_INTERFACE, USB_RECIP_INTERFACE,
alternate, interface, NULL, 0, 5000);
/* 9.4.10 says devices don't need this and are free to STALL the
* request if the interface only has one alternate setting.
*/
if (ret == -EPIPE && iface->num_altsetting == 1) {
dev_dbg(&dev->dev,
"manual set_interface for iface %d, alt %d\n",
interface, alternate);
manual = 1;
} else if (ret < 0) {
/* Re-instate the old alt setting */
usb_hcd_alloc_bandwidth(dev, NULL, alt, iface->cur_altsetting);
usb_enable_lpm(dev);
mutex_unlock(hcd->bandwidth_mutex);
return ret;
}
mutex_unlock(hcd->bandwidth_mutex);
/* FIXME drivers shouldn't need to replicate/bugfix the logic here
* when they implement async or easily-killable versions of this or
* other "should-be-internal" functions (like clear_halt).
* should hcd+usbcore postprocess control requests?
*/
/* prevent submissions using previous endpoint settings */
if (iface->cur_altsetting != alt) {
remove_intf_ep_devs(iface);
usb_remove_sysfs_intf_files(iface);
}
usb_disable_interface(dev, iface, true);
iface->cur_altsetting = alt;
/* Now that the interface is installed, re-enable LPM. */
usb_unlocked_enable_lpm(dev);
/* If the interface only has one altsetting and the device didn't
* accept the request, we attempt to carry out the equivalent action
* by manually clearing the HALT feature for each endpoint in the
* new altsetting.
*/
if (manual) {
for (i = 0; i < alt->desc.bNumEndpoints; i++) {
epaddr = alt->endpoint[i].desc.bEndpointAddress;
pipe = __create_pipe(dev,
USB_ENDPOINT_NUMBER_MASK & epaddr) |
(usb_endpoint_out(epaddr) ?
USB_DIR_OUT : USB_DIR_IN);
usb_clear_halt(dev, pipe);
}
}
/* 9.1.1.5: reset toggles for all endpoints in the new altsetting
*
* Note:
 * Although EP0 is always present in all interfaces/altsettings, the list
 * of endpoints from the descriptor does not contain EP0. Because of its
 * omnipresence one might expect EP0 to be considered "affected" by any
 * SetInterface request and hence assume its toggles need to be reset.
* However, EP0 toggles are re-synced for every individual transfer
* during the SETUP stage - hence EP0 toggles are "don't care" here.
 * (Likewise, EP0 never "halts" on well-designed devices.)
*/
usb_enable_interface(dev, iface, true);
if (device_is_registered(&iface->dev)) {
usb_create_sysfs_intf_files(iface);
create_intf_ep_devs(iface);
}
return 0;
}
EXPORT_SYMBOL_GPL(usb_set_interface);
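/*
 * Example (illustrative sketch): a driver selecting a higher-bandwidth
 * alternate setting on the interface it is bound to. The helper name is
 * hypothetical; all urbs for the interface must have completed first.
 */
#if 0
static int example_select_streaming_alt(struct usb_interface *intf, int alt)
{
	struct usb_device *udev = interface_to_usbdev(intf);

	return usb_set_interface(udev,
			intf->cur_altsetting->desc.bInterfaceNumber, alt);
}
#endif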
/**
* usb_reset_configuration - lightweight device reset
* @dev: the device whose configuration is being reset
*
* This issues a standard SET_CONFIGURATION request to the device using
* the current configuration. The effect is to reset most USB-related
* state in the device, including interface altsettings (reset to zero),
* endpoint halts (cleared), and endpoint state (only for bulk and interrupt
* endpoints). Other usbcore state is unchanged, including bindings of
* usb device drivers to interfaces.
*
* Because this affects multiple interfaces, avoid using this with composite
* (multi-interface) devices. Instead, the driver for each interface may
* use usb_set_interface() on the interfaces it claims. Be careful though;
* some devices don't support the SET_INTERFACE request, and others won't
* reset all the interface state (notably endpoint state). Resetting the whole
* configuration would affect other drivers' interfaces.
*
* The caller must own the device lock.
*
* Return: Zero on success, else a negative error code.
*/
int usb_reset_configuration(struct usb_device *dev)
{
int i, retval;
struct usb_host_config *config;
struct usb_hcd *hcd = bus_to_hcd(dev->bus);
if (dev->state == USB_STATE_SUSPENDED)
return -EHOSTUNREACH;
/* caller must have locked the device and must own
* the usb bus readlock (so driver bindings are stable);
* calls during probe() are fine
*/
for (i = 1; i < 16; ++i) {
usb_disable_endpoint(dev, i, true);
usb_disable_endpoint(dev, i + USB_DIR_IN, true);
}
config = dev->actconfig;
retval = 0;
mutex_lock(hcd->bandwidth_mutex);
/* Disable LPM, and re-enable it once the configuration is reset, so
* that the xHCI driver can recalculate the U1/U2 timeouts.
*/
if (usb_disable_lpm(dev)) {
dev_err(&dev->dev, "%s Failed to disable LPM\n", __func__);
mutex_unlock(hcd->bandwidth_mutex);
return -ENOMEM;
}
/* Make sure we have enough bandwidth for each alternate setting 0 */
for (i = 0; i < config->desc.bNumInterfaces; i++) {
struct usb_interface *intf = config->interface[i];
struct usb_host_interface *alt;
alt = usb_altnum_to_altsetting(intf, 0);
if (!alt)
alt = &intf->altsetting[0];
if (alt != intf->cur_altsetting)
retval = usb_hcd_alloc_bandwidth(dev, NULL,
intf->cur_altsetting, alt);
if (retval < 0)
break;
}
/* If not, reinstate the old alternate settings */
if (retval < 0) {
reset_old_alts:
for (i--; i >= 0; i--) {
struct usb_interface *intf = config->interface[i];
struct usb_host_interface *alt;
alt = usb_altnum_to_altsetting(intf, 0);
if (!alt)
alt = &intf->altsetting[0];
if (alt != intf->cur_altsetting)
usb_hcd_alloc_bandwidth(dev, NULL,
alt, intf->cur_altsetting);
}
usb_enable_lpm(dev);
mutex_unlock(hcd->bandwidth_mutex);
return retval;
}
retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
USB_REQ_SET_CONFIGURATION, 0,
config->desc.bConfigurationValue, 0,
NULL, 0, USB_CTRL_SET_TIMEOUT);
if (retval < 0)
goto reset_old_alts;
mutex_unlock(hcd->bandwidth_mutex);
/* re-init hc/hcd interface/endpoint state */
for (i = 0; i < config->desc.bNumInterfaces; i++) {
struct usb_interface *intf = config->interface[i];
struct usb_host_interface *alt;
alt = usb_altnum_to_altsetting(intf, 0);
/* No altsetting 0? We'll assume the first altsetting.
* We could use a GetInterface call, but if a device is
* so non-compliant that it doesn't have altsetting 0
* then I wouldn't trust its reply anyway.
*/
if (!alt)
alt = &intf->altsetting[0];
if (alt != intf->cur_altsetting) {
remove_intf_ep_devs(intf);
usb_remove_sysfs_intf_files(intf);
}
intf->cur_altsetting = alt;
usb_enable_interface(dev, intf, true);
if (device_is_registered(&intf->dev)) {
usb_create_sysfs_intf_files(intf);
create_intf_ep_devs(intf);
}
}
/* Now that the interfaces are installed, re-enable LPM. */
usb_unlocked_enable_lpm(dev);
return 0;
}
EXPORT_SYMBOL_GPL(usb_reset_configuration);
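/*
 * Example (illustrative sketch): issuing a lightweight reset from a
 * driver's probe(), where the device lock is already held. The helper
 * name is hypothetical; avoid this on composite devices, as noted above.
 */
#if 0
static int example_reset_config_from_probe(struct usb_interface *intf)
{
	return usb_reset_configuration(interface_to_usbdev(intf));
}
#endif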
static void usb_release_interface(struct device *dev)
{
struct usb_interface *intf = to_usb_interface(dev);
struct usb_interface_cache *intfc =
altsetting_to_usb_interface_cache(intf->altsetting);
kref_put(&intfc->ref, usb_release_interface_cache);
usb_put_dev(interface_to_usbdev(intf));
of_node_put(dev->of_node);
kfree(intf);
}
/*
 * usb_deauthorize_interface - deauthorize a USB interface
*
* @intf: USB interface structure
*/
void usb_deauthorize_interface(struct usb_interface *intf)
{
struct device *dev = &intf->dev;
device_lock(dev->parent);
if (intf->authorized) {
device_lock(dev);
intf->authorized = 0;
device_unlock(dev);
usb_forced_unbind_intf(intf);
}
device_unlock(dev->parent);
}
/*
 * usb_authorize_interface - authorize a USB interface
*
* @intf: USB interface structure
*/
void usb_authorize_interface(struct usb_interface *intf)
{
struct device *dev = &intf->dev;
if (!intf->authorized) {
device_lock(dev);
intf->authorized = 1; /* authorize interface */
device_unlock(dev);
}
}
static int usb_if_uevent(struct device *dev, struct kobj_uevent_env *env)
{
struct usb_device *usb_dev;
struct usb_interface *intf;
struct usb_host_interface *alt;
intf = to_usb_interface(dev);
usb_dev = interface_to_usbdev(intf);
alt = intf->cur_altsetting;
if (add_uevent_var(env, "INTERFACE=%d/%d/%d",
alt->desc.bInterfaceClass,
alt->desc.bInterfaceSubClass,
alt->desc.bInterfaceProtocol))
return -ENOMEM;
if (add_uevent_var(env,
"MODALIAS=usb:"
"v%04Xp%04Xd%04Xdc%02Xdsc%02Xdp%02Xic%02Xisc%02Xip%02Xin%02X",
le16_to_cpu(usb_dev->descriptor.idVendor),
le16_to_cpu(usb_dev->descriptor.idProduct),
le16_to_cpu(usb_dev->descriptor.bcdDevice),
usb_dev->descriptor.bDeviceClass,
usb_dev->descriptor.bDeviceSubClass,
usb_dev->descriptor.bDeviceProtocol,
alt->desc.bInterfaceClass,
alt->desc.bInterfaceSubClass,
alt->desc.bInterfaceProtocol,
alt->desc.bInterfaceNumber))
return -ENOMEM;
return 0;
}
struct device_type usb_if_device_type = {
.name = "usb_interface",
.release = usb_release_interface,
.uevent = usb_if_uevent,
};
static struct usb_interface_assoc_descriptor *find_iad(struct usb_device *dev,
struct usb_host_config *config,
u8 inum)
{
struct usb_interface_assoc_descriptor *retval = NULL;
struct usb_interface_assoc_descriptor *intf_assoc;
int first_intf;
int last_intf;
int i;
for (i = 0; (i < USB_MAXIADS && config->intf_assoc[i]); i++) {
intf_assoc = config->intf_assoc[i];
if (intf_assoc->bInterfaceCount == 0)
continue;
first_intf = intf_assoc->bFirstInterface;
last_intf = first_intf + (intf_assoc->bInterfaceCount - 1);
if (inum >= first_intf && inum <= last_intf) {
if (!retval)
retval = intf_assoc;
else
dev_err(&dev->dev, "Interface #%d referenced"
" by multiple IADs\n", inum);
}
}
return retval;
}
/*
* Internal function to queue a device reset
* See usb_queue_reset_device() for more details
*/
static void __usb_queue_reset_device(struct work_struct *ws)
{
int rc;
struct usb_interface *iface =
container_of(ws, struct usb_interface, reset_ws);
struct usb_device *udev = interface_to_usbdev(iface);
rc = usb_lock_device_for_reset(udev, iface);
if (rc >= 0) {
usb_reset_device(udev);
usb_unlock_device(udev);
}
usb_put_intf(iface); /* Undo _get_ in usb_queue_reset_device() */
}
/*
* usb_set_configuration - Makes a particular device setting be current
* @dev: the device whose configuration is being updated
* @configuration: the configuration being chosen.
* Context: !in_interrupt(), caller owns the device lock
*
* This is used to enable non-default device modes. Not all devices
* use this kind of configurability; many devices only have one
* configuration.
*
* @configuration is the value of the configuration to be installed.
* According to the USB spec (e.g. section 9.1.1.5), configuration values
 * must be non-zero; a value of zero indicates that the device is
 * unconfigured. However, some devices erroneously use 0 as one of their
* configuration values. To help manage such devices, this routine will
* accept @configuration = -1 as indicating the device should be put in
* an unconfigured state.
*
* USB device configurations may affect Linux interoperability,
* power consumption and the functionality available. For example,
* the default configuration is limited to using 100mA of bus power,
* so that when certain device functionality requires more power,
* and the device is bus powered, that functionality should be in some
* non-default device configuration. Other device modes may also be
* reflected as configuration options, such as whether two ISDN
* channels are available independently; and choosing between open
* standard device protocols (like CDC) or proprietary ones.
*
* Note that a non-authorized device (dev->authorized == 0) will only
* be put in unconfigured mode.
*
* Note that USB has an additional level of device configurability,
* associated with interfaces. That configurability is accessed using
* usb_set_interface().
*
* This call is synchronous. The calling context must be able to sleep,
* must own the device lock, and must not hold the driver model's USB
* bus mutex; usb interface driver probe() methods cannot use this routine.
*
* Returns zero on success, or else the status code returned by the
* underlying call that failed. On successful completion, each interface
* in the original device configuration has been destroyed, and each one
* in the new configuration has been probed by all relevant usb device
* drivers currently known to the kernel.
*/
int usb_set_configuration(struct usb_device *dev, int configuration)
{
int i, ret;
struct usb_host_config *cp = NULL;
struct usb_interface **new_interfaces = NULL;
struct usb_hcd *hcd = bus_to_hcd(dev->bus);
int n, nintf;
if (dev->authorized == 0 || configuration == -1)
configuration = 0;
else {
for (i = 0; i < dev->descriptor.bNumConfigurations; i++) {
if (dev->config[i].desc.bConfigurationValue ==
configuration) {
cp = &dev->config[i];
break;
}
}
}
if ((!cp && configuration != 0))
return -EINVAL;
/* The USB spec says configuration 0 means unconfigured.
* But if a device includes a configuration numbered 0,
* we will accept it as a correctly configured state.
* Use -1 if you really want to unconfigure the device.
*/
if (cp && configuration == 0)
dev_warn(&dev->dev, "config 0 descriptor??\n");
/* Allocate memory for new interfaces before doing anything else,
* so that if we run out then nothing will have changed. */
n = nintf = 0;
if (cp) {
nintf = cp->desc.bNumInterfaces;
new_interfaces = kmalloc_array(nintf, sizeof(*new_interfaces),
GFP_NOIO);
if (!new_interfaces)
return -ENOMEM;
for (; n < nintf; ++n) {
new_interfaces[n] = kzalloc(
sizeof(struct usb_interface),
GFP_NOIO);
if (!new_interfaces[n]) {
ret = -ENOMEM;
free_interfaces:
while (--n >= 0)
kfree(new_interfaces[n]);
kfree(new_interfaces);
return ret;
}
}
i = dev->bus_mA - usb_get_max_power(dev, cp);
if (i < 0)
dev_warn(&dev->dev, "new config #%d exceeds power "
"limit by %dmA\n",
configuration, -i);
}
/* Wake up the device so we can send it the Set-Config request */
ret = usb_autoresume_device(dev);
if (ret)
goto free_interfaces;
/* if it's already configured, clear out old state first.
* getting rid of old interfaces means unbinding their drivers.
*/
if (dev->state != USB_STATE_ADDRESS)
usb_disable_device(dev, 1); /* Skip ep0 */
/* Get rid of pending async Set-Config requests for this device */
cancel_async_set_config(dev);
/* Make sure we have bandwidth (and available HCD resources) for this
* configuration. Remove endpoints from the schedule if we're dropping
* this configuration to set configuration 0. After this point, the
* host controller will not allow submissions to dropped endpoints. If
* this call fails, the device state is unchanged.
*/
mutex_lock(hcd->bandwidth_mutex);
/* Disable LPM, and re-enable it once the new configuration is
* installed, so that the xHCI driver can recalculate the U1/U2
* timeouts.
*/
if (dev->actconfig && usb_disable_lpm(dev)) {
dev_err(&dev->dev, "%s Failed to disable LPM\n", __func__);
mutex_unlock(hcd->bandwidth_mutex);
ret = -ENOMEM;
goto free_interfaces;
}
ret = usb_hcd_alloc_bandwidth(dev, cp, NULL, NULL);
if (ret < 0) {
if (dev->actconfig)
usb_enable_lpm(dev);
mutex_unlock(hcd->bandwidth_mutex);
usb_autosuspend_device(dev);
goto free_interfaces;
}
/*
* Initialize the new interface structures and the
* hc/hcd/usbcore interface/endpoint state.
*/
for (i = 0; i < nintf; ++i) {
struct usb_interface_cache *intfc;
struct usb_interface *intf;
struct usb_host_interface *alt;
u8 ifnum;
cp->interface[i] = intf = new_interfaces[i];
intfc = cp->intf_cache[i];
intf->altsetting = intfc->altsetting;
intf->num_altsetting = intfc->num_altsetting;
intf->authorized = !!HCD_INTF_AUTHORIZED(hcd);
kref_get(&intfc->ref);
alt = usb_altnum_to_altsetting(intf, 0);
/* No altsetting 0? We'll assume the first altsetting.
* We could use a GetInterface call, but if a device is
* so non-compliant that it doesn't have altsetting 0
* then I wouldn't trust its reply anyway.
*/
if (!alt)
alt = &intf->altsetting[0];
ifnum = alt->desc.bInterfaceNumber;
intf->intf_assoc = find_iad(dev, cp, ifnum);
intf->cur_altsetting = alt;
usb_enable_interface(dev, intf, true);
intf->dev.parent = &dev->dev;
if (usb_of_has_combined_node(dev)) {
device_set_of_node_from_dev(&intf->dev, &dev->dev);
} else {
intf->dev.of_node = usb_of_get_interface_node(dev,
configuration, ifnum);
}
ACPI_COMPANION_SET(&intf->dev, ACPI_COMPANION(&dev->dev));
intf->dev.driver = NULL;
intf->dev.bus = &usb_bus_type;
intf->dev.type = &usb_if_device_type;
intf->dev.groups = usb_interface_groups;
/*
* Please refer to usb_alloc_dev() to see why we set
* dma_mask and dma_pfn_offset.
*/
intf->dev.dma_mask = dev->dev.dma_mask;
intf->dev.dma_pfn_offset = dev->dev.dma_pfn_offset;
INIT_WORK(&intf->reset_ws, __usb_queue_reset_device);
intf->minor = -1;
device_initialize(&intf->dev);
pm_runtime_no_callbacks(&intf->dev);
dev_set_name(&intf->dev, "%d-%s:%d.%d", dev->bus->busnum,
dev->devpath, configuration, ifnum);
usb_get_dev(dev);
}
kfree(new_interfaces);
ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
USB_REQ_SET_CONFIGURATION, 0, configuration, 0,
NULL, 0, USB_CTRL_SET_TIMEOUT);
if (ret < 0 && cp) {
/*
* All the old state is gone, so what else can we do?
* The device is probably useless now anyway.
*/
usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL);
for (i = 0; i < nintf; ++i) {
usb_disable_interface(dev, cp->interface[i], true);
put_device(&cp->interface[i]->dev);
cp->interface[i] = NULL;
}
cp = NULL;
}
dev->actconfig = cp;
mutex_unlock(hcd->bandwidth_mutex);
if (!cp) {
usb_set_device_state(dev, USB_STATE_ADDRESS);
/* Leave LPM disabled while the device is unconfigured. */
usb_autosuspend_device(dev);
return ret;
}
usb_set_device_state(dev, USB_STATE_CONFIGURED);
if (cp->string == NULL &&
!(dev->quirks & USB_QUIRK_CONFIG_INTF_STRINGS))
cp->string = usb_cache_string(dev, cp->desc.iConfiguration);
/* Now that the interfaces are installed, re-enable LPM. */
usb_unlocked_enable_lpm(dev);
/* Enable LTM if it was turned off by usb_disable_device. */
usb_enable_ltm(dev);
/* Now that all the interfaces are set up, register them
* to trigger binding of drivers to interfaces. probe()
* routines may install different altsettings and may
* claim() any interfaces not yet bound. Many class drivers
* need that: CDC, audio, video, etc.
*/
for (i = 0; i < nintf; ++i) {
struct usb_interface *intf = cp->interface[i];
if (intf->dev.of_node &&
!of_device_is_available(intf->dev.of_node)) {
dev_info(&dev->dev, "skipping disabled interface %d\n",
intf->cur_altsetting->desc.bInterfaceNumber);
continue;
}
dev_dbg(&dev->dev,
"adding %s (config #%d, interface %d)\n",
dev_name(&intf->dev), configuration,
intf->cur_altsetting->desc.bInterfaceNumber);
device_enable_async_suspend(&intf->dev);
ret = device_add(&intf->dev);
if (ret != 0) {
dev_err(&dev->dev, "device_add(%s) --> %d\n",
dev_name(&intf->dev), ret);
continue;
}
create_intf_ep_devs(intf);
}
usb_autosuspend_device(dev);
return 0;
}
EXPORT_SYMBOL_GPL(usb_set_configuration);
static LIST_HEAD(set_config_list);
static DEFINE_SPINLOCK(set_config_lock);
struct set_config_request {
struct usb_device *udev;
int config;
struct work_struct work;
struct list_head node;
};
/* Worker routine for usb_driver_set_configuration() */
static void driver_set_config_work(struct work_struct *work)
{
struct set_config_request *req =
container_of(work, struct set_config_request, work);
struct usb_device *udev = req->udev;
usb_lock_device(udev);
spin_lock(&set_config_lock);
list_del(&req->node);
spin_unlock(&set_config_lock);
if (req->config >= -1) /* Is req still valid? */
usb_set_configuration(udev, req->config);
usb_unlock_device(udev);
usb_put_dev(udev);
kfree(req);
}
/* Cancel pending Set-Config requests for a device whose configuration
* was just changed
*/
static void cancel_async_set_config(struct usb_device *udev)
{
struct set_config_request *req;
spin_lock(&set_config_lock);
list_for_each_entry(req, &set_config_list, node) {
if (req->udev == udev)
req->config = -999; /* Mark as cancelled */
}
spin_unlock(&set_config_lock);
}
/**
* usb_driver_set_configuration - Provide a way for drivers to change device configurations
* @udev: the device whose configuration is being updated
* @config: the configuration being chosen.
* Context: In process context, must be able to sleep
*
* Device interface drivers are not allowed to change device configurations.
* This is because changing configurations will destroy the interface the
* driver is bound to and create new ones; it would be like a floppy-disk
* driver telling the computer to replace the floppy-disk drive with a
* tape drive!
*
* Still, in certain specialized circumstances the need may arise. This
* routine gets around the normal restrictions by using a work thread to
* submit the change-config request.
*
* Return: 0 if the request was successfully queued, error code otherwise.
* The caller has no way to know whether the queued request will eventually
* succeed.
*/
int usb_driver_set_configuration(struct usb_device *udev, int config)
{
struct set_config_request *req;
req = kmalloc(sizeof(*req), GFP_KERNEL);
if (!req)
return -ENOMEM;
req->udev = udev;
req->config = config;
INIT_WORK(&req->work, driver_set_config_work);
spin_lock(&set_config_lock);
list_add(&req->node, &set_config_list);
spin_unlock(&set_config_lock);
usb_get_dev(udev);
schedule_work(&req->work);
return 0;
}
EXPORT_SYMBOL_GPL(usb_driver_set_configuration);
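/*
 * A minimal usage sketch (hypothetical driver code, not part of this
 * file): a bound interface driver that wants to switch the device to,
 * say, configuration #2 queues the request instead of calling
 * usb_set_configuration() directly, since the synchronous path needs
 * locks the driver may already hold.
 */
#if 0
static void example_switch_config(struct usb_interface *intf)
{
	struct usb_device *udev = interface_to_usbdev(intf);
	int err;

	/* queued asynchronously; 0 only means the request was accepted */
	err = usb_driver_set_configuration(udev, 2);
	if (err)
		dev_err(&intf->dev, "failed to queue config change: %d\n", err);
}
#endif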
/**
* cdc_parse_cdc_header - parse the extra headers present in CDC devices
* @hdr: the place to put the results of the parsing
* @intf: the interface for which parsing is requested
* @buffer: pointer to the extra headers to be parsed
* @buflen: length of the extra headers
*
* This evaluates the extra headers present in CDC devices which
* bind the interfaces for data and control and provide details
* about the capabilities of the device.
*
* Return: number of descriptors parsed or -EINVAL
* if the header is contradictory beyond salvage
*/
int cdc_parse_cdc_header(struct usb_cdc_parsed_header *hdr,
struct usb_interface *intf,
u8 *buffer,
int buflen)
{
/* duplicates are ignored */
struct usb_cdc_union_desc *union_header = NULL;
/* duplicates are not tolerated */
struct usb_cdc_header_desc *header = NULL;
struct usb_cdc_ether_desc *ether = NULL;
struct usb_cdc_mdlm_detail_desc *detail = NULL;
struct usb_cdc_mdlm_desc *desc = NULL;
unsigned int elength;
int cnt = 0;
memset(hdr, 0x00, sizeof(struct usb_cdc_parsed_header));
hdr->phonet_magic_present = false;
while (buflen > 0) {
elength = buffer[0];
if (!elength) {
dev_err(&intf->dev, "skipping garbage byte\n");
elength = 1;
goto next_desc;
}
if ((buflen < elength) || (elength < 3)) {
dev_err(&intf->dev, "invalid descriptor buffer length\n");
break;
}
if (buffer[1] != USB_DT_CS_INTERFACE) {
dev_err(&intf->dev, "skipping garbage\n");
goto next_desc;
}
switch (buffer[2]) {
case USB_CDC_UNION_TYPE: /* we've found it */
if (elength < sizeof(struct usb_cdc_union_desc))
goto next_desc;
if (union_header) {
dev_err(&intf->dev, "More than one union descriptor, skipping ...\n");
goto next_desc;
}
union_header = (struct usb_cdc_union_desc *)buffer;
break;
case USB_CDC_COUNTRY_TYPE:
if (elength < sizeof(struct usb_cdc_country_functional_desc))
goto next_desc;
hdr->usb_cdc_country_functional_desc =
(struct usb_cdc_country_functional_desc *)buffer;
break;
case USB_CDC_HEADER_TYPE:
if (elength != sizeof(struct usb_cdc_header_desc))
goto next_desc;
if (header)
return -EINVAL;
header = (struct usb_cdc_header_desc *)buffer;
break;
case USB_CDC_ACM_TYPE:
if (elength < sizeof(struct usb_cdc_acm_descriptor))
goto next_desc;
hdr->usb_cdc_acm_descriptor =
(struct usb_cdc_acm_descriptor *)buffer;
break;
case USB_CDC_ETHERNET_TYPE:
if (elength != sizeof(struct usb_cdc_ether_desc))
goto next_desc;
if (ether)
return -EINVAL;
ether = (struct usb_cdc_ether_desc *)buffer;
break;
case USB_CDC_CALL_MANAGEMENT_TYPE:
if (elength < sizeof(struct usb_cdc_call_mgmt_descriptor))
goto next_desc;
hdr->usb_cdc_call_mgmt_descriptor =
(struct usb_cdc_call_mgmt_descriptor *)buffer;
break;
case USB_CDC_DMM_TYPE:
if (elength < sizeof(struct usb_cdc_dmm_desc))
goto next_desc;
hdr->usb_cdc_dmm_desc =
(struct usb_cdc_dmm_desc *)buffer;
break;
case USB_CDC_MDLM_TYPE:
if (elength < sizeof(struct usb_cdc_mdlm_desc))
goto next_desc;
if (desc)
return -EINVAL;
desc = (struct usb_cdc_mdlm_desc *)buffer;
break;
case USB_CDC_MDLM_DETAIL_TYPE:
if (elength < sizeof(struct usb_cdc_mdlm_detail_desc))
goto next_desc;
if (detail)
return -EINVAL;
detail = (struct usb_cdc_mdlm_detail_desc *)buffer;
break;
case USB_CDC_NCM_TYPE:
if (elength < sizeof(struct usb_cdc_ncm_desc))
goto next_desc;
hdr->usb_cdc_ncm_desc = (struct usb_cdc_ncm_desc *)buffer;
break;
case USB_CDC_MBIM_TYPE:
if (elength < sizeof(struct usb_cdc_mbim_desc))
goto next_desc;
hdr->usb_cdc_mbim_desc = (struct usb_cdc_mbim_desc *)buffer;
break;
case USB_CDC_MBIM_EXTENDED_TYPE:
if (elength < sizeof(struct usb_cdc_mbim_extended_desc))
break;
hdr->usb_cdc_mbim_extended_desc =
(struct usb_cdc_mbim_extended_desc *)buffer;
break;
case CDC_PHONET_MAGIC_NUMBER:
hdr->phonet_magic_present = true;
break;
default:
/*
* there are LOTS more CDC descriptors that
* could legitimately be found here.
*/
dev_dbg(&intf->dev, "Ignoring descriptor: type %02x, length %ud\n",
buffer[2], elength);
goto next_desc;
}
cnt++;
next_desc:
buflen -= elength;
buffer += elength;
}
hdr->usb_cdc_union_desc = union_header;
hdr->usb_cdc_header_desc = header;
hdr->usb_cdc_mdlm_detail_desc = detail;
hdr->usb_cdc_mdlm_desc = desc;
hdr->usb_cdc_ether_desc = ether;
return cnt;
}
EXPORT_SYMBOL(cdc_parse_cdc_header);
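/*
 * A minimal usage sketch (hypothetical probe code, assumed to live in a
 * CDC class driver): hand the interface's extra descriptor bytes to
 * cdc_parse_cdc_header() and then consult the parsed header.
 */
#if 0
static int example_parse(struct usb_interface *intf)
{
	struct usb_cdc_parsed_header hdr;
	int cnt;

	cnt = cdc_parse_cdc_header(&hdr, intf,
				   intf->altsetting->extra,
				   intf->altsetting->extralen);
	if (cnt < 0)
		return cnt;
	if (hdr.usb_cdc_union_desc)
		dev_dbg(&intf->dev, "data interface is %d\n",
			hdr.usb_cdc_union_desc->bSlaveInterface0);
	return 0;
}
#endif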
| ./CrossVul/dataset_final_sorted/CWE-416/c/good_3994_0 |
crossvul-cpp_data_good_1390_3 | /* libcomps - C alternative to yum.comps library
* Copyright (C) 2013 Jindrich Luza
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
* USA
*/
#include "comps_radix.h"
#include "comps_utils.h"
#include <stdio.h>
void comps_rtree_data_destroy(COMPS_RTreeData * rtd) {
free(rtd->key);
if ((rtd->data) && (*rtd->data_destructor))
(*rtd->data_destructor)(rtd->data);
comps_hslist_destroy(&rtd->subnodes);
free(rtd);
}
inline void comps_rtree_data_destroy_v(void * rtd) {
comps_rtree_data_destroy((COMPS_RTreeData*)rtd);
}
inline COMPS_RTreeData * __comps_rtree_data_create(COMPS_RTree *rt, char *key,
unsigned int keylen,
void *data){
COMPS_RTreeData * rtd;
if ((rtd = malloc(sizeof(*rtd))) == NULL)
return NULL;
if ((rtd->key = malloc(sizeof(char) * (keylen+1))) == NULL) {
free(rtd);
return NULL;
}
memcpy(rtd->key, key, sizeof(char)*keylen);
rtd->key[keylen] = 0;
rtd->data = data;
    /* malloc() leaves the struct uninitialized, so is_leaf must be set on
     * both branches, not only when data is present */
    rtd->is_leaf = (data != NULL) ? 1 : 0;
rtd->data_destructor = &rt->data_destructor;
rtd->subnodes = comps_hslist_create();
comps_hslist_init(rtd->subnodes, NULL, NULL, &comps_rtree_data_destroy_v);
return rtd;
}
COMPS_RTreeData * comps_rtree_data_create(COMPS_RTree *rt,char * key,
void * data) {
COMPS_RTreeData * rtd;
rtd = __comps_rtree_data_create(rt, key, strlen(key), data);
return rtd;
}
COMPS_RTreeData * comps_rtree_data_create_n(COMPS_RTree *rt, char * key,
size_t keylen, void * data) {
COMPS_RTreeData * rtd;
rtd = __comps_rtree_data_create(rt, key, keylen, data);
return rtd;
}
COMPS_RTree * comps_rtree_create(void* (*data_constructor)(void*),
void* (*data_cloner)(void*),
void (*data_destructor)(void*)) {
COMPS_RTree *ret;
if ((ret = malloc(sizeof(COMPS_RTree))) == NULL)
return NULL;
ret->subnodes = comps_hslist_create();
comps_hslist_init(ret->subnodes, NULL, NULL, &comps_rtree_data_destroy_v);
if (ret->subnodes == NULL) {
free(ret);
return NULL;
}
ret->data_constructor = data_constructor;
ret->data_cloner = data_cloner;
ret->data_destructor = data_destructor;
return ret;
}
void comps_rtree_destroy(COMPS_RTree * rt) {
if (!rt) return;
comps_hslist_destroy(&(rt->subnodes));
free(rt);
}
void comps_rtree_print(COMPS_HSList * hl, unsigned deep) {
COMPS_HSListItem * it;
for (it = hl->first; it != NULL; it=it->next) {
printf("%d %s\n",deep, (((COMPS_RTreeData*)it->data)->key));
comps_rtree_print(((COMPS_RTreeData*)it->data)->subnodes, deep+1);
}
}
COMPS_RTree * comps_rtree_clone(COMPS_RTree *rt) {
COMPS_HSList *to_clone, *tmplist, *new_subnodes;
COMPS_RTree *ret;
COMPS_HSListItem *it, *it2;
COMPS_RTreeData *rtdata;
void *new_data;
if (!rt) return NULL;
to_clone = comps_hslist_create();
comps_hslist_init(to_clone, NULL, NULL, NULL);
ret = comps_rtree_create(rt->data_constructor, rt->data_cloner,
rt->data_destructor);
for (it = rt->subnodes->first; it != NULL; it = it->next) {
rtdata = comps_rtree_data_create(ret,
((COMPS_RTreeData*)it->data)->key, NULL);
if (((COMPS_RTreeData*)it->data)->data != NULL)
new_data = rt->data_cloner(((COMPS_RTreeData*)it->data)->data);
else
new_data = NULL;
comps_hslist_destroy(&rtdata->subnodes);
rtdata->subnodes = ((COMPS_RTreeData*)it->data)->subnodes;
rtdata->data = new_data;
comps_hslist_append(ret->subnodes, rtdata, 0);
comps_hslist_append(to_clone, rtdata, 0);
}
while (to_clone->first) {
it2 = to_clone->first;
tmplist = ((COMPS_RTreeData*)it2->data)->subnodes;
comps_hslist_remove(to_clone, to_clone->first);
new_subnodes = comps_hslist_create();
comps_hslist_init(new_subnodes, NULL, NULL, &comps_rtree_data_destroy_v);
for (it = tmplist->first; it != NULL; it = it->next) {
rtdata = comps_rtree_data_create(ret,
((COMPS_RTreeData*)it->data)->key, NULL);
if (((COMPS_RTreeData*)it->data)->data != NULL)
new_data = rt->data_cloner(((COMPS_RTreeData*)it->data)->data);
else
new_data = NULL;
comps_hslist_destroy(&rtdata->subnodes);
rtdata->subnodes = ((COMPS_RTreeData*)it->data)->subnodes;
rtdata->data = new_data;
comps_hslist_append(new_subnodes, rtdata, 0);
comps_hslist_append(to_clone, rtdata, 0);
}
((COMPS_RTreeData*)it2->data)->subnodes = new_subnodes;
free(it2);
}
comps_hslist_destroy(&to_clone);
return ret;
}
void comps_rtree_values_walk(COMPS_RTree * rt, void* udata,
void (*walk_f)(void*, void*)) {
COMPS_HSList *tmplist, *tmp_subnodes;
COMPS_HSListItem *it;
tmplist = comps_hslist_create();
comps_hslist_init(tmplist, NULL, NULL, NULL);
comps_hslist_append(tmplist, rt->subnodes, 0);
while (tmplist->first != NULL) {
it = tmplist->first;
comps_hslist_remove(tmplist, tmplist->first);
tmp_subnodes = (COMPS_HSList*)it->data;
for (it = tmp_subnodes->first; it != NULL; it=it->next) {
if (((COMPS_RTreeData*)it->data)->subnodes->first) {
comps_hslist_append(tmplist,
((COMPS_RTreeData*)it->data)->subnodes, 0);
}
if (((COMPS_RTreeData*)it->data)->data != NULL) {
walk_f(udata, ((COMPS_RTreeData*)it->data)->data);
}
}
}
comps_hslist_destroy(&tmplist);
}
void __comps_rtree_set(COMPS_RTree * rt, char * key, size_t len, void * data)
{
COMPS_HSListItem *it, *lesser;
COMPS_HSList *subnodes;
COMPS_RTreeData *rtd;
static COMPS_RTreeData *rtdata;
size_t offset=0, _len;
unsigned x, found = 0;
void *ndata;
char ended;//, tmpch;
if (rt->subnodes == NULL)
return;
if (rt->data_constructor) {
ndata = rt->data_constructor(data);
} else {
ndata = data;
}
subnodes = rt->subnodes;
while (offset != len)
{
found = 0;
lesser = NULL;
for (it = subnodes->first; it != NULL; it=it->next) {
if (((COMPS_RTreeData*)it->data)->key[0] == key[offset]) {
found = 1;
break;
} else if (((COMPS_RTreeData*)it->data)->key[0] < key[offset]) {
lesser = it;
}
}
if (!found) { // not found in subnodes; create new subnode
rtd = comps_rtree_data_create(rt, key+offset, ndata);
if (!lesser) {
comps_hslist_prepend(subnodes, rtd, 0);
} else {
comps_hslist_insert_after(subnodes, lesser, rtd, 0);
}
return;
} else {
rtdata = (COMPS_RTreeData*)it->data;
ended = 0;
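            /* ended is a 2-bit flag: bit 0 = stored key exhausted,
             * bit 1 = search key exhausted; 3 means exact match */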
for (x=1; ;x++) {
if (rtdata->key[x] == 0) ended += 1;
if (x == len - offset) ended += 2;
if (ended != 0) break;
if (key[offset+x] != rtdata->key[x]) break;
}
if (ended == 3) { //keys equals; data replacement
rt->data_destructor(rtdata->data);
rtdata->data = ndata;
return;
} else if (ended == 2) { //global key ends first; make global leaf
comps_hslist_remove(subnodes, it);
it->next = NULL;
rtd = comps_rtree_data_create_n(rt, key+offset,
len-offset, ndata);
comps_hslist_append(subnodes, rtd, 0);
((COMPS_RTreeData*)subnodes->last->data)->subnodes->last = it;
((COMPS_RTreeData*)subnodes->last->data)->subnodes->first = it;
_len = len - offset;
memmove(rtdata->key,rtdata->key + _len,
strlen(rtdata->key) - _len);
rtdata->key[strlen(rtdata->key) - _len] = 0;
rtdata->key = realloc(rtdata->key,
sizeof(char)* (strlen(rtdata->key)+1));
return;
} else if (ended == 1) { //local key ends first; go deeper
subnodes = rtdata->subnodes;
offset += x;
} else {
void *tmpdata = rtdata->data;
COMPS_HSList *tmpnodes = rtdata->subnodes;
int cmpret = strcmp(key+offset+x, rtdata->key+x);
rtdata->subnodes = comps_hslist_create();
comps_hslist_init(rtdata->subnodes, NULL, NULL,
&comps_rtree_data_destroy_v);
rtdata->data = NULL;
if (cmpret > 0) {
rtd = comps_rtree_data_create(rt, rtdata->key+x, tmpdata);
comps_hslist_destroy(&rtd->subnodes);
rtd->subnodes = tmpnodes;
comps_hslist_append(rtdata->subnodes, rtd, 0);
rtd = comps_rtree_data_create(rt, key+offset+x, ndata);
comps_hslist_append(rtdata->subnodes, rtd, 0);
} else {
rtd = comps_rtree_data_create(rt, key+offset+x, ndata);
comps_hslist_append(rtdata->subnodes, rtd, 0);
rtd = comps_rtree_data_create(rt, rtdata->key+x, tmpdata);
comps_hslist_destroy(&rtd->subnodes);
rtd->subnodes = tmpnodes;
comps_hslist_append(rtdata->subnodes, rtd, 0);
}
rtdata->key = realloc(rtdata->key, sizeof(char)*(x+1));
rtdata->key[x] = 0;
return;
}
}
}
}
void comps_rtree_set(COMPS_RTree * rt, char * key, void * data)
{
__comps_rtree_set(rt, key, strlen(key), data);
}
void comps_rtree_set_n(COMPS_RTree * rt, char * key, size_t keylen, void * data)
{
__comps_rtree_set(rt, key, keylen, data);
}
void* comps_rtree_get(COMPS_RTree * rt, const char * key) {
COMPS_HSList * subnodes;
COMPS_HSListItem * it = NULL;
COMPS_RTreeData * rtdata;
unsigned int offset, len, x;
char found, ended;
len = strlen(key);
offset = 0;
subnodes = rt->subnodes;
while (offset != len) {
found = 0;
for (it = subnodes->first; it != NULL; it=it->next) {
if (((COMPS_RTreeData*)it->data)->key[0] == key[offset]) {
found = 1;
break;
}
}
if (!found) {
//printf("not found\n");
return NULL;
}
rtdata = (COMPS_RTreeData*)it->data;
for (x=1; ;x++) {
ended=0;
if (x == strlen(rtdata->key)) ended += 1;
if (x == len-offset) ended += 2;
if (ended != 0) break;
if (key[offset+x] != rtdata->key[x]) break;
}
//printf("ended :%d key :%s|\n", ended, rtdata->key);
if (ended == 3) return rtdata->data;
else if (ended == 1) offset+=x;
else return NULL;
subnodes = ((COMPS_RTreeData*)it->data)->subnodes;
}
if (it != NULL)
return ((COMPS_RTreeData*)it->data)->data;
else return NULL;
}
void comps_rtree_unset(COMPS_RTree * rt, const char * key) {
COMPS_HSList * subnodes;
COMPS_HSListItem * it;
COMPS_RTreeData * rtdata;
unsigned int offset, len, x;
char found, ended;
COMPS_HSList * path;
struct Relation {
COMPS_HSList * parent_nodes;
COMPS_HSListItem * child_it;
} *relation;
path = comps_hslist_create();
comps_hslist_init(path, NULL, NULL, &free);
len = strlen(key);
offset = 0;
subnodes = rt->subnodes;
while (offset != len) {
found = 0;
for (it = subnodes->first; it != NULL; it=it->next) {
if (((COMPS_RTreeData*)it->data)->key[0] == key[offset]) {
found = 1;
break;
}
}
if (!found) {
comps_hslist_destroy(&path);
return;
}
rtdata = (COMPS_RTreeData*)it->data;
for (x=1; ;x++) {
ended=0;
if (rtdata->key[x] == 0) ended += 1;
if (x == len - offset) ended += 2;
if (ended != 0) break;
if (key[offset+x] != rtdata->key[x]) break;
}
if (ended == 3) {
            /* remove node from tree only if there's no descendant */
if (rtdata->subnodes->last == NULL) {
//printf("removing all\n");
comps_hslist_remove(subnodes, it);
comps_rtree_data_destroy(rtdata);
free(it);
}
else if (rtdata->data_destructor != NULL) {
//printf("removing data only\n");
(*rtdata->data_destructor)(rtdata->data);
rtdata->is_leaf = 0;
rtdata->data = NULL;
}
if (path->last == NULL) {
comps_hslist_destroy(&path);
return;
}
rtdata = (COMPS_RTreeData*)
((struct Relation*)path->last->data)->child_it->data;
            /* remove all predecessors of the deleted node (iteratively)
             * that have no children left */
            while (rtdata->subnodes->last == NULL) {
                //printf("removing '%s'\n", rtdata->key);
                comps_rtree_data_destroy(rtdata);
                comps_hslist_remove(
                            ((struct Relation*)path->last->data)->parent_nodes,
                            ((struct Relation*)path->last->data)->child_it);
                free(((struct Relation*)path->last->data)->child_it);
                it = path->last;
                comps_hslist_remove(path, path->last);
                free(it);
                if (path->last == NULL) /* walked past the top of the path */
                    break;
                rtdata = (COMPS_RTreeData*)
                        ((struct Relation*)path->last->data)->child_it->data;
            }
comps_hslist_destroy(&path);
return;
}
else if (ended == 1) offset+=x;
else {
comps_hslist_destroy(&path);
return;
}
if ((relation = malloc(sizeof(struct Relation))) == NULL) {
comps_hslist_destroy(&path);
return;
}
subnodes = ((COMPS_RTreeData*)it->data)->subnodes;
relation->parent_nodes = subnodes;
relation->child_it = it;
comps_hslist_append(path, (void*)relation, 0);
}
comps_hslist_destroy(&path);
return;
}
void comps_rtree_clear(COMPS_RTree * rt) {
COMPS_HSListItem *it, *oldit;
if (rt==NULL) return;
if (rt->subnodes == NULL) return;
oldit = rt->subnodes->first;
it = (oldit)?oldit->next:NULL;
for (;it != NULL; it=it->next) {
if (rt->subnodes->data_destructor != NULL)
rt->subnodes->data_destructor(oldit->data);
free(oldit);
oldit = it;
}
if (oldit) {
if (rt->subnodes->data_destructor != NULL)
rt->subnodes->data_destructor(oldit->data);
free(oldit);
}
}
inline COMPS_HSList* __comps_rtree_all(COMPS_RTree * rt, char keyvalpair) {
COMPS_HSList *to_process, *ret;
COMPS_HSListItem *hsit, *oldit;
size_t x;
struct Pair {
char *key;
void *data;
COMPS_HSList *subnodes;
} *pair, *current_pair=NULL;//, *oldpair=NULL;
COMPS_RTreePair *rtpair;
to_process = comps_hslist_create();
comps_hslist_init(to_process, NULL, NULL, &free);
ret = comps_hslist_create();
if (keyvalpair == 0)
comps_hslist_init(ret, NULL, NULL, &free);
else if (keyvalpair == 1)
comps_hslist_init(ret, NULL, NULL, NULL);
else
comps_hslist_init(ret, NULL, NULL, &comps_rtree_pair_destroy_v);
for (hsit = rt->subnodes->first; hsit != NULL; hsit = hsit->next) {
pair = malloc(sizeof(struct Pair));
pair->key = __comps_strcpy(((COMPS_RTreeData*)hsit->data)->key);
pair->data = ((COMPS_RTreeData*)hsit->data)->data;
pair->subnodes = ((COMPS_RTreeData*)hsit->data)->subnodes;
comps_hslist_append(to_process, pair, 0);
}
while (to_process->first) {
//oldpair = current_pair;
current_pair = to_process->first->data;
oldit = to_process->first;
comps_hslist_remove(to_process, to_process->first);
if (current_pair->data) {
if (keyvalpair == 0) {
comps_hslist_append(ret, __comps_strcpy(current_pair->key), 0);
} else if (keyvalpair == 1) {
comps_hslist_append(ret, current_pair->data, 0);
} else {
rtpair = malloc(sizeof(COMPS_RTreePair));
rtpair->key = __comps_strcpy(current_pair->key);
rtpair->data = current_pair->data;
comps_hslist_append(ret, rtpair, 0);
}
}
for (hsit = current_pair->subnodes->first, x = 0;
hsit != NULL; hsit = hsit->next, x++) {
pair = malloc(sizeof(struct Pair));
pair->key = __comps_strcat(current_pair->key,
((COMPS_RTreeData*)hsit->data)->key);
pair->data = ((COMPS_RTreeData*)hsit->data)->data;
pair->subnodes = ((COMPS_RTreeData*)hsit->data)->subnodes;
comps_hslist_insert_at(to_process, x, pair, 0);
}
free(current_pair->key);
free(current_pair);
free(oldit);
}
comps_hslist_destroy(&to_process);
return ret;
}
void comps_rtree_unite(COMPS_RTree *rt1, COMPS_RTree *rt2) {
COMPS_HSList *tmplist, *tmp_subnodes;
COMPS_HSListItem *it;
struct Pair {
COMPS_HSList * subnodes;
char * key;
} *pair, *parent_pair;
pair = malloc(sizeof(struct Pair));
pair->subnodes = rt2->subnodes;
pair->key = NULL;
tmplist = comps_hslist_create();
comps_hslist_init(tmplist, NULL, NULL, &free);
comps_hslist_append(tmplist, pair, 0);
while (tmplist->first != NULL) {
it = tmplist->first;
comps_hslist_remove(tmplist, tmplist->first);
tmp_subnodes = ((struct Pair*)it->data)->subnodes;
parent_pair = (struct Pair*) it->data;
free(it);
for (it = tmp_subnodes->first; it != NULL; it=it->next) {
pair = malloc(sizeof(struct Pair));
pair->subnodes = ((COMPS_RTreeData*)it->data)->subnodes;
if (parent_pair->key != NULL) {
pair->key = malloc(sizeof(char)
* (strlen(((COMPS_RTreeData*)it->data)->key)
+ strlen(parent_pair->key) + 1));
memcpy(pair->key, parent_pair->key,
sizeof(char) * strlen(parent_pair->key));
memcpy(pair->key + strlen(parent_pair->key),
((COMPS_RTreeData*)it->data)->key,
sizeof(char)*(strlen(((COMPS_RTreeData*)it->data)->key)+1));
} else {
pair->key = malloc(sizeof(char)*
(strlen(((COMPS_RTreeData*)it->data)->key) +1));
memcpy(pair->key, ((COMPS_RTreeData*)it->data)->key,
sizeof(char)*(strlen(((COMPS_RTreeData*)it->data)->key)+1));
}
/* current node has data */
if (((COMPS_RTreeData*)it->data)->data != NULL) {
comps_rtree_set(rt1,
pair->key,
rt2->data_cloner(((COMPS_RTreeData*)it->data)->data));
}
if (((COMPS_RTreeData*)it->data)->subnodes->first) {
comps_hslist_append(tmplist, pair, 0);
} else {
free(pair->key);
free(pair);
}
}
free(parent_pair->key);
free(parent_pair);
}
comps_hslist_destroy(&tmplist);
}
COMPS_RTree* comps_rtree_union(COMPS_RTree *rt1, COMPS_RTree *rt2){
COMPS_RTree *ret;
ret = comps_rtree_clone(rt2);
comps_rtree_unite(ret, rt1);
return ret;
}
COMPS_HSList* comps_rtree_keys(COMPS_RTree * rt) {
return __comps_rtree_all(rt, 0);
}
COMPS_HSList* comps_rtree_values(COMPS_RTree * rt) {
return __comps_rtree_all(rt, 1);
}
COMPS_HSList* comps_rtree_pairs(COMPS_RTree * rt) {
return __comps_rtree_all(rt, 2);
}
inline void comps_rtree_pair_destroy(COMPS_RTreePair * pair) {
free(pair->key);
free(pair);
}
inline void comps_rtree_pair_destroy_v(void * pair) {
free(((COMPS_RTreePair *)pair)->key);
free(pair);
}
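/*
 * A minimal usage sketch (illustrative only, not part of the library):
 * string payloads with a strdup-based constructor/cloner and free() as
 * the destructor, matching the callback signatures above.
 */
#if 0
static void *str_ctor(void *s) { return strdup((char *)s); }

static void rtree_example(void) {
    COMPS_RTree *rt = comps_rtree_create(&str_ctor, &str_ctor, &free);
    /* the constructor copies the payload, so a literal is fine here */
    comps_rtree_set(rt, "group/core", (void *)"core payload");
    char *val = (char *)comps_rtree_get(rt, "group/core");
    (void)val; /* points at the stored copy; the tree still owns it */
    comps_rtree_unset(rt, "group/core"); /* destructor frees the copy */
    comps_rtree_destroy(rt);
}
#endif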
| ./CrossVul/dataset_final_sorted/CWE-416/c/good_1390_3 |
crossvul-cpp_data_good_3307_0 | /* radare - LGPL - Copyright 2010-2017 - nibble, pancake */
#include <stdio.h>
#include <r_types.h>
#include <r_util.h>
#include "mach0.h"
#define bprintf if(bin->verbose)eprintf
typedef struct _ulebr {
ut8 *p;
} ulebr;
/* move into struct MACH0_(obj_t) *bin */
static bool little_;
static ut64 read_uleb128(ulebr *r, ut8 *end) {
ut64 result = 0;
int bit = 0;
ut64 slice = 0;
ut8 *p = r->p;
do {
if (p == end) {
eprintf ("malformed uleb128");
break;
}
slice = *p & 0x7f;
if (bit > 63) {
eprintf ("uleb128 too big for uint64, bit=%d, result=0x%"PFMT64x, bit, result);
} else {
result |= (slice << bit);
bit += 7;
}
} while (*p++ & 0x80);
r->p = p;
return result;
}
static st64 read_sleb128(ulebr *r, ut8 *end) {
st64 result = 0;
int bit = 0;
ut8 byte = 0;
ut8 *p = r->p;
do {
if (p == end) {
eprintf ("malformed sleb128");
break;
}
byte = *p++;
result |= (((st64)(byte & 0x7f)) << bit);
bit += 7;
} while (byte & 0x80);
// sign extend negative numbers
if ((byte & 0x40)) {
result |= (-1LL) << bit;
}
r->p = p;
return result;
}
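/*
 * Worked example (informative): the ULEB128 byte sequence e5 8e 26
 * decodes to 624485:
 *   0xe5 & 0x7f = 0x65  -> result |= 0x65 << 0
 *   0x8e & 0x7f = 0x0e  -> result |= 0x0e << 7
 *   0x26 & 0x7f = 0x26  -> result |= 0x26 << 14  (high bit clear: stop)
 *   101 + 1792 + 622592 = 624485
 */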
static ut64 entry_to_vaddr(struct MACH0_(obj_t)* bin) {
switch (bin->main_cmd.cmd) {
case LC_MAIN:
return bin->entry + bin->baddr;
case LC_UNIXTHREAD:
case LC_THREAD:
return bin->entry;
default:
return 0;
}
}
static ut64 addr_to_offset(struct MACH0_(obj_t)* bin, ut64 addr) {
ut64 segment_base, segment_size;
int i;
if (!bin->segs) {
return 0;
}
for (i = 0; i < bin->nsegs; i++) {
segment_base = (ut64)bin->segs[i].vmaddr;
segment_size = (ut64)bin->segs[i].vmsize;
if (addr >= segment_base && addr < segment_base + segment_size) {
return bin->segs[i].fileoff + (addr - segment_base);
}
}
return 0;
}
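/* Example (informative): with one segment where vmaddr = 0x100000000,
 * vmsize = 0x4000 and fileoff = 0, addr_to_offset(bin, 0x100001000)
 * returns 0x1000; an address outside every segment yields 0, which is
 * also the error value, so callers cannot tell the two cases apart. */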
static int init_hdr(struct MACH0_(obj_t)* bin) {
ut8 magicbytes[4]= {0};
ut8 machohdrbytes[sizeof (struct MACH0_(mach_header))] = {0};
int len;
if (r_buf_read_at (bin->b, 0, magicbytes, 4) < 1) {
return false;
}
if (r_read_le32(magicbytes) == 0xfeedface) {
bin->big_endian = false;
} else if (r_read_be32(magicbytes) == 0xfeedface) {
bin->big_endian = true;
} else if (r_read_le32(magicbytes) == FAT_MAGIC) {
bin->big_endian = false;
} else if (r_read_be32(magicbytes) == FAT_MAGIC) {
bin->big_endian = true;
} else if (r_read_le32(magicbytes) == 0xfeedfacf) {
bin->big_endian = false;
} else if (r_read_be32(magicbytes) == 0xfeedfacf) {
bin->big_endian = true;
} else {
return false; // object files are magic == 0, but body is different :?
}
len = r_buf_read_at (bin->b, 0, machohdrbytes, sizeof (machohdrbytes));
if (len != sizeof (machohdrbytes)) {
bprintf ("Error: read (hdr)\n");
return false;
}
bin->hdr.magic = r_read_ble (&machohdrbytes[0], bin->big_endian, 32);
bin->hdr.cputype = r_read_ble (&machohdrbytes[4], bin->big_endian, 32);
bin->hdr.cpusubtype = r_read_ble (&machohdrbytes[8], bin->big_endian, 32);
bin->hdr.filetype = r_read_ble (&machohdrbytes[12], bin->big_endian, 32);
bin->hdr.ncmds = r_read_ble (&machohdrbytes[16], bin->big_endian, 32);
bin->hdr.sizeofcmds = r_read_ble (&machohdrbytes[20], bin->big_endian, 32);
bin->hdr.flags = r_read_ble (&machohdrbytes[24], bin->big_endian, 32);
#if R_BIN_MACH064
bin->hdr.reserved = r_read_ble (&machohdrbytes[28], bin->big_endian, 32);
#endif
sdb_set (bin->kv, "mach0_header.format",
"xxxxddx "
"magic cputype cpusubtype filetype ncmds sizeofcmds flags", 0);
sdb_num_set (bin->kv, "mach0_header.offset", 0, 0); // wat about fatmach0?
sdb_set (bin->kv, "mach_filetype.cparse", "enum mach_filetype{MH_OBJECT=1,"
"MH_EXECUTE=2, MH_FVMLIB=3, MH_CORE=4, MH_PRELOAD=5, MH_DYLIB=6,"
"MH_DYLINKER=7, MH_BUNDLE=8, MH_DYLIB_STUB=9, MH_DSYM=10,"
"MH_KEXT_BUNDLE=11}"
,0);
sdb_set (bin->kv, "mach_flags.cparse", "enum mach_flags{MH_NOUNDEFS=1,"
"MH_INCRLINK=2,MH_DYLDLINK=4,MH_BINDATLOAD=8,MH_PREBOUND=0x10,"
"MH_SPLIT_SEGS=0x20,MH_LAZY_INIT=0x40,MH_TWOLEVEL=0x80,"
"MH_FORCE_FLAT=0x100,MH_NOMULTIDEFS=0x200,MH_NOFIXPREBINDING=0x400,"
"MH_PREBINDABLE=0x800, MH_ALLMODSBOUND=0x1000,"
"MH_SUBSECTIONS_VIA_SYMBOLS=0x2000,"
"MH_CANONICAL=0x4000,MH_WEAK_DEFINES=0x8000,"
"MH_BINDS_TO_WEAK=0x10000,MH_ALLOW_STACK_EXECUTION=0x20000,"
"MH_ROOT_SAFE=0x40000,MH_SETUID_SAFE=0x80000,"
"MH_NO_REEXPORTED_DYLIBS=0x100000,MH_PIE=0x200000,"
"MH_DEAD_STRIPPABLE_DYLIB=0x400000,"
"MH_HAS_TLV_DESCRIPTORS=0x800000,"
"MH_NO_HEAP_EXECUTION=0x1000000 }",0);
return true;
}
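/* Informative note: 0xfeedface and 0xfeedfacf are the 32- and 64-bit
 * Mach-O magics and FAT_MAGIC marks a multi-arch (fat) file; checking
 * each value in both byte orders above doubles as endianness detection,
 * so the byte-swapped forms (MH_CIGAM, MH_CIGAM_64) need no extra
 * constants. */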
static int parse_segments(struct MACH0_(obj_t)* bin, ut64 off) {
int i, j, k, sect, len;
ut32 size_sects;
ut8 segcom[sizeof (struct MACH0_(segment_command))] = {0};
ut8 sec[sizeof (struct MACH0_(section))] = {0};
if (!UT32_MUL (&size_sects, bin->nsegs, sizeof (struct MACH0_(segment_command)))) {
return false;
}
if (!size_sects || size_sects > bin->size) {
return false;
}
if (off > bin->size || off + sizeof (struct MACH0_(segment_command)) > bin->size) {
return false;
}
if (!(bin->segs = realloc (bin->segs, bin->nsegs * sizeof(struct MACH0_(segment_command))))) {
perror ("realloc (seg)");
return false;
}
j = bin->nsegs - 1;
len = r_buf_read_at (bin->b, off, segcom, sizeof (struct MACH0_(segment_command)));
if (len != sizeof (struct MACH0_(segment_command))) {
bprintf ("Error: read (seg)\n");
return false;
}
i = 0;
bin->segs[j].cmd = r_read_ble32 (&segcom[i], bin->big_endian);
i += sizeof (ut32);
bin->segs[j].cmdsize = r_read_ble32 (&segcom[i], bin->big_endian);
i += sizeof (ut32);
memcpy (&bin->segs[j].segname, &segcom[i], 16);
i += 16;
#if R_BIN_MACH064
bin->segs[j].vmaddr = r_read_ble64 (&segcom[i], bin->big_endian);
i += sizeof (ut64);
bin->segs[j].vmsize = r_read_ble64 (&segcom[i], bin->big_endian);
i += sizeof (ut64);
bin->segs[j].fileoff = r_read_ble64 (&segcom[i], bin->big_endian);
i += sizeof (ut64);
bin->segs[j].filesize = r_read_ble64 (&segcom[i], bin->big_endian);
i += sizeof (ut64);
#else
bin->segs[j].vmaddr = r_read_ble32 (&segcom[i], bin->big_endian);
i += sizeof (ut32);
bin->segs[j].vmsize = r_read_ble32 (&segcom[i], bin->big_endian);
i += sizeof (ut32);
bin->segs[j].fileoff = r_read_ble32 (&segcom[i], bin->big_endian);
i += sizeof (ut32);
bin->segs[j].filesize = r_read_ble32 (&segcom[i], bin->big_endian);
i += sizeof (ut32);
#endif
bin->segs[j].maxprot = r_read_ble32 (&segcom[i], bin->big_endian);
i += sizeof (ut32);
bin->segs[j].initprot = r_read_ble32 (&segcom[i], bin->big_endian);
i += sizeof (ut32);
bin->segs[j].nsects = r_read_ble32 (&segcom[i], bin->big_endian);
i += sizeof (ut32);
bin->segs[j].flags = r_read_ble32 (&segcom[i], bin->big_endian);
sdb_num_set (bin->kv, sdb_fmt (0, "mach0_segment_%d.offset", j), off, 0);
sdb_num_set (bin->kv, "mach0_segments.count", 0, 0);
sdb_set (bin->kv, "mach0_segment.format",
"xd[16]zxxxxoodx "
"cmd cmdsize segname vmaddr vmsize "
"fileoff filesize maxprot initprot nsects flags", 0);
if (bin->segs[j].nsects > 0) {
sect = bin->nsects;
bin->nsects += bin->segs[j].nsects;
if (bin->nsects > 128) {
int new_nsects = bin->nsects & 0xf;
bprintf ("WARNING: mach0 header contains too many sections (%d). Wrapping to %d\n",
bin->nsects, new_nsects);
bin->nsects = new_nsects;
}
if ((int)bin->nsects < 1) {
bprintf ("Warning: Invalid number of sections\n");
bin->nsects = sect;
return false;
}
if (!UT32_MUL (&size_sects, bin->nsects-sect, sizeof (struct MACH0_(section)))){
bin->nsects = sect;
return false;
}
if (!size_sects || size_sects > bin->size){
bin->nsects = sect;
return false;
}
if (bin->segs[j].cmdsize != sizeof (struct MACH0_(segment_command)) \
+ (sizeof (struct MACH0_(section))*bin->segs[j].nsects)){
bin->nsects = sect;
return false;
}
if (off + sizeof (struct MACH0_(segment_command)) > bin->size ||\
off + sizeof (struct MACH0_(segment_command)) + size_sects > bin->size){
bin->nsects = sect;
return false;
}
if (!(bin->sects = realloc (bin->sects, bin->nsects * sizeof (struct MACH0_(section))))) {
perror ("realloc (sects)");
bin->nsects = sect;
return false;
}
for (k = sect, j = 0; k < bin->nsects; k++, j++) {
ut64 offset = off + sizeof (struct MACH0_(segment_command)) + j * sizeof (struct MACH0_(section));
len = r_buf_read_at (bin->b, offset, sec, sizeof (struct MACH0_(section)));
if (len != sizeof (struct MACH0_(section))) {
bprintf ("Error: read (sects)\n");
bin->nsects = sect;
return false;
}
i = 0;
memcpy (&bin->sects[k].sectname, &sec[i], 16);
i += 16;
memcpy (&bin->sects[k].segname, &sec[i], 16);
bin->sects[k].segname[15] = 0;
i += 16;
#if R_BIN_MACH064
bin->sects[k].addr = r_read_ble64 (&sec[i], bin->big_endian);
i += sizeof (ut64);
bin->sects[k].size = r_read_ble64 (&sec[i], bin->big_endian);
i += sizeof (ut64);
#else
bin->sects[k].addr = r_read_ble32 (&sec[i], bin->big_endian);
i += sizeof (ut32);
bin->sects[k].size = r_read_ble32 (&sec[i], bin->big_endian);
i += sizeof (ut32);
#endif
bin->sects[k].offset = r_read_ble32 (&sec[i], bin->big_endian);
i += sizeof (ut32);
bin->sects[k].align = r_read_ble32 (&sec[i], bin->big_endian);
i += sizeof (ut32);
bin->sects[k].reloff = r_read_ble32 (&sec[i], bin->big_endian);
i += sizeof (ut32);
bin->sects[k].nreloc = r_read_ble32 (&sec[i], bin->big_endian);
i += sizeof (ut32);
bin->sects[k].flags = r_read_ble32 (&sec[i], bin->big_endian);
i += sizeof (ut32);
bin->sects[k].reserved1 = r_read_ble32 (&sec[i], bin->big_endian);
i += sizeof (ut32);
bin->sects[k].reserved2 = r_read_ble32 (&sec[i], bin->big_endian);
#if R_BIN_MACH064
i += sizeof (ut32);
bin->sects[k].reserved3 = r_read_ble32 (&sec[i], bin->big_endian);
#endif
}
}
return true;
}
static int parse_symtab(struct MACH0_(obj_t)* bin, ut64 off) {
struct symtab_command st;
ut32 size_sym;
int i;
ut8 symt[sizeof (struct symtab_command)] = {0};
ut8 nlst[sizeof (struct MACH0_(nlist))] = {0};
if (off > (ut64)bin->size || off + sizeof (struct symtab_command) > (ut64)bin->size) {
return false;
}
int len = r_buf_read_at (bin->b, off, symt, sizeof (struct symtab_command));
if (len != sizeof (struct symtab_command)) {
bprintf ("Error: read (symtab)\n");
return false;
}
st.cmd = r_read_ble32 (&symt[0], bin->big_endian);
st.cmdsize = r_read_ble32 (&symt[4], bin->big_endian);
st.symoff = r_read_ble32 (&symt[8], bin->big_endian);
st.nsyms = r_read_ble32 (&symt[12], bin->big_endian);
st.stroff = r_read_ble32 (&symt[16], bin->big_endian);
st.strsize = r_read_ble32 (&symt[20], bin->big_endian);
bin->symtab = NULL;
bin->nsymtab = 0;
if (st.strsize > 0 && st.strsize < bin->size && st.nsyms > 0) {
bin->nsymtab = st.nsyms;
if (st.stroff > bin->size || st.stroff + st.strsize > bin->size) {
return false;
}
if (!UT32_MUL (&size_sym, bin->nsymtab, sizeof (struct MACH0_(nlist)))) {
bprintf("fail2\n");
return false;
}
if (!size_sym) {
bprintf("fail3\n");
return false;
}
if (st.symoff > bin->size || st.symoff + size_sym > bin->size) {
bprintf("fail4\n");
return false;
}
if (!(bin->symstr = calloc (1, st.strsize + 2))) {
perror ("calloc (symstr)");
return false;
}
bin->symstrlen = st.strsize;
len = r_buf_read_at (bin->b, st.stroff, (ut8*)bin->symstr, st.strsize);
if (len != st.strsize) {
bprintf ("Error: read (symstr)\n");
R_FREE (bin->symstr);
return false;
}
if (!(bin->symtab = calloc (bin->nsymtab, sizeof (struct MACH0_(nlist))))) {
perror ("calloc (symtab)");
return false;
}
for (i = 0; i < bin->nsymtab; i++) {
len = r_buf_read_at (bin->b, st.symoff + (i * sizeof (struct MACH0_(nlist))),
nlst, sizeof (struct MACH0_(nlist)));
if (len != sizeof (struct MACH0_(nlist))) {
bprintf ("Error: read (nlist)\n");
R_FREE (bin->symtab);
return false;
}
			// XXX not very safe: what if it is n_un.n_name instead?
bin->symtab[i].n_strx = r_read_ble32 (&nlst[0], bin->big_endian);
bin->symtab[i].n_type = r_read_ble8 (&nlst[4]);
bin->symtab[i].n_sect = r_read_ble8 (&nlst[5]);
bin->symtab[i].n_desc = r_read_ble16 (&nlst[6], bin->big_endian);
#if R_BIN_MACH064
bin->symtab[i].n_value = r_read_ble64 (&nlst[8], bin->big_endian);
#else
bin->symtab[i].n_value = r_read_ble32 (&nlst[8], bin->big_endian);
#endif
}
}
return true;
}
static int parse_dysymtab(struct MACH0_(obj_t)* bin, ut64 off) {
int len, i;
ut32 size_tab;
ut8 dysym[sizeof (struct dysymtab_command)] = {0};
ut8 dytoc[sizeof (struct dylib_table_of_contents)] = {0};
ut8 dymod[sizeof (struct MACH0_(dylib_module))] = {0};
ut8 idsyms[sizeof (ut32)] = {0};
if (off > bin->size || off + sizeof (struct dysymtab_command) > bin->size) {
return false;
}
len = r_buf_read_at(bin->b, off, dysym, sizeof (struct dysymtab_command));
if (len != sizeof (struct dysymtab_command)) {
bprintf ("Error: read (dysymtab)\n");
return false;
}
bin->dysymtab.cmd = r_read_ble32 (&dysym[0], bin->big_endian);
bin->dysymtab.cmdsize = r_read_ble32 (&dysym[4], bin->big_endian);
bin->dysymtab.ilocalsym = r_read_ble32 (&dysym[8], bin->big_endian);
bin->dysymtab.nlocalsym = r_read_ble32 (&dysym[12], bin->big_endian);
bin->dysymtab.iextdefsym = r_read_ble32 (&dysym[16], bin->big_endian);
bin->dysymtab.nextdefsym = r_read_ble32 (&dysym[20], bin->big_endian);
bin->dysymtab.iundefsym = r_read_ble32 (&dysym[24], bin->big_endian);
bin->dysymtab.nundefsym = r_read_ble32 (&dysym[28], bin->big_endian);
bin->dysymtab.tocoff = r_read_ble32 (&dysym[32], bin->big_endian);
bin->dysymtab.ntoc = r_read_ble32 (&dysym[36], bin->big_endian);
bin->dysymtab.modtaboff = r_read_ble32 (&dysym[40], bin->big_endian);
bin->dysymtab.nmodtab = r_read_ble32 (&dysym[44], bin->big_endian);
bin->dysymtab.extrefsymoff = r_read_ble32 (&dysym[48], bin->big_endian);
bin->dysymtab.nextrefsyms = r_read_ble32 (&dysym[52], bin->big_endian);
bin->dysymtab.indirectsymoff = r_read_ble32 (&dysym[56], bin->big_endian);
bin->dysymtab.nindirectsyms = r_read_ble32 (&dysym[60], bin->big_endian);
bin->dysymtab.extreloff = r_read_ble32 (&dysym[64], bin->big_endian);
bin->dysymtab.nextrel = r_read_ble32 (&dysym[68], bin->big_endian);
bin->dysymtab.locreloff = r_read_ble32 (&dysym[72], bin->big_endian);
bin->dysymtab.nlocrel = r_read_ble32 (&dysym[76], bin->big_endian);
bin->ntoc = bin->dysymtab.ntoc;
if (bin->ntoc > 0) {
if (!(bin->toc = calloc (bin->ntoc, sizeof(struct dylib_table_of_contents)))) {
perror ("calloc (toc)");
return false;
}
if (!UT32_MUL (&size_tab, bin->ntoc, sizeof (struct dylib_table_of_contents))){
R_FREE (bin->toc);
return false;
}
if (!size_tab){
R_FREE (bin->toc);
return false;
}
if (bin->dysymtab.tocoff > bin->size || bin->dysymtab.tocoff + size_tab > bin->size){
R_FREE (bin->toc);
return false;
}
for (i = 0; i < bin->ntoc; i++) {
len = r_buf_read_at(bin->b, bin->dysymtab.tocoff +
i * sizeof (struct dylib_table_of_contents),
dytoc, sizeof (struct dylib_table_of_contents));
if (len != sizeof (struct dylib_table_of_contents)) {
bprintf ("Error: read (toc)\n");
R_FREE (bin->toc);
return false;
}
bin->toc[i].symbol_index = r_read_ble32 (&dytoc[0], bin->big_endian);
bin->toc[i].module_index = r_read_ble32 (&dytoc[4], bin->big_endian);
}
}
bin->nmodtab = bin->dysymtab.nmodtab;
if (bin->nmodtab > 0) {
if (!(bin->modtab = calloc (bin->nmodtab, sizeof(struct MACH0_(dylib_module))))) {
perror ("calloc (modtab)");
return false;
}
if (!UT32_MUL (&size_tab, bin->nmodtab, sizeof (struct MACH0_(dylib_module)))){
R_FREE (bin->modtab);
return false;
}
if (!size_tab){
R_FREE (bin->modtab);
return false;
}
if (bin->dysymtab.modtaboff > bin->size || \
bin->dysymtab.modtaboff + size_tab > bin->size){
R_FREE (bin->modtab);
return false;
}
for (i = 0; i < bin->nmodtab; i++) {
len = r_buf_read_at(bin->b, bin->dysymtab.modtaboff +
i * sizeof (struct MACH0_(dylib_module)),
dymod, sizeof (struct MACH0_(dylib_module)));
if (len == -1) {
bprintf ("Error: read (modtab)\n");
R_FREE (bin->modtab);
return false;
}
bin->modtab[i].module_name = r_read_ble32 (&dymod[0], bin->big_endian);
bin->modtab[i].iextdefsym = r_read_ble32 (&dymod[4], bin->big_endian);
bin->modtab[i].nextdefsym = r_read_ble32 (&dymod[8], bin->big_endian);
bin->modtab[i].irefsym = r_read_ble32 (&dymod[12], bin->big_endian);
bin->modtab[i].nrefsym = r_read_ble32 (&dymod[16], bin->big_endian);
bin->modtab[i].ilocalsym = r_read_ble32 (&dymod[20], bin->big_endian);
bin->modtab[i].nlocalsym = r_read_ble32 (&dymod[24], bin->big_endian);
bin->modtab[i].iextrel = r_read_ble32 (&dymod[28], bin->big_endian);
bin->modtab[i].nextrel = r_read_ble32 (&dymod[32], bin->big_endian);
bin->modtab[i].iinit_iterm = r_read_ble32 (&dymod[36], bin->big_endian);
bin->modtab[i].ninit_nterm = r_read_ble32 (&dymod[40], bin->big_endian);
#if R_BIN_MACH064
bin->modtab[i].objc_module_info_size = r_read_ble32 (&dymod[44], bin->big_endian);
bin->modtab[i].objc_module_info_addr = r_read_ble64 (&dymod[48], bin->big_endian);
#else
bin->modtab[i].objc_module_info_addr = r_read_ble32 (&dymod[44], bin->big_endian);
bin->modtab[i].objc_module_info_size = r_read_ble32 (&dymod[48], bin->big_endian);
#endif
}
}
bin->nindirectsyms = bin->dysymtab.nindirectsyms;
if (bin->nindirectsyms > 0) {
if (!(bin->indirectsyms = calloc (bin->nindirectsyms, sizeof(ut32)))) {
perror ("calloc (indirectsyms)");
return false;
}
if (!UT32_MUL (&size_tab, bin->nindirectsyms, sizeof (ut32))){
R_FREE (bin->indirectsyms);
return false;
}
if (!size_tab){
R_FREE (bin->indirectsyms);
return false;
}
if (bin->dysymtab.indirectsymoff > bin->size || \
bin->dysymtab.indirectsymoff + size_tab > bin->size){
R_FREE (bin->indirectsyms);
return false;
}
for (i = 0; i < bin->nindirectsyms; i++) {
len = r_buf_read_at (bin->b, bin->dysymtab.indirectsymoff + i * sizeof (ut32), idsyms, 4);
if (len == -1) {
bprintf ("Error: read (indirect syms)\n");
R_FREE (bin->indirectsyms);
return false;
}
bin->indirectsyms[i] = r_read_ble32 (&idsyms[0], bin->big_endian);
}
}
/* TODO extrefsyms, extrel, locrel */
return true;
}
static bool parse_signature(struct MACH0_(obj_t) *bin, ut64 off) {
int i,len;
ut32 data;
bin->signature = NULL;
struct linkedit_data_command link = {};
ut8 lit[sizeof (struct linkedit_data_command)] = {0};
struct blob_index_t idx = {};
struct super_blob_t super = {};
if (off > bin->size || off + sizeof (struct linkedit_data_command) > bin->size) {
return false;
}
len = r_buf_read_at (bin->b, off, lit, sizeof (struct linkedit_data_command));
if (len != sizeof (struct linkedit_data_command)) {
bprintf ("Failed to get data while parsing LC_CODE_SIGNATURE command\n");
return false;
}
link.cmd = r_read_ble32 (&lit[0], bin->big_endian);
link.cmdsize = r_read_ble32 (&lit[4], bin->big_endian);
link.dataoff = r_read_ble32 (&lit[8], bin->big_endian);
link.datasize = r_read_ble32 (&lit[12], bin->big_endian);
data = link.dataoff;
if (data > bin->size || data + sizeof (struct super_blob_t) > bin->size) {
bin->signature = (ut8 *)strdup ("Malformed entitlement");
return true;
}
super.blob.magic = r_read_ble32 (bin->b->buf + data, little_);
super.blob.length = r_read_ble32 (bin->b->buf + data + 4, little_);
super.count = r_read_ble32 (bin->b->buf + data + 8, little_);
for (i = 0; i < super.count; ++i) {
if ((ut8 *)(bin->b->buf + data + i) > (ut8 *)(bin->b->buf + bin->size)) {
bin->signature = (ut8 *)strdup ("Malformed entitlement");
break;
}
struct blob_index_t bi;
if (r_buf_read_at (bin->b, data + 12 + (i * sizeof (struct blob_index_t)),
(ut8*)&bi, sizeof (struct blob_index_t)) < sizeof (struct blob_index_t)) {
break;
}
idx.type = r_read_ble32 (&bi.type, little_);
idx.offset = r_read_ble32 (&bi.offset, little_);
if (idx.type == CSSLOT_ENTITLEMENTS) {
ut64 off = data + idx.offset;
if (off > bin->size || off + sizeof (struct blob_t) > bin->size) {
bin->signature = (ut8 *)strdup ("Malformed entitlement");
break;
}
struct blob_t entitlements = {};
entitlements.magic = r_read_ble32 (bin->b->buf + off, little_);
entitlements.length = r_read_ble32 (bin->b->buf + off + 4, little_);
len = entitlements.length - sizeof(struct blob_t);
if (len <= bin->size && len > 1) {
bin->signature = calloc (1, len + 1);
if (bin->signature) {
ut8 *src = bin->b->buf + off + sizeof (struct blob_t);
if (off + sizeof (struct blob_t) + len < bin->b->length) {
memcpy (bin->signature, src, len);
bin->signature[len] = '\0';
return true;
}
bin->signature = (ut8 *)strdup ("Malformed entitlement");
return true;
}
} else {
bin->signature = (ut8 *)strdup ("Malformed entitlement");
}
}
}
if (!bin->signature) {
bin->signature = (ut8 *)strdup ("No entitlement found");
}
return true;
}
static int parse_thread(struct MACH0_(obj_t)* bin, struct load_command *lc, ut64 off, bool is_first_thread) {
ut64 ptr_thread, pc = UT64_MAX, pc_offset = UT64_MAX;
ut32 flavor, count;
ut8 *arw_ptr = NULL;
int arw_sz, len = 0;
ut8 thc[sizeof (struct thread_command)] = {0};
if (off > bin->size || off + sizeof (struct thread_command) > bin->size)
return false;
len = r_buf_read_at (bin->b, off, thc, 8);
if (len < 1)
goto wrong_read;
bin->thread.cmd = r_read_ble32 (&thc[0], bin->big_endian);
bin->thread.cmdsize = r_read_ble32 (&thc[4], bin->big_endian);
flavor = r_read_ble32 (bin->b->buf + off + sizeof(struct thread_command), bin->big_endian);
if (len == -1)
goto wrong_read;
if (off + sizeof (struct thread_command) + sizeof (flavor) > bin->size || \
off + sizeof (struct thread_command) + sizeof (flavor) + sizeof (ut32) > bin->size)
return false;
// TODO: use count for checks
count = r_read_ble32 (bin->b->buf + off + sizeof (struct thread_command) + sizeof(flavor),
bin->big_endian);
ptr_thread = off + sizeof (struct thread_command) + sizeof (flavor) + sizeof (count);
if (ptr_thread > bin->size)
return false;
switch (bin->hdr.cputype) {
case CPU_TYPE_I386:
case CPU_TYPE_X86_64:
switch (flavor) {
case X86_THREAD_STATE32:
if (ptr_thread + sizeof (struct x86_thread_state32) > bin->size)
return false;
if ((len = r_buf_fread_at (bin->b, ptr_thread,
(ut8*)&bin->thread_state.x86_32, "16i", 1)) == -1) {
bprintf ("Error: read (thread state x86_32)\n");
return false;
}
pc = bin->thread_state.x86_32.eip;
pc_offset = ptr_thread + r_offsetof(struct x86_thread_state32, eip);
arw_ptr = (ut8 *)&bin->thread_state.x86_32;
arw_sz = sizeof (struct x86_thread_state32);
break;
case X86_THREAD_STATE64:
if (ptr_thread + sizeof (struct x86_thread_state64) > bin->size)
return false;
if ((len = r_buf_fread_at (bin->b, ptr_thread,
(ut8*)&bin->thread_state.x86_64, "32l", 1)) == -1) {
bprintf ("Error: read (thread state x86_64)\n");
return false;
}
pc = bin->thread_state.x86_64.rip;
pc_offset = ptr_thread + r_offsetof(struct x86_thread_state64, rip);
arw_ptr = (ut8 *)&bin->thread_state.x86_64;
arw_sz = sizeof (struct x86_thread_state64);
break;
//default: bprintf ("Unknown type\n");
}
break;
case CPU_TYPE_POWERPC:
case CPU_TYPE_POWERPC64:
if (flavor == X86_THREAD_STATE32) {
if (ptr_thread + sizeof (struct ppc_thread_state32) > bin->size)
return false;
if ((len = r_buf_fread_at (bin->b, ptr_thread,
(ut8*)&bin->thread_state.ppc_32, bin->big_endian?"40I":"40i", 1)) == -1) {
bprintf ("Error: read (thread state ppc_32)\n");
return false;
}
pc = bin->thread_state.ppc_32.srr0;
pc_offset = ptr_thread + r_offsetof(struct ppc_thread_state32, srr0);
arw_ptr = (ut8 *)&bin->thread_state.ppc_32;
arw_sz = sizeof (struct ppc_thread_state32);
} else if (flavor == X86_THREAD_STATE64) {
if (ptr_thread + sizeof (struct ppc_thread_state64) > bin->size)
return false;
if ((len = r_buf_fread_at (bin->b, ptr_thread,
(ut8*)&bin->thread_state.ppc_64, bin->big_endian?"34LI3LI":"34li3li", 1)) == -1) {
bprintf ("Error: read (thread state ppc_64)\n");
return false;
}
pc = bin->thread_state.ppc_64.srr0;
pc_offset = ptr_thread + r_offsetof(struct ppc_thread_state64, srr0);
arw_ptr = (ut8 *)&bin->thread_state.ppc_64;
arw_sz = sizeof (struct ppc_thread_state64);
}
break;
case CPU_TYPE_ARM:
if (ptr_thread + sizeof (struct arm_thread_state32) > bin->size)
return false;
if ((len = r_buf_fread_at (bin->b, ptr_thread,
(ut8*)&bin->thread_state.arm_32, bin->big_endian?"17I":"17i", 1)) == -1) {
bprintf ("Error: read (thread state arm)\n");
return false;
}
pc = bin->thread_state.arm_32.r15;
pc_offset = ptr_thread + r_offsetof (struct arm_thread_state32, r15);
arw_ptr = (ut8 *)&bin->thread_state.arm_32;
arw_sz = sizeof (struct arm_thread_state32);
break;
case CPU_TYPE_ARM64:
if (ptr_thread + sizeof (struct arm_thread_state64) > bin->size) {
return false;
}
if ((len = r_buf_fread_at(bin->b, ptr_thread,
(ut8*)&bin->thread_state.arm_64, bin->big_endian?"34LI1I":"34Li1i", 1)) == -1) {
bprintf ("Error: read (thread state arm)\n");
return false;
}
pc = r_read_be64 (&bin->thread_state.arm_64.pc);
pc_offset = ptr_thread + r_offsetof (struct arm_thread_state64, pc);
arw_ptr = (ut8*)&bin->thread_state.arm_64;
arw_sz = sizeof (struct arm_thread_state64);
break;
default:
bprintf ("Error: read (unknown thread state structure)\n");
return false;
}
	// TODO: this shouldn't be a bprintf...
if (arw_ptr && arw_sz > 0) {
int i;
ut8 *p = arw_ptr;
bprintf ("arw ");
for (i = 0; i < arw_sz; i++) {
bprintf ("%02x", 0xff & p[i]);
}
bprintf ("\n");
}
if (is_first_thread) {
bin->main_cmd = *lc;
if (pc != UT64_MAX) {
bin->entry = pc;
}
if (pc_offset != UT64_MAX) {
sdb_num_set (bin->kv, "mach0.entry.offset", pc_offset, 0);
}
}
return true;
wrong_read:
bprintf ("Error: read (thread)\n");
return false;
}
static int parse_function_starts (struct MACH0_(obj_t)* bin, ut64 off) {
struct linkedit_data_command fc;
ut8 sfc[sizeof (struct linkedit_data_command)] = {0};
ut8 *buf;
int len;
if (off > bin->size || off + sizeof (struct linkedit_data_command) > bin->size) {
bprintf ("Likely overflow while parsing"
" LC_FUNCTION_STARTS command\n");
}
bin->func_start = NULL;
len = r_buf_read_at (bin->b, off, sfc, sizeof (struct linkedit_data_command));
if (len < 1) {
bprintf ("Failed to get data while parsing"
" LC_FUNCTION_STARTS command\n");
}
fc.cmd = r_read_ble32 (&sfc[0], bin->big_endian);
fc.cmdsize = r_read_ble32 (&sfc[4], bin->big_endian);
fc.dataoff = r_read_ble32 (&sfc[8], bin->big_endian);
fc.datasize = r_read_ble32 (&sfc[12], bin->big_endian);
buf = calloc (1, fc.datasize + 1);
if (!buf) {
bprintf ("Failed to allocate buffer\n");
return false;
}
bin->func_size = fc.datasize;
if (fc.dataoff > bin->size || fc.dataoff + fc.datasize > bin->size) {
free (buf);
bprintf ("Likely overflow while parsing "
"LC_FUNCTION_STARTS command\n");
return false;
}
len = r_buf_read_at (bin->b, fc.dataoff, buf, fc.datasize);
if (len != fc.datasize) {
free (buf);
bprintf ("Failed to get data while parsing"
" LC_FUNCTION_STARTS\n");
return false;
}
buf[fc.datasize] = 0; // null-terminated buffer
bin->func_start = buf;
return true;
}
static int parse_dylib(struct MACH0_(obj_t)* bin, ut64 off) {
struct dylib_command dl;
int lib, len;
ut8 sdl[sizeof (struct dylib_command)] = {0};
if (off > bin->size || off + sizeof (struct dylib_command) > bin->size)
return false;
lib = bin->nlibs - 1;
if (!(bin->libs = realloc (bin->libs, bin->nlibs * R_BIN_MACH0_STRING_LENGTH))) {
perror ("realloc (libs)");
return false;
}
len = r_buf_read_at (bin->b, off, sdl, sizeof (struct dylib_command));
if (len < 1) {
bprintf ("Error: read (dylib)\n");
return false;
}
dl.cmd = r_read_ble32 (&sdl[0], bin->big_endian);
dl.cmdsize = r_read_ble32 (&sdl[4], bin->big_endian);
dl.dylib.name = r_read_ble32 (&sdl[8], bin->big_endian);
dl.dylib.timestamp = r_read_ble32 (&sdl[12], bin->big_endian);
dl.dylib.current_version = r_read_ble32 (&sdl[16], bin->big_endian);
dl.dylib.compatibility_version = r_read_ble32 (&sdl[20], bin->big_endian);
if (off + dl.dylib.name > bin->size ||\
off + dl.dylib.name + R_BIN_MACH0_STRING_LENGTH > bin->size)
return false;
len = r_buf_read_at (bin->b, off+dl.dylib.name, (ut8*)bin->libs[lib], R_BIN_MACH0_STRING_LENGTH);
if (len < 1) {
bprintf ("Error: read (dylib str)");
return false;
}
return true;
}
static const char *cmd_to_string(ut32 cmd) {
switch (cmd) {
case LC_DATA_IN_CODE:
return "LC_DATA_IN_CODE";
case LC_RPATH:
return "LC_RPATH";
case LC_SEGMENT:
return "LC_SEGMENT";
case LC_SEGMENT_64:
return "LC_SEGMENT_64";
case LC_SYMTAB:
return "LC_SYMTAB";
case LC_SYMSEG:
return "LC_SYMSEG";
case LC_DYSYMTAB:
return "LC_DYSYMTAB";
case LC_FUNCTION_STARTS:
return "LC_FUNCTION_STARTS";
case LC_DYLIB_CODE_SIGN_DRS:
return "LC_DYLIB_CODE_SIGN_DRS";
case LC_VERSION_MIN_MACOSX:
return "LC_VERSION_MIN_MACOSX";
case LC_VERSION_MIN_IPHONEOS:
return "LC_VERSION_MIN_IPHONEOS";
case LC_VERSION_MIN_TVOS:
return "LC_VERSION_MIN_TVOS";
case LC_VERSION_MIN_WATCHOS:
return "LC_VERSION_MIN_WATCHOS";
case LC_DYLD_INFO:
return "LC_DYLD_INFO";
case LC_SOURCE_VERSION:
return "LC_SOURCE_VERSION";
case LC_MAIN:
return "LC_MAIN";
case LC_UUID:
return "LC_UUID";
case LC_ENCRYPTION_INFO_64:
return "LC_ENCRYPTION_INFO_64";
case LC_ENCRYPTION_INFO:
return "LC_ENCRYPTION_INFO";
case LC_LOAD_DYLINKER:
return "LC_LOAD_DYLINKER";
case LC_LOAD_DYLIB:
return "LC_LOAD_DYLIB";
case LC_THREAD:
return "LC_THREAD";
case LC_UNIXTHREAD:
return "LC_UNIXTHREAD";
case LC_IDENT:
return "LC_IDENT";
}
return "";
}
static int init_items(struct MACH0_(obj_t)* bin) {
struct load_command lc = {0, 0};
ut8 loadc[sizeof (struct load_command)] = {0};
bool is_first_thread = true;
ut64 off = 0LL;
int i, len;
bin->uuidn = 0;
bin->os = 0;
bin->has_crypto = 0;
if (bin->hdr.sizeofcmds > bin->size) {
bprintf ("Warning: chopping hdr.sizeofcmds\n");
bin->hdr.sizeofcmds = bin->size - 128;
//return false;
}
//bprintf ("Commands: %d\n", bin->hdr.ncmds);
for (i = 0, off = sizeof (struct MACH0_(mach_header)); \
i < bin->hdr.ncmds; i++, off += lc.cmdsize) {
if (off > bin->size || off + sizeof (struct load_command) > bin->size){
bprintf ("mach0: out of bounds command\n");
return false;
}
len = r_buf_read_at (bin->b, off, loadc, sizeof (struct load_command));
if (len < 1) {
bprintf ("Error: read (lc) at 0x%08"PFMT64x"\n", off);
return false;
}
lc.cmd = r_read_ble32 (&loadc[0], bin->big_endian);
lc.cmdsize = r_read_ble32 (&loadc[4], bin->big_endian);
if (lc.cmdsize < 1 || off + lc.cmdsize > bin->size) {
bprintf ("Warning: mach0_header %d = cmdsize<1.\n", i);
break;
}
// TODO: a different format for each cmd
sdb_num_set (bin->kv, sdb_fmt (0, "mach0_cmd_%d.offset", i), off, 0);
sdb_set (bin->kv, sdb_fmt (0, "mach0_cmd_%d.format", i), "xd cmd size", 0);
//bprintf ("%d\n", lc.cmd);
switch (lc.cmd) {
case LC_DATA_IN_CODE:
sdb_set (bin->kv, sdb_fmt (0, "mach0_cmd_%d.cmd", i), "data_in_code", 0);
// TODO table of non-instructions in __text
break;
case LC_RPATH:
sdb_set (bin->kv, sdb_fmt (0, "mach0_cmd_%d.cmd", i), "rpath", 0);
//bprintf ("--->\n");
break;
case LC_SEGMENT_64:
case LC_SEGMENT:
sdb_set (bin->kv, sdb_fmt (0, "mach0_cmd_%d.cmd", i), "segment", 0);
bin->nsegs++;
if (!parse_segments (bin, off)) {
bprintf ("error parsing segment\n");
bin->nsegs--;
return false;
}
break;
case LC_SYMTAB:
sdb_set (bin->kv, sdb_fmt (0, "mach0_cmd_%d.cmd", i), "symtab", 0);
if (!parse_symtab (bin, off)) {
bprintf ("error parsing symtab\n");
return false;
}
break;
case LC_DYSYMTAB:
sdb_set (bin->kv, sdb_fmt (0, "mach0_cmd_%d.cmd", i), "dysymtab", 0);
if (!parse_dysymtab(bin, off)) {
bprintf ("error parsing dysymtab\n");
return false;
}
break;
case LC_DYLIB_CODE_SIGN_DRS:
sdb_set (bin->kv, sdb_fmt (0, "mach0_cmd_%d.cmd", i), "dylib_code_sign_drs", 0);
//bprintf ("[mach0] code is signed\n");
break;
case LC_VERSION_MIN_MACOSX:
sdb_set (bin->kv, sdb_fmt (0, "mach0_cmd_%d.cmd", i), "version_min_macosx", 0);
bin->os = 1;
// set OS = osx
//bprintf ("[mach0] Requires OSX >= x\n");
break;
case LC_VERSION_MIN_IPHONEOS:
sdb_set (bin->kv, sdb_fmt (0, "mach0_cmd_%d.cmd", i), "version_min_iphoneos", 0);
bin->os = 2;
// set OS = ios
//bprintf ("[mach0] Requires iOS >= x\n");
break;
case LC_VERSION_MIN_TVOS:
sdb_set (bin->kv, sdb_fmt (0, "mach0_cmd_%d.cmd", i), "version_min_tvos", 0);
bin->os = 4;
break;
case LC_VERSION_MIN_WATCHOS:
sdb_set (bin->kv, sdb_fmt (0, "mach0_cmd_%d.cmd", i), "version_min_watchos", 0);
bin->os = 3;
break;
case LC_UUID:
sdb_set (bin->kv, sdb_fmt (0, "mach0_cmd_%d.cmd", i), "uuid", 0);
{
struct uuid_command uc = {0};
if (off + sizeof (struct uuid_command) > bin->size) {
bprintf ("UUID out of obunds\n");
return false;
}
if (r_buf_fread_at (bin->b, off, (ut8*)&uc, "24c", 1) != -1) {
char key[128];
char val[128];
snprintf (key, sizeof (key)-1, "uuid.%d", bin->uuidn++);
r_hex_bin2str ((ut8*)&uc.uuid, 16, val);
sdb_set (bin->kv, key, val, 0);
//for (i=0;i<16; i++) bprintf ("%02x%c", uc.uuid[i], (i==15)?'\n':'-');
}
}
break;
case LC_ENCRYPTION_INFO_64:
/* TODO: the struct is probably different here */
case LC_ENCRYPTION_INFO:
sdb_set (bin->kv, sdb_fmt (0, "mach0_cmd_%d.cmd", i), "encryption_info", 0);
{
struct MACH0_(encryption_info_command) eic = {0};
ut8 seic[sizeof (struct MACH0_(encryption_info_command))] = {0};
if (off + sizeof (struct MACH0_(encryption_info_command)) > bin->size) {
bprintf ("encryption info out of bounds\n");
return false;
}
if (r_buf_read_at (bin->b, off, seic, sizeof (struct MACH0_(encryption_info_command))) != -1) {
eic.cmd = r_read_ble32 (&seic[0], bin->big_endian);
eic.cmdsize = r_read_ble32 (&seic[4], bin->big_endian);
eic.cryptoff = r_read_ble32 (&seic[8], bin->big_endian);
eic.cryptsize = r_read_ble32 (&seic[12], bin->big_endian);
eic.cryptid = r_read_ble32 (&seic[16], bin->big_endian);
bin->has_crypto = eic.cryptid;
sdb_set (bin->kv, "crypto", "true", 0);
sdb_num_set (bin->kv, "cryptid", eic.cryptid, 0);
sdb_num_set (bin->kv, "cryptoff", eic.cryptoff, 0);
sdb_num_set (bin->kv, "cryptsize", eic.cryptsize, 0);
sdb_num_set (bin->kv, "cryptheader", off, 0);
} }
break;
case LC_LOAD_DYLINKER:
{
sdb_set (bin->kv, sdb_fmt (0, "mach0_cmd_%d.cmd", i), "dylinker", 0);
free (bin->intrp);
bin->intrp = NULL;
//bprintf ("[mach0] load dynamic linker\n");
struct dylinker_command dy = {0};
ut8 sdy[sizeof (struct dylinker_command)] = {0};
if (off + sizeof (struct dylinker_command) > bin->size){
bprintf ("Warning: Cannot parse dylinker command\n");
return false;
}
if (r_buf_read_at (bin->b, off, sdy, sizeof (struct dylinker_command)) == -1) {
bprintf ("Warning: read (LC_DYLD_INFO) at 0x%08"PFMT64x"\n", off);
} else {
dy.cmd = r_read_ble32 (&sdy[0], bin->big_endian);
dy.cmdsize = r_read_ble32 (&sdy[4], bin->big_endian);
dy.name = r_read_ble32 (&sdy[8], bin->big_endian);
int len = dy.cmdsize;
char *buf = malloc (len+1);
if (buf) {
// wtf @ off + 0xc ?
r_buf_read_at (bin->b, off + 0xc, (ut8*)buf, len);
buf[len] = 0;
free (bin->intrp);
bin->intrp = buf;
}
}
}
break;
case LC_MAIN:
{
struct {
ut64 eo;
ut64 ss;
} ep = {0};
ut8 sep[2 * sizeof (ut64)] = {0};
sdb_set (bin->kv, sdb_fmt (0, "mach0_cmd_%d.cmd", i), "main", 0);
if (!is_first_thread) {
bprintf("Error: LC_MAIN with other threads\n");
return false;
}
if (off + 8 > bin->size || off + sizeof (ep) > bin->size) {
bprintf ("invalid command size for main\n");
return false;
}
r_buf_read_at (bin->b, off + 8, sep, 2 * sizeof (ut64));
ep.eo = r_read_ble64 (&sep[0], bin->big_endian);
ep.ss = r_read_ble64 (&sep[8], bin->big_endian);
bin->entry = ep.eo;
bin->main_cmd = lc;
sdb_num_set (bin->kv, "mach0.entry.offset", off + 8, 0);
sdb_num_set (bin->kv, "stacksize", ep.ss, 0);
is_first_thread = false;
}
break;
case LC_UNIXTHREAD:
sdb_set (bin->kv, sdb_fmt (0, "mach0_cmd_%d.cmd", i), "unixthread", 0);
if (!is_first_thread) {
bprintf("Error: LC_UNIXTHREAD with other threads\n");
return false;
}
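			/* fall through: an LC_UNIXTHREAD payload is parsed like LC_THREAD */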
case LC_THREAD:
sdb_set (bin->kv, sdb_fmt (0, "mach0_cmd_%d.cmd", i), "thread", 0);
if (!parse_thread (bin, &lc, off, is_first_thread)) {
bprintf ("Cannot parse thread\n");
return false;
}
is_first_thread = false;
break;
case LC_LOAD_DYLIB:
case LC_LOAD_WEAK_DYLIB:
sdb_set (bin->kv, sdb_fmt (0, "mach0_cmd_%d.cmd", i), "load_dylib", 0);
bin->nlibs++;
if (!parse_dylib (bin, off)){
bprintf ("Cannot parse dylib\n");
bin->nlibs--;
return false;
}
break;
case LC_DYLD_INFO:
case LC_DYLD_INFO_ONLY:
{
ut8 dyldi[sizeof (struct dyld_info_command)] = {0};
sdb_set (bin->kv, sdb_fmt (0, "mach0_cmd_%d.cmd", i), "dyld_info", 0);
bin->dyld_info = calloc (1, sizeof (struct dyld_info_command));
if (bin->dyld_info) {
if (off + sizeof (struct dyld_info_command) > bin->size){
bprintf ("Cannot parse dyldinfo\n");
R_FREE (bin->dyld_info);
return false;
}
if (r_buf_read_at (bin->b, off, dyldi, sizeof (struct dyld_info_command)) == -1) {
free (bin->dyld_info);
bin->dyld_info = NULL;
bprintf ("Error: read (LC_DYLD_INFO) at 0x%08"PFMT64x"\n", off);
} else {
bin->dyld_info->cmd = r_read_ble32 (&dyldi[0], bin->big_endian);
bin->dyld_info->cmdsize = r_read_ble32 (&dyldi[4], bin->big_endian);
bin->dyld_info->rebase_off = r_read_ble32 (&dyldi[8], bin->big_endian);
bin->dyld_info->rebase_size = r_read_ble32 (&dyldi[12], bin->big_endian);
bin->dyld_info->bind_off = r_read_ble32 (&dyldi[16], bin->big_endian);
bin->dyld_info->bind_size = r_read_ble32 (&dyldi[20], bin->big_endian);
bin->dyld_info->weak_bind_off = r_read_ble32 (&dyldi[24], bin->big_endian);
bin->dyld_info->weak_bind_size = r_read_ble32 (&dyldi[28], bin->big_endian);
bin->dyld_info->lazy_bind_off = r_read_ble32 (&dyldi[32], bin->big_endian);
bin->dyld_info->lazy_bind_size = r_read_ble32 (&dyldi[36], bin->big_endian);
bin->dyld_info->export_off = r_read_ble32 (&dyldi[40], bin->big_endian);
bin->dyld_info->export_size = r_read_ble32 (&dyldi[44], bin->big_endian);
}
}
}
break;
case LC_CODE_SIGNATURE:
parse_signature (bin, off);
sdb_set (bin->kv, sdb_fmt (0, "mach0_cmd_%d.cmd", i), "signature", 0);
/* ut32 dataoff
// ut32 datasize */
break;
case LC_SOURCE_VERSION:
sdb_set (bin->kv, sdb_fmt (0, "mach0_cmd_%d.cmd", i), "version", 0);
/* uint64_t version; */
/* A.B.C.D.E packed as a24.b10.c10.d10.e10 */
//bprintf ("mach0: TODO: Show source version\n");
break;
case LC_SEGMENT_SPLIT_INFO:
sdb_set (bin->kv, sdb_fmt (0, "mach0_cmd_%d.cmd", i), "split_info", 0);
/* TODO */
break;
case LC_FUNCTION_STARTS:
sdb_set (bin->kv, sdb_fmt (0, "mach0_cmd_%d.cmd", i), "function_starts", 0);
if (!parse_function_starts (bin, off)) {
bprintf ("Cannot parse LC_FUNCTION_STARTS\n");
}
break;
case LC_REEXPORT_DYLIB:
sdb_set (bin->kv, sdb_fmt (0, "mach0_cmd_%d.cmd", i), "dylib", 0);
/* TODO */
break;
default:
//bprintf ("mach0: Unknown header command %x\n", lc.cmd);
break;
}
}
return true;
}
static int init(struct MACH0_(obj_t)* bin) {
union {
ut16 word;
ut8 byte[2];
} endian = { 1 };
little_ = endian.byte[0];
if (!init_hdr(bin)) {
bprintf ("Warning: File is not MACH0\n");
return false;
}
if (!init_items (bin)) {
bprintf ("Warning: Cannot initialize items\n");
}
bin->baddr = MACH0_(get_baddr)(bin);
return true;
}
void* MACH0_(mach0_free)(struct MACH0_(obj_t)* bin) {
if (!bin) {
return NULL;
}
free (bin->segs);
free (bin->sects);
free (bin->symtab);
free (bin->symstr);
free (bin->indirectsyms);
free (bin->imports_by_ord);
free (bin->dyld_info);
free (bin->toc);
free (bin->modtab);
free (bin->libs);
free (bin->func_start);
free (bin->signature);
r_buf_free (bin->b);
free (bin);
return NULL;
}
struct MACH0_(obj_t)* MACH0_(mach0_new)(const char* file, bool verbose) {
ut8 *buf;
struct MACH0_(obj_t) *bin;
if (!(bin = malloc (sizeof (struct MACH0_(obj_t))))) {
return NULL;
}
memset (bin, 0, sizeof (struct MACH0_(obj_t)));
bin->verbose = verbose;
bin->file = file;
if (!(buf = (ut8*)r_file_slurp (file, &bin->size))) {
return MACH0_(mach0_free)(bin);
}
bin->b = r_buf_new ();
if (!r_buf_set_bytes(bin->b, buf, bin->size)) {
free (buf);
return MACH0_(mach0_free)(bin);
}
free (buf);
bin->dyld_info = NULL;
if (!init(bin)) {
return MACH0_(mach0_free)(bin);
}
bin->imports_by_ord_size = 0;
bin->imports_by_ord = NULL;
return bin;
}
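/* A minimal usage sketch (not part of the parser; the path below is
 * hypothetical and error handling is elided): load a file, walk its
 * sections, then release everything with mach0_free. */
#if 0
struct MACH0_(obj_t) *m = MACH0_(mach0_new) ("/path/to/binary", false);
if (m) {
	struct section_t *sections = MACH0_(get_sections) (m);
	if (sections) {
		int i;
		for (i = 0; !sections[i].last; i++) {
			printf ("%s 0x%"PFMT64x"\n", sections[i].name, sections[i].addr);
		}
		free (sections);
	}
	MACH0_(mach0_free) (m);
}
#endif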
struct MACH0_(obj_t)* MACH0_(new_buf)(RBuffer *buf, bool verbose) {
struct MACH0_(obj_t) *bin = R_NEW0 (struct MACH0_(obj_t));
if (!bin) {
return NULL;
}
bin->kv = sdb_new (NULL, "bin.mach0", 0);
bin->b = r_buf_new ();
bin->size = buf->length;
bin->verbose = verbose;
if (!r_buf_set_bytes (bin->b, buf->buf, bin->size)){
return MACH0_(mach0_free) (bin);
}
if (!init (bin)) {
return MACH0_(mach0_free)(bin);
}
return bin;
}
// prot: r = 1, w = 2, x = 4
// perm: r = 4, w = 2, x = 1
static int prot2perm (int x) {
int r = 0;
if (x&1) r |= 4;
if (x&2) r |= 2;
if (x&4) r |= 1;
return r;
}
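/* Example (values follow the tables above): a segment with initprot 5
 * (r-x) maps to perm 5 (4|1), and initprot 3 (rw-) maps to perm 6 (4|2). */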
struct section_t* MACH0_(get_sections)(struct MACH0_(obj_t)* bin) {
struct section_t *sections;
char segname[32], sectname[32];
int i, j, to;
if (!bin) {
return NULL;
}
/* for core files */
if (bin->nsects < 1 && bin->nsegs > 0) {
struct MACH0_(segment_command) *seg;
if (!(sections = calloc ((bin->nsegs + 1), sizeof (struct section_t)))) {
return NULL;
}
for (i = 0; i < bin->nsegs; i++) {
seg = &bin->segs[i];
sections[i].addr = seg->vmaddr;
sections[i].offset = seg->fileoff;
sections[i].size = seg->vmsize;
sections[i].align = 4096;
sections[i].flags = seg->flags;
r_str_ncpy (sections[i].name, seg->segname, sizeof (sections[i].name) - 1);
sections[i].srwx = prot2perm (seg->initprot);
sections[i].last = 0;
}
sections[i].last = 1;
return sections;
}
if (!bin->sects) {
return NULL;
}
to = R_MIN (bin->nsects, 128); // limit number of sections here to avoid fuzzed bins
if (to < 1) {
return NULL;
}
if (!(sections = malloc ((bin->nsects + 1) * sizeof (struct section_t)))) {
return NULL;
}
for (i = 0; i < to; i++) {
sections[i].offset = (ut64)bin->sects[i].offset;
sections[i].addr = (ut64)bin->sects[i].addr;
sections[i].size = (ut64)bin->sects[i].size;
sections[i].align = bin->sects[i].align;
sections[i].flags = bin->sects[i].flags;
r_str_ncpy (sectname, bin->sects[i].sectname, sizeof (sectname) - 1);
// hack to support multiple sections with same name
// snprintf (segname, sizeof (segname), "%d", i); // wtf
snprintf (segname, sizeof (segname), "%d.%s", i, bin->sects[i].segname);
for (j = 0; j < bin->nsegs; j++) {
if (sections[i].addr >= bin->segs[j].vmaddr &&
sections[i].addr < (bin->segs[j].vmaddr + bin->segs[j].vmsize)) {
sections[i].srwx = prot2perm (bin->segs[j].initprot);
break;
}
}
// XXX: if two sections have the same name are merged :O
// XXX: append section index in flag name maybe?
// XXX: do not load out of bound sections?
// XXX: load segments instead of sections? what about PAGEZERO and ...
snprintf (sections[i].name, sizeof (sections[i].name), "%s.%s", segname, sectname);
sections[i].last = 0;
}
sections[i].last = 1;
return sections;
}
static int parse_import_stub(struct MACH0_(obj_t)* bin, struct symbol_t *symbol, int idx) {
int i, j, nsyms, stridx;
const char *symstr;
if (idx < 0) {
return 0;
}
symbol->offset = 0LL;
symbol->addr = 0LL;
symbol->name[0] = '\0';
if (!bin || !bin->sects) {
return false;
}
for (i = 0; i < bin->nsects; i++) {
if ((bin->sects[i].flags & SECTION_TYPE) == S_SYMBOL_STUBS && bin->sects[i].reserved2 > 0) {
nsyms = (int)(bin->sects[i].size / bin->sects[i].reserved2);
if (nsyms > bin->size) {
bprintf ("mach0: Invalid symbol table size\n");
}
for (j = 0; j < nsyms; j++) {
if (bin->sects) {
if (bin->sects[i].reserved1 + j >= bin->nindirectsyms) {
continue;
}
}
if (bin->indirectsyms) {
if (idx != bin->indirectsyms[bin->sects[i].reserved1 + j]) {
continue;
}
}
if (idx >= bin->nsymtab) {
continue;
}
symbol->type = R_BIN_MACH0_SYMBOL_TYPE_LOCAL;
symbol->offset = bin->sects[i].offset + j * bin->sects[i].reserved2;
symbol->addr = bin->sects[i].addr + j * bin->sects[i].reserved2;
symbol->size = 0;
stridx = bin->symtab[idx].n_strx;
if (stridx >= 0 && stridx < bin->symstrlen) {
symstr = (char *)bin->symstr+stridx;
} else {
symstr = "???";
}
// Remove the extra underscore that every import seems to have in Mach-O.
if (*symstr == '_') {
symstr++;
}
snprintf (symbol->name, R_BIN_MACH0_STRING_LENGTH, "imp.%s", symstr);
return true;
}
}
}
return false;
}
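/* Example: for an indirect symbol named "_printf" the code above emits the
 * flag-friendly name "imp.printf", placed at the matching stub slot of a
 * section flagged S_SYMBOL_STUBS (typically "__stubs"). */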
#if 0
static ut64 get_text_base(struct MACH0_(obj_t)* bin) {
ut64 ret = 0LL;
struct section_t *sections;
if ((sections = MACH0_(get_sections) (bin))) {
int i;
for (i = 0; !sections[i].last; i++) {
if (strstr(sections[i].name, "text")) {
ret = sections[i].offset;
break;
}
}
free (sections);
}
return ret;
}
#endif
static int inSymtab(SdbHash *hash, struct symbol_t *symbols, const char *name, ut64 addr) {
bool found;
const char *key = sdb_fmt (0, "%s.%"PFMT64x, name, addr);
(void)sdb_ht_find (hash, key, &found);
if (found) {
return true;
}
sdb_ht_insert (hash, key, "1");
return false;
}
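/* Example: the symbol "_main" at 0x100000f60 is keyed as "_main.100000f60";
 * a second occurrence of the same name+address pair is reported as already
 * seen and dropped by the callers below. */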
struct symbol_t* MACH0_(get_symbols)(struct MACH0_(obj_t)* bin) {
const char *symstr;
struct symbol_t *symbols;
int from, to, i, j, s, stridx, symbols_size, symbols_count;
SdbHash *hash;
//ut64 text_base = get_text_base (bin);
if (!bin || !bin->symtab || !bin->symstr) {
return NULL;
}
/* parse symbol table */
/* parse dynamic symbol table */
symbols_count = (bin->dysymtab.nextdefsym + \
bin->dysymtab.nlocalsym + \
bin->dysymtab.nundefsym );
symbols_count += bin->nsymtab;
//symbols_count = bin->nsymtab;
symbols_size = (symbols_count + 1) * 2 * sizeof (struct symbol_t);
if (symbols_size < 1) {
return NULL;
}
if (!(symbols = calloc (1, symbols_size))) {
return NULL;
}
hash = sdb_ht_new ();
j = 0; // symbol_idx
for (s = 0; s < 2; s++) {
switch (s) {
case 0:
from = bin->dysymtab.iextdefsym;
to = from + bin->dysymtab.nextdefsym;
break;
case 1:
from = bin->dysymtab.ilocalsym;
to = from + bin->dysymtab.nlocalsym;
break;
#if NOT_USED
case 2:
from = bin->dysymtab.iundefsym;
to = from + bin->dysymtab.nundefsym;
break;
#endif
}
if (from == to) {
continue;
}
#define OLD 1
#if OLD
from = R_MIN (R_MAX (0, from), symbols_size / sizeof (struct symbol_t));
to = R_MIN (to , symbols_size / sizeof (struct symbol_t));
to = R_MIN (to, bin->nsymtab);
#else
from = R_MIN (R_MAX (0, from), symbols_size/sizeof(struct symbol_t));
to = symbols_count; //symbols_size/sizeof(struct symbol_t);
#endif
int maxsymbols = symbols_size / sizeof(struct symbol_t);
if (to > 0x500000) {
bprintf ("WARNING: corrupted mach0 header: symbol table is too big %d\n", to);
free (symbols);
sdb_ht_free (hash);
return NULL;
}
if (symbols_count >= maxsymbols) {
symbols_count = maxsymbols - 1;
}
for (i = from; i < to && j < symbols_count; i++, j++) {
symbols[j].offset = addr_to_offset (bin, bin->symtab[i].n_value);
symbols[j].addr = bin->symtab[i].n_value;
symbols[j].size = 0; /* TODO: Is it anywhere? */
if (bin->symtab[i].n_type & N_EXT) {
symbols[j].type = R_BIN_MACH0_SYMBOL_TYPE_EXT;
} else {
symbols[j].type = R_BIN_MACH0_SYMBOL_TYPE_LOCAL;
}
stridx = bin->symtab[i].n_strx;
if (stridx >= 0 && stridx < bin->symstrlen) {
symstr = (char*)bin->symstr+stridx;
} else {
symstr = "???";
}
{
int i = 0;
int len = 0;
len = bin->symstrlen - stridx;
if (len > 0) {
for (i = 0; i < len; i++) {
if ((ut8)(symstr[i] & 0xff) == 0xff || !symstr[i]) {
len = i;
break;
}
}
char *symstr_dup = NULL;
if (len > 0) {
symstr_dup = r_str_ndup (symstr, len);
}
if (!symstr_dup) {
symbols[j].name[0] = 0;
} else {
strncpy (symbols[j].name, symstr_dup, R_BIN_MACH0_STRING_LENGTH-1);
symbols[j].name[R_BIN_MACH0_STRING_LENGTH - 2] = 0;
}
free (symstr_dup);
} else {
symbols[j].name[0] = 0;
}
symbols[j].last = 0;
}
if (inSymtab (hash, symbols, symbols[j].name, symbols[j].addr)) {
symbols[j].name[0] = 0;
j--;
}
}
}
to = R_MIN (bin->nsymtab, bin->dysymtab.iundefsym + bin->dysymtab.nundefsym);
for (i = bin->dysymtab.iundefsym; i < to; i++) {
if (j > symbols_count) {
bprintf ("mach0-get-symbols: error\n");
break;
}
if (parse_import_stub (bin, &symbols[j], i)) {
symbols[j++].last = 0;
}
}
#if 1
// the symtab may be wrongly parsed and produce duplicated syms with incorrect vaddr
for (i = 0; i < bin->nsymtab; i++) {
struct MACH0_(nlist) *st = &bin->symtab[i];
#if 0
bprintf ("stridx %d -> section %d type %d value = %d\n",
st->n_strx, st->n_sect, st->n_type, st->n_value);
#endif
stridx = st->n_strx;
if (stridx >= 0 && stridx < bin->symstrlen) {
symstr = (char*)bin->symstr + stridx;
} else {
symstr = "???";
}
// 0 is for imports
// 1 is for symbols
// 2 is for func.eh (exception handlers?)
int section = st->n_sect;
if (section == 1 && j < symbols_count) { // text ??st->n_type == 1)
/* is symbol */
symbols[j].addr = st->n_value; // + text_base;
symbols[j].offset = addr_to_offset (bin, symbols[j].addr);
symbols[j].size = 0; /* find next symbol and crop */
if (st->n_type & N_EXT) {
symbols[j].type = R_BIN_MACH0_SYMBOL_TYPE_EXT;
} else {
symbols[j].type = R_BIN_MACH0_SYMBOL_TYPE_LOCAL;
}
strncpy (symbols[j].name, symstr, R_BIN_MACH0_STRING_LENGTH);
symbols[j].name[R_BIN_MACH0_STRING_LENGTH - 1] = 0;
symbols[j].last = 0;
if (inSymtab (hash, symbols, symbols[j].name, symbols[j].addr)) {
symbols[j].name[0] = 0;
} else {
j++;
}
}
}
#endif
sdb_ht_free (hash);
symbols[j].last = 1;
return symbols;
}
static int parse_import_ptr(struct MACH0_(obj_t)* bin, struct reloc_t *reloc, int idx) {
int i, j, sym, wordsize;
ut32 stype;
wordsize = MACH0_(get_bits)(bin) / 8;
if (idx < 0 || idx >= bin->nsymtab) {
return 0;
}
if ((bin->symtab[idx].n_desc & REFERENCE_TYPE) == REFERENCE_FLAG_UNDEFINED_LAZY) {
stype = S_LAZY_SYMBOL_POINTERS;
} else {
stype = S_NON_LAZY_SYMBOL_POINTERS;
}
reloc->offset = 0;
reloc->addr = 0;
reloc->addend = 0;
#define CASE(T) case (T / 8): reloc->type = R_BIN_RELOC_ ## T; break
switch (wordsize) {
CASE(8);
CASE(16);
CASE(32);
CASE(64);
default: return false;
}
#undef CASE
for (i = 0; i < bin->nsects; i++) {
if ((bin->sects[i].flags & SECTION_TYPE) == stype) {
for (j = 0, sym = -1; bin->sects[i].reserved1 + j < bin->nindirectsyms; j++) {
if (idx == bin->indirectsyms[bin->sects[i].reserved1 + j]) {
sym = j;
break;
}
}
reloc->offset = sym == -1 ? 0 : bin->sects[i].offset + sym * wordsize;
reloc->addr = sym == -1 ? 0 : bin->sects[i].addr + sym * wordsize;
return true;
}
}
return false;
}
struct import_t* MACH0_(get_imports)(struct MACH0_(obj_t)* bin) {
struct import_t *imports;
int i, j, idx, stridx;
const char *symstr;
if (!bin->symtab || !bin->symstr || !bin->sects || !bin->indirectsyms) {
return NULL;
}
if (bin->dysymtab.nundefsym < 1 || bin->dysymtab.nundefsym > 0xfffff) {
return NULL;
}
if (!(imports = malloc ((bin->dysymtab.nundefsym + 1) * sizeof(struct import_t)))) {
return NULL;
}
for (i = j = 0; i < bin->dysymtab.nundefsym; i++) {
idx = bin->dysymtab.iundefsym +i;
if (idx < 0 || idx >= bin->nsymtab) {
bprintf ("WARNING: Imports index out of bounds. Ignoring relocs\n");
free (imports);
return NULL;
}
stridx = bin->symtab[idx].n_strx;
if (stridx >= 0 && stridx < bin->symstrlen) {
symstr = (char *)bin->symstr + stridx;
} else {
symstr = "";
}
if (!*symstr) {
continue;
}
{
int i = 0;
int len = 0;
char *symstr_dup = NULL;
len = bin->symstrlen - stridx;
imports[j].name[0] = 0;
if (len > 0) {
for (i = 0; i < len; i++) {
if ((unsigned char)symstr[i] == 0xff || !symstr[i]) {
len = i;
break;
}
}
symstr_dup = r_str_ndup (symstr, len);
if (symstr_dup) {
strncpy (imports[j].name, symstr_dup, R_BIN_MACH0_STRING_LENGTH - 1);
imports[j].name[R_BIN_MACH0_STRING_LENGTH - 2] = 0;
free (symstr_dup);
}
}
}
imports[j].ord = i;
imports[j++].last = 0;
}
imports[j].last = 1;
if (!bin->imports_by_ord_size) {
if (j > 0) {
bin->imports_by_ord_size = j;
bin->imports_by_ord = (RBinImport**)calloc (j, sizeof (RBinImport*));
} else {
bin->imports_by_ord_size = 0;
bin->imports_by_ord = NULL;
}
}
return imports;
}
struct reloc_t* MACH0_(get_relocs)(struct MACH0_(obj_t)* bin) {
struct reloc_t *relocs;
int i = 0, len;
ulebr ur = {NULL};
ut8 *opcodes = NULL;
int wordsize = MACH0_(get_bits)(bin) / 8;
if (bin->dyld_info) {
ut8 *end, type = 0, rel_type = 0;
int lib_ord, seg_idx = -1, sym_ord = -1;
size_t j, count, skip, bind_size, lazy_size;
st64 addend = 0;
ut64 segmentAddress = 0LL;
ut64 addr = 0LL;
ut8 done = 0;
#define CASE(T) case (T / 8): rel_type = R_BIN_RELOC_ ## T; break
switch (wordsize) {
CASE(8);
CASE(16);
CASE(32);
CASE(64);
default: return NULL;
}
#undef CASE
bind_size = bin->dyld_info->bind_size;
lazy_size = bin->dyld_info->lazy_bind_size;
if (!bind_size || !lazy_size) {
return NULL;
}
if ((bind_size + lazy_size)<1) {
return NULL;
}
if (bin->dyld_info->bind_off > bin->size || bin->dyld_info->bind_off + bind_size > bin->size) {
return NULL;
}
if (bin->dyld_info->lazy_bind_off > bin->size || \
bin->dyld_info->lazy_bind_off + lazy_size > bin->size) {
return NULL;
}
if (bin->dyld_info->bind_off+bind_size+lazy_size > bin->size) {
return NULL;
}
// NOTE(eddyb) it's a waste of memory, but we don't know the actual number of relocs.
if (!(relocs = calloc (1, (1 + bind_size + lazy_size) * sizeof (struct reloc_t)))) {
return NULL;
}
opcodes = calloc (1, bind_size + lazy_size + 1);
if (!opcodes) {
free (relocs);
return NULL;
}
len = r_buf_read_at (bin->b, bin->dyld_info->bind_off, opcodes, bind_size);
i = r_buf_read_at (bin->b, bin->dyld_info->lazy_bind_off, opcodes + bind_size, lazy_size);
if (len < 1 || i < 1) {
bprintf ("Error: read (dyld_info bind) at 0x%08"PFMT64x"\n",
(ut64)(size_t)bin->dyld_info->bind_off);
free (opcodes);
relocs[0].last = 1;
return relocs;
}
i = 0;
// the +2 is the minimum required for uleb128; this may be wrong,
// the correct fix would be to make ULEB() use rutil's
// implementation, which already checks buffer boundaries
for (ur.p = opcodes, end = opcodes + bind_size + lazy_size ; (ur.p+2 < end) && !done; ) {
ut8 imm = *ur.p & BIND_IMMEDIATE_MASK, op = *ur.p & BIND_OPCODE_MASK;
++ur.p;
switch (op) {
#define ULEB() read_uleb128 (&ur,end)
#define SLEB() read_sleb128 (&ur,end)
case BIND_OPCODE_DONE:
done = 1;
break;
case BIND_OPCODE_SET_DYLIB_ORDINAL_IMM:
lib_ord = imm;
break;
case BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB:
lib_ord = ULEB();
break;
case BIND_OPCODE_SET_DYLIB_SPECIAL_IMM:
lib_ord = imm? (st8)(BIND_OPCODE_MASK | imm) : 0;
break;
case BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM: {
char *sym_name = (char*)ur.p;
//ut8 sym_flags = imm;
while (*ur.p++ && ur.p<end) {
/* empty loop */
}
sym_ord = -1;
if (bin->symtab && bin->dysymtab.nundefsym < 0xffff) {
for (j = 0; j < bin->dysymtab.nundefsym; j++) {
int stridx = 0;
int iundefsym = bin->dysymtab.iundefsym;
if (iundefsym >= 0 && iundefsym < bin->nsymtab) {
int sidx = iundefsym + j;
if (sidx < 0 || sidx >= bin->nsymtab) {
continue;
}
stridx = bin->symtab[sidx].n_strx;
if (stridx < 0 || stridx >= bin->symstrlen) {
continue;
}
}
if (!strcmp ((char *)bin->symstr + stridx, sym_name)) {
sym_ord = j;
break;
}
}
}
break;
}
case BIND_OPCODE_SET_TYPE_IMM:
type = imm;
break;
case BIND_OPCODE_SET_ADDEND_SLEB:
addend = SLEB();
break;
case BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB:
seg_idx = imm;
if (seg_idx < 0 || seg_idx >= bin->nsegs) {
bprintf ("Error: BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB"
" has nonexistent segment %d\n", seg_idx);
free (opcodes);
free (relocs);
return NULL; // early exit to avoid future mayhem
} else {
addr = bin->segs[seg_idx].vmaddr + ULEB();
segmentAddress = bin->segs[seg_idx].vmaddr \
+ bin->segs[seg_idx].vmsize;
}
break;
case BIND_OPCODE_ADD_ADDR_ULEB:
addr += ULEB();
break;
#define DO_BIND() do {\
if (sym_ord < 0 || seg_idx < 0 ) break;\
if (i >= (bind_size + lazy_size)) break;\
relocs[i].addr = addr;\
relocs[i].offset = addr - bin->segs[seg_idx].vmaddr + bin->segs[seg_idx].fileoff;\
if (type == BIND_TYPE_TEXT_PCREL32)\
relocs[i].addend = addend - (bin->baddr + addr);\
else relocs[i].addend = addend;\
/* library ordinal ??? */ \
relocs[i].ord = lib_ord;\
relocs[i].ord = sym_ord;\
relocs[i].type = rel_type;\
relocs[i++].last = 0;\
} while (0)
case BIND_OPCODE_DO_BIND:
if (addr >= segmentAddress) {
bprintf ("Error: Malformed DO bind opcode\n");
goto beach;
}
DO_BIND();
addr += wordsize;
break;
case BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB:
if (addr >= segmentAddress) {
bprintf ("Error: Malformed ADDR ULEB bind opcode\n");
goto beach;
}
DO_BIND();
addr += ULEB() + wordsize;
break;
case BIND_OPCODE_DO_BIND_ADD_ADDR_IMM_SCALED:
if (addr >= segmentAddress) {
bprintf ("Error: Malformed IMM SCALED bind opcode\n");
goto beach;
}
DO_BIND();
addr += (ut64)imm * (ut64)wordsize + wordsize;
break;
case BIND_OPCODE_DO_BIND_ULEB_TIMES_SKIPPING_ULEB:
count = ULEB();
skip = ULEB();
for (j = 0; j < count; j++) {
if (addr >= segmentAddress) {
bprintf ("Error: Malformed ULEB TIMES bind opcode\n");
goto beach;
}
DO_BIND();
addr += skip + wordsize;
}
break;
#undef DO_BIND
#undef ULEB
#undef SLEB
default:
bprintf ("Error: unknown bind opcode 0x%02x in dyld_info\n", *ur.p);
free (opcodes);
relocs[i].last = 1;
return relocs;
}
}
} else {
int j;
if (!bin->symtab || !bin->symstr || !bin->sects || !bin->indirectsyms) {
return NULL;
}
if (!(relocs = malloc ((bin->dysymtab.nundefsym + 1) * sizeof(struct reloc_t)))) {
return NULL;
}
for (j = 0; j < bin->dysymtab.nundefsym; j++) {
if (parse_import_ptr (bin, &relocs[i], bin->dysymtab.iundefsym + j)) {
relocs[i].ord = j;
relocs[i++].last = 0;
}
}
}
beach:
free (opcodes);
relocs[i].last = 1;
return relocs;
}
struct addr_t* MACH0_(get_entrypoint)(struct MACH0_(obj_t)* bin) {
struct addr_t *entry;
int i;
if (!bin->entry && !bin->sects) {
return NULL;
}
if (!(entry = calloc (1, sizeof (struct addr_t)))) {
return NULL;
}
if (bin->entry) {
entry->addr = entry_to_vaddr (bin);
entry->offset = addr_to_offset (bin, entry->addr);
entry->haddr = sdb_num_get (bin->kv, "mach0.entry.offset", 0);
}
if (!bin->entry || entry->offset == 0) {
// XXX: the section name doesn't matter at all.. just check for exec flags
for (i = 0; i < bin->nsects; i++) {
if (!strncmp (bin->sects[i].sectname, "__text", 6)) {
entry->offset = (ut64)bin->sects[i].offset;
sdb_num_set (bin->kv, "mach0.entry", entry->offset, 0);
entry->addr = (ut64)bin->sects[i].addr;
if (!entry->addr) { // workaround for object files
entry->addr = entry->offset;
}
break;
}
}
bin->entry = entry->addr;
}
return entry;
}
struct lib_t* MACH0_(get_libs)(struct MACH0_(obj_t)* bin) {
struct lib_t *libs;
int i;
if (!bin->nlibs) {
return NULL;
}
if (!(libs = calloc ((bin->nlibs + 1), sizeof(struct lib_t)))) {
return NULL;
}
for (i = 0; i < bin->nlibs; i++) {
strncpy (libs[i].name, bin->libs[i], R_BIN_MACH0_STRING_LENGTH);
libs[i].name[R_BIN_MACH0_STRING_LENGTH-1] = '\0';
libs[i].last = 0;
}
libs[i].last = 1;
return libs;
}
ut64 MACH0_(get_baddr)(struct MACH0_(obj_t)* bin) {
int i;
if (bin->hdr.filetype != MH_EXECUTE && bin->hdr.filetype != MH_DYLINKER) {
return 0;
}
for (i = 0; i < bin->nsegs; ++i) {
if (bin->segs[i].fileoff == 0 && bin->segs[i].filesize != 0) {
return bin->segs[i].vmaddr;
}
}
return 0;
}
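/* Example: in a typical 64-bit executable the __TEXT segment has fileoff 0
 * and a nonzero filesize, so its vmaddr (often 0x100000000 on arm64/x86-64
 * Darwin) becomes the base address. */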
char* MACH0_(get_class)(struct MACH0_(obj_t)* bin) {
#if R_BIN_MACH064
return r_str_new ("MACH064");
#else
return r_str_new ("MACH0");
#endif
}
//XXX we are mixing up CPU bits and opcode size here:
//Thumb uses 16-bit opcodes but runs on 32-bit CPUs,
//so this should arguably only return 32 or 64
int MACH0_(get_bits)(struct MACH0_(obj_t)* bin) {
if (bin) {
int bits = MACH0_(get_bits_from_hdr) (&bin->hdr);
if (bin->hdr.cputype == CPU_TYPE_ARM && bin->entry & 1) {
return 16;
}
return bits;
}
return 32;
}
int MACH0_(get_bits_from_hdr)(struct MACH0_(mach_header)* hdr) {
if (hdr->magic == MH_MAGIC_64 || hdr->magic == MH_CIGAM_64) {
return 64;
}
if ((hdr->cpusubtype & CPU_SUBTYPE_MASK) == (CPU_SUBTYPE_ARM_V7K << 24)) {
return 16;
}
return 32;
}
bool MACH0_(is_big_endian)(struct MACH0_(obj_t)* bin) {
if (bin) {
const int cpu = bin->hdr.cputype;
return cpu == CPU_TYPE_POWERPC || cpu == CPU_TYPE_POWERPC64;
}
return false;
}
const char* MACH0_(get_intrp)(struct MACH0_(obj_t)* bin) {
return bin? bin->intrp: NULL;
}
const char* MACH0_(get_os)(struct MACH0_(obj_t)* bin) {
if (bin) {
switch (bin->os) {
case 1: return "osx";
case 2: return "ios";
case 3: return "watchos";
case 4: return "tvos";
}
}
return "darwin";
}
char* MACH0_(get_cputype_from_hdr)(struct MACH0_(mach_header) *hdr) {
const char *archstr = "unknown";
switch (hdr->cputype) {
case CPU_TYPE_VAX:
archstr = "vax";
break;
case CPU_TYPE_MC680x0:
archstr = "mc680x0";
break;
case CPU_TYPE_I386:
case CPU_TYPE_X86_64:
archstr = "x86";
break;
case CPU_TYPE_MC88000:
archstr = "mc88000";
break;
case CPU_TYPE_MC98000:
archstr = "mc98000";
break;
case CPU_TYPE_HPPA:
archstr = "hppa";
break;
case CPU_TYPE_ARM:
case CPU_TYPE_ARM64:
archstr = "arm";
break;
case CPU_TYPE_SPARC:
archstr = "sparc";
break;
case CPU_TYPE_MIPS:
archstr = "mips";
break;
case CPU_TYPE_I860:
archstr = "i860";
break;
case CPU_TYPE_POWERPC:
case CPU_TYPE_POWERPC64:
archstr = "ppc";
}
return strdup (archstr);
}
char* MACH0_(get_cputype)(struct MACH0_(obj_t)* bin) {
if (bin) {
return MACH0_(get_cputype_from_hdr) (&bin->hdr);
}
return strdup ("unknown");
}
// TODO: use const char*
char* MACH0_(get_cpusubtype_from_hdr)(struct MACH0_(mach_header) *hdr) {
if (hdr) {
switch (hdr->cputype) {
case CPU_TYPE_VAX:
switch (hdr->cpusubtype) {
case CPU_SUBTYPE_VAX_ALL: return strdup ("all");
case CPU_SUBTYPE_VAX780: return strdup ("vax780");
case CPU_SUBTYPE_VAX785: return strdup ("vax785");
case CPU_SUBTYPE_VAX750: return strdup ("vax750");
case CPU_SUBTYPE_VAX730: return strdup ("vax730");
case CPU_SUBTYPE_UVAXI: return strdup ("uvaxI");
case CPU_SUBTYPE_UVAXII: return strdup ("uvaxII");
case CPU_SUBTYPE_VAX8200: return strdup ("vax8200");
case CPU_SUBTYPE_VAX8500: return strdup ("vax8500");
case CPU_SUBTYPE_VAX8600: return strdup ("vax8600");
case CPU_SUBTYPE_VAX8650: return strdup ("vax8650");
case CPU_SUBTYPE_VAX8800: return strdup ("vax8800");
case CPU_SUBTYPE_UVAXIII: return strdup ("uvaxIII");
default: return strdup ("Unknown vax subtype");
}
case CPU_TYPE_MC680x0:
switch (hdr->cpusubtype) {
case CPU_SUBTYPE_MC68030: return strdup ("mc68030");
case CPU_SUBTYPE_MC68040: return strdup ("mc68040");
case CPU_SUBTYPE_MC68030_ONLY: return strdup ("mc68030 only");
default: return strdup ("Unknown mc680x0 subtype");
}
case CPU_TYPE_I386:
switch (hdr->cpusubtype) {
case CPU_SUBTYPE_386: return strdup ("386");
case CPU_SUBTYPE_486: return strdup ("486");
case CPU_SUBTYPE_486SX: return strdup ("486sx");
case CPU_SUBTYPE_PENT: return strdup ("Pentium");
case CPU_SUBTYPE_PENTPRO: return strdup ("Pentium Pro");
case CPU_SUBTYPE_PENTII_M3: return strdup ("Pentium 3 M3");
case CPU_SUBTYPE_PENTII_M5: return strdup ("Pentium 3 M5");
case CPU_SUBTYPE_CELERON: return strdup ("Celeron");
case CPU_SUBTYPE_CELERON_MOBILE: return strdup ("Celeron Mobile");
case CPU_SUBTYPE_PENTIUM_3: return strdup ("Pentium 3");
case CPU_SUBTYPE_PENTIUM_3_M: return strdup ("Pentium 3 M");
case CPU_SUBTYPE_PENTIUM_3_XEON: return strdup ("Pentium 3 Xeon");
case CPU_SUBTYPE_PENTIUM_M: return strdup ("Pentium Mobile");
case CPU_SUBTYPE_PENTIUM_4: return strdup ("Pentium 4");
case CPU_SUBTYPE_PENTIUM_4_M: return strdup ("Pentium 4 M");
case CPU_SUBTYPE_ITANIUM: return strdup ("Itanium");
case CPU_SUBTYPE_ITANIUM_2: return strdup ("Itanium 2");
case CPU_SUBTYPE_XEON: return strdup ("Xeon");
case CPU_SUBTYPE_XEON_MP: return strdup ("Xeon MP");
default: return strdup ("Unknown i386 subtype");
}
case CPU_TYPE_X86_64:
switch (hdr->cpusubtype & 0xff) {
case CPU_SUBTYPE_X86_64_ALL: return strdup ("x86 64 all");
case CPU_SUBTYPE_X86_ARCH1: return strdup ("x86 arch 1");
default: return strdup ("Unknown x86 subtype");
}
case CPU_TYPE_MC88000:
switch (hdr->cpusubtype & 0xff) {
case CPU_SUBTYPE_MC88000_ALL: return strdup ("all");
case CPU_SUBTYPE_MC88100: return strdup ("mc88100");
case CPU_SUBTYPE_MC88110: return strdup ("mc88110");
default: return strdup ("Unknown mc88000 subtype");
}
case CPU_TYPE_MC98000:
switch (hdr->cpusubtype & 0xff) {
case CPU_SUBTYPE_MC98000_ALL: return strdup ("all");
case CPU_SUBTYPE_MC98601: return strdup ("mc98601");
default: return strdup ("Unknown mc98000 subtype");
}
case CPU_TYPE_HPPA:
switch (hdr->cpusubtype & 0xff) {
case CPU_SUBTYPE_HPPA_7100: return strdup ("hppa7100");
case CPU_SUBTYPE_HPPA_7100LC: return strdup ("hppa7100LC");
default: return strdup ("Unknown hppa subtype");
}
case CPU_TYPE_ARM64:
return strdup ("v8");
case CPU_TYPE_ARM:
switch (hdr->cpusubtype & 0xff) {
case CPU_SUBTYPE_ARM_ALL:
return strdup ("all");
case CPU_SUBTYPE_ARM_V4T:
return strdup ("v4t");
case CPU_SUBTYPE_ARM_V5:
return strdup ("v5");
case CPU_SUBTYPE_ARM_V6:
return strdup ("v6");
case CPU_SUBTYPE_ARM_XSCALE:
return strdup ("xscale");
case CPU_SUBTYPE_ARM_V7:
return strdup ("v7");
case CPU_SUBTYPE_ARM_V7F:
return strdup ("v7f");
case CPU_SUBTYPE_ARM_V7S:
return strdup ("v7s");
case CPU_SUBTYPE_ARM_V7K:
return strdup ("v7k");
case CPU_SUBTYPE_ARM_V7M:
return strdup ("v7m");
case CPU_SUBTYPE_ARM_V7EM:
return strdup ("v7em");
default:
return r_str_newf ("unknown ARM subtype %d", hdr->cpusubtype & 0xff);
}
case CPU_TYPE_SPARC:
switch (hdr->cpusubtype & 0xff) {
case CPU_SUBTYPE_SPARC_ALL: return strdup ("all");
default: return strdup ("Unknown sparc subtype");
}
case CPU_TYPE_MIPS:
switch (hdr->cpusubtype & 0xff) {
case CPU_SUBTYPE_MIPS_ALL: return strdup ("all");
case CPU_SUBTYPE_MIPS_R2300: return strdup ("r2300");
case CPU_SUBTYPE_MIPS_R2600: return strdup ("r2600");
case CPU_SUBTYPE_MIPS_R2800: return strdup ("r2800");
case CPU_SUBTYPE_MIPS_R2000a: return strdup ("r2000a");
case CPU_SUBTYPE_MIPS_R2000: return strdup ("r2000");
case CPU_SUBTYPE_MIPS_R3000a: return strdup ("r3000a");
case CPU_SUBTYPE_MIPS_R3000: return strdup ("r3000");
default: return strdup ("Unknown mips subtype");
}
case CPU_TYPE_I860:
switch (hdr->cpusubtype & 0xff) {
case CPU_SUBTYPE_I860_ALL: return strdup ("all");
case CPU_SUBTYPE_I860_860: return strdup ("860");
default: return strdup ("Unknown i860 subtype");
}
case CPU_TYPE_POWERPC:
case CPU_TYPE_POWERPC64:
switch (hdr->cpusubtype & 0xff) {
case CPU_SUBTYPE_POWERPC_ALL: return strdup ("all");
case CPU_SUBTYPE_POWERPC_601: return strdup ("601");
case CPU_SUBTYPE_POWERPC_602: return strdup ("602");
case CPU_SUBTYPE_POWERPC_603: return strdup ("603");
case CPU_SUBTYPE_POWERPC_603e: return strdup ("603e");
case CPU_SUBTYPE_POWERPC_603ev: return strdup ("603ev");
case CPU_SUBTYPE_POWERPC_604: return strdup ("604");
case CPU_SUBTYPE_POWERPC_604e: return strdup ("604e");
case CPU_SUBTYPE_POWERPC_620: return strdup ("620");
case CPU_SUBTYPE_POWERPC_750: return strdup ("750");
case CPU_SUBTYPE_POWERPC_7400: return strdup ("7400");
case CPU_SUBTYPE_POWERPC_7450: return strdup ("7450");
case CPU_SUBTYPE_POWERPC_970: return strdup ("970");
default: return strdup ("Unknown ppc subtype");
}
}
}
return strdup ("Unknown cputype");
}
char* MACH0_(get_cpusubtype)(struct MACH0_(obj_t)* bin) {
if (bin) {
return MACH0_(get_cpusubtype_from_hdr) (&bin->hdr);
}
return strdup ("Unknown");
}
int MACH0_(is_pie)(struct MACH0_(obj_t)* bin) {
return (bin && bin->hdr.filetype == MH_EXECUTE && bin->hdr.flags & MH_PIE);
}
int MACH0_(has_nx)(struct MACH0_(obj_t)* bin) {
return (bin && bin->hdr.filetype == MH_EXECUTE &&
bin->hdr.flags & MH_NO_HEAP_EXECUTION);
}
char* MACH0_(get_filetype_from_hdr)(struct MACH0_(mach_header) *hdr) {
const char *mhtype = "Unknown";
switch (hdr->filetype) {
case MH_OBJECT: mhtype = "Relocatable object"; break;
case MH_EXECUTE: mhtype = "Executable file"; break;
case MH_FVMLIB: mhtype = "Fixed VM shared library"; break;
case MH_CORE: mhtype = "Core file"; break;
case MH_PRELOAD: mhtype = "Preloaded executable file"; break;
case MH_DYLIB: mhtype = "Dynamically bound shared library"; break;
case MH_DYLINKER: mhtype = "Dynamic link editor"; break;
case MH_BUNDLE: mhtype = "Dynamically bound bundle file"; break;
case MH_DYLIB_STUB: mhtype = "Shared library stub for static linking (no sections)"; break;
case MH_DSYM: mhtype = "Companion file with only debug sections"; break;
}
return strdup (mhtype);
}
char* MACH0_(get_filetype)(struct MACH0_(obj_t)* bin) {
if (bin) {
return MACH0_(get_filetype_from_hdr) (&bin->hdr);
}
return strdup ("Unknown");
}
ut64 MACH0_(get_main)(struct MACH0_(obj_t)* bin) {
ut64 addr = 0LL;
struct symbol_t *symbols;
int i;
if (!(symbols = MACH0_(get_symbols) (bin))) {
return 0;
}
for (i = 0; !symbols[i].last; i++) {
if (!strcmp (symbols[i].name, "_main")) {
addr = symbols[i].addr;
break;
}
}
free (symbols);
if (!addr && bin->main_cmd.cmd == LC_MAIN) {
addr = bin->entry + bin->baddr;
}
if (!addr) {
ut8 b[128];
ut64 entry = addr_to_offset(bin, bin->entry);
// XXX: X86 only and hacky!
if (entry > bin->size || entry + sizeof (b) > bin->size) {
return 0;
}
i = r_buf_read_at (bin->b, entry, b, sizeof (b));
if (i < 1) {
return 0;
}
for (i = 0; i < 64; i++) {
if (b[i] == 0xe8 && !b[i+3] && !b[i+4]) {
int delta = b[i+1] | (b[i+2] << 8) | (b[i+3] << 16) | (b[i+4] << 24);
return bin->entry + i + 5 + delta;
}
}
}
return addr;
}
void MACH0_(mach_headerfields)(RBinFile *file) {
RBuffer *buf = file->buf;
int n = 0;
struct MACH0_(mach_header) *mh = MACH0_(get_hdr_from_bytes)(buf);
if (!mh) {
return;
}
eprintf ("0x00000000 Magic 0x%x\n", mh->magic);
eprintf ("0x00000004 CpuType 0x%x\n", mh->cputype);
eprintf ("0x00000008 CpuSubType 0x%x\n", mh->cpusubtype);
eprintf ("0x0000000c FileType 0x%x\n", mh->filetype);
eprintf ("0x00000010 nCmds %d\n", mh->ncmds);
eprintf ("0x00000014 sizeOfCmds %d\n", mh->sizeofcmds);
eprintf ("0x00000018 Flags 0x%x\n", mh->flags);
ut64 addr = 0x20 - 4;
ut32 word = 0;
ut8 wordbuf[sizeof(word)];
#define READWORD() \
addr += 4; \
if (r_buf_read_at (buf, addr, (ut8*)wordbuf, 4) < 1) { \
eprintf ("Invalid address in buffer\n"); \
break; \
} \
word = r_read_le32 (wordbuf);
for (n = 0; n < mh->ncmds; n++) {
eprintf ("\nLoad Command %d\n", n);
READWORD();
eprintf ("0x%08"PFMT64x" cmd 0x%x %s\n",
addr, word, cmd_to_string (word));
READWORD();
word &= 0xFFFFFF;
eprintf ("0x%08"PFMT64x" cmdsize %d\n", addr, word);
if ((int)(word) < 1) {
eprintf ("Invalid size\n");
break;
}
addr += word - 8;
}
free (mh);
}
RList* MACH0_(mach_fields)(RBinFile *arch) {
struct MACH0_(mach_header) *mh = MACH0_(get_hdr_from_bytes)(arch->buf);
if (!mh) {
return NULL;
}
RList *ret = r_list_new ();
if (!ret) {
free (mh);
return NULL;
}
ret->free = free;
ut64 addr = 0;
#define ROW(nam,siz,val,fmt) \
r_list_append (ret, r_bin_field_new (addr, addr, siz, nam, sdb_fmt (0, "0x%08x", val), fmt)); \
addr += 4;
ROW("hdr.magic", 4, mh->magic, "x");
ROW("hdr.cputype", 4, mh->cputype, NULL);
ROW("hdr.cpusubtype", 4, mh->cpusubtype, NULL);
ROW("hdr.filetype", 4, mh->filetype, NULL);
ROW("hdr.ncmds", 4, mh->ncmds, NULL);
ROW("hdr.sizeofcmds", 4, mh->sizeofcmds, NULL);
free (mh);
return ret;
}
struct MACH0_(mach_header) * MACH0_(get_hdr_from_bytes)(RBuffer *buf) {
ut8 magicbytes[sizeof (ut32)] = {0};
ut8 machohdrbytes[sizeof (struct MACH0_(mach_header))] = {0};
int len;
struct MACH0_(mach_header) *macho_hdr = R_NEW0 (struct MACH0_(mach_header));
bool big_endian = false;
if (!macho_hdr) {
return NULL;
}
if (r_buf_read_at (buf, 0, magicbytes, 4) < 1) {
free (macho_hdr);
return NULL;
}
if (r_read_le32 (magicbytes) == 0xfeedface) {
big_endian = false;
} else if (r_read_be32 (magicbytes) == 0xfeedface) {
big_endian = true;
} else if (r_read_le32 (magicbytes) == FAT_MAGIC) {
big_endian = false;
} else if (r_read_be32 (magicbytes) == FAT_MAGIC) {
big_endian = true;
} else if (r_read_le32 (magicbytes) == 0xfeedfacf) {
big_endian = false;
} else if (r_read_be32 (magicbytes) == 0xfeedfacf) {
big_endian = true;
} else {
/* also extract non-mach0s */
#if 0
free (macho_hdr);
return NULL;
#endif
}
len = r_buf_read_at (buf, 0, machohdrbytes, sizeof (machohdrbytes));
if (len != sizeof(struct MACH0_(mach_header))) {
free (macho_hdr);
return NULL;
}
macho_hdr->magic = r_read_ble (&machohdrbytes[0], big_endian, 32);
macho_hdr->cputype = r_read_ble (&machohdrbytes[4], big_endian, 32);
macho_hdr->cpusubtype = r_read_ble (&machohdrbytes[8], big_endian, 32);
macho_hdr->filetype = r_read_ble (&machohdrbytes[12], big_endian, 32);
macho_hdr->ncmds = r_read_ble (&machohdrbytes[16], big_endian, 32);
macho_hdr->sizeofcmds = r_read_ble (&machohdrbytes[20], big_endian, 32);
macho_hdr->flags = r_read_ble (&machohdrbytes[24], big_endian, 32);
#if R_BIN_MACH064
macho_hdr->reserved = r_read_ble (&machohdrbytes[28], big_endian, 32);
#endif
return macho_hdr;
}
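/* A short usage sketch (assumes the caller owns "buf"; the returned header
 * is heap-allocated and must be freed): */
#if 0
struct MACH0_(mach_header) *h = MACH0_(get_hdr_from_bytes) (buf);
if (h) {
	eprintf ("magic=0x%x cputype=0x%x ncmds=%d\n", h->magic, h->cputype, h->ncmds);
	free (h);
}
#endif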
| ./CrossVul/dataset_final_sorted/CWE-416/c/good_3307_0 |
crossvul-cpp_data_good_1186_0 | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD RRRR AAA W W %
% D D R R A A W W %
% D D RRRR AAAAA W W W %
% D D R R A A WW WW %
% DDDD R R A A W W %
% %
% %
% MagickCore Image Drawing Methods %
% %
% %
% Software Design %
% Cristy %
% July 1998 %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Bill Radcliffe of Corbis (www.corbis.com) contributed the polygon
% rendering code based on Paul Heckbert's "Concave Polygon Scan Conversion",
% Graphics Gems, 1990. Leonard Rosenthal and David Harr of Appligent
% (www.appligent.com) contributed the dash pattern, linecap stroking
% algorithm, and minor rendering improvements.
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/annotate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/blob.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/draw.h"
#include "MagickCore/draw-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/property.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/transform-private.h"
#include "MagickCore/utility.h"
/*
Define declarations.
*/
#define BezierQuantum 200
#define PrimitiveExtentPad 2048
#define MaxBezierCoordinates 4194304
#define ThrowPointExpectedException(token,exception) \
{ \
(void) ThrowMagickException(exception,GetMagickModule(),DrawError, \
"NonconformingDrawingPrimitiveDefinition","`%s'",token); \
status=MagickFalse; \
break; \
}
/*
Typedef declarations.
*/
typedef struct _EdgeInfo
{
SegmentInfo
bounds;
double
scanline;
PointInfo
*points;
size_t
number_points;
ssize_t
direction;
MagickBooleanType
ghostline;
size_t
highwater;
} EdgeInfo;
typedef struct _ElementInfo
{
double
cx,
cy,
major,
minor,
angle;
} ElementInfo;
typedef struct _MVGInfo
{
PrimitiveInfo
**primitive_info;
size_t
*extent;
ssize_t
offset;
PointInfo
point;
ExceptionInfo
*exception;
} MVGInfo;
typedef struct _PolygonInfo
{
EdgeInfo
*edges;
size_t
number_edges;
} PolygonInfo;
typedef enum
{
MoveToCode,
OpenCode,
GhostlineCode,
LineToCode,
EndCode
} PathInfoCode;
typedef struct _PathInfo
{
PointInfo
point;
PathInfoCode
code;
} PathInfo;
/*
Forward declarations.
*/
static Image
*DrawClippingMask(Image *,const DrawInfo *,const char *,const char *,
ExceptionInfo *);
static MagickBooleanType
DrawStrokePolygon(Image *,const DrawInfo *,const PrimitiveInfo *,
ExceptionInfo *),
RenderMVGContent(Image *,const DrawInfo *,const size_t,ExceptionInfo *),
TraceArc(MVGInfo *,const PointInfo,const PointInfo,const PointInfo),
TraceArcPath(MVGInfo *,const PointInfo,const PointInfo,const PointInfo,
const double,const MagickBooleanType,const MagickBooleanType),
TraceBezier(MVGInfo *,const size_t),
TraceCircle(MVGInfo *,const PointInfo,const PointInfo),
TraceEllipse(MVGInfo *,const PointInfo,const PointInfo,const PointInfo),
TraceLine(PrimitiveInfo *,const PointInfo,const PointInfo),
TraceRectangle(PrimitiveInfo *,const PointInfo,const PointInfo),
TraceRoundRectangle(MVGInfo *,const PointInfo,const PointInfo,PointInfo),
TraceSquareLinecap(PrimitiveInfo *,const size_t,const double);
static PrimitiveInfo
*TraceStrokePolygon(const Image *,const DrawInfo *,const PrimitiveInfo *);
static size_t
TracePath(MVGInfo *,const char *,ExceptionInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireDrawInfo() returns a DrawInfo structure properly initialized.
%
% The format of the AcquireDrawInfo method is:
%
% DrawInfo *AcquireDrawInfo(void)
%
*/
MagickExport DrawInfo *AcquireDrawInfo(void)
{
DrawInfo
*draw_info;
draw_info=(DrawInfo *) AcquireCriticalMemory(sizeof(*draw_info));
GetDrawInfo((ImageInfo *) NULL,draw_info);
return(draw_info);
}
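/*
  A minimal usage sketch (assumption: "image" and "exception" were acquired
  by the caller; error handling is elided):

    DrawInfo *draw_info = AcquireDrawInfo();
    (void) CloneString(&draw_info->primitive,"circle 50,50 50,80");
    (void) DrawImage(image,draw_info,exception);
    draw_info=DestroyDrawInfo(draw_info);
*/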
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneDrawInfo() makes a copy of the given draw_info structure. If NULL
% is specified, a new DrawInfo structure is created initialized to default
% values.
%
% The format of the CloneDrawInfo method is:
%
% DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
% const DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o draw_info: the draw info.
%
*/
MagickExport DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
const DrawInfo *draw_info)
{
DrawInfo
*clone_info;
ExceptionInfo
*exception;
clone_info=(DrawInfo *) AcquireCriticalMemory(sizeof(*clone_info));
GetDrawInfo(image_info,clone_info);
if (draw_info == (DrawInfo *) NULL)
return(clone_info);
exception=AcquireExceptionInfo();
if (draw_info->primitive != (char *) NULL)
(void) CloneString(&clone_info->primitive,draw_info->primitive);
if (draw_info->geometry != (char *) NULL)
(void) CloneString(&clone_info->geometry,draw_info->geometry);
clone_info->compliance=draw_info->compliance;
clone_info->viewbox=draw_info->viewbox;
clone_info->affine=draw_info->affine;
clone_info->gravity=draw_info->gravity;
clone_info->fill=draw_info->fill;
clone_info->stroke=draw_info->stroke;
clone_info->stroke_width=draw_info->stroke_width;
if (draw_info->fill_pattern != (Image *) NULL)
clone_info->fill_pattern=CloneImage(draw_info->fill_pattern,0,0,MagickTrue,
exception);
if (draw_info->stroke_pattern != (Image *) NULL)
clone_info->stroke_pattern=CloneImage(draw_info->stroke_pattern,0,0,
MagickTrue,exception);
clone_info->stroke_antialias=draw_info->stroke_antialias;
clone_info->text_antialias=draw_info->text_antialias;
clone_info->fill_rule=draw_info->fill_rule;
clone_info->linecap=draw_info->linecap;
clone_info->linejoin=draw_info->linejoin;
clone_info->miterlimit=draw_info->miterlimit;
clone_info->dash_offset=draw_info->dash_offset;
clone_info->decorate=draw_info->decorate;
clone_info->compose=draw_info->compose;
if (draw_info->text != (char *) NULL)
(void) CloneString(&clone_info->text,draw_info->text);
if (draw_info->font != (char *) NULL)
(void) CloneString(&clone_info->font,draw_info->font);
if (draw_info->metrics != (char *) NULL)
(void) CloneString(&clone_info->metrics,draw_info->metrics);
if (draw_info->family != (char *) NULL)
(void) CloneString(&clone_info->family,draw_info->family);
clone_info->style=draw_info->style;
clone_info->stretch=draw_info->stretch;
clone_info->weight=draw_info->weight;
if (draw_info->encoding != (char *) NULL)
(void) CloneString(&clone_info->encoding,draw_info->encoding);
clone_info->pointsize=draw_info->pointsize;
clone_info->kerning=draw_info->kerning;
clone_info->interline_spacing=draw_info->interline_spacing;
clone_info->interword_spacing=draw_info->interword_spacing;
clone_info->direction=draw_info->direction;
if (draw_info->density != (char *) NULL)
(void) CloneString(&clone_info->density,draw_info->density);
clone_info->align=draw_info->align;
clone_info->undercolor=draw_info->undercolor;
clone_info->border_color=draw_info->border_color;
if (draw_info->server_name != (char *) NULL)
(void) CloneString(&clone_info->server_name,draw_info->server_name);
if (draw_info->dash_pattern != (double *) NULL)
{
register ssize_t
x;
for (x=0; fabs(draw_info->dash_pattern[x]) >= MagickEpsilon; x++) ;
clone_info->dash_pattern=(double *) AcquireQuantumMemory((size_t) (2*x+2),
sizeof(*clone_info->dash_pattern));
if (clone_info->dash_pattern == (double *) NULL)
ThrowFatalException(ResourceLimitFatalError,
"UnableToAllocateDashPattern");
(void) memset(clone_info->dash_pattern,0,(size_t) (2*x+2)*
sizeof(*clone_info->dash_pattern));
(void) memcpy(clone_info->dash_pattern,draw_info->dash_pattern,(size_t)
(x+1)*sizeof(*clone_info->dash_pattern));
}
clone_info->gradient=draw_info->gradient;
if (draw_info->gradient.stops != (StopInfo *) NULL)
{
size_t
number_stops;
number_stops=clone_info->gradient.number_stops;
clone_info->gradient.stops=(StopInfo *) AcquireQuantumMemory((size_t)
number_stops,sizeof(*clone_info->gradient.stops));
if (clone_info->gradient.stops == (StopInfo *) NULL)
ThrowFatalException(ResourceLimitFatalError,
"UnableToAllocateDashPattern");
(void) memcpy(clone_info->gradient.stops,draw_info->gradient.stops,
(size_t) number_stops*sizeof(*clone_info->gradient.stops));
}
clone_info->bounds=draw_info->bounds;
clone_info->fill_alpha=draw_info->fill_alpha;
clone_info->stroke_alpha=draw_info->stroke_alpha;
clone_info->element_reference=draw_info->element_reference;
clone_info->clip_path=draw_info->clip_path;
clone_info->clip_units=draw_info->clip_units;
if (draw_info->clip_mask != (char *) NULL)
(void) CloneString(&clone_info->clip_mask,draw_info->clip_mask);
if (draw_info->clipping_mask != (Image *) NULL)
clone_info->clipping_mask=CloneImage(draw_info->clipping_mask,0,0,
MagickTrue,exception);
if (draw_info->composite_mask != (Image *) NULL)
clone_info->composite_mask=CloneImage(draw_info->composite_mask,0,0,
MagickTrue,exception);
clone_info->render=draw_info->render;
clone_info->debug=IsEventLogging();
exception=DestroyExceptionInfo(exception);
return(clone_info);
}
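/*
  A short usage note (a sketch, not normative): the clone owns deep copies of
  every pointer member (strings, patterns, dash_pattern, gradient stops), so
  both structures are destroyed independently:

    DrawInfo *clone_info = CloneDrawInfo(image_info,draw_info);
    ...
    clone_info=DestroyDrawInfo(clone_info);
    draw_info=DestroyDrawInfo(draw_info);
*/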
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n v e r t P a t h T o P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConvertPathToPolygon() converts a path to the more efficient sorted
% rendering form.
%
% The format of the ConvertPathToPolygon method is:
%
% PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info)
%
% A description of each parameter follows:
%
% o Method ConvertPathToPolygon returns the path in a more efficient sorted
% rendering form of type PolygonInfo.
%
% o path_info: Specifies a pointer to a PathInfo structure.
%
%
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static int DrawCompareEdges(const void *p_edge,const void *q_edge)
{
#define DrawCompareEdge(p,q) \
{ \
if (((p)-(q)) < 0.0) \
return(-1); \
if (((p)-(q)) > 0.0) \
return(1); \
}
register const PointInfo
*p,
*q;
/*
Edge sorting for right-handed coordinate system.
*/
p=((const EdgeInfo *) p_edge)->points;
q=((const EdgeInfo *) q_edge)->points;
DrawCompareEdge(p[0].y,q[0].y);
DrawCompareEdge(p[0].x,q[0].x);
DrawCompareEdge((p[1].x-p[0].x)*(q[1].y-q[0].y),(p[1].y-p[0].y)*
(q[1].x-q[0].x));
DrawCompareEdge(p[1].y,q[1].y);
DrawCompareEdge(p[1].x,q[1].x);
return(0);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
static void LogPolygonInfo(const PolygonInfo *polygon_info)
{
register EdgeInfo
*p;
register ssize_t
i,
j;
(void) LogMagickEvent(DrawEvent,GetMagickModule()," begin active-edge");
p=polygon_info->edges;
for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
{
(void) LogMagickEvent(DrawEvent,GetMagickModule()," edge %.20g:",
(double) i);
(void) LogMagickEvent(DrawEvent,GetMagickModule()," direction: %s",
p->direction != MagickFalse ? "down" : "up");
(void) LogMagickEvent(DrawEvent,GetMagickModule()," ghostline: %s",
p->ghostline != MagickFalse ? "transparent" : "opaque");
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" bounds: %g,%g - %g,%g",p->bounds.x1,p->bounds.y1,
p->bounds.x2,p->bounds.y2);
for (j=0; j < (ssize_t) p->number_points; j++)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %g,%g",
p->points[j].x,p->points[j].y);
p++;
}
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end active-edge");
}
static void ReversePoints(PointInfo *points,const size_t number_points)
{
PointInfo
point;
register ssize_t
i;
for (i=0; i < (ssize_t) (number_points >> 1); i++)
{
point=points[i];
points[i]=points[number_points-(i+1)];
points[number_points-(i+1)]=point;
}
}
static PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info)
{
long
direction,
next_direction;
PointInfo
point,
*points;
PolygonInfo
*polygon_info;
SegmentInfo
bounds;
register ssize_t
i,
n;
MagickBooleanType
ghostline;
size_t
edge,
number_edges,
number_points;
/*
Convert a path to the more efficient sorted rendering form.
*/
polygon_info=(PolygonInfo *) AcquireMagickMemory(sizeof(*polygon_info));
if (polygon_info == (PolygonInfo *) NULL)
return((PolygonInfo *) NULL);
number_edges=16;
polygon_info->edges=(EdgeInfo *) AcquireQuantumMemory(number_edges,
sizeof(*polygon_info->edges));
if (polygon_info->edges == (EdgeInfo *) NULL)
{
polygon_info=(PolygonInfo *) RelinquishMagickMemory(polygon_info);
return((PolygonInfo *) NULL);
}
(void) memset(polygon_info->edges,0,number_edges*
sizeof(*polygon_info->edges));
direction=0;
edge=0;
ghostline=MagickFalse;
n=0;
number_points=0;
points=(PointInfo *) NULL;
(void) memset(&point,0,sizeof(point));
(void) memset(&bounds,0,sizeof(bounds));
polygon_info->edges[edge].number_points=(size_t) n;
polygon_info->edges[edge].scanline=0.0;
polygon_info->edges[edge].highwater=0;
polygon_info->edges[edge].ghostline=ghostline;
polygon_info->edges[edge].direction=(ssize_t) direction;
polygon_info->edges[edge].points=points;
polygon_info->edges[edge].bounds=bounds;
polygon_info->number_edges=0;
for (i=0; path_info[i].code != EndCode; i++)
{
if ((path_info[i].code == MoveToCode) || (path_info[i].code == OpenCode) ||
(path_info[i].code == GhostlineCode))
{
/*
Move to.
*/
if ((points != (PointInfo *) NULL) && (n >= 2))
{
if (edge == number_edges)
{
number_edges<<=1;
polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
polygon_info->edges,(size_t) number_edges,
sizeof(*polygon_info->edges));
if (polygon_info->edges == (EdgeInfo *) NULL)
return((PolygonInfo *) NULL);
}
polygon_info->edges[edge].number_points=(size_t) n;
polygon_info->edges[edge].scanline=(-1.0);
polygon_info->edges[edge].highwater=0;
polygon_info->edges[edge].ghostline=ghostline;
polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
if (direction < 0)
ReversePoints(points,(size_t) n);
polygon_info->edges[edge].points=points;
polygon_info->edges[edge].bounds=bounds;
polygon_info->edges[edge].bounds.y1=points[0].y;
polygon_info->edges[edge].bounds.y2=points[n-1].y;
points=(PointInfo *) NULL;
ghostline=MagickFalse;
edge++;
}
if (points == (PointInfo *) NULL)
{
number_points=16;
points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
sizeof(*points));
if (points == (PointInfo *) NULL)
return((PolygonInfo *) NULL);
}
ghostline=path_info[i].code == GhostlineCode ? MagickTrue : MagickFalse;
point=path_info[i].point;
points[0]=point;
bounds.x1=point.x;
bounds.x2=point.x;
direction=0;
n=1;
continue;
}
/*
Line to.
*/
next_direction=((path_info[i].point.y > point.y) ||
((fabs(path_info[i].point.y-point.y) < MagickEpsilon) &&
(path_info[i].point.x > point.x))) ? 1 : -1;
if ((points != (PointInfo *) NULL) && (direction != 0) &&
(direction != next_direction))
{
/*
New edge.
*/
point=points[n-1];
if (edge == number_edges)
{
number_edges<<=1;
polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
polygon_info->edges,(size_t) number_edges,
sizeof(*polygon_info->edges));
if (polygon_info->edges == (EdgeInfo *) NULL)
return((PolygonInfo *) NULL);
}
polygon_info->edges[edge].number_points=(size_t) n;
polygon_info->edges[edge].scanline=(-1.0);
polygon_info->edges[edge].highwater=0;
polygon_info->edges[edge].ghostline=ghostline;
polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
if (direction < 0)
ReversePoints(points,(size_t) n);
polygon_info->edges[edge].points=points;
polygon_info->edges[edge].bounds=bounds;
polygon_info->edges[edge].bounds.y1=points[0].y;
polygon_info->edges[edge].bounds.y2=points[n-1].y;
number_points=16;
points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
sizeof(*points));
if (points == (PointInfo *) NULL)
return((PolygonInfo *) NULL);
n=1;
ghostline=MagickFalse;
points[0]=point;
bounds.x1=point.x;
bounds.x2=point.x;
edge++;
}
direction=next_direction;
if (points == (PointInfo *) NULL)
continue;
if (n == (ssize_t) number_points)
{
number_points<<=1;
points=(PointInfo *) ResizeQuantumMemory(points,(size_t) number_points,
sizeof(*points));
if (points == (PointInfo *) NULL)
return((PolygonInfo *) NULL);
}
point=path_info[i].point;
points[n]=point;
if (point.x < bounds.x1)
bounds.x1=point.x;
if (point.x > bounds.x2)
bounds.x2=point.x;
n++;
}
if (points != (PointInfo *) NULL)
{
if (n < 2)
points=(PointInfo *) RelinquishMagickMemory(points);
else
{
if (edge == number_edges)
{
number_edges<<=1;
polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
polygon_info->edges,(size_t) number_edges,
sizeof(*polygon_info->edges));
if (polygon_info->edges == (EdgeInfo *) NULL)
return((PolygonInfo *) NULL);
}
polygon_info->edges[edge].number_points=(size_t) n;
polygon_info->edges[edge].scanline=(-1.0);
polygon_info->edges[edge].highwater=0;
polygon_info->edges[edge].ghostline=ghostline;
polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
if (direction < 0)
ReversePoints(points,(size_t) n);
polygon_info->edges[edge].points=points;
polygon_info->edges[edge].bounds=bounds;
polygon_info->edges[edge].bounds.y1=points[0].y;
polygon_info->edges[edge].bounds.y2=points[n-1].y;
ghostline=MagickFalse;
edge++;
}
}
polygon_info->number_edges=edge;
qsort(polygon_info->edges,(size_t) polygon_info->number_edges,
sizeof(*polygon_info->edges),DrawCompareEdges);
if (IsEventLogging() != MagickFalse)
LogPolygonInfo(polygon_info);
return(polygon_info);
}
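/*
  A worked sketch of the conversion above (illustrative coordinates): the open
  path MoveTo(0,0) LineTo(4,4) LineTo(8,0) is split at the y-direction change
  into two monotonic edges, [(0,0),(4,4)] flagged "down" and [(8,0),(4,4)]
  flagged "up" (its points are reversed so y always ascends), and the edges
  are then qsort'ed by DrawCompareEdges for scanline rendering.
*/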
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n v e r t P r i m i t i v e T o P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConvertPrimitiveToPath() converts a PrimitiveInfo structure into a vector
% path structure.
%
% The format of the ConvertPrimitiveToPath method is:
%
% PathInfo *ConvertPrimitiveToPath(const PrimitiveInfo *primitive_info)
%
% A description of each parameter follows:
%
% o Method ConvertPrimitiveToPath returns a vector path structure of type
% PathInfo.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
%
*/
static void LogPathInfo(const PathInfo *path_info)
{
register const PathInfo
*p;
(void) LogMagickEvent(DrawEvent,GetMagickModule()," begin vector-path");
for (p=path_info; p->code != EndCode; p++)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" %g,%g %s",p->point.x,p->point.y,p->code == GhostlineCode ?
"moveto ghostline" : p->code == OpenCode ? "moveto open" :
p->code == MoveToCode ? "moveto" : p->code == LineToCode ? "lineto" :
"?");
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end vector-path");
}
static PathInfo *ConvertPrimitiveToPath(const PrimitiveInfo *primitive_info)
{
MagickBooleanType
closed_subpath;
PathInfo
*path_info;
PathInfoCode
code;
PointInfo
p,
q;
register ssize_t
i,
n;
ssize_t
coordinates,
start;
/*
Converts a PrimitiveInfo structure into a vector path structure.
*/
switch (primitive_info->primitive)
{
case AlphaPrimitive:
case ColorPrimitive:
case ImagePrimitive:
case PointPrimitive:
case TextPrimitive:
return((PathInfo *) NULL);
default:
break;
}
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
path_info=(PathInfo *) AcquireQuantumMemory((size_t) (3UL*i+1UL),
sizeof(*path_info));
if (path_info == (PathInfo *) NULL)
return((PathInfo *) NULL);
coordinates=0;
closed_subpath=MagickFalse;
n=0;
p.x=(-1.0);
p.y=(-1.0);
q.x=(-1.0);
q.y=(-1.0);
start=0;
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
{
code=LineToCode;
if (coordinates <= 0)
{
/*
New subpath.
*/
coordinates=(ssize_t) primitive_info[i].coordinates;
p=primitive_info[i].point;
start=n;
code=MoveToCode;
closed_subpath=primitive_info[i].closed_subpath;
}
coordinates--;
if ((code == MoveToCode) || (coordinates <= 0) ||
(fabs(q.x-primitive_info[i].point.x) >= MagickEpsilon) ||
(fabs(q.y-primitive_info[i].point.y) >= MagickEpsilon))
{
/*
Eliminate duplicate points.
*/
path_info[n].code=code;
path_info[n].point=primitive_info[i].point;
q=primitive_info[i].point;
n++;
}
if (coordinates > 0)
continue; /* next point in current subpath */
if (closed_subpath != MagickFalse)
{
closed_subpath=MagickFalse;
continue;
}
/*
Mark the start of the subpath as open and append a ghostline back to its first point (p).
*/
path_info[start].code=OpenCode;
path_info[n].code=GhostlineCode;
path_info[n].point=primitive_info[i].point;
n++;
path_info[n].code=LineToCode;
path_info[n].point=p;
n++;
}
path_info[n].code=EndCode;
path_info[n].point.x=0.0;
path_info[n].point.y=0.0;
if (IsEventLogging() != MagickFalse)
LogPathInfo(path_info);
path_info=(PathInfo *) ResizeQuantumMemory(path_info,(size_t) (n+1),
sizeof(*path_info));
return(path_info);
}
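/*
  Illustrative sketch of the PathInfo stream ConvertPrimitiveToPath()
  emits (matching the names LogPathInfo() prints): a closed subpath is a
  plain moveto/lineto run, while an open subpath has its first point
  rewritten to OpenCode and a ghostline appended so the rasterizer can
  return to the start point p without stroking it:

    moveto open      x0,y0
    lineto           x1,y1
    lineto           x2,y2
    moveto ghostline x2,y2
    lineto           x0,y0
    end
*/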
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyDrawInfo() deallocates memory associated with a DrawInfo structure.
%
% The format of the DestroyDrawInfo method is:
%
% DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o draw_info: the draw info.
%
*/
MagickExport DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
{
assert(draw_info != (DrawInfo *) NULL);
if (draw_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(draw_info->signature == MagickCoreSignature);
if (draw_info->primitive != (char *) NULL)
draw_info->primitive=DestroyString(draw_info->primitive);
if (draw_info->text != (char *) NULL)
draw_info->text=DestroyString(draw_info->text);
if (draw_info->geometry != (char *) NULL)
draw_info->geometry=DestroyString(draw_info->geometry);
if (draw_info->fill_pattern != (Image *) NULL)
draw_info->fill_pattern=DestroyImage(draw_info->fill_pattern);
if (draw_info->stroke_pattern != (Image *) NULL)
draw_info->stroke_pattern=DestroyImage(draw_info->stroke_pattern);
if (draw_info->font != (char *) NULL)
draw_info->font=DestroyString(draw_info->font);
if (draw_info->metrics != (char *) NULL)
draw_info->metrics=DestroyString(draw_info->metrics);
if (draw_info->family != (char *) NULL)
draw_info->family=DestroyString(draw_info->family);
if (draw_info->encoding != (char *) NULL)
draw_info->encoding=DestroyString(draw_info->encoding);
if (draw_info->density != (char *) NULL)
draw_info->density=DestroyString(draw_info->density);
if (draw_info->server_name != (char *) NULL)
draw_info->server_name=(char *)
RelinquishMagickMemory(draw_info->server_name);
if (draw_info->dash_pattern != (double *) NULL)
draw_info->dash_pattern=(double *) RelinquishMagickMemory(
draw_info->dash_pattern);
if (draw_info->gradient.stops != (StopInfo *) NULL)
draw_info->gradient.stops=(StopInfo *) RelinquishMagickMemory(
draw_info->gradient.stops);
if (draw_info->clip_mask != (char *) NULL)
draw_info->clip_mask=DestroyString(draw_info->clip_mask);
if (draw_info->clipping_mask != (Image *) NULL)
draw_info->clipping_mask=DestroyImage(draw_info->clipping_mask);
if (draw_info->composite_mask != (Image *) NULL)
draw_info->composite_mask=DestroyImage(draw_info->composite_mask);
draw_info->signature=(~MagickCoreSignature);
draw_info=(DrawInfo *) RelinquishMagickMemory(draw_info);
return(draw_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y E d g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyEdge() destroys the specified polygon edge.
%
% The format of the DestroyEdge method is:
%
% size_t DestroyEdge(PolygonInfo *polygon_info,const size_t edge)
%
% A description of each parameter follows:
%
% o polygon_info: Specifies a pointer to a PolygonInfo structure.
%
% o edge: the polygon edge number to destroy.
%
*/
static size_t DestroyEdge(PolygonInfo *polygon_info,
const size_t edge)
{
assert(edge < polygon_info->number_edges);
polygon_info->edges[edge].points=(PointInfo *) RelinquishMagickMemory(
polygon_info->edges[edge].points);
polygon_info->number_edges--;
if (edge < polygon_info->number_edges)
(void) memmove(polygon_info->edges+edge,polygon_info->edges+edge+1,
(size_t) (polygon_info->number_edges-edge)*sizeof(*polygon_info->edges));
return(polygon_info->number_edges);
}
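/*
  Note for callers (sketch, not library code): DestroyEdge() compacts the
  edge array in place, so indices beyond `edge' shift down by one.  A
  removal loop should therefore only advance when nothing was destroyed:

    ssize_t i=0;
    while (i < (ssize_t) polygon_info->number_edges)
      if (EdgeIsDone(polygon_info,i) != MagickFalse)  // hypothetical test
        (void) DestroyEdge(polygon_info,(size_t) i);
      else
        i++;
*/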
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y P o l y g o n I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPolygonInfo() destroys the PolygonInfo data structure.
%
% The format of the DestroyPolygonInfo method is:
%
% PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info)
%
% A description of each parameter follows:
%
% o polygon_info: Specifies a pointer to a PolygonInfo structure.
%
*/
static PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info)
{
register ssize_t
i;
for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
polygon_info->edges[i].points=(PointInfo *)
RelinquishMagickMemory(polygon_info->edges[i].points);
polygon_info->edges=(EdgeInfo *) RelinquishMagickMemory(polygon_info->edges);
return((PolygonInfo *) RelinquishMagickMemory(polygon_info));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w A f f i n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawAffineImage() composites the source over the destination image as
% dictated by the affine transform.
%
% The format of the DrawAffineImage method is:
%
% MagickBooleanType DrawAffineImage(Image *image,const Image *source,
% const AffineMatrix *affine,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o source: the source image.
%
% o affine: the affine transform.
%
% o exception: return any errors or warnings in this structure.
%
*/
static SegmentInfo AffineEdge(const Image *image,const AffineMatrix *affine,
const double y,const SegmentInfo *edge)
{
double
intercept,
z;
register double
x;
SegmentInfo
inverse_edge;
/*
Determine left and right edges.
*/
inverse_edge.x1=edge->x1;
inverse_edge.y1=edge->y1;
inverse_edge.x2=edge->x2;
inverse_edge.y2=edge->y2;
z=affine->ry*y+affine->tx;
if (affine->sx >= MagickEpsilon)
{
intercept=(-z/affine->sx);
x=intercept;
if (x > inverse_edge.x1)
inverse_edge.x1=x;
intercept=(-z+(double) image->columns)/affine->sx;
x=intercept;
if (x < inverse_edge.x2)
inverse_edge.x2=x;
}
else
if (affine->sx < -MagickEpsilon)
{
intercept=(-z+(double) image->columns)/affine->sx;
x=intercept;
if (x > inverse_edge.x1)
inverse_edge.x1=x;
intercept=(-z/affine->sx);
x=intercept;
if (x < inverse_edge.x2)
inverse_edge.x2=x;
}
else
if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->columns))
{
inverse_edge.x2=edge->x1;
return(inverse_edge);
}
/*
Determine top and bottom edges.
*/
z=affine->sy*y+affine->ty;
if (affine->rx >= MagickEpsilon)
{
intercept=(-z/affine->rx);
x=intercept;
if (x > inverse_edge.x1)
inverse_edge.x1=x;
intercept=(-z+(double) image->rows)/affine->rx;
x=intercept;
if (x < inverse_edge.x2)
inverse_edge.x2=x;
}
else
if (affine->rx < -MagickEpsilon)
{
intercept=(-z+(double) image->rows)/affine->rx;
x=intercept;
if (x > inverse_edge.x1)
inverse_edge.x1=x;
intercept=(-z/affine->rx);
x=intercept;
if (x < inverse_edge.x2)
inverse_edge.x2=x;
}
else
if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->rows))
{
inverse_edge.x2=edge->x2;
return(inverse_edge);
}
return(inverse_edge);
}
static AffineMatrix InverseAffineMatrix(const AffineMatrix *affine)
{
AffineMatrix
inverse_affine;
double
determinant;
determinant=PerceptibleReciprocal(affine->sx*affine->sy-affine->rx*
affine->ry);
inverse_affine.sx=determinant*affine->sy;
inverse_affine.rx=determinant*(-affine->rx);
inverse_affine.ry=determinant*(-affine->ry);
inverse_affine.sy=determinant*affine->sx;
inverse_affine.tx=(-affine->tx)*inverse_affine.sx-affine->ty*
inverse_affine.ry;
inverse_affine.ty=(-affine->tx)*inverse_affine.rx-affine->ty*
inverse_affine.sy;
return(inverse_affine);
}
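/*
  A check of the algebra above, assuming points transform as
  x' = sx*x + ry*y + tx and y' = rx*x + sy*y + ty (the convention used by
  DrawAffineImage() below): with determinant d = sx*sy-rx*ry, the inverse
  linear part is sy/d, -rx/d, -ry/d, sx/d, and the inverse translation
  pushes the forward translation back through it,

    tx' = -(tx*sx'+ty*ry'),  ty' = -(tx*rx'+ty*sy'),

  which is exactly what the assignments compute; PerceptibleReciprocal()
  guards against a near-singular matrix.
*/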
MagickExport MagickBooleanType DrawAffineImage(Image *image,
const Image *source,const AffineMatrix *affine,ExceptionInfo *exception)
{
AffineMatrix
inverse_affine;
CacheView
*image_view,
*source_view;
MagickBooleanType
status;
PixelInfo
zero;
PointInfo
extent[4],
min,
max;
register ssize_t
i;
SegmentInfo
edge;
ssize_t
start,
stop,
y;
/*
Determine bounding box.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(source != (const Image *) NULL);
assert(source->signature == MagickCoreSignature);
assert(affine != (AffineMatrix *) NULL);
extent[0].x=0.0;
extent[0].y=0.0;
extent[1].x=(double) source->columns-1.0;
extent[1].y=0.0;
extent[2].x=(double) source->columns-1.0;
extent[2].y=(double) source->rows-1.0;
extent[3].x=0.0;
extent[3].y=(double) source->rows-1.0;
for (i=0; i < 4; i++)
{
PointInfo
point;
point=extent[i];
extent[i].x=point.x*affine->sx+point.y*affine->ry+affine->tx;
extent[i].y=point.x*affine->rx+point.y*affine->sy+affine->ty;
}
min=extent[0];
max=extent[0];
for (i=1; i < 4; i++)
{
if (min.x > extent[i].x)
min.x=extent[i].x;
if (min.y > extent[i].y)
min.y=extent[i].y;
if (max.x < extent[i].x)
max.x=extent[i].x;
if (max.y < extent[i].y)
max.y=extent[i].y;
}
/*
Affine transform image.
*/
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
status=MagickTrue;
edge.x1=MagickMax(min.x,0.0);
edge.y1=MagickMax(min.y,0.0);
edge.x2=MagickMin(max.x,(double) image->columns-1.0);
edge.y2=MagickMin(max.y,(double) image->rows-1.0);
inverse_affine=InverseAffineMatrix(affine);
GetPixelInfo(image,&zero);
start=(ssize_t) ceil(edge.y1-0.5);
stop=(ssize_t) floor(edge.y2+0.5);
source_view=AcquireVirtualCacheView(source,exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(source,image,stop-start,1)
#endif
for (y=start; y <= stop; y++)
{
PixelInfo
composite,
pixel;
PointInfo
point;
register ssize_t
x;
register Quantum
*magick_restrict q;
SegmentInfo
inverse_edge;
ssize_t
x_offset;
inverse_edge=AffineEdge(source,&inverse_affine,(double) y,&edge);
if (inverse_edge.x2 < inverse_edge.x1)
continue;
q=GetCacheViewAuthenticPixels(image_view,(ssize_t) ceil(inverse_edge.x1-
0.5),y,(size_t) (floor(inverse_edge.x2+0.5)-ceil(inverse_edge.x1-0.5)+1),
1,exception);
if (q == (Quantum *) NULL)
continue;
pixel=zero;
composite=zero;
x_offset=0;
for (x=(ssize_t) ceil(inverse_edge.x1-0.5); x <= (ssize_t)
  floor(inverse_edge.x2+0.5); x++)
{
point.x=(double) x*inverse_affine.sx+y*inverse_affine.ry+
inverse_affine.tx;
point.y=(double) x*inverse_affine.rx+y*inverse_affine.sy+
inverse_affine.ty;
status=InterpolatePixelInfo(source,source_view,UndefinedInterpolatePixel,
point.x,point.y,&pixel,exception);
if (status == MagickFalse)
break;
GetPixelInfoPixel(image,q,&composite);
CompositePixelInfoOver(&pixel,pixel.alpha,&composite,composite.alpha,
&composite);
SetPixelViaPixelInfo(image,&composite,q);
x_offset++;
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
source_view=DestroyCacheView(source_view);
image_view=DestroyCacheView(image_view);
return(status);
}
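/*
  Usage sketch (illustrative, not part of the library): composite `source'
  onto `image' rotated 30 degrees and shifted to (50,50), using the same
  field convention as InverseAffineMatrix() above.

    AffineMatrix
      affine;

    GetAffineMatrix(&affine);
    affine.sx=cos(DegreesToRadians(30.0));
    affine.rx=sin(DegreesToRadians(30.0));
    affine.ry=(-sin(DegreesToRadians(30.0)));
    affine.sy=cos(DegreesToRadians(30.0));
    affine.tx=50.0;
    affine.ty=50.0;
    if (DrawAffineImage(image,source,&affine,exception) == MagickFalse)
      CatchException(exception);
*/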
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w B o u n d i n g R e c t a n g l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawBoundingRectangles() draws the bounding rectangles on the image. This
% is only useful for developers debugging the rendering algorithm.
%
% The format of the DrawBoundingRectangles method is:
%
% MagickBooleanType DrawBoundingRectangles(Image *image,
% const DrawInfo *draw_info,PolygonInfo *polygon_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o polygon_info: Specifies a pointer to a PolygonInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double SaneStrokeWidth(const Image *image,
const DrawInfo *draw_info)
{
return(MagickMin((double) draw_info->stroke_width,
(2.0*sqrt(2.0)+MagickEpsilon)*MagickMax(image->columns,image->rows)));
}
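/*
  SaneStrokeWidth() clamps the stroke width to (2*sqrt(2)+epsilon) times
  the larger image dimension -- a small multiple of the image diagonal --
  so a pathologically large width cannot blow up the bounding-rectangle
  math in DrawBoundingRectangles() below.
*/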
static MagickBooleanType DrawBoundingRectangles(Image *image,
const DrawInfo *draw_info,const PolygonInfo *polygon_info,
ExceptionInfo *exception)
{
double
mid;
DrawInfo
*clone_info;
MagickStatusType
status;
PointInfo
end,
resolution,
start;
PrimitiveInfo
primitive_info[6];
register ssize_t
i;
SegmentInfo
bounds;
ssize_t
coordinates;
(void) memset(primitive_info,0,sizeof(primitive_info));
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
status=QueryColorCompliance("#000F",AllCompliance,&clone_info->fill,
exception);
if (status == MagickFalse)
{
clone_info=DestroyDrawInfo(clone_info);
return(MagickFalse);
}
resolution.x=96.0;
resolution.y=96.0;
if (clone_info->density != (char *) NULL)
{
GeometryInfo
geometry_info;
MagickStatusType
flags;
flags=ParseGeometry(clone_info->density,&geometry_info);
resolution.x=geometry_info.rho;
resolution.y=geometry_info.sigma;
if ((flags & SigmaValue) == MagickFalse)
resolution.y=resolution.x;
}
mid=(resolution.x/96.0)*ExpandAffine(&clone_info->affine)*
SaneStrokeWidth(image,clone_info)/2.0;
bounds.x1=0.0;
bounds.y1=0.0;
bounds.x2=0.0;
bounds.y2=0.0;
if (polygon_info != (PolygonInfo *) NULL)
{
bounds=polygon_info->edges[0].bounds;
for (i=1; i < (ssize_t) polygon_info->number_edges; i++)
{
if (polygon_info->edges[i].bounds.x1 < (double) bounds.x1)
bounds.x1=polygon_info->edges[i].bounds.x1;
if (polygon_info->edges[i].bounds.y1 < (double) bounds.y1)
bounds.y1=polygon_info->edges[i].bounds.y1;
if (polygon_info->edges[i].bounds.x2 > (double) bounds.x2)
bounds.x2=polygon_info->edges[i].bounds.x2;
if (polygon_info->edges[i].bounds.y2 > (double) bounds.y2)
bounds.y2=polygon_info->edges[i].bounds.y2;
}
bounds.x1-=mid;
bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double)
image->columns ? (double) image->columns-1 : bounds.x1;
bounds.y1-=mid;
bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double)
image->rows ? (double) image->rows-1 : bounds.y1;
bounds.x2+=mid;
bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double)
image->columns ? (double) image->columns-1 : bounds.x2;
bounds.y2+=mid;
bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double)
image->rows ? (double) image->rows-1 : bounds.y2;
for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
{
if (polygon_info->edges[i].direction != 0)
status=QueryColorCompliance("#f00",AllCompliance,&clone_info->stroke,
exception);
else
status=QueryColorCompliance("#0f0",AllCompliance,&clone_info->stroke,
exception);
if (status == MagickFalse)
break;
start.x=(double) (polygon_info->edges[i].bounds.x1-mid);
start.y=(double) (polygon_info->edges[i].bounds.y1-mid);
end.x=(double) (polygon_info->edges[i].bounds.x2+mid);
end.y=(double) (polygon_info->edges[i].bounds.y2+mid);
primitive_info[0].primitive=RectanglePrimitive;
status&=TraceRectangle(primitive_info,start,end);
primitive_info[0].method=ReplaceMethod;
coordinates=(ssize_t) primitive_info[0].coordinates;
primitive_info[coordinates].primitive=UndefinedPrimitive;
status=DrawPrimitive(image,clone_info,primitive_info,exception);
if (status == MagickFalse)
break;
}
if (i < (ssize_t) polygon_info->number_edges)
{
clone_info=DestroyDrawInfo(clone_info);
return(status == 0 ? MagickFalse : MagickTrue);
}
}
status=QueryColorCompliance("#00f",AllCompliance,&clone_info->stroke,
exception);
if (status == MagickFalse)
{
clone_info=DestroyDrawInfo(clone_info);
return(MagickFalse);
}
start.x=(double) (bounds.x1-mid);
start.y=(double) (bounds.y1-mid);
end.x=(double) (bounds.x2+mid);
end.y=(double) (bounds.y2+mid);
primitive_info[0].primitive=RectanglePrimitive;
status&=TraceRectangle(primitive_info,start,end);
primitive_info[0].method=ReplaceMethod;
coordinates=(ssize_t) primitive_info[0].coordinates;
primitive_info[coordinates].primitive=UndefinedPrimitive;
status=DrawPrimitive(image,clone_info,primitive_info,exception);
clone_info=DestroyDrawInfo(clone_info);
return(status == 0 ? MagickFalse : MagickTrue);
}
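/*
  Debugging note: the rectangles drawn above are color-coded -- #f00
  outlines edges with a non-zero winding direction, #0f0 outlines the
  rest, and the final #00f rectangle marks the union of all edge bounds
  padded by half the stroke width (`mid').
*/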
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C l i p P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawClipPath() draws the clip path on the image mask.
%
% The format of the DrawClipPath method is:
%
% MagickBooleanType DrawClipPath(Image *image,const DrawInfo *draw_info,
% const char *id,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o id: the clip path id.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType DrawClipPath(Image *image,
const DrawInfo *draw_info,const char *id,ExceptionInfo *exception)
{
const char
*clip_path;
Image
*clipping_mask;
MagickBooleanType
status;
clip_path=GetImageArtifact(image,id);
if (clip_path == (const char *) NULL)
return(MagickFalse);
clipping_mask=DrawClippingMask(image,draw_info,draw_info->clip_mask,clip_path,
exception);
if (clipping_mask == (Image *) NULL)
return(MagickFalse);
status=SetImageMask(image,WritePixelMask,clipping_mask,exception);
clipping_mask=DestroyImage(clipping_mask);
return(status);
}
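/*
  Usage sketch (illustrative): DrawClipPath() expects the clip-path MVG to
  be registered as an image artifact under `id' before the call; the id
  and path below are hypothetical.

    (void) SetImageArtifact(image,"myclip",
      "path 'M 10,10 L 90,10 L 90,90 Z'");
    if (DrawClipPath(image,draw_info,"myclip",exception) != MagickFalse)
      ;  // image now carries a write pixel mask
*/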
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C l i p p i n g M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawClippingMask() draws the clip path and returns it as an image clipping
% mask.
%
% The format of the DrawClippingMask method is:
%
% Image *DrawClippingMask(Image *image,const DrawInfo *draw_info,
% const char *id,const char *clip_path,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o id: the clip path id.
%
% o clip_path: the clip path.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Image *DrawClippingMask(Image *image,const DrawInfo *draw_info,
const char *id,const char *clip_path,ExceptionInfo *exception)
{
DrawInfo
*clone_info;
Image
*clip_mask,
*separate_mask;
MagickStatusType
status;
/*
Draw a clip path.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (const DrawInfo *) NULL);
clip_mask=AcquireImage((const ImageInfo *) NULL,exception);
status=SetImageExtent(clip_mask,image->columns,image->rows,exception);
if (status == MagickFalse)
return(DestroyImage(clip_mask));
status=SetImageMask(clip_mask,WritePixelMask,(Image *) NULL,exception);
status=QueryColorCompliance("#0000",AllCompliance,
&clip_mask->background_color,exception);
clip_mask->background_color.alpha=(MagickRealType) TransparentAlpha;
clip_mask->background_color.alpha_trait=BlendPixelTrait;
status=SetImageBackgroundColor(clip_mask,exception);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin clip-path %s",
id);
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
(void) CloneString(&clone_info->primitive,clip_path);
status=QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill,
exception);
if (clone_info->clip_mask != (char *) NULL)
clone_info->clip_mask=DestroyString(clone_info->clip_mask);
status=QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke,
exception);
clone_info->stroke_width=0.0;
clone_info->alpha=OpaqueAlpha;
clone_info->clip_path=MagickTrue;
status=RenderMVGContent(clip_mask,clone_info,0,exception);
clone_info=DestroyDrawInfo(clone_info);
separate_mask=SeparateImage(clip_mask,AlphaChannel,exception);
if (separate_mask != (Image *) NULL)
{
clip_mask=DestroyImage(clip_mask);
clip_mask=separate_mask;
status=NegateImage(clip_mask,MagickFalse,exception);
if (status == MagickFalse)
clip_mask=DestroyImage(clip_mask);
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"end clip-path");
return(clip_mask);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C o m p o s i t e M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawCompositeMask() draws the mask path and returns it as an image mask.
%
% The format of the DrawCompositeMask method is:
%
% Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info,
% const char *id,const char *mask_path,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o id: the mask path id.
%
% o mask_path: the mask path.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info,
const char *id,const char *mask_path,ExceptionInfo *exception)
{
Image
*composite_mask,
*separate_mask;
DrawInfo
*clone_info;
MagickStatusType
status;
/*
Draw a mask path.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (const DrawInfo *) NULL);
composite_mask=AcquireImage((const ImageInfo *) NULL,exception);
status=SetImageExtent(composite_mask,image->columns,image->rows,exception);
if (status == MagickFalse)
return(DestroyImage(composite_mask));
status=SetImageMask(composite_mask,CompositePixelMask,(Image *) NULL,
exception);
status=QueryColorCompliance("#0000",AllCompliance,
&composite_mask->background_color,exception);
composite_mask->background_color.alpha=(MagickRealType) TransparentAlpha;
composite_mask->background_color.alpha_trait=BlendPixelTrait;
(void) SetImageBackgroundColor(composite_mask,exception);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin mask-path %s",
id);
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
(void) CloneString(&clone_info->primitive,mask_path);
status=QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill,
exception);
status=QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke,
exception);
clone_info->stroke_width=0.0;
clone_info->alpha=OpaqueAlpha;
status=RenderMVGContent(composite_mask,clone_info,0,exception);
clone_info=DestroyDrawInfo(clone_info);
separate_mask=SeparateImage(composite_mask,AlphaChannel,exception);
if (separate_mask != (Image *) NULL)
{
composite_mask=DestroyImage(composite_mask);
composite_mask=separate_mask;
status=NegateImage(composite_mask,MagickFalse,exception);
if (status == MagickFalse)
composite_mask=DestroyImage(composite_mask);
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"end mask-path");
return(composite_mask);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w D a s h P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawDashPolygon() draws a dashed polygon (line, rectangle, ellipse) on the
% image while respecting the dash offset and dash pattern attributes.
%
% The format of the DrawDashPolygon method is:
%
% MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
% const PrimitiveInfo *primitive_info,Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
const PrimitiveInfo *primitive_info,Image *image,ExceptionInfo *exception)
{
double
length,
maximum_length,
offset,
scale,
total_length;
DrawInfo
*clone_info;
MagickStatusType
status;
PrimitiveInfo
*dash_polygon;
register double
dx,
dy;
register ssize_t
i;
size_t
number_vertices;
ssize_t
j,
n;
assert(draw_info != (const DrawInfo *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-dash");
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
number_vertices=(size_t) i;
dash_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
(2UL*number_vertices+32UL),sizeof(*dash_polygon));
if (dash_polygon == (PrimitiveInfo *) NULL)
return(MagickFalse);
(void) memset(dash_polygon,0,(2UL*number_vertices+32UL)*
sizeof(*dash_polygon));
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
clone_info->miterlimit=0;
dash_polygon[0]=primitive_info[0];
scale=ExpandAffine(&draw_info->affine);
length=scale*draw_info->dash_pattern[0];
offset=fabs(draw_info->dash_offset) >= MagickEpsilon ?
scale*draw_info->dash_offset : 0.0;
j=1;
for (n=0; offset > 0.0; j=0)
{
if (draw_info->dash_pattern[n] <= 0.0)
break;
length=scale*(draw_info->dash_pattern[n]+(n == 0 ? -0.5 : 0.5));
if (offset > length)
{
offset-=length;
n++;
length=scale*draw_info->dash_pattern[n];
continue;
}
if (offset < length)
{
length-=offset;
offset=0.0;
break;
}
offset=0.0;
n++;
}
status=MagickTrue;
maximum_length=0.0;
total_length=0.0;
for (i=1; (i < (ssize_t) number_vertices) && (length >= 0.0); i++)
{
dx=primitive_info[i].point.x-primitive_info[i-1].point.x;
dy=primitive_info[i].point.y-primitive_info[i-1].point.y;
maximum_length=hypot(dx,dy);
if (maximum_length > MaxBezierCoordinates)
break;
if (fabs(length) < MagickEpsilon)
{
if (fabs(draw_info->dash_pattern[n]) >= MagickEpsilon)
n++;
if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon)
n=0;
length=scale*draw_info->dash_pattern[n];
}
for (total_length=0.0; (length >= 0.0) &&
  (maximum_length >= (total_length+length)); )
{
total_length+=length;
if ((n & 0x01) != 0)
{
dash_polygon[0]=primitive_info[0];
dash_polygon[0].point.x=(double) (primitive_info[i-1].point.x+dx*
total_length*PerceptibleReciprocal(maximum_length));
dash_polygon[0].point.y=(double) (primitive_info[i-1].point.y+dy*
total_length*PerceptibleReciprocal(maximum_length));
j=1;
}
else
{
if ((j+1) > (ssize_t) number_vertices)
break;
dash_polygon[j]=primitive_info[i-1];
dash_polygon[j].point.x=(double) (primitive_info[i-1].point.x+dx*
total_length*PerceptibleReciprocal(maximum_length));
dash_polygon[j].point.y=(double) (primitive_info[i-1].point.y+dy*
total_length*PerceptibleReciprocal(maximum_length));
dash_polygon[j].coordinates=1;
j++;
dash_polygon[0].coordinates=(size_t) j;
dash_polygon[j].primitive=UndefinedPrimitive;
status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception);
if (status == MagickFalse)
break;
}
if (fabs(draw_info->dash_pattern[n]) >= MagickEpsilon)
n++;
if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon)
n=0;
length=scale*draw_info->dash_pattern[n];
}
length-=(maximum_length-total_length);
if ((n & 0x01) != 0)
continue;
dash_polygon[j]=primitive_info[i];
dash_polygon[j].coordinates=1;
j++;
}
if ((status != MagickFalse) && (total_length < maximum_length) &&
((n & 0x01) == 0) && (j > 1))
{
dash_polygon[j]=primitive_info[i-1];
dash_polygon[j].point.x+=MagickEpsilon;
dash_polygon[j].point.y+=MagickEpsilon;
dash_polygon[j].coordinates=1;
j++;
dash_polygon[0].coordinates=(size_t) j;
dash_polygon[j].primitive=UndefinedPrimitive;
status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception);
}
dash_polygon=(PrimitiveInfo *) RelinquishMagickMemory(dash_polygon);
clone_info=DestroyDrawInfo(clone_info);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-dash");
return(status != 0 ? MagickTrue : MagickFalse);
}
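/*
  Dash-pattern semantics assumed by the loop above (sketch): dash_pattern
  is a zero-terminated array of alternating lengths -- even indices are
  drawn, odd indices are gaps -- scaled by the current affine, with
  dash_offset consumed before the first dash is emitted.  For example
  (hypothetical values):

    draw_info->dash_pattern=(double *) AcquireQuantumMemory(3,
      sizeof(*draw_info->dash_pattern));
    draw_info->dash_pattern[0]=4.0;  // on
    draw_info->dash_pattern[1]=2.0;  // off
    draw_info->dash_pattern[2]=0.0;  // terminator
    draw_info->dash_offset=1.0;      // start one unit into the first dash
*/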
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w G r a d i e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawGradientImage() draws a linear gradient on the image.
%
% The format of the DrawGradientImage method is:
%
% MagickBooleanType DrawGradientImage(Image *image,
% const DrawInfo *draw_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double GetStopColorOffset(const GradientInfo *gradient,
const ssize_t x,const ssize_t y)
{
switch (gradient->type)
{
case UndefinedGradient:
case LinearGradient:
{
double
gamma,
length,
offset,
scale;
PointInfo
p,
q;
const SegmentInfo
*gradient_vector;
gradient_vector=(&gradient->gradient_vector);
p.x=gradient_vector->x2-gradient_vector->x1;
p.y=gradient_vector->y2-gradient_vector->y1;
q.x=(double) x-gradient_vector->x1;
q.y=(double) y-gradient_vector->y1;
length=sqrt(q.x*q.x+q.y*q.y);
gamma=sqrt(p.x*p.x+p.y*p.y)*length;
gamma=PerceptibleReciprocal(gamma);
scale=p.x*q.x+p.y*q.y;
offset=gamma*scale*length;
return(offset);
}
case RadialGradient:
{
PointInfo
v;
if (gradient->spread == RepeatSpread)
{
v.x=(double) x-gradient->center.x;
v.y=(double) y-gradient->center.y;
return(sqrt(v.x*v.x+v.y*v.y));
}
v.x=(double) (((x-gradient->center.x)*cos(DegreesToRadians(
gradient->angle)))+((y-gradient->center.y)*sin(DegreesToRadians(
gradient->angle))))*PerceptibleReciprocal(gradient->radii.x);
v.y=(double) (((x-gradient->center.x)*sin(DegreesToRadians(
gradient->angle)))-((y-gradient->center.y)*cos(DegreesToRadians(
gradient->angle))))*PerceptibleReciprocal(gradient->radii.y);
return(sqrt(v.x*v.x+v.y*v.y));
}
}
return(0.0);
}
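/*
  The linear case above is the scalar projection of the pixel onto the
  gradient vector: with p=(x2-x1,y2-y1) and q=(x-x1,y-y1),

    offset = (p.q)/(|p||q|)*|q| = (p.q)/|p|,

  computed via PerceptibleReciprocal() so a degenerate vector yields 0;
  DrawGradientImage() divides by |p| once more to normalize the offset
  into [0,1] along the vector.
*/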
static int StopInfoCompare(const void *x,const void *y)
{
StopInfo
*stop_1,
*stop_2;
stop_1=(StopInfo *) x;
stop_2=(StopInfo *) y;
if (stop_1->offset > stop_2->offset)
return(1);
if (fabs(stop_1->offset-stop_2->offset) <= MagickEpsilon)
return(0);
return(-1);
}
MagickExport MagickBooleanType DrawGradientImage(Image *image,
const DrawInfo *draw_info,ExceptionInfo *exception)
{
CacheView
*image_view;
const GradientInfo
*gradient;
const SegmentInfo
*gradient_vector;
double
length;
MagickBooleanType
status;
PixelInfo
zero;
PointInfo
point;
RectangleInfo
bounding_box;
ssize_t
y;
/*
Draw linear or radial gradient on image.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (const DrawInfo *) NULL);
gradient=(&draw_info->gradient);
qsort(gradient->stops,gradient->number_stops,sizeof(StopInfo),
StopInfoCompare);
gradient_vector=(&gradient->gradient_vector);
point.x=gradient_vector->x2-gradient_vector->x1;
point.y=gradient_vector->y2-gradient_vector->y1;
length=sqrt(point.x*point.x+point.y*point.y);
bounding_box=gradient->bounding_box;
status=MagickTrue;
GetPixelInfo(image,&zero);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,bounding_box.height-bounding_box.y,1)
#endif
for (y=bounding_box.y; y < (ssize_t) bounding_box.height; y++)
{
PixelInfo
composite,
pixel;
double
alpha,
offset;
register Quantum
*magick_restrict q;
register ssize_t
i,
x;
ssize_t
j;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
pixel=zero;
composite=zero;
offset=GetStopColorOffset(gradient,0,y);
if (gradient->type != RadialGradient)
offset*=PerceptibleReciprocal(length);
for (x=bounding_box.x; x < (ssize_t) bounding_box.width; x++)
{
GetPixelInfoPixel(image,q,&pixel);
switch (gradient->spread)
{
case UndefinedSpread:
case PadSpread:
{
if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
(y != (ssize_t) ceil(gradient_vector->y1-0.5)))
{
offset=GetStopColorOffset(gradient,x,y);
if (gradient->type != RadialGradient)
offset*=PerceptibleReciprocal(length);
}
for (i=0; i < (ssize_t) gradient->number_stops; i++)
if (offset < gradient->stops[i].offset)
break;
if ((offset < 0.0) || (i == 0))
composite=gradient->stops[0].color;
else
if ((offset > 1.0) || (i == (ssize_t) gradient->number_stops))
composite=gradient->stops[gradient->number_stops-1].color;
else
{
j=i;
i--;
alpha=(offset-gradient->stops[i].offset)/
(gradient->stops[j].offset-gradient->stops[i].offset);
CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
&gradient->stops[j].color,alpha,&composite);
}
break;
}
case ReflectSpread:
{
if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
(y != (ssize_t) ceil(gradient_vector->y1-0.5)))
{
offset=GetStopColorOffset(gradient,x,y);
if (gradient->type != RadialGradient)
offset*=PerceptibleReciprocal(length);
}
if (offset < 0.0)
offset=(-offset);
if ((ssize_t) fmod(offset,2.0) == 0)
offset=fmod(offset,1.0);
else
offset=1.0-fmod(offset,1.0);
for (i=0; i < (ssize_t) gradient->number_stops; i++)
if (offset < gradient->stops[i].offset)
break;
if (i == 0)
composite=gradient->stops[0].color;
else
if (i == (ssize_t) gradient->number_stops)
composite=gradient->stops[gradient->number_stops-1].color;
else
{
j=i;
i--;
alpha=(offset-gradient->stops[i].offset)/
(gradient->stops[j].offset-gradient->stops[i].offset);
CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
&gradient->stops[j].color,alpha,&composite);
}
break;
}
case RepeatSpread:
{
MagickBooleanType
antialias;
double
repeat;
antialias=MagickFalse;
repeat=0.0;
if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
(y != (ssize_t) ceil(gradient_vector->y1-0.5)))
{
offset=GetStopColorOffset(gradient,x,y);
if (gradient->type == LinearGradient)
{
repeat=fmod(offset,length);
if (repeat < 0.0)
repeat=length-fmod(-repeat,length);
else
repeat=fmod(offset,length);
antialias=(repeat < length) && ((repeat+1.0) > length) ?
MagickTrue : MagickFalse;
offset=PerceptibleReciprocal(length)*repeat;
}
else
{
repeat=fmod(offset,gradient->radius);
if (repeat < 0.0)
repeat=gradient->radius-fmod(-repeat,gradient->radius);
else
repeat=fmod(offset,gradient->radius);
antialias=repeat+1.0 > gradient->radius ? MagickTrue :
MagickFalse;
offset=repeat/gradient->radius;
}
}
for (i=0; i < (ssize_t) gradient->number_stops; i++)
if (offset < gradient->stops[i].offset)
break;
if (i == 0)
composite=gradient->stops[0].color;
else
if (i == (ssize_t) gradient->number_stops)
composite=gradient->stops[gradient->number_stops-1].color;
else
{
j=i;
i--;
alpha=(offset-gradient->stops[i].offset)/
(gradient->stops[j].offset-gradient->stops[i].offset);
if (antialias != MagickFalse)
{
if (gradient->type == LinearGradient)
alpha=length-repeat;
else
alpha=gradient->radius-repeat;
i=0;
j=(ssize_t) gradient->number_stops-1L;
}
CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
&gradient->stops[j].color,alpha,&composite);
}
break;
}
}
CompositePixelInfoOver(&composite,composite.alpha,&pixel,pixel.alpha,
&pixel);
SetPixelViaPixelInfo(image,&pixel,q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
return(status);
}
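/*
  Usage sketch (illustrative; only the GradientInfo fields referenced
  above are set): a two-stop linear gradient across the image.  Stops may
  be supplied unordered since DrawGradientImage() sorts them on entry.

    DrawInfo
      *clone_info;

    clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
    clone_info->gradient.type=LinearGradient;
    clone_info->gradient.bounding_box.width=image->columns;
    clone_info->gradient.bounding_box.height=image->rows;
    clone_info->gradient.gradient_vector.x2=(double) image->columns-1.0;
    clone_info->gradient.number_stops=2;
    clone_info->gradient.stops=(StopInfo *) AcquireQuantumMemory(2,
      sizeof(*clone_info->gradient.stops));
    clone_info->gradient.stops[0].offset=0.0;  // set .color on each stop
    clone_info->gradient.stops[1].offset=1.0;  // before use
    (void) DrawGradientImage(image,clone_info,exception);
    clone_info=DestroyDrawInfo(clone_info);
*/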
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawImage() draws a graphic primitive on your image. The primitive
% may be represented as a string or filename. If the filename is preceded
% by an "at" sign (@), the contents of the file are drawn on the image. You
% can affect how text is drawn by setting one or more members of the draw
% info structure.
%
% The format of the DrawImage method is:
%
% MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType CheckPrimitiveExtent(MVGInfo *mvg_info,
const size_t pad)
{
double
extent;
size_t
quantum;
/*
Check if there is enough storage for the drawing primitives.
*/
extent=(double) mvg_info->offset+pad+PrimitiveExtentPad;
quantum=sizeof(**mvg_info->primitive_info);
if (((extent*quantum) < (double) SSIZE_MAX) &&
((extent*quantum) < (double) GetMaxMemoryRequest()))
{
if (extent <= (double) *mvg_info->extent)
return(MagickTrue);
*mvg_info->primitive_info=(PrimitiveInfo *) ResizeQuantumMemory(
*mvg_info->primitive_info,(size_t) extent,quantum);
if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL)
{
register ssize_t
i;
*mvg_info->extent=(size_t) extent;
for (i=mvg_info->offset+1; i < (ssize_t) extent; i++)
(*mvg_info->primitive_info)[i].primitive=UndefinedPrimitive;
return(MagickTrue);
}
}
/*
Reallocation failed, allocate a primitive to facilitate unwinding.
*/
(void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL)
*mvg_info->primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(
*mvg_info->primitive_info);
*mvg_info->primitive_info=(PrimitiveInfo *) AcquireCriticalMemory(
PrimitiveExtentPad*quantum);
(void) memset(*mvg_info->primitive_info,0,PrimitiveExtentPad*quantum);
*mvg_info->extent=1;
return(MagickFalse);
}
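/*
  Callers reserve space before appending primitives, e.g. (sketch):

    if (CheckPrimitiveExtent(&mvg_info,(size_t) coordinates) == MagickFalse)
      break;  // allocation failed; primitive_info was reset for unwinding
*/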
MagickExport int MVGMacroCompare(const void *target,const void *source)
{
const char
*p,
*q;
p=(const char *) target;
q=(const char *) source;
return(strcmp(p,q));
}
static SplayTreeInfo *GetMVGMacros(const char *primitive)
{
char
*macro,
*token;
const char
*q;
size_t
extent;
SplayTreeInfo
*macros;
/*
Scan graphic primitives for definitions and classes.
*/
if (primitive == (const char *) NULL)
return((SplayTreeInfo *) NULL);
macros=NewSplayTree(MVGMacroCompare,RelinquishMagickMemory,
RelinquishMagickMemory);
macro=AcquireString(primitive);
token=AcquireString(primitive);
extent=strlen(token)+MagickPathExtent;
for (q=primitive; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (*token == '\0')
break;
if (LocaleCompare("push",token) == 0)
{
register const char
*end,
*start;
(void) GetNextToken(q,&q,extent,token);
if (*q == '"')
{
char
name[MagickPathExtent];
const char
*p;
ssize_t
n;
/*
Named macro (e.g. push graphic-context "wheel").
*/
(void) GetNextToken(q,&q,extent,token);
start=q;
end=q;
(void) CopyMagickString(name,token,MagickPathExtent);
n=1;
for (p=q; *p != '\0'; )
{
(void) GetNextToken(p,&p,extent,token);
if (*token == '\0')
break;
if (LocaleCompare(token,"pop") == 0)
{
end=p-strlen(token)-1;
n--;
}
if (LocaleCompare(token,"push") == 0)
n++;
if ((n == 0) && (end > start))
{
/*
Extract macro.
*/
(void) GetNextToken(p,&p,extent,token);
(void) CopyMagickString(macro,start,(size_t) (end-start));
(void) AddValueToSplayTree(macros,ConstantString(name),
ConstantString(macro));
break;
}
}
}
}
}
token=DestroyString(token);
macro=DestroyString(macro);
return(macros);
}
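/*
  Example of the MVG that GetMVGMacros() recognizes (illustrative): the
  text between a quoted push and its matching pop is stored as a macro
  under the quoted name, here "wheel":

    push defs
    push graphic-context "wheel"
    fill red
    circle 50,50 50,25
    pop graphic-context
    pop defs
*/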
static inline MagickBooleanType IsPoint(const char *point)
{
char
*p;
double
value;
value=StringToDouble(point,&p);
return((fabs(value) < MagickEpsilon) && (p == point) ? MagickFalse :
MagickTrue);
}
static inline MagickBooleanType TracePoint(PrimitiveInfo *primitive_info,
const PointInfo point)
{
primitive_info->coordinates=1;
primitive_info->closed_subpath=MagickFalse;
primitive_info->point=point;
return(MagickTrue);
}
static MagickBooleanType RenderMVGContent(Image *image,
const DrawInfo *draw_info,const size_t depth,ExceptionInfo *exception)
{
#define RenderImageTag "Render/Image"
AffineMatrix
affine,
current;
char
keyword[MagickPathExtent],
geometry[MagickPathExtent],
*next_token,
pattern[MagickPathExtent],
*primitive,
*token;
const char
*q;
double
angle,
coordinates,
cursor,
factor,
primitive_extent;
DrawInfo
*clone_info,
**graphic_context;
MagickBooleanType
proceed;
MagickStatusType
status;
MVGInfo
mvg_info;
PointInfo
point;
PrimitiveInfo
*primitive_info;
PrimitiveType
primitive_type;
register const char
*p;
register ssize_t
i,
x;
SegmentInfo
bounds;
size_t
extent,
number_points,
number_stops;
SplayTreeInfo
*macros;
ssize_t
defsDepth,
j,
k,
n,
symbolDepth;
StopInfo
*stops;
TypeMetric
metrics;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (DrawInfo *) NULL);
assert(draw_info->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
if (depth > MagickMaxRecursionDepth)
ThrowBinaryException(DrawError,"VectorGraphicsNestedTooDeeply",
image->filename);
if ((draw_info->primitive == (char *) NULL) ||
(*draw_info->primitive == '\0'))
return(MagickFalse);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"begin draw-image");
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
if (image->alpha_trait == UndefinedPixelTrait)
{
status=SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
if (status == MagickFalse)
return(MagickFalse);
}
if ((*draw_info->primitive == '@') && (strlen(draw_info->primitive) > 1) &&
(*(draw_info->primitive+1) != '-') && (depth == 0))
primitive=FileToString(draw_info->primitive+1,~0UL,exception);
else
primitive=AcquireString(draw_info->primitive);
if (primitive == (char *) NULL)
return(MagickFalse);
primitive_extent=(double) strlen(primitive);
(void) SetImageArtifact(image,"mvg:vector-graphics",primitive);
n=0;
number_stops=0;
stops=(StopInfo *) NULL;
/*
Allocate primitive info memory.
*/
graphic_context=(DrawInfo **) AcquireMagickMemory(sizeof(*graphic_context));
if (graphic_context == (DrawInfo **) NULL)
{
primitive=DestroyString(primitive);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
number_points=PrimitiveExtentPad;
primitive_info=(PrimitiveInfo *) AcquireQuantumMemory((size_t) number_points,
sizeof(*primitive_info));
if (primitive_info == (PrimitiveInfo *) NULL)
{
primitive=DestroyString(primitive);
for ( ; n >= 0; n--)
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
(void) memset(primitive_info,0,(size_t) number_points*
sizeof(*primitive_info));
(void) memset(&mvg_info,0,sizeof(mvg_info));
mvg_info.primitive_info=(&primitive_info);
mvg_info.extent=(&number_points);
mvg_info.exception=exception;
graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,draw_info);
graphic_context[n]->viewbox=image->page;
if ((image->page.width == 0) || (image->page.height == 0))
{
graphic_context[n]->viewbox.width=image->columns;
graphic_context[n]->viewbox.height=image->rows;
}
token=AcquireString(primitive);
extent=strlen(token)+MagickPathExtent;
defsDepth=0;
symbolDepth=0;
cursor=0.0;
macros=GetMVGMacros(primitive);
status=MagickTrue;
for (q=primitive; *q != '\0'; )
{
/*
Interpret graphic primitive.
*/
if (GetNextToken(q,&q,MagickPathExtent,keyword) < 1)
break;
if (*keyword == '\0')
break;
if (*keyword == '#')
{
/*
Comment.
*/
while ((*q != '\n') && (*q != '\0'))
q++;
continue;
}
p=q-strlen(keyword)-1;
primitive_type=UndefinedPrimitive;
current=graphic_context[n]->affine;
GetAffineMatrix(&affine);
*token='\0';
switch (*keyword)
{
case ';':
break;
case 'a':
case 'A':
{
if (LocaleCompare("affine",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
affine.sx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.rx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.ry=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.sy=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.tx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.ty=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("alpha",keyword) == 0)
{
primitive_type=AlphaPrimitive;
break;
}
if (LocaleCompare("arc",keyword) == 0)
{
primitive_type=ArcPrimitive;
break;
}
status=MagickFalse;
break;
}
case 'b':
case 'B':
{
if (LocaleCompare("bezier",keyword) == 0)
{
primitive_type=BezierPrimitive;
break;
}
if (LocaleCompare("border-color",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->border_color,exception);
break;
}
status=MagickFalse;
break;
}
case 'c':
case 'C':
{
if (LocaleCompare("class",keyword) == 0)
{
const char
*mvg_class;
(void) GetNextToken(q,&q,extent,token);
if (*token == '\0')
{
status=MagickFalse;
break;
}
mvg_class=(const char *) GetValueFromSplayTree(macros,token);
if (mvg_class != (const char *) NULL)
{
char
*elements;
ssize_t
offset;
/*
Inject class elements in stream.
*/
offset=(ssize_t) (p-primitive);
elements=AcquireString(primitive);
elements[offset]='\0';
(void) ConcatenateString(&elements,mvg_class);
(void) ConcatenateString(&elements,"\n");
(void) ConcatenateString(&elements,q);
primitive=DestroyString(primitive);
primitive=elements;
q=primitive+offset;
}
break;
}
if (LocaleCompare("clip-path",keyword) == 0)
{
const char
*clip_path;
/*
Take a node from within the MVG document, and duplicate it here.
*/
(void) GetNextToken(q,&q,extent,token);
if (*token == '\0')
{
status=MagickFalse;
break;
}
(void) CloneString(&graphic_context[n]->clip_mask,token);
clip_path=(const char *) GetValueFromSplayTree(macros,token);
if (clip_path != (const char *) NULL)
{
if (graphic_context[n]->clipping_mask != (Image *) NULL)
graphic_context[n]->clipping_mask=
DestroyImage(graphic_context[n]->clipping_mask);
graphic_context[n]->clipping_mask=DrawClippingMask(image,
graphic_context[n],token,clip_path,exception);
if (graphic_context[n]->compliance != SVGCompliance)
{
clip_path=(const char *) GetValueFromSplayTree(macros,
graphic_context[n]->clip_mask);
if (clip_path != (const char *) NULL)
(void) SetImageArtifact(image,
graphic_context[n]->clip_mask,clip_path);
status&=DrawClipPath(image,graphic_context[n],
graphic_context[n]->clip_mask,exception);
}
}
break;
}
if (LocaleCompare("clip-rule",keyword) == 0)
{
ssize_t
fill_rule;
(void) GetNextToken(q,&q,extent,token);
fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse,
token);
if (fill_rule == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->fill_rule=(FillRule) fill_rule;
break;
}
if (LocaleCompare("clip-units",keyword) == 0)
{
ssize_t
clip_units;
(void) GetNextToken(q,&q,extent,token);
clip_units=ParseCommandOption(MagickClipPathOptions,MagickFalse,
token);
if (clip_units == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->clip_units=(ClipPathUnits) clip_units;
if (clip_units == ObjectBoundingBox)
{
GetAffineMatrix(&current);
affine.sx=draw_info->bounds.x2;
affine.sy=draw_info->bounds.y2;
affine.tx=draw_info->bounds.x1;
affine.ty=draw_info->bounds.y1;
break;
}
break;
}
if (LocaleCompare("circle",keyword) == 0)
{
primitive_type=CirclePrimitive;
break;
}
if (LocaleCompare("color",keyword) == 0)
{
primitive_type=ColorPrimitive;
break;
}
if (LocaleCompare("compliance",keyword) == 0)
{
/*
MVG compliance associates a clipping mask with an image; SVG
compliance associates a clipping mask with a graphics context.
*/
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->compliance=(ComplianceType) ParseCommandOption(
MagickComplianceOptions,MagickFalse,token);
break;
}
status=MagickFalse;
break;
}
case 'd':
case 'D':
{
if (LocaleCompare("decorate",keyword) == 0)
{
ssize_t
decorate;
(void) GetNextToken(q,&q,extent,token);
decorate=ParseCommandOption(MagickDecorateOptions,MagickFalse,
token);
if (decorate == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->decorate=(DecorationType) decorate;
break;
}
if (LocaleCompare("density",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->density,token);
break;
}
if (LocaleCompare("direction",keyword) == 0)
{
ssize_t
direction;
(void) GetNextToken(q,&q,extent,token);
direction=ParseCommandOption(MagickDirectionOptions,MagickFalse,
token);
if (direction == -1)
status=MagickFalse;
else
graphic_context[n]->direction=(DirectionType) direction;
break;
}
status=MagickFalse;
break;
}
case 'e':
case 'E':
{
if (LocaleCompare("ellipse",keyword) == 0)
{
primitive_type=EllipsePrimitive;
break;
}
if (LocaleCompare("encoding",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->encoding,token);
break;
}
status=MagickFalse;
break;
}
case 'f':
case 'F':
{
if (LocaleCompare("fill",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
(void) FormatLocaleString(pattern,MagickPathExtent,"%s",token);
if (GetImageArtifact(image,pattern) != (const char *) NULL)
(void) DrawPatternPath(image,draw_info,token,
&graphic_context[n]->fill_pattern,exception);
else
{
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->fill,exception);
if (graphic_context[n]->fill_alpha != OpaqueAlpha)
graphic_context[n]->fill.alpha=graphic_context[n]->fill_alpha;
}
break;
}
if (LocaleCompare("fill-opacity",keyword) == 0)
{
double
opacity;
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
opacity=MagickMin(MagickMax(factor*
StringToDouble(token,&next_token),0.0),1.0);
if (token == next_token)
ThrowPointExpectedException(token,exception);
graphic_context[n]->fill_alpha*=opacity;
if (graphic_context[n]->fill.alpha != TransparentAlpha)
graphic_context[n]->fill.alpha=graphic_context[n]->fill_alpha;
else
graphic_context[n]->fill.alpha=(MagickRealType)
ClampToQuantum(QuantumRange*(1.0-opacity));
break;
}
if (LocaleCompare("fill-rule",keyword) == 0)
{
ssize_t
fill_rule;
(void) GetNextToken(q,&q,extent,token);
fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse,
token);
if (fill_rule == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->fill_rule=(FillRule) fill_rule;
break;
}
if (LocaleCompare("font",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->font,token);
if (LocaleCompare("none",token) == 0)
graphic_context[n]->font=(char *) RelinquishMagickMemory(
graphic_context[n]->font);
break;
}
if (LocaleCompare("font-family",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->family,token);
break;
}
if (LocaleCompare("font-size",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->pointsize=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("font-stretch",keyword) == 0)
{
ssize_t
stretch;
(void) GetNextToken(q,&q,extent,token);
stretch=ParseCommandOption(MagickStretchOptions,MagickFalse,token);
if (stretch == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->stretch=(StretchType) stretch;
break;
}
if (LocaleCompare("font-style",keyword) == 0)
{
ssize_t
style;
(void) GetNextToken(q,&q,extent,token);
style=ParseCommandOption(MagickStyleOptions,MagickFalse,token);
if (style == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->style=(StyleType) style;
break;
}
if (LocaleCompare("font-weight",keyword) == 0)
{
ssize_t
weight;
(void) GetNextToken(q,&q,extent,token);
weight=ParseCommandOption(MagickWeightOptions,MagickFalse,token);
if (weight == -1)
weight=(ssize_t) StringToUnsignedLong(token);
graphic_context[n]->weight=(size_t) weight;
break;
}
status=MagickFalse;
break;
}
case 'g':
case 'G':
{
if (LocaleCompare("gradient-units",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("gravity",keyword) == 0)
{
ssize_t
gravity;
(void) GetNextToken(q,&q,extent,token);
gravity=ParseCommandOption(MagickGravityOptions,MagickFalse,token);
if (gravity == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->gravity=(GravityType) gravity;
break;
}
status=MagickFalse;
break;
}
case 'i':
case 'I':
{
if (LocaleCompare("image",keyword) == 0)
{
ssize_t
compose;
primitive_type=ImagePrimitive;
(void) GetNextToken(q,&q,extent,token);
compose=ParseCommandOption(MagickComposeOptions,MagickFalse,token);
if (compose == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->compose=(CompositeOperator) compose;
break;
}
if (LocaleCompare("interline-spacing",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->interline_spacing=StringToDouble(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("interword-spacing",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->interword_spacing=StringToDouble(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 'k':
case 'K':
{
if (LocaleCompare("kerning",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->kerning=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 'l':
case 'L':
{
if (LocaleCompare("letter-spacing",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]);
clone_info->text=AcquireString(" ");
status&=GetTypeMetrics(image,clone_info,&metrics,exception);
graphic_context[n]->kerning=metrics.width*
StringToDouble(token,&next_token);
clone_info=DestroyDrawInfo(clone_info);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("line",keyword) == 0)
{
primitive_type=LinePrimitive;
break;
}
status=MagickFalse;
break;
}
case 'm':
case 'M':
{
if (LocaleCompare("mask",keyword) == 0)
{
const char
*mask_path;
/*
Take a node from within the MVG document, and duplicate it here.
*/
(void) GetNextToken(q,&q,extent,token);
mask_path=(const char *) GetValueFromSplayTree(macros,token);
if (mask_path != (const char *) NULL)
{
if (graphic_context[n]->composite_mask != (Image *) NULL)
graphic_context[n]->composite_mask=
DestroyImage(graphic_context[n]->composite_mask);
graphic_context[n]->composite_mask=DrawCompositeMask(image,
graphic_context[n],token,mask_path,exception);
if (graphic_context[n]->compliance != SVGCompliance)
status=SetImageMask(image,CompositePixelMask,
graphic_context[n]->composite_mask,exception);
}
break;
}
break;
}
case 'o':
case 'O':
{
if (LocaleCompare("offset",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("opacity",keyword) == 0)
{
double
opacity;
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
opacity=MagickMin(MagickMax(factor*
StringToDouble(token,&next_token),0.0),1.0);
if (token == next_token)
ThrowPointExpectedException(token,exception);
graphic_context[n]->fill_alpha*=opacity;
graphic_context[n]->stroke_alpha*=opacity;
break;
}
status=MagickFalse;
break;
}
case 'p':
case 'P':
{
if (LocaleCompare("path",keyword) == 0)
{
primitive_type=PathPrimitive;
break;
}
if (LocaleCompare("point",keyword) == 0)
{
primitive_type=PointPrimitive;
break;
}
if (LocaleCompare("polyline",keyword) == 0)
{
primitive_type=PolylinePrimitive;
break;
}
if (LocaleCompare("polygon",keyword) == 0)
{
primitive_type=PolygonPrimitive;
break;
}
if (LocaleCompare("pop",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (LocaleCompare("class",token) == 0)
break;
if (LocaleCompare("clip-path",token) == 0)
break;
if (LocaleCompare("defs",token) == 0)
{
defsDepth--;
graphic_context[n]->render=defsDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
if (LocaleCompare("gradient",token) == 0)
break;
if (LocaleCompare("graphic-context",token) == 0)
{
if (n <= 0)
{
(void) ThrowMagickException(exception,GetMagickModule(),
DrawError,"UnbalancedGraphicContextPushPop","`%s'",token);
status=MagickFalse;
n=0;
break;
}
if ((graphic_context[n]->clip_mask != (char *) NULL) &&
(graphic_context[n]->compliance != SVGCompliance))
if (LocaleCompare(graphic_context[n]->clip_mask,
graphic_context[n-1]->clip_mask) != 0)
status=SetImageMask(image,WritePixelMask,(Image *) NULL,
exception);
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
n--;
break;
}
if (LocaleCompare("mask",token) == 0)
break;
if (LocaleCompare("pattern",token) == 0)
break;
if (LocaleCompare("symbol",token) == 0)
{
symbolDepth--;
graphic_context[n]->render=symbolDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
status=MagickFalse;
break;
}
if (LocaleCompare("push",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (LocaleCompare("class",token) == 0)
{
/*
Class context: skip ahead to the matching 'pop class' token.
*/
for (p=q; *q != '\0'; )
{
(void) GetNextToken(q,&q,extent,token);
if (LocaleCompare(token,"pop") != 0)
continue;
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"class") != 0)
continue;
break;
}
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("clip-path",token) == 0)
{
(void) GetNextToken(q,&q,extent,token);
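/*
Skip ahead to the matching 'pop clip-path'; the clip-path body itself
is resolved later through the macros splay-tree.
*/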
for (p=q; *q != '\0'; )
{
(void) GetNextToken(q,&q,extent,token);
if (LocaleCompare(token,"pop") != 0)
continue;
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"clip-path") != 0)
continue;
break;
}
if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p))
{
status=MagickFalse;
break;
}
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("defs",token) == 0)
{
defsDepth++;
graphic_context[n]->render=defsDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
if (LocaleCompare("gradient",token) == 0)
{
char
key[2*MagickPathExtent],
name[MagickPathExtent],
type[MagickPathExtent];
SegmentInfo
segment;
(void) GetNextToken(q,&q,extent,token);
(void) CopyMagickString(name,token,MagickPathExtent);
(void) GetNextToken(q,&q,extent,token);
(void) CopyMagickString(type,token,MagickPathExtent);
(void) GetNextToken(q,&q,extent,token);
segment.x1=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
segment.y1=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
segment.x2=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
segment.y2=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (LocaleCompare(type,"radial") == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
}
for (p=q; *q != '\0'; )
{
(void) GetNextToken(q,&q,extent,token);
if (LocaleCompare(token,"pop") != 0)
continue;
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"gradient") != 0)
continue;
break;
}
if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p))
{
status=MagickFalse;
break;
}
(void) CopyMagickString(token,p,(size_t) (q-p-4+1));
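/*
Map the gradient segment through the current affine transform and
record the gradient body, type, and geometry as image artifacts keyed
by the gradient name.
*/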
bounds.x1=graphic_context[n]->affine.sx*segment.x1+
graphic_context[n]->affine.ry*segment.y1+
graphic_context[n]->affine.tx;
bounds.y1=graphic_context[n]->affine.rx*segment.x1+
graphic_context[n]->affine.sy*segment.y1+
graphic_context[n]->affine.ty;
bounds.x2=graphic_context[n]->affine.sx*segment.x2+
graphic_context[n]->affine.ry*segment.y2+
graphic_context[n]->affine.tx;
bounds.y2=graphic_context[n]->affine.rx*segment.x2+
graphic_context[n]->affine.sy*segment.y2+
graphic_context[n]->affine.ty;
(void) FormatLocaleString(key,MagickPathExtent,"%s",name);
(void) SetImageArtifact(image,key,token);
(void) FormatLocaleString(key,MagickPathExtent,"%s-type",name);
(void) SetImageArtifact(image,key,type);
(void) FormatLocaleString(key,MagickPathExtent,"%s-geometry",
name);
(void) FormatLocaleString(geometry,MagickPathExtent,
"%gx%g%+.15g%+.15g",
MagickMax(fabs(bounds.x2-bounds.x1+1.0),1.0),
MagickMax(fabs(bounds.y2-bounds.y1+1.0),1.0),
bounds.x1,bounds.y1);
(void) SetImageArtifact(image,key,geometry);
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("graphic-context",token) == 0)
{
n++;
graphic_context=(DrawInfo **) ResizeQuantumMemory(
graphic_context,(size_t) (n+1),sizeof(*graphic_context));
if (graphic_context == (DrawInfo **) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,
graphic_context[n-1]);
if (*q == '"')
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("mask",token) == 0)
{
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("pattern",token) == 0)
{
char
key[2*MagickPathExtent],
name[MagickPathExtent];
RectangleInfo
bounds;
(void) GetNextToken(q,&q,extent,token);
(void) CopyMagickString(name,token,MagickPathExtent);
(void) GetNextToken(q,&q,extent,token);
bounds.x=(ssize_t) ceil(StringToDouble(token,&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
bounds.y=(ssize_t) ceil(StringToDouble(token,&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
bounds.width=(size_t) floor(StringToDouble(token,&next_token)+
0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
bounds.height=(size_t) floor(StringToDouble(token,&next_token)+
0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
for (p=q; *q != '\0'; )
{
(void) GetNextToken(q,&q,extent,token);
if (LocaleCompare(token,"pop") != 0)
continue;
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"pattern") != 0)
continue;
break;
}
if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p))
{
status=MagickFalse;
break;
}
(void) CopyMagickString(token,p,(size_t) (q-p-4+1));
(void) FormatLocaleString(key,MagickPathExtent,"%s",name);
(void) SetImageArtifact(image,key,token);
(void) FormatLocaleString(key,MagickPathExtent,"%s-geometry",
name);
(void) FormatLocaleString(geometry,MagickPathExtent,
"%.20gx%.20g%+.20g%+.20g",(double) bounds.width,(double)
bounds.height,(double) bounds.x,(double) bounds.y);
(void) SetImageArtifact(image,key,geometry);
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("symbol",token) == 0)
{
symbolDepth++;
graphic_context[n]->render=symbolDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
status=MagickFalse;
break;
}
status=MagickFalse;
break;
}
case 'r':
case 'R':
{
if (LocaleCompare("rectangle",keyword) == 0)
{
primitive_type=RectanglePrimitive;
break;
}
if (LocaleCompare("rotate",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
angle=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
affine.sx=cos(DegreesToRadians(fmod((double) angle,360.0)));
affine.rx=sin(DegreesToRadians(fmod((double) angle,360.0)));
affine.ry=(-sin(DegreesToRadians(fmod((double) angle,360.0))));
affine.sy=cos(DegreesToRadians(fmod((double) angle,360.0)));
break;
}
if (LocaleCompare("roundRectangle",keyword) == 0)
{
primitive_type=RoundRectanglePrimitive;
break;
}
status=MagickFalse;
break;
}
case 's':
case 'S':
{
if (LocaleCompare("scale",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
affine.sx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.sy=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("skewX",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
angle=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
affine.ry=sin(DegreesToRadians(angle));
break;
}
if (LocaleCompare("skewY",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
angle=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
affine.rx=(-tan(DegreesToRadians(angle)/2.0));
break;
}
if (LocaleCompare("stop-color",keyword) == 0)
{
PixelInfo
stop_color;
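/*
Grow the gradient stop list: the first stop allocates room for two
entries; later stops resize the array as needed.
*/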
number_stops++;
if (number_stops == 1)
stops=(StopInfo *) AcquireQuantumMemory(2,sizeof(*stops));
else
if (number_stops > 2)
stops=(StopInfo *) ResizeQuantumMemory(stops,number_stops,
sizeof(*stops));
if (stops == (StopInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
(void) GetNextToken(q,&q,extent,token);
status&=QueryColorCompliance(token,AllCompliance,&stop_color,
exception);
stops[number_stops-1].color=stop_color;
(void) GetNextToken(q,&q,extent,token);
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
stops[number_stops-1].offset=factor*StringToDouble(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("stroke",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
(void) FormatLocaleString(pattern,MagickPathExtent,"%s",token);
if (GetImageArtifact(image,pattern) != (const char *) NULL)
(void) DrawPatternPath(image,draw_info,token,
&graphic_context[n]->stroke_pattern,exception);
else
{
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->stroke,exception);
if (graphic_context[n]->stroke_alpha != OpaqueAlpha)
graphic_context[n]->stroke.alpha=
graphic_context[n]->stroke_alpha;
}
break;
}
if (LocaleCompare("stroke-antialias",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->stroke_antialias=StringToLong(token) != 0 ?
MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("stroke-dasharray",keyword) == 0)
{
if (graphic_context[n]->dash_pattern != (double *) NULL)
graphic_context[n]->dash_pattern=(double *)
RelinquishMagickMemory(graphic_context[n]->dash_pattern);
if (IsPoint(q) != MagickFalse)
{
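/*
Two passes: first count the dash entries with a scratch cursor (r),
then re-parse from q into the freshly allocated dash_pattern array.
*/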
const char
*r;
r=q;
(void) GetNextToken(r,&r,extent,token);
if (*token == ',')
(void) GetNextToken(r,&r,extent,token);
for (x=0; IsPoint(token) != MagickFalse; x++)
{
(void) GetNextToken(r,&r,extent,token);
if (*token == ',')
(void) GetNextToken(r,&r,extent,token);
}
graphic_context[n]->dash_pattern=(double *)
AcquireQuantumMemory((size_t) (2*x+2),
sizeof(*graphic_context[n]->dash_pattern));
if (graphic_context[n]->dash_pattern == (double *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
status=MagickFalse;
break;
}
(void) memset(graphic_context[n]->dash_pattern,0,(size_t)
(2*x+2)*sizeof(*graphic_context[n]->dash_pattern));
for (j=0; j < x; j++)
{
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->dash_pattern[j]=StringToDouble(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (graphic_context[n]->dash_pattern[j] < 0.0)
status=MagickFalse;
}
if ((x & 0x01) != 0)
for ( ; j < (2*x); j++)
graphic_context[n]->dash_pattern[j]=
graphic_context[n]->dash_pattern[j-x];
graphic_context[n]->dash_pattern[j]=0.0;
break;
}
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("stroke-dashoffset",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->dash_offset=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("stroke-linecap",keyword) == 0)
{
ssize_t
linecap;
(void) GetNextToken(q,&q,extent,token);
linecap=ParseCommandOption(MagickLineCapOptions,MagickFalse,token);
if (linecap == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->linecap=(LineCap) linecap;
break;
}
if (LocaleCompare("stroke-linejoin",keyword) == 0)
{
ssize_t
linejoin;
(void) GetNextToken(q,&q,extent,token);
linejoin=ParseCommandOption(MagickLineJoinOptions,MagickFalse,
token);
if (linejoin == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->linejoin=(LineJoin) linejoin;
break;
}
if (LocaleCompare("stroke-miterlimit",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->miterlimit=StringToUnsignedLong(token);
break;
}
if (LocaleCompare("stroke-opacity",keyword) == 0)
{
double
opacity;
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
opacity=MagickMin(MagickMax(factor*
StringToDouble(token,&next_token),0.0),1.0);
if (token == next_token)
ThrowPointExpectedException(token,exception);
graphic_context[n]->stroke_alpha*=opacity;
if (graphic_context[n]->stroke.alpha != TransparentAlpha)
graphic_context[n]->stroke.alpha=graphic_context[n]->stroke_alpha;
else
graphic_context[n]->stroke.alpha=(MagickRealType)
ClampToQuantum(QuantumRange*(1.0-opacity));
break;
}
if (LocaleCompare("stroke-width",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
graphic_context[n]->stroke_width=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 't':
case 'T':
{
if (LocaleCompare("text",keyword) == 0)
{
primitive_type=TextPrimitive;
break;
}
if (LocaleCompare("text-align",keyword) == 0)
{
ssize_t
align;
(void) GetNextToken(q,&q,extent,token);
align=ParseCommandOption(MagickAlignOptions,MagickFalse,token);
if (align == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->align=(AlignType) align;
break;
}
if (LocaleCompare("text-anchor",keyword) == 0)
{
ssize_t
align;
(void) GetNextToken(q,&q,extent,token);
align=ParseCommandOption(MagickAlignOptions,MagickFalse,token);
if (align == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->align=(AlignType) align;
break;
}
if (LocaleCompare("text-antialias",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->text_antialias=StringToLong(token) != 0 ?
MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("text-undercolor",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->undercolor,exception);
break;
}
if (LocaleCompare("translate",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
affine.tx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.ty=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
cursor=0.0;
break;
}
status=MagickFalse;
break;
}
case 'u':
case 'U':
{
if (LocaleCompare("use",keyword) == 0)
{
const char
*use;
/*
Fetch the named macro captured from the MVG document and render
("use") it here.
*/
(void) GetNextToken(q,&q,extent,token);
use=(const char *) GetValueFromSplayTree(macros,token);
if (use != (const char *) NULL)
{
clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]);
(void) CloneString(&clone_info->primitive,use);
status=RenderMVGContent(image,clone_info,depth+1,exception);
clone_info=DestroyDrawInfo(clone_info);
}
break;
}
break;
}
case 'v':
case 'V':
{
if (LocaleCompare("viewbox",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.x=(ssize_t) ceil(StringToDouble(token,
&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.y=(ssize_t) ceil(StringToDouble(token,
&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.width=(size_t) floor(StringToDouble(
token,&next_token)+0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.height=(size_t) floor(StringToDouble(
token,&next_token)+0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 'w':
case 'W':
{
if (LocaleCompare("word-spacing",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->interword_spacing=StringToDouble(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
default:
{
status=MagickFalse;
break;
}
}
if (status == MagickFalse)
break;
if ((fabs(affine.sx-1.0) >= MagickEpsilon) ||
(fabs(affine.rx) >= MagickEpsilon) || (fabs(affine.ry) >= MagickEpsilon) ||
(fabs(affine.sy-1.0) >= MagickEpsilon) ||
(fabs(affine.tx) >= MagickEpsilon) || (fabs(affine.ty) >= MagickEpsilon))
{
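/*
Concatenate the newly parsed transform with the current transformation
matrix so the new transform is applied first: CTM' = current * affine.
*/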
graphic_context[n]->affine.sx=current.sx*affine.sx+current.ry*affine.rx;
graphic_context[n]->affine.rx=current.rx*affine.sx+current.sy*affine.rx;
graphic_context[n]->affine.ry=current.sx*affine.ry+current.ry*affine.sy;
graphic_context[n]->affine.sy=current.rx*affine.ry+current.sy*affine.sy;
graphic_context[n]->affine.tx=current.sx*affine.tx+current.ry*affine.ty+
current.tx;
graphic_context[n]->affine.ty=current.rx*affine.tx+current.sy*affine.ty+
current.ty;
}
if (primitive_type == UndefinedPrimitive)
{
if (*q == '\0')
{
if (number_stops > 1)
{
GradientType
type;
type=LinearGradient;
if (draw_info->gradient.type == RadialGradient)
type=RadialGradient;
(void) GradientImage(image,type,PadSpread,stops,number_stops,
exception);
}
if (number_stops > 0)
stops=(StopInfo *) RelinquishMagickMemory(stops);
}
if ((image->debug != MagickFalse) && (q > p))
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int)
(q-p-1),p);
continue;
}
/*
Parse the primitive attributes.
*/
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
if ((primitive_info[i].primitive == TextPrimitive) ||
(primitive_info[i].primitive == ImagePrimitive))
if (primitive_info[i].text != (char *) NULL)
primitive_info[i].text=DestroyString(primitive_info[i].text);
i=0;
mvg_info.offset=i;
j=0;
primitive_info[0].point.x=0.0;
primitive_info[0].point.y=0.0;
primitive_info[0].coordinates=0;
primitive_info[0].method=FloodfillMethod;
primitive_info[0].closed_subpath=MagickFalse;
for (x=0; *q != '\0'; x++)
{
/*
Define points.
*/
if (IsPoint(q) == MagickFalse)
break;
(void) GetNextToken(q,&q,extent,token);
point.x=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
point.y=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
primitive_info[i].primitive=primitive_type;
primitive_info[i].point=point;
primitive_info[i].coordinates=0;
primitive_info[i].method=FloodfillMethod;
primitive_info[i].closed_subpath=MagickFalse;
i++;
mvg_info.offset=i;
if (i < (ssize_t) number_points)
continue;
status&=CheckPrimitiveExtent(&mvg_info,number_points);
}
if (status == MagickFalse)
break;
if ((primitive_info[j].primitive == TextPrimitive) ||
(primitive_info[j].primitive == ImagePrimitive))
if (primitive_info[j].text != (char *) NULL)
primitive_info[j].text=DestroyString(primitive_info[j].text);
primitive_info[j].primitive=primitive_type;
primitive_info[j].coordinates=(size_t) x;
primitive_info[j].method=FloodfillMethod;
primitive_info[j].closed_subpath=MagickFalse;
/*
Compute the bounding box of the primitive; it is used below to
circumscribe arc-like primitives within a circle when estimating how
many points they need.
*/
bounds.x1=primitive_info[j].point.x;
bounds.y1=primitive_info[j].point.y;
bounds.x2=primitive_info[j].point.x;
bounds.y2=primitive_info[j].point.y;
for (k=1; k < (ssize_t) primitive_info[j].coordinates; k++)
{
point=primitive_info[j+k].point;
if (point.x < bounds.x1)
bounds.x1=point.x;
if (point.y < bounds.y1)
bounds.y1=point.y;
if (point.x > bounds.x2)
bounds.x2=point.x;
if (point.y > bounds.y2)
bounds.y2=point.y;
}
/*
Speculate how many points our primitive might consume so the
primitive_info buffer can be resized before tracing.
*/
coordinates=(double) primitive_info[j].coordinates;
switch (primitive_type)
{
case RectanglePrimitive:
{
coordinates*=5.0;
break;
}
case RoundRectanglePrimitive:
{
double
alpha,
beta,
radius;
alpha=bounds.x2-bounds.x1;
beta=bounds.y2-bounds.y1;
radius=hypot((double) alpha,(double) beta);
coordinates*=5.0;
coordinates+=2.0*((size_t) ceil((double) MagickPI*radius))+6.0*
BezierQuantum+360.0;
break;
}
case BezierPrimitive:
{
coordinates=(double) (BezierQuantum*primitive_info[j].coordinates);
if (primitive_info[j].coordinates > (107*BezierQuantum))
{
(void) ThrowMagickException(exception,GetMagickModule(),DrawError,
"TooManyBezierCoordinates","`%s'",token);
status=MagickFalse;
break;
}
break;
}
case PathPrimitive:
{
char
*s,
*t;
(void) GetNextToken(q,&q,extent,token);
coordinates=1.0;
t=token;
for (s=token; *s != '\0'; s=t)
{
double
value;
value=StringToDouble(s,&t);
(void) value;
if (s == t)
{
t++;
continue;
}
coordinates++;
}
for (s=token; *s != '\0'; s++)
if (strspn(s,"AaCcQqSsTt") != 0)
coordinates+=(20.0*BezierQuantum)+360.0;
break;
}
case CirclePrimitive:
case ArcPrimitive:
case EllipsePrimitive:
{
double
alpha,
beta,
radius;
alpha=bounds.x2-bounds.x1;
beta=bounds.y2-bounds.y1;
radius=hypot(alpha,beta);
coordinates=2.0*(ceil(MagickPI*radius))+6.0*BezierQuantum+360.0;
break;
}
default:
break;
}
if (coordinates > MaxBezierCoordinates)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"TooManyBezierCoordinates","`%s'",token);
status=MagickFalse;
}
if (status == MagickFalse)
break;
if (((size_t) (i+coordinates)) >= number_points)
{
/*
Resize based on speculative points required by primitive.
*/
number_points+=coordinates+1;
if (number_points < (size_t) coordinates)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
mvg_info.offset=i;
status&=CheckPrimitiveExtent(&mvg_info,number_points);
}
status&=CheckPrimitiveExtent(&mvg_info,PrimitiveExtentPad);
if (status == MagickFalse)
break;
mvg_info.offset=j;
switch (primitive_type)
{
case PointPrimitive:
default:
{
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
status&=TracePoint(primitive_info+j,primitive_info[j].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case LinePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
status&=TraceLine(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case RectanglePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
status&=TraceRectangle(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case RoundRectanglePrimitive:
{
if (primitive_info[j].coordinates != 3)
{
status=MagickFalse;
break;
}
if ((primitive_info[j+2].point.x < 0.0) ||
(primitive_info[j+2].point.y < 0.0))
{
status=MagickFalse;
break;
}
if ((primitive_info[j+1].point.x-primitive_info[j].point.x) < 0.0)
{
status=MagickFalse;
break;
}
if ((primitive_info[j+1].point.y-primitive_info[j].point.y) < 0.0)
{
status=MagickFalse;
break;
}
status&=TraceRoundRectangle(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case ArcPrimitive:
{
if (primitive_info[j].coordinates != 3)
{
primitive_type=UndefinedPrimitive;
break;
}
status&=TraceArc(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case EllipsePrimitive:
{
if (primitive_info[j].coordinates != 3)
{
status=MagickFalse;
break;
}
if ((primitive_info[j+1].point.x < 0.0) ||
(primitive_info[j+1].point.y < 0.0))
{
status=MagickFalse;
break;
}
status&=TraceEllipse(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case CirclePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
status&=TraceCircle(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case PolylinePrimitive:
{
if (primitive_info[j].coordinates < 1)
{
status=MagickFalse;
break;
}
break;
}
case PolygonPrimitive:
{
if (primitive_info[j].coordinates < 3)
{
status=MagickFalse;
break;
}
primitive_info[i]=primitive_info[j];
primitive_info[i].coordinates=0;
primitive_info[j].coordinates++;
primitive_info[j].closed_subpath=MagickTrue;
i++;
break;
}
case BezierPrimitive:
{
if (primitive_info[j].coordinates < 3)
{
status=MagickFalse;
break;
}
status&=TraceBezier(&mvg_info,primitive_info[j].coordinates);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case PathPrimitive:
{
coordinates=(double) TracePath(&mvg_info,token,exception);
if (coordinates == 0.0)
{
status=MagickFalse;
break;
}
i=(ssize_t) (j+coordinates);
break;
}
case AlphaPrimitive:
case ColorPrimitive:
{
ssize_t
method;
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
(void) GetNextToken(q,&q,extent,token);
method=ParseCommandOption(MagickMethodOptions,MagickFalse,token);
if (method == -1)
{
status=MagickFalse;
break;
}
primitive_info[j].method=(PaintMethod) method;
break;
}
case TextPrimitive:
{
char
geometry[MagickPathExtent];
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
if (*token != ',')
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&primitive_info[j].text,token);
/*
Compute the text cursor offset so consecutive text primitives at the
same point advance by the rendered width of the preceding text.
*/
clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]);
if ((fabs(mvg_info.point.x-primitive_info->point.x) < MagickEpsilon) &&
(fabs(mvg_info.point.y-primitive_info->point.y) < MagickEpsilon))
{
mvg_info.point=primitive_info->point;
primitive_info->point.x+=cursor;
}
else
{
mvg_info.point=primitive_info->point;
cursor=0.0;
}
(void) FormatLocaleString(geometry,MagickPathExtent,"%+f%+f",
primitive_info->point.x,primitive_info->point.y);
clone_info->render=MagickFalse;
clone_info->text=AcquireString(token);
status&=GetTypeMetrics(image,clone_info,&metrics,exception);
clone_info=DestroyDrawInfo(clone_info);
cursor+=metrics.width;
if (graphic_context[n]->compliance != SVGCompliance)
cursor=0.0;
break;
}
case ImagePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&primitive_info[j].text,token);
break;
}
}
mvg_info.offset=i;
if ((image->debug != MagickFalse) && (q > p))
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int) (q-p-1),
p);
if (status == MagickFalse)
break;
primitive_info[i].primitive=UndefinedPrimitive;
if (i == 0)
continue;
/*
Transform points.
*/
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
{
point=primitive_info[i].point;
primitive_info[i].point.x=graphic_context[n]->affine.sx*point.x+
graphic_context[n]->affine.ry*point.y+graphic_context[n]->affine.tx;
primitive_info[i].point.y=graphic_context[n]->affine.rx*point.x+
graphic_context[n]->affine.sy*point.y+graphic_context[n]->affine.ty;
point=primitive_info[i].point;
if (point.x < graphic_context[n]->bounds.x1)
graphic_context[n]->bounds.x1=point.x;
if (point.y < graphic_context[n]->bounds.y1)
graphic_context[n]->bounds.y1=point.y;
if (point.x > graphic_context[n]->bounds.x2)
graphic_context[n]->bounds.x2=point.x;
if (point.y > graphic_context[n]->bounds.y2)
graphic_context[n]->bounds.y2=point.y;
if (primitive_info[i].primitive == ImagePrimitive)
break;
if (i >= (ssize_t) number_points)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
}
if (graphic_context[n]->render != MagickFalse)
{
if ((n != 0) && (graphic_context[n]->compliance != SVGCompliance) &&
(graphic_context[n]->clip_mask != (char *) NULL) &&
(LocaleCompare(graphic_context[n]->clip_mask,
graphic_context[n-1]->clip_mask) != 0))
{
const char
*clip_path;
clip_path=(const char *) GetValueFromSplayTree(macros,
graphic_context[n]->clip_mask);
if (clip_path != (const char *) NULL)
(void) SetImageArtifact(image,graphic_context[n]->clip_mask,
clip_path);
status&=DrawClipPath(image,graphic_context[n],
graphic_context[n]->clip_mask,exception);
}
status&=DrawPrimitive(image,graphic_context[n],primitive_info,
exception);
}
proceed=SetImageProgress(image,RenderImageTag,q-primitive,(MagickSizeType)
primitive_extent);
if (proceed == MagickFalse)
break;
if (status == 0)
break;
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"end draw-image");
/*
Relinquish resources.
*/
macros=DestroySplayTree(macros);
token=DestroyString(token);
if (primitive_info != (PrimitiveInfo *) NULL)
{
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
if ((primitive_info[i].primitive == TextPrimitive) ||
(primitive_info[i].primitive == ImagePrimitive))
if (primitive_info[i].text != (char *) NULL)
primitive_info[i].text=DestroyString(primitive_info[i].text);
primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(primitive_info);
}
primitive=DestroyString(primitive);
if (stops != (StopInfo *) NULL)
stops=(StopInfo *) RelinquishMagickMemory(stops);
for ( ; n >= 0; n--)
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context);
if (status == MagickFalse)
ThrowBinaryException(DrawError,"NonconformingDrawingPrimitiveDefinition",
keyword);
return(status != 0 ? MagickTrue : MagickFalse);
}
MagickExport MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info,
ExceptionInfo *exception)
{
return(RenderMVGContent(image,draw_info,0,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P a t t e r n P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPatternPath() renders a named pattern, as registered in the image
% artifacts, into the pattern image.
%
% The format of the DrawPatternPath method is:
%
% MagickBooleanType DrawPatternPath(Image *image,const DrawInfo *draw_info,
% const char *name,Image **pattern,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o name: the pattern name.
%
% o pattern: the address of the pattern image; rendered on return.
%
% o exception: return any errors or warnings in this structure.
%
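%  A hedged usage sketch (the pattern name "checkers" is hypothetical; it
%  must first be registered as an image artifact, for example by the MVG
%  parser handling "push pattern ... pop pattern"):
%
%    Image
%      *pattern = (Image *) NULL;
%
%    if (DrawPatternPath(image,draw_info,"checkers",&pattern,exception) ==
%        MagickFalse)
%      ... no "checkers" pattern artifact was registered on the image ...
%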
*/
MagickExport MagickBooleanType DrawPatternPath(Image *image,
const DrawInfo *draw_info,const char *name,Image **pattern,
ExceptionInfo *exception)
{
char
property[MagickPathExtent];
const char
*geometry,
*path,
*type;
DrawInfo
*clone_info;
ImageInfo
*image_info;
MagickBooleanType
status;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (const DrawInfo *) NULL);
assert(name != (const char *) NULL);
(void) FormatLocaleString(property,MagickPathExtent,"%s",name);
path=GetImageArtifact(image,property);
if (path == (const char *) NULL)
return(MagickFalse);
(void) FormatLocaleString(property,MagickPathExtent,"%s-geometry",name);
geometry=GetImageArtifact(image,property);
if (geometry == (const char *) NULL)
return(MagickFalse);
if ((*pattern) != (Image *) NULL)
*pattern=DestroyImage(*pattern);
image_info=AcquireImageInfo();
image_info->size=AcquireString(geometry);
*pattern=AcquireImage(image_info,exception);
image_info=DestroyImageInfo(image_info);
(void) QueryColorCompliance("#000000ff",AllCompliance,
&(*pattern)->background_color,exception);
(void) SetImageBackgroundColor(*pattern,exception);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
"begin pattern-path %s %s",name,geometry);
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
clone_info->fill_pattern=NewImageList();
clone_info->stroke_pattern=NewImageList();
(void) FormatLocaleString(property,MagickPathExtent,"%s-type",name);
type=GetImageArtifact(image,property);
if (type != (const char *) NULL)
clone_info->gradient.type=(GradientType) ParseCommandOption(
MagickGradientOptions,MagickFalse,type);
(void) CloneString(&clone_info->primitive,path);
status=RenderMVGContent(*pattern,clone_info,0,exception);
clone_info=DestroyDrawInfo(clone_info);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"end pattern-path");
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w P o l y g o n P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPolygonPrimitive() draws a polygon on the image.
%
% The format of the DrawPolygonPrimitive method is:
%
% MagickBooleanType DrawPolygonPrimitive(Image *image,
% const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
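%  Note: the primitive is first converted to per-thread edge lists (one
%  PolygonInfo per worker); each scanline is then rendered in parallel,
%  with GetFillAlpha() deriving fill coverage from the winding number and
%  stroke coverage from the squared distance to the nearest edge.
%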
*/
static PolygonInfo **DestroyPolygonThreadSet(PolygonInfo **polygon_info)
{
register ssize_t
i;
assert(polygon_info != (PolygonInfo **) NULL);
for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
if (polygon_info[i] != (PolygonInfo *) NULL)
polygon_info[i]=DestroyPolygonInfo(polygon_info[i]);
polygon_info=(PolygonInfo **) RelinquishMagickMemory(polygon_info);
return(polygon_info);
}
static PolygonInfo **AcquirePolygonThreadSet(
const PrimitiveInfo *primitive_info)
{
PathInfo
*magick_restrict path_info;
PolygonInfo
**polygon_info;
register ssize_t
i;
size_t
number_threads;
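/*
Allocate one polygon edge list per worker thread so scanlines can be
rasterized in parallel without sharing mutable edge state.
*/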
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
polygon_info=(PolygonInfo **) AcquireQuantumMemory(number_threads,
sizeof(*polygon_info));
if (polygon_info == (PolygonInfo **) NULL)
return((PolygonInfo **) NULL);
(void) memset(polygon_info,0,number_threads*sizeof(*polygon_info));
path_info=ConvertPrimitiveToPath(primitive_info);
if (path_info == (PathInfo *) NULL)
return(DestroyPolygonThreadSet(polygon_info));
for (i=0; i < (ssize_t) number_threads; i++)
{
polygon_info[i]=ConvertPathToPolygon(path_info);
if (polygon_info[i] == (PolygonInfo *) NULL)
return(DestroyPolygonThreadSet(polygon_info));
}
path_info=(PathInfo *) RelinquishMagickMemory(path_info);
return(polygon_info);
}
static double GetFillAlpha(PolygonInfo *polygon_info,const double mid,
const MagickBooleanType fill,const FillRule fill_rule,const ssize_t x,
const ssize_t y,double *stroke_alpha)
{
double
alpha,
beta,
distance,
subpath_alpha;
PointInfo
delta;
register const PointInfo
*q;
register EdgeInfo
*p;
register ssize_t
i;
ssize_t
j,
winding_number;
/*
Compute fill & stroke opacity for this (x,y) point.
*/
*stroke_alpha=0.0;
subpath_alpha=0.0;
p=polygon_info->edges;
for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++)
{
if ((double) y <= (p->bounds.y1-mid-0.5))
break;
if ((double) y > (p->bounds.y2+mid+0.5))
{
(void) DestroyEdge(polygon_info,(size_t) j);
continue;
}
if (((double) x <= (p->bounds.x1-mid-0.5)) ||
((double) x > (p->bounds.x2+mid+0.5)))
continue;
i=(ssize_t) MagickMax((double) p->highwater,1.0);
for ( ; i < (ssize_t) p->number_points; i++)
{
if ((double) y <= (p->points[i-1].y-mid-0.5))
break;
if ((double) y > (p->points[i].y+mid+0.5))
continue;
if (p->scanline != (double) y)
{
p->scanline=(double) y;
p->highwater=(size_t) i;
}
/*
Compute the squared distance from (x,y) to the edge segment [q,q+1];
beta is the projection of (x,y)-q onto the segment vector.
*/
q=p->points+i-1;
delta.x=(q+1)->x-q->x;
delta.y=(q+1)->y-q->y;
beta=delta.x*(x-q->x)+delta.y*(y-q->y);
if (beta <= 0.0)
{
/* the projection falls before the segment: nearest point is q */
delta.x=(double) x-q->x;
delta.y=(double) y-q->y;
distance=delta.x*delta.x+delta.y*delta.y;
}
else
{
alpha=delta.x*delta.x+delta.y*delta.y;
if (beta >= alpha)
{
/* the projection falls past the segment: nearest point is q+1 */
delta.x=(double) x-(q+1)->x;
delta.y=(double) y-(q+1)->y;
distance=delta.x*delta.x+delta.y*delta.y;
}
else
{
/* interior: squared perpendicular distance, cross^2/|segment|^2 */
alpha=PerceptibleReciprocal(alpha);
beta=delta.x*(y-q->y)-delta.y*(x-q->x);
distance=alpha*beta*beta;
}
}
/*
Compute stroke & subpath opacity.
*/
beta=0.0;
if (p->ghostline == MagickFalse)
{
alpha=mid+0.5;
if ((*stroke_alpha < 1.0) &&
(distance <= ((alpha+0.25)*(alpha+0.25))))
{
alpha=mid-0.5;
if (distance <= ((alpha+0.25)*(alpha+0.25)))
*stroke_alpha=1.0;
else
{
beta=1.0;
if (fabs(distance-1.0) >= MagickEpsilon)
beta=sqrt((double) distance);
alpha=beta-mid-0.5;
if (*stroke_alpha < ((alpha-0.25)*(alpha-0.25)))
*stroke_alpha=(alpha-0.25)*(alpha-0.25);
}
}
}
if ((fill == MagickFalse) || (distance > 1.0) || (subpath_alpha >= 1.0))
continue;
if (distance <= 0.0)
{
subpath_alpha=1.0;
continue;
}
if (fabs(beta) < MagickEpsilon)
{
beta=1.0;
if (fabs(distance-1.0) >= MagickEpsilon)
beta=sqrt(distance);
}
alpha=beta-1.0;
if (subpath_alpha < (alpha*alpha))
subpath_alpha=alpha*alpha;
}
}
/*
Compute fill opacity.
*/
if (fill == MagickFalse)
return(0.0);
if (subpath_alpha >= 1.0)
return(1.0);
/*
Determine winding number.
*/
winding_number=0;
p=polygon_info->edges;
for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++)
{
if ((double) y <= p->bounds.y1)
break;
if (((double) y > p->bounds.y2) || ((double) x <= p->bounds.x1))
continue;
if ((double) x > p->bounds.x2)
{
winding_number+=p->direction ? 1 : -1;
continue;
}
i=(ssize_t) MagickMax((double) p->highwater,1.0);
for ( ; i < (ssize_t) (p->number_points-1); i++)
if ((double) y <= p->points[i].y)
break;
q=p->points+i-1;
if ((((q+1)->x-q->x)*(y-q->y)) <= (((q+1)->y-q->y)*(x-q->x)))
winding_number+=p->direction ? 1 : -1;
}
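/*
Apply the fill rule: even-odd fills when the winding number is odd;
nonzero fills whenever the winding number is not zero.
*/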
if (fill_rule != NonZeroRule)
{
if ((MagickAbsoluteValue(winding_number) & 0x01) != 0)
return(1.0);
}
else
if (MagickAbsoluteValue(winding_number) != 0)
return(1.0);
return(subpath_alpha);
}
static MagickBooleanType DrawPolygonPrimitive(Image *image,
const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
fill,
status;
double
mid;
PolygonInfo
**magick_restrict polygon_info;
register EdgeInfo
*p;
register ssize_t
i;
SegmentInfo
bounds;
ssize_t
start_y,
stop_y,
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (DrawInfo *) NULL);
assert(draw_info->signature == MagickCoreSignature);
assert(primitive_info != (PrimitiveInfo *) NULL);
if (primitive_info->coordinates <= 1)
return(MagickTrue);
/*
Convert the primitive into per-thread polygon edge lists and compute
the bounding box of its edges.
*/
polygon_info=AcquirePolygonThreadSet(primitive_info);
if (polygon_info == (PolygonInfo **) NULL)
return(MagickFalse);
DisableMSCWarning(4127)
if (0)
{
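/*
Debug scaffolding: flip the if (0) above to visualize the polygon's
bounding rectangles.
*/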
status=DrawBoundingRectangles(image,draw_info,polygon_info[0],exception);
if (status == MagickFalse)
{
polygon_info=DestroyPolygonThreadSet(polygon_info);
return(status);
}
}
RestoreMSCWarning
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-polygon");
fill=(primitive_info->method == FillToBorderMethod) ||
(primitive_info->method == FloodfillMethod) ? MagickTrue : MagickFalse;
mid=ExpandAffine(&draw_info->affine)*SaneStrokeWidth(image,draw_info)/2.0;
bounds=polygon_info[0]->edges[0].bounds;
for (i=1; i < (ssize_t) polygon_info[0]->number_edges; i++)
{
p=polygon_info[0]->edges+i;
if (p->bounds.x1 < bounds.x1)
bounds.x1=p->bounds.x1;
if (p->bounds.y1 < bounds.y1)
bounds.y1=p->bounds.y1;
if (p->bounds.x2 > bounds.x2)
bounds.x2=p->bounds.x2;
if (p->bounds.y2 > bounds.y2)
bounds.y2=p->bounds.y2;
}
bounds.x1-=(mid+1.0);
bounds.y1-=(mid+1.0);
bounds.x2+=(mid+1.0);
bounds.y2+=(mid+1.0);
if ((bounds.x1 >= (double) image->columns) ||
(bounds.y1 >= (double) image->rows) ||
(bounds.x2 <= 0.0) || (bounds.y2 <= 0.0))
{
polygon_info=DestroyPolygonThreadSet(polygon_info);
return(MagickTrue); /* virtual polygon */
}
bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double) image->columns-1.0 ?
(double) image->columns-1.0 : bounds.x1;
bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double) image->rows-1.0 ?
(double) image->rows-1.0 : bounds.y1;
bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double) image->columns-1.0 ?
(double) image->columns-1.0 : bounds.x2;
bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double) image->rows-1.0 ?
(double) image->rows-1.0 : bounds.y2;
status=MagickTrue;
image_view=AcquireAuthenticCacheView(image,exception);
if ((primitive_info->coordinates == 1) ||
(polygon_info[0]->number_edges == 0))
{
/*
Draw point.
*/
start_y=(ssize_t) ceil(bounds.y1-0.5);
stop_y=(ssize_t) floor(bounds.y2+0.5);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,stop_y-start_y+1,1)
#endif
for (y=start_y; y <= stop_y; y++)
{
MagickBooleanType
sync;
PixelInfo
pixel;
register ssize_t
x;
register Quantum
*magick_restrict q;
ssize_t
start_x,
stop_x;
if (status == MagickFalse)
continue;
start_x=(ssize_t) ceil(bounds.x1-0.5);
stop_x=(ssize_t) floor(bounds.x2+0.5);
x=start_x;
q=GetCacheViewAuthenticPixels(image_view,x,y,(size_t) (stop_x-x+1),1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
GetPixelInfo(image,&pixel);
for ( ; x <= stop_x; x++)
{
if ((x == (ssize_t) ceil(primitive_info->point.x-0.5)) &&
(y == (ssize_t) ceil(primitive_info->point.y-0.5)))
{
GetFillColor(draw_info,x-start_x,y-start_y,&pixel,exception);
SetPixelViaPixelInfo(image,&pixel,q);
}
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
polygon_info=DestroyPolygonThreadSet(polygon_info);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" end draw-polygon");
return(status);
}
/*
Draw polygon or line.
*/
start_y=(ssize_t) ceil(bounds.y1-0.5);
stop_y=(ssize_t) floor(bounds.y2+0.5);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,stop_y-start_y+1,1)
#endif
for (y=start_y; y <= stop_y; y++)
{
const int
id = GetOpenMPThreadId();
register Quantum
*magick_restrict q;
register ssize_t
x;
ssize_t
start_x,
stop_x;
if (status == MagickFalse)
continue;
start_x=(ssize_t) ceil(bounds.x1-0.5);
stop_x=(ssize_t) floor(bounds.x2+0.5);
q=GetCacheViewAuthenticPixels(image_view,start_x,y,(size_t) (stop_x-start_x+
1),1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=start_x; x <= stop_x; x++)
{
double
fill_alpha,
stroke_alpha;
PixelInfo
fill_color,
stroke_color;
/*
Fill and/or stroke.
*/
fill_alpha=GetFillAlpha(polygon_info[id],mid,fill,draw_info->fill_rule,
x,y,&stroke_alpha);
if (draw_info->stroke_antialias == MagickFalse)
{
fill_alpha=fill_alpha > 0.25 ? 1.0 : 0.0;
stroke_alpha=stroke_alpha > 0.25 ? 1.0 : 0.0;
}
GetFillColor(draw_info,x-start_x,y-start_y,&fill_color,exception);
CompositePixelOver(image,&fill_color,fill_alpha*fill_color.alpha,q,
(double) GetPixelAlpha(image,q),q);
GetStrokeColor(draw_info,x-start_x,y-start_y,&stroke_color,exception);
CompositePixelOver(image,&stroke_color,stroke_alpha*stroke_color.alpha,q,
(double) GetPixelAlpha(image,q),q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
polygon_info=DestroyPolygonThreadSet(polygon_info);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-polygon");
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPrimitive() draws a primitive (line, rectangle, ellipse) on the image.
%
% The format of the DrawPrimitive method is:
%
% MagickBooleanType DrawPrimitive(Image *image,const DrawInfo *draw_info,
% PrimitiveInfo *primitive_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
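%  A hedged usage sketch (coordinates are illustrative); the primitive
%  array must be terminated by an UndefinedPrimitive entry:
%
%    PrimitiveInfo
%      primitive_info[2];
%
%    (void) memset(primitive_info,0,sizeof(primitive_info));
%    primitive_info[0].primitive=PointPrimitive;
%    primitive_info[0].point.x=10.0;
%    primitive_info[0].point.y=10.0;
%    primitive_info[0].coordinates=1;
%    primitive_info[0].method=PointMethod;
%    primitive_info[1].primitive=UndefinedPrimitive;
%    status=DrawPrimitive(image,draw_info,primitive_info,exception);
%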
*/
static inline double ConstrainCoordinate(double x)
{
if (x < (double) -SSIZE_MAX)
return((double) -SSIZE_MAX);
if (x > (double) SSIZE_MAX)
return((double) SSIZE_MAX);
return(x);
}
static void LogPrimitiveInfo(const PrimitiveInfo *primitive_info)
{
const char
*methods[] =
{
"point",
"replace",
"floodfill",
"filltoborder",
"reset",
"?"
};
PointInfo
p,
point,
q;
register ssize_t
i,
x;
ssize_t
coordinates,
y;
x=(ssize_t) ceil(primitive_info->point.x-0.5);
y=(ssize_t) ceil(primitive_info->point.y-0.5);
switch (primitive_info->primitive)
{
case AlphaPrimitive:
{
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
"AlphaPrimitive %.20g,%.20g %s",(double) x,(double) y,
methods[primitive_info->method]);
return;
}
case ColorPrimitive:
{
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
"ColorPrimitive %.20g,%.20g %s",(double) x,(double) y,
methods[primitive_info->method]);
return;
}
case ImagePrimitive:
{
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
"ImagePrimitive %.20g,%.20g",(double) x,(double) y);
return;
}
case PointPrimitive:
{
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
"PointPrimitive %.20g,%.20g %s",(double) x,(double) y,
methods[primitive_info->method]);
return;
}
case TextPrimitive:
{
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
"TextPrimitive %.20g,%.20g",(double) x,(double) y);
return;
}
default:
break;
}
coordinates=0;
p=primitive_info[0].point;
q.x=(-1.0);
q.y=(-1.0);
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
{
point=primitive_info[i].point;
if (coordinates <= 0)
{
coordinates=(ssize_t) primitive_info[i].coordinates;
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" begin open (%.20g)",(double) coordinates);
p=point;
}
if ((fabs(q.x-point.x) >= MagickEpsilon) ||
(fabs(q.y-point.y) >= MagickEpsilon))
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" %.20g: %.18g,%.18g",(double) coordinates,point.x,point.y);
else
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" %.20g: %g %g (duplicate)",(double) coordinates,point.x,point.y);
q=point;
coordinates--;
if (coordinates > 0)
continue;
if ((fabs(p.x-point.x) >= MagickEpsilon) ||
(fabs(p.y-point.y) >= MagickEpsilon))
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end last (%.20g)",
(double) coordinates);
else
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end open (%.20g)",
(double) coordinates);
}
}
MagickExport MagickBooleanType DrawPrimitive(Image *image,
const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
ExceptionInfo *exception)
{
CacheView
*image_view;
MagickStatusType
status;
register ssize_t
i,
x;
ssize_t
y;
if (image->debug != MagickFalse)
{
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" begin draw-primitive");
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" affine: %g,%g,%g,%g,%g,%g",draw_info->affine.sx,
draw_info->affine.rx,draw_info->affine.ry,draw_info->affine.sy,
draw_info->affine.tx,draw_info->affine.ty);
}
status=MagickTrue;
if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
((IsPixelInfoGray(&draw_info->fill) == MagickFalse) ||
(IsPixelInfoGray(&draw_info->stroke) == MagickFalse)))
status=SetImageColorspace(image,sRGBColorspace,exception);
if (draw_info->compliance == SVGCompliance)
{
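/*
Under SVG compliance the clipping and composite masks are installed as
image masks for this primitive and cleared again after rendering.
*/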
status&=SetImageMask(image,WritePixelMask,draw_info->clipping_mask,
exception);
status&=SetImageMask(image,CompositePixelMask,draw_info->composite_mask,
exception);
}
x=(ssize_t) ceil(ConstrainCoordinate(primitive_info->point.x-0.5));
y=(ssize_t) ceil(ConstrainCoordinate(primitive_info->point.y-0.5));
image_view=AcquireAuthenticCacheView(image,exception);
switch (primitive_info->primitive)
{
case AlphaPrimitive:
{
if (image->alpha_trait == UndefinedPixelTrait)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
switch (primitive_info->method)
{
case PointMethod:
default:
{
PixelInfo
pixel;
register Quantum
*q;
q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
if (q == (Quantum *) NULL)
break;
GetFillColor(draw_info,x,y,&pixel,exception);
SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
(void) SyncCacheViewAuthenticPixels(image_view,exception);
break;
}
case ReplaceMethod:
{
MagickBooleanType
sync;
PixelInfo
pixel,
target;
(void) GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target,
exception);
GetPixelInfo(image,&pixel);
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
GetPixelInfoPixel(image,q,&pixel);
if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse)
{
q+=GetPixelChannels(image);
continue;
}
GetFillColor(draw_info,x,y,&pixel,exception);
SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
break;
}
break;
}
case FloodfillMethod:
case FillToBorderMethod:
{
ChannelType
channel_mask;
PixelInfo
target;
(void) GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y,
&target,exception);
if (primitive_info->method == FillToBorderMethod)
{
target.red=(double) draw_info->border_color.red;
target.green=(double) draw_info->border_color.green;
target.blue=(double) draw_info->border_color.blue;
}
channel_mask=SetImageChannelMask(image,AlphaChannel);
status&=FloodfillPaintImage(image,draw_info,&target,x,y,
primitive_info->method == FloodfillMethod ? MagickFalse :
MagickTrue,exception);
(void) SetImageChannelMask(image,channel_mask);
break;
}
case ResetMethod:
{
MagickBooleanType
sync;
PixelInfo
pixel;
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
GetFillColor(draw_info,x,y,&pixel,exception);
SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
break;
}
break;
}
}
break;
}
case ColorPrimitive:
{
switch (primitive_info->method)
{
case PointMethod:
default:
{
PixelInfo
pixel;
register Quantum
*q;
q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
if (q == (Quantum *) NULL)
break;
GetPixelInfo(image,&pixel);
GetFillColor(draw_info,x,y,&pixel,exception);
SetPixelViaPixelInfo(image,&pixel,q);
(void) SyncCacheViewAuthenticPixels(image_view,exception);
break;
}
case ReplaceMethod:
{
MagickBooleanType
sync;
PixelInfo
pixel,
target;
(void) GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target,
exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
GetPixelInfoPixel(image,q,&pixel);
if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse)
{
q+=GetPixelChannels(image);
continue;
}
GetFillColor(draw_info,x,y,&pixel,exception);
SetPixelViaPixelInfo(image,&pixel,q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
break;
}
break;
}
case FloodfillMethod:
case FillToBorderMethod:
{
PixelInfo
target;
(void) GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y,
&target,exception);
if (primitive_info->method == FillToBorderMethod)
{
target.red=(double) draw_info->border_color.red;
target.green=(double) draw_info->border_color.green;
target.blue=(double) draw_info->border_color.blue;
}
status&=FloodfillPaintImage(image,draw_info,&target,x,y,
primitive_info->method == FloodfillMethod ? MagickFalse :
MagickTrue,exception);
break;
}
case ResetMethod:
{
MagickBooleanType
sync;
PixelInfo
pixel;
GetPixelInfo(image,&pixel);
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
GetFillColor(draw_info,x,y,&pixel,exception);
SetPixelViaPixelInfo(image,&pixel,q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
break;
}
break;
}
}
break;
}
case ImagePrimitive:
{
AffineMatrix
affine;
char
composite_geometry[MagickPathExtent];
Image
*composite_image,
*composite_images;
ImageInfo
*clone_info;
RectangleInfo
geometry;
ssize_t
x1,
y1;
if (primitive_info->text == (char *) NULL)
break;
clone_info=AcquireImageInfo();
if (LocaleNCompare(primitive_info->text,"data:",5) == 0)
composite_images=ReadInlineImage(clone_info,primitive_info->text,
exception);
else
{
(void) CopyMagickString(clone_info->filename,primitive_info->text,
MagickPathExtent);
composite_images=ReadImage(clone_info,exception);
}
clone_info=DestroyImageInfo(clone_info);
if (composite_images == (Image *) NULL)
{
status=0;
break;
}
composite_image=RemoveFirstImageFromList(&composite_images);
composite_images=DestroyImageList(composite_images);
(void) SetImageProgressMonitor(composite_image,(MagickProgressMonitor)
NULL,(void *) NULL);
x1=(ssize_t) ceil(primitive_info[1].point.x-0.5);
y1=(ssize_t) ceil(primitive_info[1].point.y-0.5);
if (((x1 != 0L) && (x1 != (ssize_t) composite_image->columns)) ||
((y1 != 0L) && (y1 != (ssize_t) composite_image->rows)))
{
/*
Resize image.
*/
(void) FormatLocaleString(composite_geometry,MagickPathExtent,
"%gx%g!",primitive_info[1].point.x,primitive_info[1].point.y);
composite_image->filter=image->filter;
(void) TransformImage(&composite_image,(char *) NULL,
composite_geometry,exception);
}
if (composite_image->alpha_trait == UndefinedPixelTrait)
(void) SetImageAlphaChannel(composite_image,OpaqueAlphaChannel,
exception);
if (draw_info->alpha != OpaqueAlpha)
(void) SetImageAlpha(composite_image,draw_info->alpha,exception);
SetGeometry(image,&geometry);
image->gravity=draw_info->gravity;
geometry.x=x;
geometry.y=y;
(void) FormatLocaleString(composite_geometry,MagickPathExtent,
"%.20gx%.20g%+.20g%+.20g",(double) composite_image->columns,(double)
composite_image->rows,(double) geometry.x,(double) geometry.y);
(void) ParseGravityGeometry(image,composite_geometry,&geometry,exception);
affine=draw_info->affine;
affine.tx=(double) geometry.x;
affine.ty=(double) geometry.y;
composite_image->interpolate=image->interpolate;
if ((draw_info->compose == OverCompositeOp) ||
(draw_info->compose == SrcOverCompositeOp))
(void) DrawAffineImage(image,composite_image,&affine,exception);
else
(void) CompositeImage(image,composite_image,draw_info->compose,
MagickTrue,geometry.x,geometry.y,exception);
composite_image=DestroyImage(composite_image);
break;
}
case PointPrimitive:
{
PixelInfo
fill_color;
register Quantum
*q;
if ((y < 0) || (y >= (ssize_t) image->rows))
break;
if ((x < 0) || (x >= (ssize_t) image->columns))
break;
q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
if (q == (Quantum *) NULL)
break;
GetFillColor(draw_info,x,y,&fill_color,exception);
CompositePixelOver(image,&fill_color,(double) fill_color.alpha,q,
(double) GetPixelAlpha(image,q),q);
(void) SyncCacheViewAuthenticPixels(image_view,exception);
break;
}
case TextPrimitive:
{
char
geometry[MagickPathExtent];
DrawInfo
*clone_info;
if (primitive_info->text == (char *) NULL)
break;
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
(void) CloneString(&clone_info->text,primitive_info->text);
(void) FormatLocaleString(geometry,MagickPathExtent,"%+f%+f",
primitive_info->point.x,primitive_info->point.y);
(void) CloneString(&clone_info->geometry,geometry);
status&=AnnotateImage(image,clone_info,exception);
clone_info=DestroyDrawInfo(clone_info);
break;
}
default:
{
double
mid,
scale;
DrawInfo
*clone_info;
if (IsEventLogging() != MagickFalse)
LogPrimitiveInfo(primitive_info);
scale=ExpandAffine(&draw_info->affine);
if ((draw_info->dash_pattern != (double *) NULL) &&
(fabs(draw_info->dash_pattern[0]) >= MagickEpsilon) &&
(fabs(scale*draw_info->stroke_width) >= MagickEpsilon) &&
(draw_info->stroke.alpha != (Quantum) TransparentAlpha))
{
/*
Draw dash polygon.
*/
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
clone_info->stroke_width=0.0;
clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
status&=DrawPolygonPrimitive(image,clone_info,primitive_info,
exception);
clone_info=DestroyDrawInfo(clone_info);
status=DrawDashPolygon(draw_info,primitive_info,image,exception);
break;
}
mid=ExpandAffine(&draw_info->affine)*SaneStrokeWidth(image,draw_info)/2.0;
if ((mid > 1.0) &&
((draw_info->stroke.alpha != (Quantum) TransparentAlpha) ||
(draw_info->stroke_pattern != (Image *) NULL)))
{
double
x,
y;
MagickBooleanType
closed_path;
/*
Draw strokes while respecting line cap/join attributes.
*/
closed_path=primitive_info[0].closed_subpath;
i=(ssize_t) primitive_info[0].coordinates;
x=fabs(primitive_info[i-1].point.x-primitive_info[0].point.x);
y=fabs(primitive_info[i-1].point.y-primitive_info[0].point.y);
if ((x < MagickEpsilon) && (y < MagickEpsilon))
closed_path=MagickTrue;
if ((((draw_info->linecap == RoundCap) ||
(closed_path != MagickFalse)) &&
(draw_info->linejoin == RoundJoin)) ||
(primitive_info[i].primitive != UndefinedPrimitive))
{
status=DrawPolygonPrimitive(image,draw_info,primitive_info,
exception);
break;
}
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
clone_info->stroke_width=0.0;
clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
status&=DrawPolygonPrimitive(image,clone_info,primitive_info,
exception);
clone_info=DestroyDrawInfo(clone_info);
status&=DrawStrokePolygon(image,draw_info,primitive_info,exception);
break;
}
status&=DrawPolygonPrimitive(image,draw_info,primitive_info,exception);
break;
}
}
image_view=DestroyCacheView(image_view);
if (draw_info->compliance == SVGCompliance)
{
status&=SetImageMask(image,WritePixelMask,(Image *) NULL,exception);
status&=SetImageMask(image,CompositePixelMask,(Image *) NULL,exception);
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-primitive");
return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w S t r o k e P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawStrokePolygon() draws a stroked polygon (line, rectangle, ellipse) on
% the image while respecting the line cap and join attributes.
%
% The format of the DrawStrokePolygon method is:
%
% MagickBooleanType DrawStrokePolygon(Image *image,
% const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType DrawRoundLinecap(Image *image,
const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
ExceptionInfo *exception)
{
PrimitiveInfo
linecap[5];
register ssize_t
i;
for (i=0; i < 4; i++)
linecap[i]=(*primitive_info);
linecap[0].coordinates=4;
linecap[1].point.x+=2.0*MagickEpsilon;
linecap[2].point.x+=2.0*MagickEpsilon;
linecap[2].point.y+=2.0*MagickEpsilon;
linecap[3].point.y+=2.0*MagickEpsilon;
linecap[4].primitive=UndefinedPrimitive;
return(DrawPolygonPrimitive(image,draw_info,linecap,exception));
}
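/*
  Illustrative note (not part of the original source): DrawRoundLinecap()
  builds a degenerate four-vertex polygon whose points are offset from the
  endpoint by only 2*MagickEpsilon and hands it to DrawPolygonPrimitive(),
  which paints the round cap at that endpoint.
*/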
static MagickBooleanType DrawStrokePolygon(Image *image,
const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
ExceptionInfo *exception)
{
DrawInfo
*clone_info;
MagickBooleanType
closed_path;
MagickStatusType
status;
PrimitiveInfo
*stroke_polygon;
register const PrimitiveInfo
*p,
*q;
/*
Draw stroked polygon.
*/
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" begin draw-stroke-polygon");
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
clone_info->fill=draw_info->stroke;
if (clone_info->fill_pattern != (Image *) NULL)
clone_info->fill_pattern=DestroyImage(clone_info->fill_pattern);
if (clone_info->stroke_pattern != (Image *) NULL)
clone_info->fill_pattern=CloneImage(clone_info->stroke_pattern,0,0,
MagickTrue,exception);
clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
clone_info->stroke_width=0.0;
clone_info->fill_rule=NonZeroRule;
status=MagickTrue;
for (p=primitive_info; p->primitive != UndefinedPrimitive; p+=p->coordinates)
{
if (p->coordinates == 1)
continue;
stroke_polygon=TraceStrokePolygon(image,draw_info,p);
if (stroke_polygon == (PrimitiveInfo *) NULL)
{
status=0;
break;
}
status&=DrawPolygonPrimitive(image,clone_info,stroke_polygon,exception);
stroke_polygon=(PrimitiveInfo *) RelinquishMagickMemory(stroke_polygon);
if (status == 0)
break;
q=p+p->coordinates-1;
closed_path=p->closed_subpath;
if ((draw_info->linecap == RoundCap) && (closed_path == MagickFalse))
{
status&=DrawRoundLinecap(image,draw_info,p,exception);
status&=DrawRoundLinecap(image,draw_info,q,exception);
}
}
clone_info=DestroyDrawInfo(clone_info);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" end draw-stroke-polygon");
return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A f f i n e M a t r i x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAffineMatrix() returns an AffineMatrix initialized to the identity
% matrix.
%
% The format of the GetAffineMatrix method is:
%
% void GetAffineMatrix(AffineMatrix *affine_matrix)
%
% A description of each parameter follows:
%
% o affine_matrix: the affine matrix.
%
*/
MagickExport void GetAffineMatrix(AffineMatrix *affine_matrix)
{
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(affine_matrix != (AffineMatrix *) NULL);
(void) memset(affine_matrix,0,sizeof(*affine_matrix));
affine_matrix->sx=1.0;
affine_matrix->sy=1.0;
}
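/*
  Illustrative usage (a minimal sketch, not part of the original source):

    AffineMatrix
      affine;

    GetAffineMatrix(&affine);  // identity: sx=sy=1, rx=ry=tx=ty=0
    affine.tx=10.0;            // e.g. translate 10 pixels to the right
*/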
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetDrawInfo() initializes draw_info to default values from image_info.
%
% The format of the GetDrawInfo method is:
%
% void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o draw_info: the draw info.
%
*/
MagickExport void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
{
char
*next_token;
const char
*option;
ExceptionInfo
*exception;
ImageInfo
*clone_info;
/*
Initialize draw attributes.
*/
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(draw_info != (DrawInfo *) NULL);
(void) memset(draw_info,0,sizeof(*draw_info));
clone_info=CloneImageInfo(image_info);
GetAffineMatrix(&draw_info->affine);
exception=AcquireExceptionInfo();
(void) QueryColorCompliance("#000F",AllCompliance,&draw_info->fill,
exception);
(void) QueryColorCompliance("#FFF0",AllCompliance,&draw_info->stroke,
exception);
draw_info->stroke_antialias=clone_info->antialias;
draw_info->stroke_width=1.0;
draw_info->fill_rule=EvenOddRule;
draw_info->alpha=OpaqueAlpha;
draw_info->fill_alpha=OpaqueAlpha;
draw_info->stroke_alpha=OpaqueAlpha;
draw_info->linecap=ButtCap;
draw_info->linejoin=MiterJoin;
draw_info->miterlimit=10;
draw_info->decorate=NoDecoration;
draw_info->pointsize=12.0;
draw_info->undercolor.alpha=(MagickRealType) TransparentAlpha;
draw_info->compose=OverCompositeOp;
draw_info->render=MagickTrue;
draw_info->clip_path=MagickFalse;
draw_info->debug=IsEventLogging();
if (clone_info->font != (char *) NULL)
draw_info->font=AcquireString(clone_info->font);
if (clone_info->density != (char *) NULL)
draw_info->density=AcquireString(clone_info->density);
draw_info->text_antialias=clone_info->antialias;
if (fabs(clone_info->pointsize) >= MagickEpsilon)
draw_info->pointsize=clone_info->pointsize;
draw_info->border_color=clone_info->border_color;
if (clone_info->server_name != (char *) NULL)
draw_info->server_name=AcquireString(clone_info->server_name);
option=GetImageOption(clone_info,"direction");
if (option != (const char *) NULL)
draw_info->direction=(DirectionType) ParseCommandOption(
MagickDirectionOptions,MagickFalse,option);
else
draw_info->direction=UndefinedDirection;
option=GetImageOption(clone_info,"encoding");
if (option != (const char *) NULL)
(void) CloneString(&draw_info->encoding,option);
option=GetImageOption(clone_info,"family");
if (option != (const char *) NULL)
(void) CloneString(&draw_info->family,option);
option=GetImageOption(clone_info,"fill");
if (option != (const char *) NULL)
(void) QueryColorCompliance(option,AllCompliance,&draw_info->fill,
exception);
option=GetImageOption(clone_info,"gravity");
if (option != (const char *) NULL)
draw_info->gravity=(GravityType) ParseCommandOption(MagickGravityOptions,
MagickFalse,option);
option=GetImageOption(clone_info,"interline-spacing");
if (option != (const char *) NULL)
draw_info->interline_spacing=StringToDouble(option,&next_token);
option=GetImageOption(clone_info,"interword-spacing");
if (option != (const char *) NULL)
draw_info->interword_spacing=StringToDouble(option,&next_token);
option=GetImageOption(clone_info,"kerning");
if (option != (const char *) NULL)
draw_info->kerning=StringToDouble(option,&next_token);
option=GetImageOption(clone_info,"stroke");
if (option != (const char *) NULL)
(void) QueryColorCompliance(option,AllCompliance,&draw_info->stroke,
exception);
option=GetImageOption(clone_info,"strokewidth");
if (option != (const char *) NULL)
draw_info->stroke_width=StringToDouble(option,&next_token);
option=GetImageOption(clone_info,"style");
if (option != (const char *) NULL)
draw_info->style=(StyleType) ParseCommandOption(MagickStyleOptions,
MagickFalse,option);
option=GetImageOption(clone_info,"undercolor");
if (option != (const char *) NULL)
(void) QueryColorCompliance(option,AllCompliance,&draw_info->undercolor,
exception);
option=GetImageOption(clone_info,"weight");
if (option != (const char *) NULL)
{
ssize_t
weight;
weight=ParseCommandOption(MagickWeightOptions,MagickFalse,option);
if (weight == -1)
weight=(ssize_t) StringToUnsignedLong(option);
draw_info->weight=(size_t) weight;
}
exception=DestroyExceptionInfo(exception);
draw_info->signature=MagickCoreSignature;
clone_info=DestroyImageInfo(clone_info);
}
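/*
  Illustrative usage (a sketch; in practice callers usually go through
  CloneDrawInfo() or AcquireDrawInfo(), which invoke GetDrawInfo()
  internally and pair with DestroyDrawInfo() to release the allocated
  members):

    DrawInfo
      *draw_info;

    draw_info=CloneDrawInfo(image_info,(DrawInfo *) NULL);
    draw_info->stroke_width=2.0;
    ...
    draw_info=DestroyDrawInfo(draw_info);
*/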
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P e r m u t a t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Permutate() returns the binomial coefficient C(n,k): the number of ways
% to choose k items from n.
%
% The format of the Permutate method is:
%
% double Permutate(const ssize_t n,const ssize_t k)
%
% A description of each parameter follows:
%
% o n: the total number of items.
%
% o k: the number of items chosen at a time.
%
%
*/
static inline double Permutate(const ssize_t n,const ssize_t k)
{
double
r;
register ssize_t
i;
r=1.0;
for (i=k+1; i <= n; i++)
r*=i;
for (i=1; i <= (n-k); i++)
r/=i;
return(r);
}
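/*
  A worked example (illustrative): Permutate(5,2) evaluates
  (3*4*5)/(1*2*3) = 60/6 = 10, i.e. C(5,2) = 5!/(2!*3!) = 10.  TraceBezier()
  below uses these values as the binomial weights of the Bernstein basis.
*/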
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ T r a c e P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TracePrimitive is a collection of methods for generating graphic
% primitives such as arcs, ellipses, paths, etc.
%
*/
static MagickBooleanType TraceArc(MVGInfo *mvg_info,const PointInfo start,
const PointInfo end,const PointInfo degrees)
{
PointInfo
center,
radius;
center.x=0.5*(end.x+start.x);
center.y=0.5*(end.y+start.y);
radius.x=fabs(center.x-start.x);
radius.y=fabs(center.y-start.y);
return(TraceEllipse(mvg_info,center,radius,degrees));
}
static MagickBooleanType TraceArcPath(MVGInfo *mvg_info,const PointInfo start,
const PointInfo end,const PointInfo arc,const double angle,
const MagickBooleanType large_arc,const MagickBooleanType sweep)
{
double
alpha,
beta,
delta,
factor,
gamma,
theta;
MagickStatusType
status;
PointInfo
center,
points[3],
radii;
register double
cosine,
sine;
PrimitiveInfo
*primitive_info;
register PrimitiveInfo
*p;
register ssize_t
i;
size_t
arc_segments;
ssize_t
offset;
offset=mvg_info->offset;
primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
primitive_info->coordinates=0;
if ((fabs(start.x-end.x) < MagickEpsilon) &&
(fabs(start.y-end.y) < MagickEpsilon))
return(TracePoint(primitive_info,end));
radii.x=fabs(arc.x);
radii.y=fabs(arc.y);
if ((fabs(radii.x) < MagickEpsilon) || (fabs(radii.y) < MagickEpsilon))
return(TraceLine(primitive_info,start,end));
cosine=cos(DegreesToRadians(fmod((double) angle,360.0)));
sine=sin(DegreesToRadians(fmod((double) angle,360.0)));
center.x=(double) (cosine*(end.x-start.x)/2+sine*(end.y-start.y)/2);
center.y=(double) (cosine*(end.y-start.y)/2-sine*(end.x-start.x)/2);
delta=(center.x*center.x)/(radii.x*radii.x)+(center.y*center.y)/
(radii.y*radii.y);
if (delta < MagickEpsilon)
return(TraceLine(primitive_info,start,end));
if (delta > 1.0)
{
radii.x*=sqrt((double) delta);
radii.y*=sqrt((double) delta);
}
points[0].x=(double) (cosine*start.x/radii.x+sine*start.y/radii.x);
points[0].y=(double) (cosine*start.y/radii.y-sine*start.x/radii.y);
points[1].x=(double) (cosine*end.x/radii.x+sine*end.y/radii.x);
points[1].y=(double) (cosine*end.y/radii.y-sine*end.x/radii.y);
alpha=points[1].x-points[0].x;
beta=points[1].y-points[0].y;
factor=PerceptibleReciprocal(alpha*alpha+beta*beta)-0.25;
if (factor <= 0.0)
factor=0.0;
else
{
factor=sqrt((double) factor);
if (sweep == large_arc)
factor=(-factor);
}
center.x=(double) ((points[0].x+points[1].x)/2-factor*beta);
center.y=(double) ((points[0].y+points[1].y)/2+factor*alpha);
alpha=atan2(points[0].y-center.y,points[0].x-center.x);
theta=atan2(points[1].y-center.y,points[1].x-center.x)-alpha;
if ((theta < 0.0) && (sweep != MagickFalse))
theta+=2.0*MagickPI;
else
if ((theta > 0.0) && (sweep == MagickFalse))
theta-=2.0*MagickPI;
arc_segments=(size_t) ceil(fabs((double) (theta/(0.5*MagickPI+
MagickEpsilon))));
status=MagickTrue;
p=primitive_info;
for (i=0; i < (ssize_t) arc_segments; i++)
{
beta=0.5*((alpha+(i+1)*theta/arc_segments)-(alpha+i*theta/arc_segments));
gamma=(8.0/3.0)*sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))*
sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))/
sin(fmod((double) beta,DegreesToRadians(360.0)));
points[0].x=(double) (center.x+cos(fmod((double) (alpha+(double) i*theta/
arc_segments),DegreesToRadians(360.0)))-gamma*sin(fmod((double) (alpha+
(double) i*theta/arc_segments),DegreesToRadians(360.0))));
points[0].y=(double) (center.y+sin(fmod((double) (alpha+(double) i*theta/
arc_segments),DegreesToRadians(360.0)))+gamma*cos(fmod((double) (alpha+
(double) i*theta/arc_segments),DegreesToRadians(360.0))));
points[2].x=(double) (center.x+cos(fmod((double) (alpha+(double) (i+1)*
theta/arc_segments),DegreesToRadians(360.0))));
points[2].y=(double) (center.y+sin(fmod((double) (alpha+(double) (i+1)*
theta/arc_segments),DegreesToRadians(360.0))));
points[1].x=(double) (points[2].x+gamma*sin(fmod((double) (alpha+(double)
(i+1)*theta/arc_segments),DegreesToRadians(360.0))));
points[1].y=(double) (points[2].y-gamma*cos(fmod((double) (alpha+(double)
(i+1)*theta/arc_segments),DegreesToRadians(360.0))));
p->point.x=(p == primitive_info) ? start.x : (p-1)->point.x;
p->point.y=(p == primitive_info) ? start.y : (p-1)->point.y;
(p+1)->point.x=(double) (cosine*radii.x*points[0].x-sine*radii.y*
points[0].y);
(p+1)->point.y=(double) (sine*radii.x*points[0].x+cosine*radii.y*
points[0].y);
(p+2)->point.x=(double) (cosine*radii.x*points[1].x-sine*radii.y*
points[1].y);
(p+2)->point.y=(double) (sine*radii.x*points[1].x+cosine*radii.y*
points[1].y);
(p+3)->point.x=(double) (cosine*radii.x*points[2].x-sine*radii.y*
points[2].y);
(p+3)->point.y=(double) (sine*radii.x*points[2].x+cosine*radii.y*
points[2].y);
if (i == (ssize_t) (arc_segments-1))
(p+3)->point=end;
status&=TraceBezier(mvg_info,4);
p=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=p->coordinates;
p+=p->coordinates;
}
mvg_info->offset=offset;
primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
primitive_info->coordinates=(size_t) (p-primitive_info);
primitive_info->closed_subpath=MagickFalse;
for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
{
p->primitive=primitive_info->primitive;
p--;
}
return(status == 0 ? MagickFalse : MagickTrue);
}
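/*
  Illustrative summary: TraceArcPath() follows the SVG elliptical-arc
  parameterization.  The swept angle theta is split into
  ceil(|theta|/(pi/2)) slices of at most 90 degrees, and each slice is
  approximated by one cubic Bezier whose control-point offset is
  gamma = (8/3)*sin(beta/2)^2/sin(beta), where beta is the angular width of
  the slice.
*/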
static MagickBooleanType TraceBezier(MVGInfo *mvg_info,
const size_t number_coordinates)
{
double
alpha,
*coefficients,
weight;
PointInfo
end,
point,
*points;
PrimitiveInfo
*primitive_info;
register PrimitiveInfo
*p;
register ssize_t
i,
j;
size_t
control_points,
quantum;
/*
Allocate coefficients.
*/
primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
quantum=number_coordinates;
for (i=0; i < (ssize_t) number_coordinates; i++)
{
for (j=i+1; j < (ssize_t) number_coordinates; j++)
{
alpha=fabs(primitive_info[j].point.x-primitive_info[i].point.x);
if (alpha > (double) SSIZE_MAX)
{
(void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return(MagickFalse);
}
if (alpha > (double) quantum)
quantum=(size_t) alpha;
alpha=fabs(primitive_info[j].point.y-primitive_info[i].point.y);
if (alpha > (double) SSIZE_MAX)
{
(void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return(MagickFalse);
}
if (alpha > (double) quantum)
quantum=(size_t) alpha;
}
}
primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
quantum=MagickMin(quantum/number_coordinates,BezierQuantum);
coefficients=(double *) AcquireQuantumMemory(number_coordinates,
sizeof(*coefficients));
points=(PointInfo *) AcquireQuantumMemory(quantum,number_coordinates*
sizeof(*points));
if ((coefficients == (double *) NULL) || (points == (PointInfo *) NULL))
{
if (points != (PointInfo *) NULL)
points=(PointInfo *) RelinquishMagickMemory(points);
if (coefficients != (double *) NULL)
coefficients=(double *) RelinquishMagickMemory(coefficients);
(void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return(MagickFalse);
}
control_points=quantum*number_coordinates;
if (CheckPrimitiveExtent(mvg_info,control_points+1) == MagickFalse)
{
points=(PointInfo *) RelinquishMagickMemory(points);
coefficients=(double *) RelinquishMagickMemory(coefficients);
return(MagickFalse);
}
primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
/*
Compute bezier points.
*/
end=primitive_info[number_coordinates-1].point;
for (i=0; i < (ssize_t) number_coordinates; i++)
coefficients[i]=Permutate((ssize_t) number_coordinates-1,i);
weight=0.0;
for (i=0; i < (ssize_t) control_points; i++)
{
p=primitive_info;
point.x=0.0;
point.y=0.0;
alpha=pow((double) (1.0-weight),(double) number_coordinates-1.0);
for (j=0; j < (ssize_t) number_coordinates; j++)
{
point.x+=alpha*coefficients[j]*p->point.x;
point.y+=alpha*coefficients[j]*p->point.y;
alpha*=weight/(1.0-weight);
p++;
}
points[i]=point;
weight+=1.0/control_points;
}
/*
Bezier curves are just short segmented polys.
*/
p=primitive_info;
for (i=0; i < (ssize_t) control_points; i++)
{
if (TracePoint(p,points[i]) == MagickFalse)
{
points=(PointInfo *) RelinquishMagickMemory(points);
coefficients=(double *) RelinquishMagickMemory(coefficients);
return(MagickFalse);
}
p+=p->coordinates;
}
if (TracePoint(p,end) == MagickFalse)
{
points=(PointInfo *) RelinquishMagickMemory(points);
coefficients=(double *) RelinquishMagickMemory(coefficients);
return(MagickFalse);
}
p+=p->coordinates;
primitive_info->coordinates=(size_t) (p-primitive_info);
primitive_info->closed_subpath=MagickFalse;
for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
{
p->primitive=primitive_info->primitive;
p--;
}
points=(PointInfo *) RelinquishMagickMemory(points);
coefficients=(double *) RelinquishMagickMemory(coefficients);
return(MagickTrue);
}
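/*
  The weighting loop above evaluates the Bernstein form of a degree n-1
  Bezier curve (illustrative summary):

    B(t) = sum_{j=0..n-1} C(n-1,j) * (1-t)^(n-1-j) * t^j * P_j

  where the C(n-1,j) factors come from Permutate() and alpha is updated
  incrementally by multiplying with t/(1-t) at each step.
*/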
static MagickBooleanType TraceCircle(MVGInfo *mvg_info,const PointInfo start,
const PointInfo end)
{
double
alpha,
beta,
radius;
PointInfo
offset,
degrees;
alpha=end.x-start.x;
beta=end.y-start.y;
radius=hypot((double) alpha,(double) beta);
offset.x=(double) radius;
offset.y=(double) radius;
degrees.x=0.0;
degrees.y=360.0;
return(TraceEllipse(mvg_info,start,offset,degrees));
}
static MagickBooleanType TraceEllipse(MVGInfo *mvg_info,const PointInfo center,
const PointInfo radii,const PointInfo arc)
{
double
coordinates,
delta,
step,
x,
y;
PointInfo
angle,
point;
PrimitiveInfo
*primitive_info;
register PrimitiveInfo
*p;
register ssize_t
i;
/*
Ellipses are just short segmented polys.
*/
primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
primitive_info->coordinates=0;
if ((fabs(radii.x) < MagickEpsilon) || (fabs(radii.y) < MagickEpsilon))
return(MagickTrue);
delta=2.0*PerceptibleReciprocal(MagickMax(radii.x,radii.y));
step=MagickPI/8.0;
if ((delta >= 0.0) && (delta < (MagickPI/8.0)))
step=MagickPI/4.0/(MagickPI*PerceptibleReciprocal(delta)/2.0);
angle.x=DegreesToRadians(arc.x);
y=arc.y;
while (y < arc.x)
y+=360.0;
angle.y=DegreesToRadians(y);
coordinates=ceil((angle.y-angle.x)/step+1.0);
if ((coordinates > (double) SSIZE_MAX) ||
(coordinates > (double) GetMaxMemoryRequest()))
{
(void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return(MagickFalse);
}
if (CheckPrimitiveExtent(mvg_info,(size_t) coordinates) == MagickFalse)
return(MagickFalse);
primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
for (p=primitive_info; angle.x < angle.y; angle.x+=step)
{
point.x=cos(fmod(angle.x,DegreesToRadians(360.0)))*radii.x+center.x;
point.y=sin(fmod(angle.x,DegreesToRadians(360.0)))*radii.y+center.y;
if (TracePoint(p,point) == MagickFalse)
return(MagickFalse);
p+=p->coordinates;
}
point.x=cos(fmod(angle.y,DegreesToRadians(360.0)))*radii.x+center.x;
point.y=sin(fmod(angle.y,DegreesToRadians(360.0)))*radii.y+center.y;
if (TracePoint(p,point) == MagickFalse)
return(MagickFalse);
p+=p->coordinates;
primitive_info->coordinates=(size_t) (p-primitive_info);
primitive_info->closed_subpath=MagickFalse;
x=fabs(primitive_info[0].point.x-
primitive_info[primitive_info->coordinates-1].point.x);
y=fabs(primitive_info[0].point.y-
primitive_info[primitive_info->coordinates-1].point.y);
if ((x < MagickEpsilon) && (y < MagickEpsilon))
primitive_info->closed_subpath=MagickTrue;
for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
{
p->primitive=primitive_info->primitive;
p--;
}
return(MagickTrue);
}
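/*
  Illustrative note: the ellipse is emitted as a polyline sampled every
  `step' radians (pi/8 by default, with finer sampling for larger radii),
  and the result is flagged as a closed subpath only when the first and
  last sampled points coincide to within MagickEpsilon.
*/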
static MagickBooleanType TraceLine(PrimitiveInfo *primitive_info,
const PointInfo start,const PointInfo end)
{
if (TracePoint(primitive_info,start) == MagickFalse)
return(MagickFalse);
if ((fabs(start.x-end.x) < MagickEpsilon) &&
(fabs(start.y-end.y) < MagickEpsilon))
{
primitive_info->primitive=PointPrimitive;
primitive_info->coordinates=1;
return(MagickTrue);
}
if (TracePoint(primitive_info+1,end) == MagickFalse)
return(MagickFalse);
(primitive_info+1)->primitive=primitive_info->primitive;
primitive_info->coordinates=2;
primitive_info->closed_subpath=MagickFalse;
return(MagickTrue);
}
static size_t TracePath(MVGInfo *mvg_info,const char *path,
ExceptionInfo *exception)
{
char
*next_token,
token[MagickPathExtent] = "";  /* initialized: the default case may reference it before any token is read */
const char
*p;
double
x,
y;
int
attribute,
last_attribute;
MagickBooleanType
status;
PointInfo
end = {0.0, 0.0},
points[4] = { {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0} },
point = {0.0, 0.0},
start = {0.0, 0.0};
PrimitiveInfo
*primitive_info;
PrimitiveType
primitive_type;
register PrimitiveInfo
*q;
register ssize_t
i;
size_t
number_coordinates,
z_count;
ssize_t
subpath_offset;
subpath_offset=mvg_info->offset;
primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
status=MagickTrue;
attribute=0;
number_coordinates=0;
z_count=0;
primitive_type=primitive_info->primitive;
q=primitive_info;
for (p=path; *p != '\0'; )
{
if (status == MagickFalse)
break;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == '\0')
break;
last_attribute=attribute;
attribute=(int) (*p++);
switch (attribute)
{
case 'a':
case 'A':
{
double
angle = 0.0;
MagickBooleanType
large_arc = MagickFalse,
sweep = MagickFalse;
PointInfo
arc = {0.0, 0.0};
/*
Elliptical arc.
*/
do
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
arc.x=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
arc.y=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
angle=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
large_arc=StringToLong(token) != 0 ? MagickTrue : MagickFalse;
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
sweep=StringToLong(token) != 0 ? MagickTrue : MagickFalse;
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
x=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
y=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
end.x=(double) (attribute == (int) 'A' ? x : point.x+x);
end.y=(double) (attribute == (int) 'A' ? y : point.y+y);
if (TraceArcPath(mvg_info,point,end,arc,angle,large_arc,sweep) == MagickFalse)
return(0);
q=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
point=end;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'c':
case 'C':
{
/*
Cubic Bézier curve.
*/
do
{
points[0]=point;
for (i=1; i < 4; i++)
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
x=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
y=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
end.x=(double) (attribute == (int) 'C' ? x : point.x+x);
end.y=(double) (attribute == (int) 'C' ? y : point.y+y);
points[i]=end;
}
for (i=0; i < 4; i++)
(q+i)->point=points[i];
if (TraceBezier(mvg_info,4) == MagickFalse)
return(0);
q=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
point=end;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'H':
case 'h':
{
do
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
x=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
point.x=(double) (attribute == (int) 'H' ? x: point.x+x);
if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
return(0);
q=(*mvg_info->primitive_info)+mvg_info->offset;
if (TracePoint(q,point) == MagickFalse)
return(0);
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'l':
case 'L':
{
/*
Line to.
*/
do
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
x=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
y=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
point.x=(double) (attribute == (int) 'L' ? x : point.x+x);
point.y=(double) (attribute == (int) 'L' ? y : point.y+y);
if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
return(0);
q=(*mvg_info->primitive_info)+mvg_info->offset;
if (TracePoint(q,point) == MagickFalse)
return(0);
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'M':
case 'm':
{
/*
Move to.
*/
if (mvg_info->offset != subpath_offset)
{
primitive_info=(*mvg_info->primitive_info)+subpath_offset;
primitive_info->coordinates=(size_t) (q-primitive_info);
number_coordinates+=primitive_info->coordinates;
primitive_info=q;
subpath_offset=mvg_info->offset;
}
i=0;
do
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
x=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
y=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
point.x=(double) (attribute == (int) 'M' ? x : point.x+x);
point.y=(double) (attribute == (int) 'M' ? y : point.y+y);
if (i == 0)
start=point;
i++;
if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
return(0);
q=(*mvg_info->primitive_info)+mvg_info->offset;
if (TracePoint(q,point) == MagickFalse)
return(0);
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'q':
case 'Q':
{
/*
Quadratic Bézier curve.
*/
do
{
points[0]=point;
for (i=1; i < 3; i++)
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
x=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
y=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (*p == ',')
p++;
end.x=(double) (attribute == (int) 'Q' ? x : point.x+x);
end.y=(double) (attribute == (int) 'Q' ? y : point.y+y);
points[i]=end;
}
for (i=0; i < 3; i++)
(q+i)->point=points[i];
if (TraceBezier(mvg_info,3) == MagickFalse)
return(0);
q=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
point=end;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 's':
case 'S':
{
/*
Cubic Bézier curve.
*/
do
{
points[0]=points[3];
points[1].x=2.0*points[3].x-points[2].x;
points[1].y=2.0*points[3].y-points[2].y;
for (i=2; i < 4; i++)
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
x=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
y=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (*p == ',')
p++;
end.x=(double) (attribute == (int) 'S' ? x : point.x+x);
end.y=(double) (attribute == (int) 'S' ? y : point.y+y);
points[i]=end;
}
if (strchr("CcSs",last_attribute) == (char *) NULL)
{
points[0]=point;
points[1]=point;
}
for (i=0; i < 4; i++)
(q+i)->point=points[i];
if (TraceBezier(mvg_info,4) == MagickFalse)
return(0);
q=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
point=end;
last_attribute=attribute;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 't':
case 'T':
{
/*
Quadratic Bézier curve.
*/
do
{
points[0]=points[2];
points[1].x=2.0*points[2].x-points[1].x;
points[1].y=2.0*points[2].y-points[1].y;
for (i=2; i < 3; i++)
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
x=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
y=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
end.x=(double) (attribute == (int) 'T' ? x : point.x+x);
end.y=(double) (attribute == (int) 'T' ? y : point.y+y);
points[i]=end;
}
if (status == MagickFalse)
break;
if (strchr("QqTt",last_attribute) == (char *) NULL)
{
points[0]=point;
points[1]=point;
}
for (i=0; i < 3; i++)
(q+i)->point=points[i];
if (TraceBezier(mvg_info,3) == MagickFalse)
return(0);
q=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
point=end;
last_attribute=attribute;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'v':
case 'V':
{
/*
Line to.
*/
do
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
y=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
point.y=(double) (attribute == (int) 'V' ? y : point.y+y);
if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
return(0);
q=(*mvg_info->primitive_info)+mvg_info->offset;
if (TracePoint(q,point) == MagickFalse)
return(0);
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'z':
case 'Z':
{
/*
Close path.
*/
point=start;
if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
return(0);
q=(*mvg_info->primitive_info)+mvg_info->offset;
if (TracePoint(q,point) == MagickFalse)
return(0);
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
primitive_info=(*mvg_info->primitive_info)+subpath_offset;
primitive_info->coordinates=(size_t) (q-primitive_info);
primitive_info->closed_subpath=MagickTrue;
number_coordinates+=primitive_info->coordinates;
primitive_info=q;
subpath_offset=mvg_info->offset;
z_count++;
break;
}
default:
{
ThrowPointExpectedException(token,exception);
break;
}
}
}
if (status == MagickFalse)
return(0);
primitive_info=(*mvg_info->primitive_info)+subpath_offset;
primitive_info->coordinates=(size_t) (q-primitive_info);
number_coordinates+=primitive_info->coordinates;
for (i=0; i < (ssize_t) number_coordinates; i++)
{
q--;
q->primitive=primitive_type;
if (z_count > 1)
q->method=FillToBorderMethod;
}
q=primitive_info;
return(number_coordinates);
}
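/*
  Illustrative input (a sketch): TracePath() consumes SVG-style path data,
  for example

    "M 10,10 L 90,10 90,90 Z"

  which emits a move-to, two line-to points, and a close-path; the 'Z'
  copies the subpath start point and marks closed_subpath=MagickTrue.
*/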
static MagickBooleanType TraceRectangle(PrimitiveInfo *primitive_info,
const PointInfo start,const PointInfo end)
{
PointInfo
point;
register PrimitiveInfo
*p;
register ssize_t
i;
p=primitive_info;
if (TracePoint(p,start) == MagickFalse)
return(MagickFalse);
p+=p->coordinates;
point.x=start.x;
point.y=end.y;
if (TracePoint(p,point) == MagickFalse)
return(MagickFalse);
p+=p->coordinates;
if (TracePoint(p,end) == MagickFalse)
return(MagickFalse);
p+=p->coordinates;
point.x=end.x;
point.y=start.y;
if (TracePoint(p,point) == MagickFalse)
return(MagickFalse);
p+=p->coordinates;
if (TracePoint(p,start) == MagickFalse)
return(MagickFalse);
p+=p->coordinates;
primitive_info->coordinates=(size_t) (p-primitive_info);
primitive_info->closed_subpath=MagickTrue;
for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
{
p->primitive=primitive_info->primitive;
p--;
}
return(MagickTrue);
}
static MagickBooleanType TraceRoundRectangle(MVGInfo *mvg_info,
const PointInfo start,const PointInfo end,PointInfo arc)
{
PointInfo
degrees,
point,
segment;
PrimitiveInfo
*primitive_info;
register PrimitiveInfo
*p;
register ssize_t
i;
ssize_t
offset;
offset=mvg_info->offset;
segment.x=fabs(end.x-start.x);
segment.y=fabs(end.y-start.y);
if ((segment.x < MagickEpsilon) || (segment.y < MagickEpsilon))
{
(*mvg_info->primitive_info+mvg_info->offset)->coordinates=0;
return(MagickTrue);
}
if (arc.x > (0.5*segment.x))
arc.x=0.5*segment.x;
if (arc.y > (0.5*segment.y))
arc.y=0.5*segment.y;
point.x=start.x+segment.x-arc.x;
point.y=start.y+arc.y;
degrees.x=270.0;
degrees.y=360.0;
if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
return(MagickFalse);
p=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=p->coordinates;
point.x=start.x+segment.x-arc.x;
point.y=start.y+segment.y-arc.y;
degrees.x=0.0;
degrees.y=90.0;
if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
return(MagickFalse);
p=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=p->coordinates;
point.x=start.x+arc.x;
point.y=start.y+segment.y-arc.y;
degrees.x=90.0;
degrees.y=180.0;
if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
return(MagickFalse);
p=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=p->coordinates;
point.x=start.x+arc.x;
point.y=start.y+arc.y;
degrees.x=180.0;
degrees.y=270.0;
if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
return(MagickFalse);
p=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=p->coordinates;
if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
return(MagickFalse);
p=(*mvg_info->primitive_info)+mvg_info->offset;
if (TracePoint(p,(*mvg_info->primitive_info+offset)->point) == MagickFalse)
return(MagickFalse);
p+=p->coordinates;
mvg_info->offset=offset;
primitive_info=(*mvg_info->primitive_info)+offset;
primitive_info->coordinates=(size_t) (p-primitive_info);
primitive_info->closed_subpath=MagickTrue;
for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
{
p->primitive=primitive_info->primitive;
p--;
}
return(MagickTrue);
}
static MagickBooleanType TraceSquareLinecap(PrimitiveInfo *primitive_info,
const size_t number_vertices,const double offset)
{
double
distance;
register double
dx,
dy;
register ssize_t
i;
ssize_t
j;
dx=0.0;
dy=0.0;
for (i=1; i < (ssize_t) number_vertices; i++)
{
dx=primitive_info[0].point.x-primitive_info[i].point.x;
dy=primitive_info[0].point.y-primitive_info[i].point.y;
if ((fabs((double) dx) >= MagickEpsilon) ||
(fabs((double) dy) >= MagickEpsilon))
break;
}
if (i == (ssize_t) number_vertices)
i=(ssize_t) number_vertices-1L;
distance=hypot((double) dx,(double) dy);
primitive_info[0].point.x=(double) (primitive_info[i].point.x+
dx*(distance+offset)/distance);
primitive_info[0].point.y=(double) (primitive_info[i].point.y+
dy*(distance+offset)/distance);
for (j=(ssize_t) number_vertices-2; j >= 0; j--)
{
dx=primitive_info[number_vertices-1].point.x-primitive_info[j].point.x;
dy=primitive_info[number_vertices-1].point.y-primitive_info[j].point.y;
if ((fabs((double) dx) >= MagickEpsilon) ||
(fabs((double) dy) >= MagickEpsilon))
break;
}
distance=hypot((double) dx,(double) dy);
primitive_info[number_vertices-1].point.x=(double) (primitive_info[j].point.x+
dx*(distance+offset)/distance);
primitive_info[number_vertices-1].point.y=(double) (primitive_info[j].point.y+
dy*(distance+offset)/distance);
return(MagickTrue);
}
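/*
  Illustrative note: the square cap is produced by sliding each end vertex
  outward along its own segment, scaling the (dx,dy) direction by
  (distance+offset)/distance, where offset is half the stroke width.
*/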
static PrimitiveInfo *TraceStrokePolygon(const Image *image,
const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
{
#define CheckPathExtent(pad) \
if ((ssize_t) (q+(pad)) >= (ssize_t) max_strokes) \
{ \
if (~max_strokes < (pad)) \
{ \
path_p=(PointInfo *) RelinquishMagickMemory(path_p); \
path_q=(PointInfo *) RelinquishMagickMemory(path_q); \
} \
else \
{ \
max_strokes+=(pad); \
path_p=(PointInfo *) ResizeQuantumMemory(path_p,max_strokes, \
sizeof(*path_p)); \
path_q=(PointInfo *) ResizeQuantumMemory(path_q,max_strokes, \
sizeof(*path_q)); \
} \
if ((path_p == (PointInfo *) NULL) || (path_q == (PointInfo *) NULL)) \
{ \
if (path_p != (PointInfo *) NULL) \
path_p=(PointInfo *) RelinquishMagickMemory(path_p); \
if (path_q != (PointInfo *) NULL) \
path_q=(PointInfo *) RelinquishMagickMemory(path_q); \
polygon_primitive=(PrimitiveInfo *) \
RelinquishMagickMemory(polygon_primitive); \
return((PrimitiveInfo *) NULL); \
} \
}
typedef struct _LineSegment
{
double
p,
q;
} LineSegment;
double
delta_theta,
dot_product,
mid,
miterlimit;
LineSegment
dx = {0,0},
dy = {0,0},
inverse_slope = {0,0},
slope = {0,0},
theta = {0,0};
MagickBooleanType
closed_path;
PointInfo
box_p[5],
box_q[5],
center,
offset,
*path_p,
*path_q;
PrimitiveInfo
*polygon_primitive,
*stroke_polygon;
register ssize_t
i;
size_t
arc_segments,
max_strokes,
number_vertices;
ssize_t
j,
n,
p,
q;
/*
Allocate paths.
*/
number_vertices=primitive_info->coordinates;
max_strokes=2*number_vertices+6*BezierQuantum+360;
polygon_primitive=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
number_vertices+2UL,sizeof(*polygon_primitive));
if (polygon_primitive == (PrimitiveInfo *) NULL)
return((PrimitiveInfo *) NULL);
(void) memcpy(polygon_primitive,primitive_info,(size_t) number_vertices*
sizeof(*polygon_primitive));
closed_path=primitive_info[0].closed_subpath;
if (((draw_info->linejoin == RoundJoin) ||
(draw_info->linejoin == MiterJoin)) && (closed_path != MagickFalse))
{
polygon_primitive[number_vertices]=primitive_info[1];
number_vertices++;
}
polygon_primitive[number_vertices].primitive=UndefinedPrimitive;
/*
Compute the slope for the first line segment, p.
*/
dx.p=0.0;
dy.p=0.0;
for (n=1; n < (ssize_t) number_vertices; n++)
{
dx.p=polygon_primitive[n].point.x-polygon_primitive[0].point.x;
dy.p=polygon_primitive[n].point.y-polygon_primitive[0].point.y;
if ((fabs(dx.p) >= MagickEpsilon) || (fabs(dy.p) >= MagickEpsilon))
break;
}
if (n == (ssize_t) number_vertices)
{
if ((draw_info->linecap != RoundCap) || (closed_path != MagickFalse))
{
/*
Zero length subpath.
*/
stroke_polygon=(PrimitiveInfo *) AcquireCriticalMemory(
sizeof(*stroke_polygon));
stroke_polygon[0]=polygon_primitive[0];
stroke_polygon[0].coordinates=0;
polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(
polygon_primitive);
return(stroke_polygon);
}
n=(ssize_t) number_vertices-1L;
}
path_p=(PointInfo *) AcquireQuantumMemory((size_t) max_strokes,
sizeof(*path_p));
if (path_p == (PointInfo *) NULL)
{
polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(
polygon_primitive);
return((PrimitiveInfo *) NULL);
}
path_q=(PointInfo *) AcquireQuantumMemory((size_t) max_strokes,
sizeof(*path_q));
if (path_q == (PointInfo *) NULL)
{
path_p=(PointInfo *) RelinquishMagickMemory(path_p);
polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(
polygon_primitive);
return((PrimitiveInfo *) NULL);
}
slope.p=0.0;
inverse_slope.p=0.0;
if (fabs(dx.p) < MagickEpsilon)
{
if (dx.p >= 0.0)
slope.p=dy.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
else
slope.p=dy.p < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
}
else
if (fabs(dy.p) < MagickEpsilon)
{
if (dy.p >= 0.0)
inverse_slope.p=dx.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
else
inverse_slope.p=dx.p < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
}
else
{
slope.p=dy.p/dx.p;
inverse_slope.p=(-1.0/slope.p);
}
mid=ExpandAffine(&draw_info->affine)*SaneStrokeWidth(image,draw_info)/2.0;
miterlimit=(double) (draw_info->miterlimit*draw_info->miterlimit*mid*mid);
if ((draw_info->linecap == SquareCap) && (closed_path == MagickFalse))
(void) TraceSquareLinecap(polygon_primitive,number_vertices,mid);
offset.x=sqrt((double) (mid*mid/(inverse_slope.p*inverse_slope.p+1.0)));
offset.y=(double) (offset.x*inverse_slope.p);
if ((dy.p*offset.x-dx.p*offset.y) > 0.0)
{
box_p[0].x=polygon_primitive[0].point.x-offset.x;
box_p[0].y=polygon_primitive[0].point.y-offset.x*inverse_slope.p;
box_p[1].x=polygon_primitive[n].point.x-offset.x;
box_p[1].y=polygon_primitive[n].point.y-offset.x*inverse_slope.p;
box_q[0].x=polygon_primitive[0].point.x+offset.x;
box_q[0].y=polygon_primitive[0].point.y+offset.x*inverse_slope.p;
box_q[1].x=polygon_primitive[n].point.x+offset.x;
box_q[1].y=polygon_primitive[n].point.y+offset.x*inverse_slope.p;
}
else
{
box_p[0].x=polygon_primitive[0].point.x+offset.x;
box_p[0].y=polygon_primitive[0].point.y+offset.y;
box_p[1].x=polygon_primitive[n].point.x+offset.x;
box_p[1].y=polygon_primitive[n].point.y+offset.y;
box_q[0].x=polygon_primitive[0].point.x-offset.x;
box_q[0].y=polygon_primitive[0].point.y-offset.y;
box_q[1].x=polygon_primitive[n].point.x-offset.x;
box_q[1].y=polygon_primitive[n].point.y-offset.y;
}
/*
Create strokes for the line join attribute: bevel, miter, round.
*/
p=0;
q=0;
path_q[p++]=box_q[0];
path_p[q++]=box_p[0];
for (i=(ssize_t) n+1; i < (ssize_t) number_vertices; i++)
{
/*
Compute the slope for this line segment, q.
*/
dx.q=polygon_primitive[i].point.x-polygon_primitive[n].point.x;
dy.q=polygon_primitive[i].point.y-polygon_primitive[n].point.y;
dot_product=dx.q*dx.q+dy.q*dy.q;
if (dot_product < 0.25)
continue;
slope.q=0.0;
inverse_slope.q=0.0;
if (fabs(dx.q) < MagickEpsilon)
{
if (dx.q >= 0.0)
slope.q=dy.q < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
else
slope.q=dy.q < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
}
else
if (fabs(dy.q) < MagickEpsilon)
{
if (dy.q >= 0.0)
inverse_slope.q=dx.q < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
else
inverse_slope.q=dx.q < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
}
else
{
slope.q=dy.q/dx.q;
inverse_slope.q=(-1.0/slope.q);
}
offset.x=sqrt((double) (mid*mid/(inverse_slope.q*inverse_slope.q+1.0)));
offset.y=(double) (offset.x*inverse_slope.q);
dot_product=dy.q*offset.x-dx.q*offset.y;
if (dot_product > 0.0)
{
box_p[2].x=polygon_primitive[n].point.x-offset.x;
box_p[2].y=polygon_primitive[n].point.y-offset.y;
box_p[3].x=polygon_primitive[i].point.x-offset.x;
box_p[3].y=polygon_primitive[i].point.y-offset.y;
box_q[2].x=polygon_primitive[n].point.x+offset.x;
box_q[2].y=polygon_primitive[n].point.y+offset.y;
box_q[3].x=polygon_primitive[i].point.x+offset.x;
box_q[3].y=polygon_primitive[i].point.y+offset.y;
}
else
{
box_p[2].x=polygon_primitive[n].point.x+offset.x;
box_p[2].y=polygon_primitive[n].point.y+offset.y;
box_p[3].x=polygon_primitive[i].point.x+offset.x;
box_p[3].y=polygon_primitive[i].point.y+offset.y;
box_q[2].x=polygon_primitive[n].point.x-offset.x;
box_q[2].y=polygon_primitive[n].point.y-offset.y;
box_q[3].x=polygon_primitive[i].point.x-offset.x;
box_q[3].y=polygon_primitive[i].point.y-offset.y;
}
if (fabs((double) (slope.p-slope.q)) < MagickEpsilon)
{
box_p[4]=box_p[1];
box_q[4]=box_q[1];
}
else
{
box_p[4].x=(double) ((slope.p*box_p[0].x-box_p[0].y-slope.q*box_p[3].x+
box_p[3].y)/(slope.p-slope.q));
box_p[4].y=(double) (slope.p*(box_p[4].x-box_p[0].x)+box_p[0].y);
box_q[4].x=(double) ((slope.p*box_q[0].x-box_q[0].y-slope.q*box_q[3].x+
box_q[3].y)/(slope.p-slope.q));
box_q[4].y=(double) (slope.p*(box_q[4].x-box_q[0].x)+box_q[0].y);
}
CheckPathExtent(6*BezierQuantum+360);
dot_product=dx.q*dy.p-dx.p*dy.q;
if (dot_product <= 0.0)
switch (draw_info->linejoin)
{
case BevelJoin:
{
path_q[q++]=box_q[1];
path_q[q++]=box_q[2];
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
path_p[p++]=box_p[4];
else
{
path_p[p++]=box_p[1];
path_p[p++]=box_p[2];
}
break;
}
case MiterJoin:
{
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
{
path_q[q++]=box_q[4];
path_p[p++]=box_p[4];
}
else
{
path_q[q++]=box_q[1];
path_q[q++]=box_q[2];
path_p[p++]=box_p[1];
path_p[p++]=box_p[2];
}
break;
}
case RoundJoin:
{
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
path_p[p++]=box_p[4];
else
{
path_p[p++]=box_p[1];
path_p[p++]=box_p[2];
}
center=polygon_primitive[n].point;
theta.p=atan2(box_q[1].y-center.y,box_q[1].x-center.x);
theta.q=atan2(box_q[2].y-center.y,box_q[2].x-center.x);
if (theta.q < theta.p)
theta.q+=2.0*MagickPI;
arc_segments=(size_t) ceil((double) ((theta.q-theta.p)/
(2.0*sqrt((double) (1.0/mid)))));
CheckPathExtent(arc_segments+6*BezierQuantum+360);
path_q[q].x=box_q[1].x;
path_q[q].y=box_q[1].y;
q++;
for (j=1; j < (ssize_t) arc_segments; j++)
{
delta_theta=(double) (j*(theta.q-theta.p)/arc_segments);
path_q[q].x=(double) (center.x+mid*cos(fmod((double)
(theta.p+delta_theta),DegreesToRadians(360.0))));
path_q[q].y=(double) (center.y+mid*sin(fmod((double)
(theta.p+delta_theta),DegreesToRadians(360.0))));
q++;
}
path_q[q++]=box_q[2];
break;
}
default:
break;
}
else
switch (draw_info->linejoin)
{
case BevelJoin:
{
path_p[p++]=box_p[1];
path_p[p++]=box_p[2];
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
path_q[q++]=box_q[4];
else
{
path_q[q++]=box_q[1];
path_q[q++]=box_q[2];
}
break;
}
case MiterJoin:
{
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
{
path_q[q++]=box_q[4];
path_p[p++]=box_p[4];
}
else
{
path_q[q++]=box_q[1];
path_q[q++]=box_q[2];
path_p[p++]=box_p[1];
path_p[p++]=box_p[2];
}
break;
}
case RoundJoin:
{
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
path_q[q++]=box_q[4];
else
{
path_q[q++]=box_q[1];
path_q[q++]=box_q[2];
}
center=polygon_primitive[n].point;
theta.p=atan2(box_p[1].y-center.y,box_p[1].x-center.x);
theta.q=atan2(box_p[2].y-center.y,box_p[2].x-center.x);
if (theta.p < theta.q)
theta.p+=2.0*MagickPI;
arc_segments=(size_t) ceil((double) ((theta.p-theta.q)/
(2.0*sqrt((double) (1.0/mid)))));
CheckPathExtent(arc_segments+6*BezierQuantum+360);
path_p[p++]=box_p[1];
for (j=1; j < (ssize_t) arc_segments; j++)
{
delta_theta=(double) (j*(theta.q-theta.p)/arc_segments);
path_p[p].x=(double) (center.x+mid*cos(fmod((double)
(theta.p+delta_theta),DegreesToRadians(360.0))));
path_p[p].y=(double) (center.y+mid*sin(fmod((double)
(theta.p+delta_theta),DegreesToRadians(360.0))));
p++;
}
path_p[p++]=box_p[2];
break;
}
default:
break;
}
slope.p=slope.q;
inverse_slope.p=inverse_slope.q;
box_p[0]=box_p[2];
box_p[1]=box_p[3];
box_q[0]=box_q[2];
box_q[1]=box_q[3];
dx.p=dx.q;
dy.p=dy.q;
n=i;
}
path_p[p++]=box_p[1];
path_q[q++]=box_q[1];
/*
Trace stroked polygon.
*/
stroke_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
(p+q+2UL*closed_path+2UL),sizeof(*stroke_polygon));
if (stroke_polygon != (PrimitiveInfo *) NULL)
{
for (i=0; i < (ssize_t) p; i++)
{
stroke_polygon[i]=polygon_primitive[0];
stroke_polygon[i].point=path_p[i];
}
if (closed_path != MagickFalse)
{
stroke_polygon[i]=polygon_primitive[0];
stroke_polygon[i].point=stroke_polygon[0].point;
i++;
}
for ( ; i < (ssize_t) (p+q+closed_path); i++)
{
stroke_polygon[i]=polygon_primitive[0];
stroke_polygon[i].point=path_q[p+q+closed_path-(i+1)];
}
if (closed_path != MagickFalse)
{
stroke_polygon[i]=polygon_primitive[0];
stroke_polygon[i].point=stroke_polygon[p+closed_path].point;
i++;
}
stroke_polygon[i]=polygon_primitive[0];
stroke_polygon[i].point=stroke_polygon[0].point;
i++;
stroke_polygon[i].primitive=UndefinedPrimitive;
stroke_polygon[0].coordinates=(size_t) (p+q+2*closed_path+1);
}
path_p=(PointInfo *) RelinquishMagickMemory(path_p);
path_q=(PointInfo *) RelinquishMagickMemory(path_q);
polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(polygon_primitive);
return(stroke_polygon);
}
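/*
  Illustrative summary: TraceStrokePolygon() offsets every segment by half
  the stroke width on both sides (path_p and path_q), resolves each join
  per draw_info->linejoin -- bevel, miter, or round, falling back to a
  bevel when the squared miter distance exceeds miterlimit -- and finally
  stitches path_p forward and path_q backward into a single closed polygon
  that the caller fills with the stroke color.
*/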
| ./CrossVul/dataset_final_sorted/CWE-416/c/good_1186_0 |
crossvul-cpp_data_good_1857_0 | /*
* Interface between ext4 and JBD
*/
#include "ext4_jbd2.h"
#include <trace/events/ext4.h>
/* Just increment the non-pointer handle value */
static handle_t *ext4_get_nojournal(void)
{
handle_t *handle = current->journal_info;
unsigned long ref_cnt = (unsigned long)handle;
BUG_ON(ref_cnt >= EXT4_NOJOURNAL_MAX_REF_COUNT);
ref_cnt++;
handle = (handle_t *)ref_cnt;
current->journal_info = handle;
return handle;
}
/* Decrement the non-pointer handle value */
static void ext4_put_nojournal(handle_t *handle)
{
unsigned long ref_cnt = (unsigned long)handle;
BUG_ON(ref_cnt == 0);
ref_cnt--;
handle = (handle_t *)ref_cnt;
current->journal_info = handle;
}
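/*
 * Illustrative note: in no-journal mode the handle is not a real pointer;
 * current->journal_info holds a small integer reference count cast to
 * handle_t *.  Nesting therefore behaves like:
 *
 *    handle = ext4_get_nojournal();   journal_info == (handle_t *)1
 *    handle = ext4_get_nojournal();   journal_info == (handle_t *)2
 *    ext4_put_nojournal(handle);      journal_info == (handle_t *)1
 *    ext4_put_nojournal(handle);      journal_info == NULL
 */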
/*
* Wrappers for jbd2_journal_start/end.
*/
static int ext4_journal_check_start(struct super_block *sb)
{
journal_t *journal;
might_sleep();
if (sb->s_flags & MS_RDONLY)
return -EROFS;
WARN_ON(sb->s_writers.frozen == SB_FREEZE_COMPLETE);
journal = EXT4_SB(sb)->s_journal;
/*
* Special case here: if the journal has aborted behind our
* backs (e.g. EIO in the commit thread), then we still need to
* take the FS itself readonly cleanly.
*/
if (journal && is_journal_aborted(journal)) {
ext4_abort(sb, "Detected aborted journal");
return -EROFS;
}
return 0;
}
handle_t *__ext4_journal_start_sb(struct super_block *sb, unsigned int line,
int type, int blocks, int rsv_blocks)
{
journal_t *journal;
int err;
trace_ext4_journal_start(sb, blocks, rsv_blocks, _RET_IP_);
err = ext4_journal_check_start(sb);
if (err < 0)
return ERR_PTR(err);
journal = EXT4_SB(sb)->s_journal;
if (!journal)
return ext4_get_nojournal();
return jbd2__journal_start(journal, blocks, rsv_blocks, GFP_NOFS,
type, line);
}
int __ext4_journal_stop(const char *where, unsigned int line, handle_t *handle)
{
struct super_block *sb;
int err;
int rc;
if (!ext4_handle_valid(handle)) {
ext4_put_nojournal(handle);
return 0;
}
err = handle->h_err;
if (!handle->h_transaction) {
rc = jbd2_journal_stop(handle);
return err ? err : rc;
}
sb = handle->h_transaction->t_journal->j_private;
rc = jbd2_journal_stop(handle);
if (!err)
err = rc;
if (err)
__ext4_std_error(sb, where, line, err);
return err;
}
handle_t *__ext4_journal_start_reserved(handle_t *handle, unsigned int line,
int type)
{
struct super_block *sb;
int err;
if (!ext4_handle_valid(handle))
return ext4_get_nojournal();
sb = handle->h_journal->j_private;
trace_ext4_journal_start_reserved(sb, handle->h_buffer_credits,
_RET_IP_);
err = ext4_journal_check_start(sb);
if (err < 0) {
jbd2_journal_free_reserved(handle);
return ERR_PTR(err);
}
err = jbd2_journal_start_reserved(handle, type, line);
if (err < 0)
return ERR_PTR(err);
return handle;
}
static void ext4_journal_abort_handle(const char *caller, unsigned int line,
const char *err_fn,
struct buffer_head *bh,
handle_t *handle, int err)
{
char nbuf[16];
const char *errstr = ext4_decode_error(NULL, err, nbuf);
BUG_ON(!ext4_handle_valid(handle));
if (bh)
BUFFER_TRACE(bh, "abort");
if (!handle->h_err)
handle->h_err = err;
if (is_handle_aborted(handle))
return;
printk(KERN_ERR "EXT4-fs: %s:%d: aborting transaction: %s in %s\n",
caller, line, errstr, err_fn);
jbd2_journal_abort_handle(handle);
}
int __ext4_journal_get_write_access(const char *where, unsigned int line,
handle_t *handle, struct buffer_head *bh)
{
int err = 0;
might_sleep();
if (ext4_handle_valid(handle)) {
err = jbd2_journal_get_write_access(handle, bh);
if (err)
ext4_journal_abort_handle(where, line, __func__, bh,
handle, err);
}
return err;
}
/*
* The ext4 forget function must perform a revoke if we are freeing data
* which has been journaled. Metadata (eg. indirect blocks) must be
* revoked in all cases.
*
* "bh" may be NULL: a metadata block may have been freed from memory
* but there may still be a record of it in the journal, and that record
* still needs to be revoked.
*
* If the handle isn't valid we're not journaling, so we simply
* bforget() the buffer head and return.
*/
int __ext4_forget(const char *where, unsigned int line, handle_t *handle,
int is_metadata, struct inode *inode,
struct buffer_head *bh, ext4_fsblk_t blocknr)
{
int err;
might_sleep();
trace_ext4_forget(inode, is_metadata, blocknr);
BUFFER_TRACE(bh, "enter");
jbd_debug(4, "forgetting bh %p: is_metadata = %d, mode %o, "
"data mode %x\n",
bh, is_metadata, inode->i_mode,
test_opt(inode->i_sb, DATA_FLAGS));
/* In the no journal case, we can just do a bforget and return */
if (!ext4_handle_valid(handle)) {
bforget(bh);
return 0;
}
/* Never use the revoke function if we are doing full data
* journaling: there is no need to, and a V1 superblock won't
* support it. Otherwise, only skip the revoke on un-journaled
* data blocks. */
if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA ||
(!is_metadata && !ext4_should_journal_data(inode))) {
if (bh) {
BUFFER_TRACE(bh, "call jbd2_journal_forget");
err = jbd2_journal_forget(handle, bh);
if (err)
ext4_journal_abort_handle(where, line, __func__,
bh, handle, err);
return err;
}
return 0;
}
/*
* data!=journal && (is_metadata || should_journal_data(inode))
*/
BUFFER_TRACE(bh, "call jbd2_journal_revoke");
err = jbd2_journal_revoke(handle, blocknr, bh);
if (err) {
ext4_journal_abort_handle(where, line, __func__,
bh, handle, err);
__ext4_abort(inode->i_sb, where, line,
"error %d when attempting revoke", err);
}
BUFFER_TRACE(bh, "exit");
return err;
}
int __ext4_journal_get_create_access(const char *where, unsigned int line,
handle_t *handle, struct buffer_head *bh)
{
int err = 0;
if (ext4_handle_valid(handle)) {
err = jbd2_journal_get_create_access(handle, bh);
if (err)
ext4_journal_abort_handle(where, line, __func__,
bh, handle, err);
}
return err;
}
int __ext4_handle_dirty_metadata(const char *where, unsigned int line,
handle_t *handle, struct inode *inode,
struct buffer_head *bh)
{
int err = 0;
might_sleep();
set_buffer_meta(bh);
set_buffer_prio(bh);
if (ext4_handle_valid(handle)) {
err = jbd2_journal_dirty_metadata(handle, bh);
/* Errors can only happen due to aborted journal or a nasty bug */
if (!is_handle_aborted(handle) && WARN_ON_ONCE(err)) {
ext4_journal_abort_handle(where, line, __func__, bh,
handle, err);
if (inode == NULL) {
pr_err("EXT4: jbd2_journal_dirty_metadata "
"failed: handle type %u started at "
"line %u, credits %u/%u, errcode %d",
handle->h_type,
handle->h_line_no,
handle->h_requested_credits,
handle->h_buffer_credits, err);
return err;
}
ext4_error_inode(inode, where, line,
bh->b_blocknr,
"journal_dirty_metadata failed: "
"handle type %u started at line %u, "
"credits %u/%u, errcode %d",
handle->h_type,
handle->h_line_no,
handle->h_requested_credits,
handle->h_buffer_credits, err);
}
} else {
if (inode)
mark_buffer_dirty_inode(bh, inode);
else
mark_buffer_dirty(bh);
if (inode && inode_needs_sync(inode)) {
sync_dirty_buffer(bh);
if (buffer_req(bh) && !buffer_uptodate(bh)) {
struct ext4_super_block *es;
es = EXT4_SB(inode->i_sb)->s_es;
es->s_last_error_block =
cpu_to_le64(bh->b_blocknr);
ext4_error_inode(inode, where, line,
bh->b_blocknr,
"IO error syncing itable block");
err = -EIO;
}
}
}
return err;
}
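/*
* The dirty-metadata helper above, together with
* __ext4_journal_get_write_access(), implements the canonical jbd2
* buffer update protocol. A minimal sketch of a caller, assuming
* @handle holds enough credits and @bh maps the metadata block being
* changed:
*
*	err = ext4_journal_get_write_access(handle, bh);
*	if (err)
*		return err;
*	...update the buffer contents...
*	err = ext4_handle_dirty_metadata(handle, inode, bh);
*
* Taking write access before modifying the buffer is what allows jbd2
* to copy out the old contents on behalf of a committing transaction.
*/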
int __ext4_handle_dirty_super(const char *where, unsigned int line,
handle_t *handle, struct super_block *sb)
{
struct buffer_head *bh = EXT4_SB(sb)->s_sbh;
int err = 0;
ext4_superblock_csum_set(sb);
if (ext4_handle_valid(handle)) {
err = jbd2_journal_dirty_metadata(handle, bh);
if (err)
ext4_journal_abort_handle(where, line, __func__,
bh, handle, err);
} else
mark_buffer_dirty(bh);
return err;
}
| ./CrossVul/dataset_final_sorted/CWE-416/c/good_1857_0 |
crossvul-cpp_data_bad_3994_0 | // SPDX-License-Identifier: GPL-2.0
/*
* message.c - synchronous message handling
*
* Released under the GPLv2 only.
*/
#include <linux/acpi.h>
#include <linux/pci.h> /* for scatterlist macros */
#include <linux/usb.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/timer.h>
#include <linux/ctype.h>
#include <linux/nls.h>
#include <linux/device.h>
#include <linux/scatterlist.h>
#include <linux/usb/cdc.h>
#include <linux/usb/quirks.h>
#include <linux/usb/hcd.h> /* for usbcore internals */
#include <linux/usb/of.h>
#include <asm/byteorder.h>
#include "usb.h"
static void cancel_async_set_config(struct usb_device *udev);
struct api_context {
struct completion done;
int status;
};
static void usb_api_blocking_completion(struct urb *urb)
{
struct api_context *ctx = urb->context;
ctx->status = urb->status;
complete(&ctx->done);
}
/*
* Starts urb and waits for completion or timeout. Note that this call
* is NOT interruptible. Many device driver i/o requests should be
* interruptible and therefore these drivers should implement their
* own interruptible routines.
*/
static int usb_start_wait_urb(struct urb *urb, int timeout, int *actual_length)
{
struct api_context ctx;
unsigned long expire;
int retval;
init_completion(&ctx.done);
urb->context = &ctx;
urb->actual_length = 0;
retval = usb_submit_urb(urb, GFP_NOIO);
if (unlikely(retval))
goto out;
expire = timeout ? msecs_to_jiffies(timeout) : MAX_SCHEDULE_TIMEOUT;
if (!wait_for_completion_timeout(&ctx.done, expire)) {
usb_kill_urb(urb);
retval = (ctx.status == -ENOENT ? -ETIMEDOUT : ctx.status);
dev_dbg(&urb->dev->dev,
"%s timed out on ep%d%s len=%u/%u\n",
current->comm,
usb_endpoint_num(&urb->ep->desc),
usb_urb_dir_in(urb) ? "in" : "out",
urb->actual_length,
urb->transfer_buffer_length);
} else
retval = ctx.status;
out:
if (actual_length)
*actual_length = urb->actual_length;
usb_free_urb(urb);
return retval;
}
/*-------------------------------------------------------------------*/
/* returns status (negative) or length (positive) */
static int usb_internal_control_msg(struct usb_device *usb_dev,
unsigned int pipe,
struct usb_ctrlrequest *cmd,
void *data, int len, int timeout)
{
struct urb *urb;
int retv;
int length;
urb = usb_alloc_urb(0, GFP_NOIO);
if (!urb)
return -ENOMEM;
usb_fill_control_urb(urb, usb_dev, pipe, (unsigned char *)cmd, data,
len, usb_api_blocking_completion, NULL);
retv = usb_start_wait_urb(urb, timeout, &length);
if (retv < 0)
return retv;
else
return length;
}
/**
* usb_control_msg - Builds a control urb, sends it off and waits for completion
* @dev: pointer to the usb device to send the message to
* @pipe: endpoint "pipe" to send the message to
* @request: USB message request value
* @requesttype: USB message request type value
* @value: USB message value
* @index: USB message index value
* @data: pointer to the data to send
* @size: length in bytes of the data to send
* @timeout: time in msecs to wait for the message to complete before timing
* out (if 0 the wait is forever)
*
* Context: !in_interrupt ()
*
* This function sends a simple control message to a specified endpoint and
* waits for the message to complete, or timeout.
*
* Don't use this function from within an interrupt context. If you need
* an asynchronous message, or need to send a message from within interrupt
* context, use usb_submit_urb(). If a thread in your driver uses this call,
* make sure your disconnect() method can wait for it to complete. Since you
* don't have a handle on the URB used, you can't cancel the request.
*
* Return: If successful, the number of bytes transferred. Otherwise, a negative
* error number.
*/
int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
__u8 requesttype, __u16 value, __u16 index, void *data,
__u16 size, int timeout)
{
struct usb_ctrlrequest *dr;
int ret;
dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO);
if (!dr)
return -ENOMEM;
dr->bRequestType = requesttype;
dr->bRequest = request;
dr->wValue = cpu_to_le16(value);
dr->wIndex = cpu_to_le16(index);
dr->wLength = cpu_to_le16(size);
ret = usb_internal_control_msg(dev, pipe, dr, data, size, timeout);
/* Linger a bit, prior to the next control message. */
if (dev->quirks & USB_QUIRK_DELAY_CTRL_MSG)
msleep(200);
kfree(dr);
return ret;
}
EXPORT_SYMBOL_GPL(usb_control_msg);
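/*
* A minimal usage sketch, assuming a driver that wants to read the
* device descriptor of @udev by hand:
*
*	struct usb_device_descriptor *desc;
*	int ret;
*
*	desc = kmalloc(sizeof(*desc), GFP_KERNEL);
*	if (!desc)
*		return -ENOMEM;
*	ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
*			USB_REQ_GET_DESCRIPTOR, USB_DIR_IN,
*			USB_DT_DEVICE << 8, 0, desc, sizeof(*desc),
*			USB_CTRL_GET_TIMEOUT);
*
* The data buffer must be kmalloc'ed (DMA-able) memory, never a stack
* or static buffer.
*/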
/**
* usb_interrupt_msg - Builds an interrupt urb, sends it off and waits for completion
* @usb_dev: pointer to the usb device to send the message to
* @pipe: endpoint "pipe" to send the message to
* @data: pointer to the data to send
* @len: length in bytes of the data to send
* @actual_length: pointer to a location to put the actual length transferred
* in bytes
* @timeout: time in msecs to wait for the message to complete before
* timing out (if 0 the wait is forever)
*
* Context: !in_interrupt ()
*
* This function sends a simple interrupt message to a specified endpoint and
* waits for the message to complete, or timeout.
*
* Don't use this function from within an interrupt context. If you need
* an asynchronous message, or need to send a message from within interrupt
* context, use usb_submit_urb() If a thread in your driver uses this call,
* make sure your disconnect() method can wait for it to complete. Since you
* don't have a handle on the URB used, you can't cancel the request.
*
* Return:
* If successful, 0. Otherwise a negative error number. The number of actual
* bytes transferred will be stored in the @actual_length parameter.
*/
int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
void *data, int len, int *actual_length, int timeout)
{
return usb_bulk_msg(usb_dev, pipe, data, len, actual_length, timeout);
}
EXPORT_SYMBOL_GPL(usb_interrupt_msg);
/**
* usb_bulk_msg - Builds a bulk urb, sends it off and waits for completion
* @usb_dev: pointer to the usb device to send the message to
* @pipe: endpoint "pipe" to send the message to
* @data: pointer to the data to send
* @len: length in bytes of the data to send
* @actual_length: pointer to a location to put the actual length transferred
* in bytes
* @timeout: time in msecs to wait for the message to complete before
* timing out (if 0 the wait is forever)
*
* Context: !in_interrupt ()
*
* This function sends a simple bulk message to a specified endpoint
* and waits for the message to complete, or timeout.
*
* Don't use this function from within an interrupt context. If you need
* an asynchronous message, or need to send a message from within interrupt
* context, use usb_submit_urb() If a thread in your driver uses this call,
* make sure your disconnect() method can wait for it to complete. Since you
* don't have a handle on the URB used, you can't cancel the request.
*
* Because this routine predates usb_interrupt_msg() and there is no
* USBDEVFS_INTERRUPT ioctl, callers have historically abused it by
* using it to submit URBs for
* interrupt endpoints. We will take the liberty of creating an interrupt URB
* (with the default interval) if the target is an interrupt endpoint.
*
* Return:
* If successful, 0. Otherwise a negative error number. The number of actual
* bytes transferred will be stored in the @actual_length parameter.
*
*/
int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
void *data, int len, int *actual_length, int timeout)
{
struct urb *urb;
struct usb_host_endpoint *ep;
ep = usb_pipe_endpoint(usb_dev, pipe);
if (!ep || len < 0)
return -EINVAL;
urb = usb_alloc_urb(0, GFP_KERNEL);
if (!urb)
return -ENOMEM;
if ((ep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
USB_ENDPOINT_XFER_INT) {
pipe = (pipe & ~(3 << 30)) | (PIPE_INTERRUPT << 30);
usb_fill_int_urb(urb, usb_dev, pipe, data, len,
usb_api_blocking_completion, NULL,
ep->desc.bInterval);
} else
usb_fill_bulk_urb(urb, usb_dev, pipe, data, len,
usb_api_blocking_completion, NULL);
return usb_start_wait_urb(urb, timeout, actual_length);
}
EXPORT_SYMBOL_GPL(usb_bulk_msg);
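/*
* A minimal usage sketch, assuming a driver writing @len bytes from a
* kmalloc'ed @buf to bulk-out endpoint 2 (the endpoint number and the
* 5 second timeout are illustrative):
*
*	int actual;
*	int ret;
*
*	ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, 2),
*			buf, len, &actual, 5000);
*	if (ret)
*		dev_err(&udev->dev, "bulk write failed: %d\n", ret);
*
* On success @actual reports how many bytes actually moved, which may
* be less than @len if the transfer ended early.
*/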
/*-------------------------------------------------------------------*/
static void sg_clean(struct usb_sg_request *io)
{
if (io->urbs) {
while (io->entries--)
usb_free_urb(io->urbs[io->entries]);
kfree(io->urbs);
io->urbs = NULL;
}
io->dev = NULL;
}
static void sg_complete(struct urb *urb)
{
unsigned long flags;
struct usb_sg_request *io = urb->context;
int status = urb->status;
spin_lock_irqsave(&io->lock, flags);
/* In 2.5 we require hcds' endpoint queues not to progress after fault
* reports, until the completion callback (this!) returns. That lets
* device driver code (like this routine) unlink queued urbs first,
* if it needs to, since the HC won't work on them at all. So it's
* not possible for page N+1 to overwrite page N, and so on.
*
* That's only for "hard" faults; "soft" faults (unlinks) sometimes
* complete before the HCD can get requests away from hardware,
* though never during cleanup after a hard fault.
*/
if (io->status
&& (io->status != -ECONNRESET
|| status != -ECONNRESET)
&& urb->actual_length) {
dev_err(io->dev->bus->controller,
"dev %s ep%d%s scatterlist error %d/%d\n",
io->dev->devpath,
usb_endpoint_num(&urb->ep->desc),
usb_urb_dir_in(urb) ? "in" : "out",
status, io->status);
/* BUG (); */
}
if (io->status == 0 && status && status != -ECONNRESET) {
int i, found, retval;
io->status = status;
/* the previous urbs, and this one, completed already.
* unlink pending urbs so they won't rx/tx bad data.
* careful: unlink can sometimes be synchronous...
*/
spin_unlock_irqrestore(&io->lock, flags);
for (i = 0, found = 0; i < io->entries; i++) {
if (!io->urbs[i])
continue;
if (found) {
usb_block_urb(io->urbs[i]);
retval = usb_unlink_urb(io->urbs[i]);
if (retval != -EINPROGRESS &&
retval != -ENODEV &&
retval != -EBUSY &&
retval != -EIDRM)
dev_err(&io->dev->dev,
"%s, unlink --> %d\n",
__func__, retval);
} else if (urb == io->urbs[i])
found = 1;
}
spin_lock_irqsave(&io->lock, flags);
}
/* on the last completion, signal usb_sg_wait() */
io->bytes += urb->actual_length;
io->count--;
if (!io->count)
complete(&io->complete);
spin_unlock_irqrestore(&io->lock, flags);
}
/**
* usb_sg_init - initializes scatterlist-based bulk/interrupt I/O request
* @io: request block being initialized. Until usb_sg_wait() returns,
* treat this as a pointer to an opaque block of memory.
* @dev: the usb device that will send or receive the data
* @pipe: endpoint "pipe" used to transfer the data
* @period: polling rate for interrupt endpoints, in frames or
* (for high speed endpoints) microframes; ignored for bulk
* @sg: scatterlist entries
* @nents: how many entries in the scatterlist
* @length: how many bytes to send from the scatterlist, or zero to
* send every byte identified in the list.
* @mem_flags: SLAB_* flags affecting memory allocations in this call
*
* This initializes a scatter/gather request, allocating resources such as
* I/O mappings and urb memory (except maybe memory used by USB controller
* drivers).
*
* The request must be issued using usb_sg_wait(), which waits for the I/O to
* complete (or to be canceled) and then cleans up all resources allocated by
* usb_sg_init().
*
* The request may be canceled with usb_sg_cancel(), either before or after
* usb_sg_wait() is called.
*
* Return: Zero for success, else a negative errno value.
*/
int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev,
unsigned pipe, unsigned period, struct scatterlist *sg,
int nents, size_t length, gfp_t mem_flags)
{
int i;
int urb_flags;
int use_sg;
if (!io || !dev || !sg
|| usb_pipecontrol(pipe)
|| usb_pipeisoc(pipe)
|| nents <= 0)
return -EINVAL;
spin_lock_init(&io->lock);
io->dev = dev;
io->pipe = pipe;
if (dev->bus->sg_tablesize > 0) {
use_sg = true;
io->entries = 1;
} else {
use_sg = false;
io->entries = nents;
}
/* initialize all the urbs we'll use */
io->urbs = kmalloc_array(io->entries, sizeof(*io->urbs), mem_flags);
if (!io->urbs)
goto nomem;
urb_flags = URB_NO_INTERRUPT;
if (usb_pipein(pipe))
urb_flags |= URB_SHORT_NOT_OK;
for_each_sg(sg, sg, io->entries, i) {
struct urb *urb;
unsigned len;
urb = usb_alloc_urb(0, mem_flags);
if (!urb) {
io->entries = i;
goto nomem;
}
io->urbs[i] = urb;
urb->dev = NULL;
urb->pipe = pipe;
urb->interval = period;
urb->transfer_flags = urb_flags;
urb->complete = sg_complete;
urb->context = io;
urb->sg = sg;
if (use_sg) {
/* There is no single transfer buffer */
urb->transfer_buffer = NULL;
urb->num_sgs = nents;
/* A length of zero means transfer the whole sg list */
len = length;
if (len == 0) {
struct scatterlist *sg2;
int j;
for_each_sg(sg, sg2, nents, j)
len += sg2->length;
}
} else {
/*
* Some systems can't use DMA; they use PIO instead.
* For their sakes, transfer_buffer is set whenever
* possible.
*/
if (!PageHighMem(sg_page(sg)))
urb->transfer_buffer = sg_virt(sg);
else
urb->transfer_buffer = NULL;
len = sg->length;
if (length) {
len = min_t(size_t, len, length);
length -= len;
if (length == 0)
io->entries = i + 1;
}
}
urb->transfer_buffer_length = len;
}
io->urbs[--i]->transfer_flags &= ~URB_NO_INTERRUPT;
/* transaction state */
io->count = io->entries;
io->status = 0;
io->bytes = 0;
init_completion(&io->complete);
return 0;
nomem:
sg_clean(io);
return -ENOMEM;
}
EXPORT_SYMBOL_GPL(usb_sg_init);
/**
* usb_sg_wait - synchronously execute scatter/gather request
* @io: request block handle, as initialized with usb_sg_init().
* some fields become accessible when this call returns.
* Context: !in_interrupt ()
*
* This function blocks until the specified I/O operation completes. It
* leverages the grouping of the related I/O requests to get good transfer
* rates, by queueing the requests. At higher speeds, such queuing can
* significantly improve USB throughput.
*
* There are three kinds of completion for this function.
*
* (1) success, where io->status is zero. The number of io->bytes
* transferred is as requested.
* (2) error, where io->status is a negative errno value. The number
* of io->bytes transferred before the error is usually less
* than requested, and can be nonzero.
* (3) cancellation, a type of error with status -ECONNRESET that
* is initiated by usb_sg_cancel().
*
* When this function returns, all memory allocated through usb_sg_init() or
* this call will have been freed. The request block parameter may still be
* passed to usb_sg_cancel(), or it may be freed. It could also be
* reinitialized and then reused.
*
* Data Transfer Rates:
*
* Bulk transfers are valid for full or high speed endpoints.
* The best full speed data rate is 19 packets of 64 bytes each
* per frame, or 1216 bytes per millisecond.
* The best high speed data rate is 13 packets of 512 bytes each
* per microframe, or 52 KBytes per millisecond.
*
* The reason to use interrupt transfers through this API would most likely
* be to reserve high speed bandwidth, where up to 24 KBytes per millisecond
* could be transferred. That capability is less useful for low or full
* speed interrupt endpoints, which allow at most one packet per millisecond,
* of at most 8 or 64 bytes (respectively).
*
* It is not necessary to call this function to reserve bandwidth for devices
* under an xHCI host controller, as the bandwidth is reserved when the
* configuration or interface alt setting is selected.
*/
void usb_sg_wait(struct usb_sg_request *io)
{
int i;
int entries = io->entries;
/* queue the urbs. */
spin_lock_irq(&io->lock);
i = 0;
while (i < entries && !io->status) {
int retval;
io->urbs[i]->dev = io->dev;
spin_unlock_irq(&io->lock);
retval = usb_submit_urb(io->urbs[i], GFP_NOIO);
switch (retval) {
/* maybe retrying will recover */
case -ENXIO: /* hc didn't queue this one */
case -EAGAIN:
case -ENOMEM:
retval = 0;
yield();
break;
/* no error? continue immediately.
*
* NOTE: to work better with UHCI (4K I/O buffer may
* need 3K of TDs) it may be good to limit how many
* URBs are queued at once; N milliseconds?
*/
case 0:
++i;
cpu_relax();
break;
/* fail any uncompleted urbs */
default:
io->urbs[i]->status = retval;
dev_dbg(&io->dev->dev, "%s, submit --> %d\n",
__func__, retval);
usb_sg_cancel(io);
}
spin_lock_irq(&io->lock);
if (retval && (io->status == 0 || io->status == -ECONNRESET))
io->status = retval;
}
io->count -= entries - i;
if (io->count == 0)
complete(&io->complete);
spin_unlock_irq(&io->lock);
/* OK, yes, this could be packaged as non-blocking.
* So could the submit loop above ... but it's easier to
* solve neither problem than to solve both!
*/
wait_for_completion(&io->complete);
sg_clean(io);
}
EXPORT_SYMBOL_GPL(usb_sg_wait);
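/*
* A minimal sketch of the init/wait pairing, assuming @sg holds @nents
* entries describing buffers for bulk-in endpoint 1 (the endpoint
* number is illustrative):
*
*	struct usb_sg_request io;
*	int err;
*
*	err = usb_sg_init(&io, udev, usb_rcvbulkpipe(udev, 1), 0,
*			sg, nents, 0, GFP_KERNEL);
*	if (err)
*		return err;
*	usb_sg_wait(&io);
*	if (io.status)
*		return io.status;
*	...io.bytes now holds the byte count transferred...
*/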
/**
* usb_sg_cancel - stop scatter/gather i/o issued by usb_sg_wait()
* @io: request block, initialized with usb_sg_init()
*
* This stops a request after it has been started by usb_sg_wait().
* It can also prevent one initialized by usb_sg_init() from starting,
* so that call just frees resources allocated to the request.
*/
void usb_sg_cancel(struct usb_sg_request *io)
{
unsigned long flags;
int i, retval;
spin_lock_irqsave(&io->lock, flags);
if (io->status) {
spin_unlock_irqrestore(&io->lock, flags);
return;
}
/* shut everything down */
io->status = -ECONNRESET;
spin_unlock_irqrestore(&io->lock, flags);
for (i = io->entries - 1; i >= 0; --i) {
usb_block_urb(io->urbs[i]);
retval = usb_unlink_urb(io->urbs[i]);
if (retval != -EINPROGRESS
&& retval != -ENODEV
&& retval != -EBUSY
&& retval != -EIDRM)
dev_warn(&io->dev->dev, "%s, unlink --> %d\n",
__func__, retval);
}
}
EXPORT_SYMBOL_GPL(usb_sg_cancel);
/*-------------------------------------------------------------------*/
/**
* usb_get_descriptor - issues a generic GET_DESCRIPTOR request
* @dev: the device whose descriptor is being retrieved
* @type: the descriptor type (USB_DT_*)
* @index: the number of the descriptor
* @buf: where to put the descriptor
* @size: how big is "buf"?
* Context: !in_interrupt ()
*
* Gets a USB descriptor. Convenience functions exist to simplify
* getting some types of descriptors. Use
* usb_get_string() or usb_string() for USB_DT_STRING.
* Device (USB_DT_DEVICE) and configuration descriptors (USB_DT_CONFIG)
* are part of the device structure.
* In addition to a number of USB-standard descriptors, some
* devices also use class-specific or vendor-specific descriptors.
*
* This call is synchronous, and may not be used in an interrupt context.
*
* Return: The number of bytes received on success, or else the status code
* returned by the underlying usb_control_msg() call.
*/
int usb_get_descriptor(struct usb_device *dev, unsigned char type,
unsigned char index, void *buf, int size)
{
int i;
int result;
memset(buf, 0, size); /* Make sure we parse really received data */
for (i = 0; i < 3; ++i) {
/* retry on length 0 or error; some devices are flakey */
result = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
USB_REQ_GET_DESCRIPTOR, USB_DIR_IN,
(type << 8) + index, 0, buf, size,
USB_CTRL_GET_TIMEOUT);
if (result <= 0 && result != -ETIMEDOUT)
continue;
if (result > 1 && ((u8 *)buf)[1] != type) {
result = -ENODATA;
continue;
}
break;
}
return result;
}
EXPORT_SYMBOL_GPL(usb_get_descriptor);
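/*
* A minimal usage sketch, reading the header of configuration
* descriptor 0 (real callers then re-read the full wTotalLength bytes):
*
*	struct usb_config_descriptor *cfg;
*	int ret;
*
*	cfg = kmalloc(sizeof(*cfg), GFP_KERNEL);
*	if (!cfg)
*		return -ENOMEM;
*	ret = usb_get_descriptor(udev, USB_DT_CONFIG, 0,
*			cfg, sizeof(*cfg));
*
* As with usb_control_msg(), the buffer must be DMA-able memory.
*/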
/**
* usb_get_string - gets a string descriptor
* @dev: the device whose string descriptor is being retrieved
* @langid: code for language chosen (from string descriptor zero)
* @index: the number of the descriptor
* @buf: where to put the string
* @size: how big is "buf"?
* Context: !in_interrupt ()
*
* Retrieves a string, encoded using UTF-16LE (Unicode, 16 bits per character,
* in little-endian byte order).
* The usb_string() function will often be a convenient way to turn
* these strings into kernel-printable form.
*
* Strings may be referenced in device, configuration, interface, or other
* descriptors, and could also be used in vendor-specific ways.
*
* This call is synchronous, and may not be used in an interrupt context.
*
* Return: The number of bytes received on success, or else the status code
* returned by the underlying usb_control_msg() call.
*/
static int usb_get_string(struct usb_device *dev, unsigned short langid,
unsigned char index, void *buf, int size)
{
int i;
int result;
for (i = 0; i < 3; ++i) {
/* retry on length 0 or stall; some devices are flakey */
result = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
USB_REQ_GET_DESCRIPTOR, USB_DIR_IN,
(USB_DT_STRING << 8) + index, langid, buf, size,
USB_CTRL_GET_TIMEOUT);
if (result == 0 || result == -EPIPE)
continue;
if (result > 1 && ((u8 *) buf)[1] != USB_DT_STRING) {
result = -ENODATA;
continue;
}
break;
}
return result;
}
static void usb_try_string_workarounds(unsigned char *buf, int *length)
{
int newlength, oldlength = *length;
for (newlength = 2; newlength + 1 < oldlength; newlength += 2)
if (!isprint(buf[newlength]) || buf[newlength + 1])
break;
if (newlength > 2) {
buf[0] = newlength;
*length = newlength;
}
}
static int usb_string_sub(struct usb_device *dev, unsigned int langid,
unsigned int index, unsigned char *buf)
{
int rc;
/* Try to read the string descriptor by asking for the maximum
* possible number of bytes */
if (dev->quirks & USB_QUIRK_STRING_FETCH_255)
rc = -EIO;
else
rc = usb_get_string(dev, langid, index, buf, 255);
/* If that failed try to read the descriptor length, then
* ask for just that many bytes */
if (rc < 2) {
rc = usb_get_string(dev, langid, index, buf, 2);
if (rc == 2)
rc = usb_get_string(dev, langid, index, buf, buf[0]);
}
if (rc >= 2) {
if (!buf[0] && !buf[1])
usb_try_string_workarounds(buf, &rc);
/* There might be extra junk at the end of the descriptor */
if (buf[0] < rc)
rc = buf[0];
rc = rc - (rc & 1); /* force a multiple of two */
}
if (rc < 2)
rc = (rc < 0 ? rc : -EINVAL);
return rc;
}
static int usb_get_langid(struct usb_device *dev, unsigned char *tbuf)
{
int err;
if (dev->have_langid)
return 0;
if (dev->string_langid < 0)
return -EPIPE;
err = usb_string_sub(dev, 0, 0, tbuf);
/* If the string was reported but is malformed, default to English
* (0x0409) */
if (err == -ENODATA || (err > 0 && err < 4)) {
dev->string_langid = 0x0409;
dev->have_langid = 1;
dev_err(&dev->dev,
"language id specifier not provided by device, defaulting to English\n");
return 0;
}
/* In case of all other errors, we assume the device is not able to
* deal with strings at all. Set string_langid to -1 in order to
* prevent any string from being retrieved from the device */
if (err < 0) {
dev_info(&dev->dev, "string descriptor 0 read error: %d\n",
err);
dev->string_langid = -1;
return -EPIPE;
}
/* always use the first langid listed */
dev->string_langid = tbuf[2] | (tbuf[3] << 8);
dev->have_langid = 1;
dev_dbg(&dev->dev, "default language 0x%04x\n",
dev->string_langid);
return 0;
}
/**
* usb_string - returns UTF-8 version of a string descriptor
* @dev: the device whose string descriptor is being retrieved
* @index: the number of the descriptor
* @buf: where to put the string
* @size: how big is "buf"?
* Context: !in_interrupt ()
*
* This converts the UTF-16LE encoded strings returned by devices, from
* usb_get_string_descriptor(), to null-terminated UTF-8 encoded ones
* that are more usable in most kernel contexts. Note that this function
* chooses strings in the first language supported by the device.
*
* This call is synchronous, and may not be used in an interrupt context.
*
* Return: length of the string (>= 0) or usb_control_msg status (< 0).
*/
int usb_string(struct usb_device *dev, int index, char *buf, size_t size)
{
unsigned char *tbuf;
int err;
if (dev->state == USB_STATE_SUSPENDED)
return -EHOSTUNREACH;
if (size <= 0 || !buf)
return -EINVAL;
buf[0] = 0;
if (index <= 0 || index >= 256)
return -EINVAL;
tbuf = kmalloc(256, GFP_NOIO);
if (!tbuf)
return -ENOMEM;
err = usb_get_langid(dev, tbuf);
if (err < 0)
goto errout;
err = usb_string_sub(dev, dev->string_langid, index, tbuf);
if (err < 0)
goto errout;
size--; /* leave room for trailing NULL char in output buffer */
err = utf16s_to_utf8s((wchar_t *) &tbuf[2], (err - 2) / 2,
UTF16_LITTLE_ENDIAN, buf, size);
buf[err] = 0;
if (tbuf[1] != USB_DT_STRING)
dev_dbg(&dev->dev,
"wrong descriptor type %02x for string %d (\"%s\")\n",
tbuf[1], index, buf);
errout:
kfree(tbuf);
return err;
}
EXPORT_SYMBOL_GPL(usb_string);
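/*
* A minimal usage sketch, fetching the product string of @udev (the
* 128-byte buffer size is illustrative):
*
*	char product[128];
*	int len;
*
*	len = usb_string(udev, udev->descriptor.iProduct,
*			product, sizeof(product));
*	if (len > 0)
*		dev_info(&udev->dev, "product: %s\n", product);
*
* The caller's buffer need not be DMA-able; the transfer buffer is
* allocated internally and the result is copied out as UTF-8.
*/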
/* one UTF-8-encoded 16-bit character has at most three bytes */
#define MAX_USB_STRING_SIZE (127 * 3 + 1)
/**
* usb_cache_string - read a string descriptor and cache it for later use
* @udev: the device whose string descriptor is being read
* @index: the descriptor index
*
* Return: A pointer to a kmalloc'ed buffer containing the descriptor string,
* or %NULL if the index is 0 or the string could not be read.
*/
char *usb_cache_string(struct usb_device *udev, int index)
{
char *buf;
char *smallbuf = NULL;
int len;
if (index <= 0)
return NULL;
buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
if (buf) {
len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
if (len > 0) {
smallbuf = kmalloc(++len, GFP_NOIO);
if (!smallbuf)
return buf;
memcpy(smallbuf, buf, len);
}
kfree(buf);
}
return smallbuf;
}
/*
* usb_get_device_descriptor - (re)reads the device descriptor (usbcore)
* @dev: the device whose device descriptor is being updated
* @size: how much of the descriptor to read
* Context: !in_interrupt ()
*
* Updates the copy of the device descriptor stored in the device structure,
* which dedicates space for this purpose.
*
* Not exported, only for use by the core. If drivers really want to read
* the device descriptor directly, they can call usb_get_descriptor() with
* type = USB_DT_DEVICE and index = 0.
*
* This call is synchronous, and may not be used in an interrupt context.
*
* Return: The number of bytes received on success, or else the status code
* returned by the underlying usb_control_msg() call.
*/
int usb_get_device_descriptor(struct usb_device *dev, unsigned int size)
{
struct usb_device_descriptor *desc;
int ret;
if (size > sizeof(*desc))
return -EINVAL;
desc = kmalloc(sizeof(*desc), GFP_NOIO);
if (!desc)
return -ENOMEM;
ret = usb_get_descriptor(dev, USB_DT_DEVICE, 0, desc, size);
if (ret >= 0)
memcpy(&dev->descriptor, desc, size);
kfree(desc);
return ret;
}
/*
* usb_set_isoch_delay - informs the device of the packet transmit delay
* @dev: the device whose delay is to be informed
* Context: !in_interrupt()
*
* Since this is an optional request, we don't bother if it fails.
*/
int usb_set_isoch_delay(struct usb_device *dev)
{
/* skip hub devices */
if (dev->descriptor.bDeviceClass == USB_CLASS_HUB)
return 0;
/* skip non-SS/non-SSP devices */
if (dev->speed < USB_SPEED_SUPER)
return 0;
return usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
USB_REQ_SET_ISOCH_DELAY,
USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
dev->hub_delay, 0, NULL, 0,
USB_CTRL_SET_TIMEOUT);
}
/**
* usb_get_status - issues a GET_STATUS call
* @dev: the device whose status is being checked
* @recip: USB_RECIP_*; for device, interface, or endpoint
* @type: USB_STATUS_TYPE_*; for standard or PTM status types
* @target: zero (for device), else interface or endpoint number
* @data: pointer to two bytes of bitmap data
* Context: !in_interrupt ()
*
* Returns device, interface, or endpoint status. Normally only of
* interest to see if the device is self powered, or has enabled the
* remote wakeup facility; or whether a bulk or interrupt endpoint
* is halted ("stalled").
*
* Bits in these status bitmaps are set using the SET_FEATURE request,
* and cleared using the CLEAR_FEATURE request. The usb_clear_halt()
* function should be used to clear halt ("stall") status.
*
* This call is synchronous, and may not be used in an interrupt context.
*
* Returns 0 and the status value in *@data (in host byte order) on success,
* or else the status code from the underlying usb_control_msg() call.
*/
int usb_get_status(struct usb_device *dev, int recip, int type, int target,
void *data)
{
int ret;
void *status;
int length;
switch (type) {
case USB_STATUS_TYPE_STANDARD:
length = 2;
break;
case USB_STATUS_TYPE_PTM:
if (recip != USB_RECIP_DEVICE)
return -EINVAL;
length = 4;
break;
default:
return -EINVAL;
}
status = kmalloc(length, GFP_KERNEL);
if (!status)
return -ENOMEM;
ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
USB_REQ_GET_STATUS, USB_DIR_IN | recip, USB_STATUS_TYPE_STANDARD,
target, status, length, USB_CTRL_GET_TIMEOUT);
switch (ret) {
case 4:
if (type != USB_STATUS_TYPE_PTM) {
ret = -EIO;
break;
}
*(u32 *) data = le32_to_cpu(*(__le32 *) status);
ret = 0;
break;
case 2:
if (type != USB_STATUS_TYPE_STANDARD) {
ret = -EIO;
break;
}
*(u16 *) data = le16_to_cpu(*(__le16 *) status);
ret = 0;
break;
default:
ret = -EIO;
}
kfree(status);
return ret;
}
EXPORT_SYMBOL_GPL(usb_get_status);
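/*
* A minimal usage sketch, asking whether bulk-in endpoint 1 of @udev
* is currently halted (the endpoint number is illustrative):
*
*	u16 status;
*	int ret;
*
*	ret = usb_get_status(udev, USB_RECIP_ENDPOINT,
*			USB_STATUS_TYPE_STANDARD,
*			1 | USB_DIR_IN, &status);
*	if (ret == 0 && (status & (1 << USB_ENDPOINT_HALT)))
*		...the endpoint is stalled...
*/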
/**
* usb_clear_halt - tells device to clear endpoint halt/stall condition
* @dev: device whose endpoint is halted
* @pipe: endpoint "pipe" being cleared
* Context: !in_interrupt ()
*
* This is used to clear halt conditions for bulk and interrupt endpoints,
* as reported by URB completion status. Endpoints that are halted are
* sometimes referred to as being "stalled". Such endpoints are unable
* to transmit or receive data until the halt status is cleared. Any URBs
* queued for such an endpoint should normally be unlinked by the driver
* before clearing the halt condition, as described in sections 5.7.5
* and 5.8.5 of the USB 2.0 spec.
*
* Note that control and isochronous endpoints don't halt, although control
* endpoints report "protocol stall" (for unsupported requests) using the
* same status code used to report a true stall.
*
* This call is synchronous, and may not be used in an interrupt context.
*
* Return: Zero on success, or else the status code returned by the
* underlying usb_control_msg() call.
*/
int usb_clear_halt(struct usb_device *dev, int pipe)
{
int result;
int endp = usb_pipeendpoint(pipe);
if (usb_pipein(pipe))
endp |= USB_DIR_IN;
/* we don't care if it wasn't halted first. In fact some devices
* (like some ibmcam model 1 units) seem to expect hosts to make
* this request for iso endpoints, which can't halt!
*/
result = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
USB_REQ_CLEAR_FEATURE, USB_RECIP_ENDPOINT,
USB_ENDPOINT_HALT, endp, NULL, 0,
USB_CTRL_SET_TIMEOUT);
/* don't un-halt or force to DATA0 except on success */
if (result < 0)
return result;
/* NOTE: seems like Microsoft and Apple don't bother verifying
* the clear "took", so some devices could lock up if you check...
* such as the Hagiwara FlashGate DUAL. So we won't bother.
*
* NOTE: make sure the logic here doesn't diverge much from
* the copy in usb-storage, for as long as we need two copies.
*/
usb_reset_endpoint(dev, endp);
return 0;
}
EXPORT_SYMBOL_GPL(usb_clear_halt);
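/*
* A minimal sketch of a driver error path, assuming the URB completed
* with -EPIPE and all URBs for the pipe were already unlinked (the
* endpoint number is illustrative):
*
*	if (urb->status == -EPIPE) {
*		ret = usb_clear_halt(udev, usb_rcvbulkpipe(udev, 1));
*		if (ret)
*			dev_err(&udev->dev,
*				"clear_halt failed: %d\n", ret);
*	}
*
* Being synchronous, this must run from process context (e.g. a
* workqueue), never from the completion handler itself.
*/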
static int create_intf_ep_devs(struct usb_interface *intf)
{
struct usb_device *udev = interface_to_usbdev(intf);
struct usb_host_interface *alt = intf->cur_altsetting;
int i;
if (intf->ep_devs_created || intf->unregistering)
return 0;
for (i = 0; i < alt->desc.bNumEndpoints; ++i)
(void) usb_create_ep_devs(&intf->dev, &alt->endpoint[i], udev);
intf->ep_devs_created = 1;
return 0;
}
static void remove_intf_ep_devs(struct usb_interface *intf)
{
struct usb_host_interface *alt = intf->cur_altsetting;
int i;
if (!intf->ep_devs_created)
return;
for (i = 0; i < alt->desc.bNumEndpoints; ++i)
usb_remove_ep_devs(&alt->endpoint[i]);
intf->ep_devs_created = 0;
}
/**
* usb_disable_endpoint -- Disable an endpoint by address
* @dev: the device whose endpoint is being disabled
* @epaddr: the endpoint's address. Endpoint number for output,
* endpoint number + USB_DIR_IN for input
* @reset_hardware: flag to erase any endpoint state stored in the
* controller hardware
*
* Disables the endpoint for URB submission and nukes all pending URBs.
* If @reset_hardware is set then also deallocates hcd/hardware state
* for the endpoint.
*/
void usb_disable_endpoint(struct usb_device *dev, unsigned int epaddr,
bool reset_hardware)
{
unsigned int epnum = epaddr & USB_ENDPOINT_NUMBER_MASK;
struct usb_host_endpoint *ep;
if (!dev)
return;
if (usb_endpoint_out(epaddr)) {
ep = dev->ep_out[epnum];
if (reset_hardware)
dev->ep_out[epnum] = NULL;
} else {
ep = dev->ep_in[epnum];
if (reset_hardware)
dev->ep_in[epnum] = NULL;
}
if (ep) {
ep->enabled = 0;
usb_hcd_flush_endpoint(dev, ep);
if (reset_hardware)
usb_hcd_disable_endpoint(dev, ep);
}
}
/**
* usb_reset_endpoint - Reset an endpoint's state.
* @dev: the device whose endpoint is to be reset
* @epaddr: the endpoint's address. Endpoint number for output,
* endpoint number + USB_DIR_IN for input
*
* Resets any host-side endpoint state such as the toggle bit,
* sequence number or current window.
*/
void usb_reset_endpoint(struct usb_device *dev, unsigned int epaddr)
{
unsigned int epnum = epaddr & USB_ENDPOINT_NUMBER_MASK;
struct usb_host_endpoint *ep;
if (usb_endpoint_out(epaddr))
ep = dev->ep_out[epnum];
else
ep = dev->ep_in[epnum];
if (ep)
usb_hcd_reset_endpoint(dev, ep);
}
EXPORT_SYMBOL_GPL(usb_reset_endpoint);
/**
* usb_disable_interface -- Disable all endpoints for an interface
* @dev: the device whose interface is being disabled
* @intf: pointer to the interface descriptor
* @reset_hardware: flag to erase any endpoint state stored in the
* controller hardware
*
* Disables all the endpoints for the interface's current altsetting.
*/
void usb_disable_interface(struct usb_device *dev, struct usb_interface *intf,
bool reset_hardware)
{
struct usb_host_interface *alt = intf->cur_altsetting;
int i;
for (i = 0; i < alt->desc.bNumEndpoints; ++i) {
usb_disable_endpoint(dev,
alt->endpoint[i].desc.bEndpointAddress,
reset_hardware);
}
}
/**
* usb_disable_device - Disable all the endpoints for a USB device
* @dev: the device whose endpoints are being disabled
* @skip_ep0: 0 to disable endpoint 0, 1 to skip it.
*
* Disables all the device's endpoints, potentially including endpoint 0.
* Deallocates hcd/hardware state for the endpoints (nuking all or most
* pending urbs) and usbcore state for the interfaces, so that
* usb_set_configuration() must be called before any interfaces can be
* used.
*/
void usb_disable_device(struct usb_device *dev, int skip_ep0)
{
int i;
struct usb_hcd *hcd = bus_to_hcd(dev->bus);
/* getting rid of interfaces will disconnect
* any drivers bound to them (a key side effect)
*/
if (dev->actconfig) {
/*
* FIXME: In order to avoid self-deadlock involving the
* bandwidth_mutex, we have to mark all the interfaces
* before unregistering any of them.
*/
for (i = 0; i < dev->actconfig->desc.bNumInterfaces; i++)
dev->actconfig->interface[i]->unregistering = 1;
for (i = 0; i < dev->actconfig->desc.bNumInterfaces; i++) {
struct usb_interface *interface;
/* remove this interface if it has been registered */
interface = dev->actconfig->interface[i];
if (!device_is_registered(&interface->dev))
continue;
dev_dbg(&dev->dev, "unregistering interface %s\n",
dev_name(&interface->dev));
remove_intf_ep_devs(interface);
device_del(&interface->dev);
}
/* Now that the interfaces are unbound, nobody should
* try to access them.
*/
for (i = 0; i < dev->actconfig->desc.bNumInterfaces; i++) {
put_device(&dev->actconfig->interface[i]->dev);
dev->actconfig->interface[i] = NULL;
}
usb_disable_usb2_hardware_lpm(dev);
usb_unlocked_disable_lpm(dev);
usb_disable_ltm(dev);
dev->actconfig = NULL;
if (dev->state == USB_STATE_CONFIGURED)
usb_set_device_state(dev, USB_STATE_ADDRESS);
}
dev_dbg(&dev->dev, "%s nuking %s URBs\n", __func__,
skip_ep0 ? "non-ep0" : "all");
if (hcd->driver->check_bandwidth) {
/* First pass: Cancel URBs, leave endpoint pointers intact. */
for (i = skip_ep0; i < 16; ++i) {
usb_disable_endpoint(dev, i, false);
usb_disable_endpoint(dev, i + USB_DIR_IN, false);
}
/* Remove endpoints from the host controller internal state */
mutex_lock(hcd->bandwidth_mutex);
usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL);
mutex_unlock(hcd->bandwidth_mutex);
/* Second pass: remove endpoint pointers */
}
for (i = skip_ep0; i < 16; ++i) {
usb_disable_endpoint(dev, i, true);
usb_disable_endpoint(dev, i + USB_DIR_IN, true);
}
}
/**
* usb_enable_endpoint - Enable an endpoint for USB communications
* @dev: the device whose interface is being enabled
* @ep: the endpoint
* @reset_ep: flag to reset the endpoint state
*
* Resets the endpoint state if asked, and sets dev->ep_{in,out} pointers.
* For control endpoints, both the input and output sides are handled.
*/
void usb_enable_endpoint(struct usb_device *dev, struct usb_host_endpoint *ep,
bool reset_ep)
{
int epnum = usb_endpoint_num(&ep->desc);
int is_out = usb_endpoint_dir_out(&ep->desc);
int is_control = usb_endpoint_xfer_control(&ep->desc);
if (reset_ep)
usb_hcd_reset_endpoint(dev, ep);
if (is_out || is_control)
dev->ep_out[epnum] = ep;
if (!is_out || is_control)
dev->ep_in[epnum] = ep;
ep->enabled = 1;
}
/**
* usb_enable_interface - Enable all the endpoints for an interface
* @dev: the device whose interface is being enabled
* @intf: pointer to the interface descriptor
* @reset_eps: flag to reset the endpoints' state
*
* Enables all the endpoints for the interface's current altsetting.
*/
void usb_enable_interface(struct usb_device *dev,
struct usb_interface *intf, bool reset_eps)
{
struct usb_host_interface *alt = intf->cur_altsetting;
int i;
for (i = 0; i < alt->desc.bNumEndpoints; ++i)
usb_enable_endpoint(dev, &alt->endpoint[i], reset_eps);
}
/**
* usb_set_interface - Makes a particular alternate setting be current
* @dev: the device whose interface is being updated
* @interface: the interface being updated
* @alternate: the setting being chosen.
* Context: !in_interrupt ()
*
* This is used to enable data transfers on interfaces that may not
* be enabled by default. Not all devices support such configurability.
* Only the driver bound to an interface may change its setting.
*
* Within any given configuration, each interface may have several
* alternative settings. These are often used to control levels of
* bandwidth consumption. For example, the default setting for a high
* speed interrupt endpoint may not send more than 64 bytes per microframe,
* while interrupt transfers of up to 3KBytes per microframe are legal.
* Also, isochronous endpoints may never be part of an
* interface's default setting. To access such bandwidth, alternate
* interface settings must be made current.
*
* Note that in the Linux USB subsystem, bandwidth associated with
* an endpoint in a given alternate setting is not reserved until an URB
* is submitted that needs that bandwidth. Some other operating systems
* allocate bandwidth early, when a configuration is chosen.
*
* xHCI reserves bandwidth and configures the alternate setting in
* usb_hcd_alloc_bandwidth(). If it fails the original interface altsetting
* may be disabled. Drivers cannot rely on any particular alternate
* setting being in effect after a failure.
*
* This call is synchronous, and may not be used in an interrupt context.
* Also, drivers must not change altsettings while urbs are scheduled for
* endpoints in that interface; all such urbs must first be completed
* (perhaps forced by unlinking).
*
* Return: Zero on success, or else the status code returned by the
* underlying usb_control_msg() call.
*/
int usb_set_interface(struct usb_device *dev, int interface, int alternate)
{
struct usb_interface *iface;
struct usb_host_interface *alt;
struct usb_hcd *hcd = bus_to_hcd(dev->bus);
int i, ret, manual = 0;
unsigned int epaddr;
unsigned int pipe;
if (dev->state == USB_STATE_SUSPENDED)
return -EHOSTUNREACH;
iface = usb_ifnum_to_if(dev, interface);
if (!iface) {
dev_dbg(&dev->dev, "selecting invalid interface %d\n",
interface);
return -EINVAL;
}
if (iface->unregistering)
return -ENODEV;
alt = usb_altnum_to_altsetting(iface, alternate);
if (!alt) {
dev_warn(&dev->dev, "selecting invalid altsetting %d\n",
alternate);
return -EINVAL;
}
/*
* usb3 hosts configure the interface in usb_hcd_alloc_bandwidth,
* including freeing dropped endpoint ring buffers.
* Make sure the interface endpoints are flushed before that
*/
usb_disable_interface(dev, iface, false);
/* Make sure we have enough bandwidth for this alternate interface.
* Remove the current alt setting and add the new alt setting.
*/
mutex_lock(hcd->bandwidth_mutex);
/* Disable LPM, and re-enable it once the new alt setting is installed,
* so that the xHCI driver can recalculate the U1/U2 timeouts.
*/
if (usb_disable_lpm(dev)) {
dev_err(&iface->dev, "%s Failed to disable LPM\n", __func__);
mutex_unlock(hcd->bandwidth_mutex);
return -ENOMEM;
}
/* Changing alt-setting also frees any allocated streams */
for (i = 0; i < iface->cur_altsetting->desc.bNumEndpoints; i++)
iface->cur_altsetting->endpoint[i].streams = 0;
ret = usb_hcd_alloc_bandwidth(dev, NULL, iface->cur_altsetting, alt);
if (ret < 0) {
dev_info(&dev->dev, "Not enough bandwidth for altsetting %d\n",
alternate);
usb_enable_lpm(dev);
mutex_unlock(hcd->bandwidth_mutex);
return ret;
}
if (dev->quirks & USB_QUIRK_NO_SET_INTF)
ret = -EPIPE;
else
ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
USB_REQ_SET_INTERFACE, USB_RECIP_INTERFACE,
alternate, interface, NULL, 0, 5000);
/* 9.4.10 says devices don't need this and are free to STALL the
* request if the interface only has one alternate setting.
*/
if (ret == -EPIPE && iface->num_altsetting == 1) {
dev_dbg(&dev->dev,
"manual set_interface for iface %d, alt %d\n",
interface, alternate);
manual = 1;
} else if (ret < 0) {
/* Re-instate the old alt setting */
usb_hcd_alloc_bandwidth(dev, NULL, alt, iface->cur_altsetting);
usb_enable_lpm(dev);
mutex_unlock(hcd->bandwidth_mutex);
return ret;
}
mutex_unlock(hcd->bandwidth_mutex);
/* FIXME drivers shouldn't need to replicate/bugfix the logic here
* when they implement async or easily-killable versions of this or
* other "should-be-internal" functions (like clear_halt).
* should hcd+usbcore postprocess control requests?
*/
/* prevent submissions using previous endpoint settings */
if (iface->cur_altsetting != alt) {
remove_intf_ep_devs(iface);
usb_remove_sysfs_intf_files(iface);
}
usb_disable_interface(dev, iface, true);
iface->cur_altsetting = alt;
/* Now that the interface is installed, re-enable LPM. */
usb_unlocked_enable_lpm(dev);
/* If the interface only has one altsetting and the device didn't
* accept the request, we attempt to carry out the equivalent action
* by manually clearing the HALT feature for each endpoint in the
* new altsetting.
*/
if (manual) {
for (i = 0; i < alt->desc.bNumEndpoints; i++) {
epaddr = alt->endpoint[i].desc.bEndpointAddress;
pipe = __create_pipe(dev,
USB_ENDPOINT_NUMBER_MASK & epaddr) |
(usb_endpoint_out(epaddr) ?
USB_DIR_OUT : USB_DIR_IN);
usb_clear_halt(dev, pipe);
}
}
/* 9.1.1.5: reset toggles for all endpoints in the new altsetting
*
* Note:
* Although EP0 is always present in all interfaces/AS, the list of
* endpoints from the descriptor does not contain EP0. Due to its
* omnipresence one might expect EP0 to be considered "affected" by
* any SetInterface request and hence assume toggles need to be reset.
* However, EP0 toggles are re-synced for every individual transfer
* during the SETUP stage - hence EP0 toggles are "don't care" here.
* (Likewise, EP0 never "halts" on well designed devices.)
*/
usb_enable_interface(dev, iface, true);
if (device_is_registered(&iface->dev)) {
usb_create_sysfs_intf_files(iface);
create_intf_ep_devs(iface);
}
return 0;
}
EXPORT_SYMBOL_GPL(usb_set_interface);
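/*
* A minimal usage sketch, assuming a driver bound to interface 0 that
* needs the higher-bandwidth altsetting 1 (both numbers illustrative):
*
*	ret = usb_set_interface(udev, 0, 1);
*	if (ret < 0)
*		dev_err(&udev->dev,
*			"cannot select altsetting: %d\n", ret);
*
* Any URBs queued for the interface's endpoints must be completed or
* unlinked before switching, as noted above.
*/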
/**
* usb_reset_configuration - lightweight device reset
* @dev: the device whose configuration is being reset
*
* This issues a standard SET_CONFIGURATION request to the device using
* the current configuration. The effect is to reset most USB-related
* state in the device, including interface altsettings (reset to zero),
* endpoint halts (cleared), and endpoint state (only for bulk and interrupt
* endpoints). Other usbcore state is unchanged, including bindings of
* usb device drivers to interfaces.
*
* Because this affects multiple interfaces, avoid using this with composite
* (multi-interface) devices. Instead, the driver for each interface may
* use usb_set_interface() on the interfaces it claims. Be careful though;
* some devices don't support the SET_INTERFACE request, and others won't
* reset all the interface state (notably endpoint state). Resetting the whole
* configuration would affect other drivers' interfaces.
*
* The caller must own the device lock.
*
* Return: Zero on success, else a negative error code.
*/
int usb_reset_configuration(struct usb_device *dev)
{
int i, retval;
struct usb_host_config *config;
struct usb_hcd *hcd = bus_to_hcd(dev->bus);
if (dev->state == USB_STATE_SUSPENDED)
return -EHOSTUNREACH;
/* caller must have locked the device and must own
* the usb bus readlock (so driver bindings are stable);
* calls during probe() are fine
*/
for (i = 1; i < 16; ++i) {
usb_disable_endpoint(dev, i, true);
usb_disable_endpoint(dev, i + USB_DIR_IN, true);
}
config = dev->actconfig;
retval = 0;
mutex_lock(hcd->bandwidth_mutex);
/* Disable LPM, and re-enable it once the configuration is reset, so
* that the xHCI driver can recalculate the U1/U2 timeouts.
*/
if (usb_disable_lpm(dev)) {
dev_err(&dev->dev, "%s Failed to disable LPM\n", __func__);
mutex_unlock(hcd->bandwidth_mutex);
return -ENOMEM;
}
/* Make sure we have enough bandwidth for each alternate setting 0 */
for (i = 0; i < config->desc.bNumInterfaces; i++) {
struct usb_interface *intf = config->interface[i];
struct usb_host_interface *alt;
alt = usb_altnum_to_altsetting(intf, 0);
if (!alt)
alt = &intf->altsetting[0];
if (alt != intf->cur_altsetting)
retval = usb_hcd_alloc_bandwidth(dev, NULL,
intf->cur_altsetting, alt);
if (retval < 0)
break;
}
/* If not, reinstate the old alternate settings */
if (retval < 0) {
reset_old_alts:
for (i--; i >= 0; i--) {
struct usb_interface *intf = config->interface[i];
struct usb_host_interface *alt;
alt = usb_altnum_to_altsetting(intf, 0);
if (!alt)
alt = &intf->altsetting[0];
if (alt != intf->cur_altsetting)
usb_hcd_alloc_bandwidth(dev, NULL,
alt, intf->cur_altsetting);
}
usb_enable_lpm(dev);
mutex_unlock(hcd->bandwidth_mutex);
return retval;
}
retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
USB_REQ_SET_CONFIGURATION, 0,
config->desc.bConfigurationValue, 0,
NULL, 0, USB_CTRL_SET_TIMEOUT);
if (retval < 0)
goto reset_old_alts;
mutex_unlock(hcd->bandwidth_mutex);
/* re-init hc/hcd interface/endpoint state */
for (i = 0; i < config->desc.bNumInterfaces; i++) {
struct usb_interface *intf = config->interface[i];
struct usb_host_interface *alt;
alt = usb_altnum_to_altsetting(intf, 0);
/* No altsetting 0? We'll assume the first altsetting.
* We could use a GetInterface call, but if a device is
* so non-compliant that it doesn't have altsetting 0
* then I wouldn't trust its reply anyway.
*/
if (!alt)
alt = &intf->altsetting[0];
if (alt != intf->cur_altsetting) {
remove_intf_ep_devs(intf);
usb_remove_sysfs_intf_files(intf);
}
intf->cur_altsetting = alt;
usb_enable_interface(dev, intf, true);
if (device_is_registered(&intf->dev)) {
usb_create_sysfs_intf_files(intf);
create_intf_ep_devs(intf);
}
}
/* Now that the interfaces are installed, re-enable LPM. */
usb_unlocked_enable_lpm(dev);
return 0;
}
EXPORT_SYMBOL_GPL(usb_reset_configuration);
static void usb_release_interface(struct device *dev)
{
struct usb_interface *intf = to_usb_interface(dev);
struct usb_interface_cache *intfc =
altsetting_to_usb_interface_cache(intf->altsetting);
kref_put(&intfc->ref, usb_release_interface_cache);
usb_put_dev(interface_to_usbdev(intf));
of_node_put(dev->of_node);
kfree(intf);
}
/*
* usb_deauthorize_interface - deauthorize a USB interface
*
* @intf: USB interface structure
*/
void usb_deauthorize_interface(struct usb_interface *intf)
{
struct device *dev = &intf->dev;
device_lock(dev->parent);
if (intf->authorized) {
device_lock(dev);
intf->authorized = 0;
device_unlock(dev);
usb_forced_unbind_intf(intf);
}
device_unlock(dev->parent);
}
/*
* usb_authorize_interface - authorize a USB interface
*
* @intf: USB interface structure
*/
void usb_authorize_interface(struct usb_interface *intf)
{
struct device *dev = &intf->dev;
if (!intf->authorized) {
device_lock(dev);
intf->authorized = 1; /* authorize interface */
device_unlock(dev);
}
}
static int usb_if_uevent(struct device *dev, struct kobj_uevent_env *env)
{
struct usb_device *usb_dev;
struct usb_interface *intf;
struct usb_host_interface *alt;
intf = to_usb_interface(dev);
usb_dev = interface_to_usbdev(intf);
alt = intf->cur_altsetting;
if (add_uevent_var(env, "INTERFACE=%d/%d/%d",
alt->desc.bInterfaceClass,
alt->desc.bInterfaceSubClass,
alt->desc.bInterfaceProtocol))
return -ENOMEM;
if (add_uevent_var(env,
"MODALIAS=usb:"
"v%04Xp%04Xd%04Xdc%02Xdsc%02Xdp%02Xic%02Xisc%02Xip%02Xin%02X",
le16_to_cpu(usb_dev->descriptor.idVendor),
le16_to_cpu(usb_dev->descriptor.idProduct),
le16_to_cpu(usb_dev->descriptor.bcdDevice),
usb_dev->descriptor.bDeviceClass,
usb_dev->descriptor.bDeviceSubClass,
usb_dev->descriptor.bDeviceProtocol,
alt->desc.bInterfaceClass,
alt->desc.bInterfaceSubClass,
alt->desc.bInterfaceProtocol,
alt->desc.bInterfaceNumber))
return -ENOMEM;
return 0;
}
struct device_type usb_if_device_type = {
.name = "usb_interface",
.release = usb_release_interface,
.uevent = usb_if_uevent,
};
static struct usb_interface_assoc_descriptor *find_iad(struct usb_device *dev,
struct usb_host_config *config,
u8 inum)
{
struct usb_interface_assoc_descriptor *retval = NULL;
struct usb_interface_assoc_descriptor *intf_assoc;
int first_intf;
int last_intf;
int i;
for (i = 0; (i < USB_MAXIADS && config->intf_assoc[i]); i++) {
intf_assoc = config->intf_assoc[i];
if (intf_assoc->bInterfaceCount == 0)
continue;
first_intf = intf_assoc->bFirstInterface;
last_intf = first_intf + (intf_assoc->bInterfaceCount - 1);
if (inum >= first_intf && inum <= last_intf) {
if (!retval)
retval = intf_assoc;
else
dev_err(&dev->dev, "Interface #%d referenced"
" by multiple IADs\n", inum);
}
}
return retval;
}
/*
* Internal function to queue a device reset
* See usb_queue_reset_device() for more details
*/
static void __usb_queue_reset_device(struct work_struct *ws)
{
int rc;
struct usb_interface *iface =
container_of(ws, struct usb_interface, reset_ws);
struct usb_device *udev = interface_to_usbdev(iface);
rc = usb_lock_device_for_reset(udev, iface);
if (rc >= 0) {
usb_reset_device(udev);
usb_unlock_device(udev);
}
usb_put_intf(iface); /* Undo _get_ in usb_queue_reset_device() */
}
/*
* usb_set_configuration - Makes a particular device setting be current
* @dev: the device whose configuration is being updated
* @configuration: the configuration being chosen.
* Context: !in_interrupt(), caller owns the device lock
*
* This is used to enable non-default device modes. Not all devices
* use this kind of configurability; many devices only have one
* configuration.
*
* @configuration is the value of the configuration to be installed.
* According to the USB spec (e.g. section 9.1.1.5), configuration values
* must be non-zero; a value of zero indicates that the device is
* unconfigured. However some devices erroneously use 0 as one of their
* configuration values. To help manage such devices, this routine will
* accept @configuration = -1 as indicating the device should be put in
* an unconfigured state.
*
* USB device configurations may affect Linux interoperability,
* power consumption and the functionality available. For example,
* the default configuration is limited to using 100mA of bus power,
* so that when certain device functionality requires more power,
* and the device is bus powered, that functionality should be in some
* non-default device configuration. Other device modes may also be
* reflected as configuration options, such as whether two ISDN
* channels are available independently; and choosing between open
* standard device protocols (like CDC) or proprietary ones.
*
* Note that a non-authorized device (dev->authorized == 0) will only
* be put in unconfigured mode.
*
* Note that USB has an additional level of device configurability,
* associated with interfaces. That configurability is accessed using
* usb_set_interface().
*
* This call is synchronous. The calling context must be able to sleep,
* must own the device lock, and must not hold the driver model's USB
* bus mutex; usb interface driver probe() methods cannot use this routine.
*
* Returns zero on success, or else the status code returned by the
* underlying call that failed. On successful completion, each interface
* in the original device configuration has been destroyed, and each one
* in the new configuration has been probed by all relevant usb device
* drivers currently known to the kernel.
*/
int usb_set_configuration(struct usb_device *dev, int configuration)
{
int i, ret;
struct usb_host_config *cp = NULL;
struct usb_interface **new_interfaces = NULL;
struct usb_hcd *hcd = bus_to_hcd(dev->bus);
int n, nintf;
if (dev->authorized == 0 || configuration == -1)
configuration = 0;
else {
for (i = 0; i < dev->descriptor.bNumConfigurations; i++) {
if (dev->config[i].desc.bConfigurationValue ==
configuration) {
cp = &dev->config[i];
break;
}
}
}
if ((!cp && configuration != 0))
return -EINVAL;
/* The USB spec says configuration 0 means unconfigured.
* But if a device includes a configuration numbered 0,
* we will accept it as a correctly configured state.
* Use -1 if you really want to unconfigure the device.
*/
if (cp && configuration == 0)
dev_warn(&dev->dev, "config 0 descriptor??\n");
/* Allocate memory for new interfaces before doing anything else,
* so that if we run out then nothing will have changed. */
n = nintf = 0;
if (cp) {
nintf = cp->desc.bNumInterfaces;
new_interfaces = kmalloc_array(nintf, sizeof(*new_interfaces),
GFP_NOIO);
if (!new_interfaces)
return -ENOMEM;
for (; n < nintf; ++n) {
new_interfaces[n] = kzalloc(
sizeof(struct usb_interface),
GFP_NOIO);
if (!new_interfaces[n]) {
ret = -ENOMEM;
free_interfaces:
while (--n >= 0)
kfree(new_interfaces[n]);
kfree(new_interfaces);
return ret;
}
}
i = dev->bus_mA - usb_get_max_power(dev, cp);
if (i < 0)
dev_warn(&dev->dev, "new config #%d exceeds power "
"limit by %dmA\n",
configuration, -i);
}
/* Wake up the device so we can send it the Set-Config request */
ret = usb_autoresume_device(dev);
if (ret)
goto free_interfaces;
/* if it's already configured, clear out old state first.
* getting rid of old interfaces means unbinding their drivers.
*/
if (dev->state != USB_STATE_ADDRESS)
usb_disable_device(dev, 1); /* Skip ep0 */
/* Get rid of pending async Set-Config requests for this device */
cancel_async_set_config(dev);
/* Make sure we have bandwidth (and available HCD resources) for this
* configuration. Remove endpoints from the schedule if we're dropping
* this configuration to set configuration 0. After this point, the
* host controller will not allow submissions to dropped endpoints. If
* this call fails, the device state is unchanged.
*/
mutex_lock(hcd->bandwidth_mutex);
/* Disable LPM, and re-enable it once the new configuration is
* installed, so that the xHCI driver can recalculate the U1/U2
* timeouts.
*/
if (dev->actconfig && usb_disable_lpm(dev)) {
dev_err(&dev->dev, "%s Failed to disable LPM\n", __func__);
mutex_unlock(hcd->bandwidth_mutex);
ret = -ENOMEM;
goto free_interfaces;
}
ret = usb_hcd_alloc_bandwidth(dev, cp, NULL, NULL);
if (ret < 0) {
if (dev->actconfig)
usb_enable_lpm(dev);
mutex_unlock(hcd->bandwidth_mutex);
usb_autosuspend_device(dev);
goto free_interfaces;
}
/*
* Initialize the new interface structures and the
* hc/hcd/usbcore interface/endpoint state.
*/
for (i = 0; i < nintf; ++i) {
struct usb_interface_cache *intfc;
struct usb_interface *intf;
struct usb_host_interface *alt;
u8 ifnum;
cp->interface[i] = intf = new_interfaces[i];
intfc = cp->intf_cache[i];
intf->altsetting = intfc->altsetting;
intf->num_altsetting = intfc->num_altsetting;
intf->authorized = !!HCD_INTF_AUTHORIZED(hcd);
kref_get(&intfc->ref);
alt = usb_altnum_to_altsetting(intf, 0);
/* No altsetting 0? We'll assume the first altsetting.
* We could use a GetInterface call, but if a device is
* so non-compliant that it doesn't have altsetting 0
* then I wouldn't trust its reply anyway.
*/
if (!alt)
alt = &intf->altsetting[0];
ifnum = alt->desc.bInterfaceNumber;
intf->intf_assoc = find_iad(dev, cp, ifnum);
intf->cur_altsetting = alt;
usb_enable_interface(dev, intf, true);
intf->dev.parent = &dev->dev;
if (usb_of_has_combined_node(dev)) {
device_set_of_node_from_dev(&intf->dev, &dev->dev);
} else {
intf->dev.of_node = usb_of_get_interface_node(dev,
configuration, ifnum);
}
ACPI_COMPANION_SET(&intf->dev, ACPI_COMPANION(&dev->dev));
intf->dev.driver = NULL;
intf->dev.bus = &usb_bus_type;
intf->dev.type = &usb_if_device_type;
intf->dev.groups = usb_interface_groups;
/*
* Please refer to usb_alloc_dev() to see why we set
* dma_mask and dma_pfn_offset.
*/
intf->dev.dma_mask = dev->dev.dma_mask;
intf->dev.dma_pfn_offset = dev->dev.dma_pfn_offset;
INIT_WORK(&intf->reset_ws, __usb_queue_reset_device);
intf->minor = -1;
device_initialize(&intf->dev);
pm_runtime_no_callbacks(&intf->dev);
dev_set_name(&intf->dev, "%d-%s:%d.%d", dev->bus->busnum,
dev->devpath, configuration, ifnum);
usb_get_dev(dev);
}
kfree(new_interfaces);
ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
USB_REQ_SET_CONFIGURATION, 0, configuration, 0,
NULL, 0, USB_CTRL_SET_TIMEOUT);
if (ret < 0 && cp) {
/*
* All the old state is gone, so what else can we do?
* The device is probably useless now anyway.
*/
usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL);
for (i = 0; i < nintf; ++i) {
usb_disable_interface(dev, cp->interface[i], true);
put_device(&cp->interface[i]->dev);
cp->interface[i] = NULL;
}
cp = NULL;
}
dev->actconfig = cp;
mutex_unlock(hcd->bandwidth_mutex);
if (!cp) {
usb_set_device_state(dev, USB_STATE_ADDRESS);
/* Leave LPM disabled while the device is unconfigured. */
usb_autosuspend_device(dev);
return ret;
}
usb_set_device_state(dev, USB_STATE_CONFIGURED);
if (cp->string == NULL &&
!(dev->quirks & USB_QUIRK_CONFIG_INTF_STRINGS))
cp->string = usb_cache_string(dev, cp->desc.iConfiguration);
/* Now that the interfaces are installed, re-enable LPM. */
usb_unlocked_enable_lpm(dev);
/* Enable LTM if it was turned off by usb_disable_device. */
usb_enable_ltm(dev);
/* Now that all the interfaces are set up, register them
* to trigger binding of drivers to interfaces. probe()
* routines may install different altsettings and may
* claim() any interfaces not yet bound. Many class drivers
* need that: CDC, audio, video, etc.
*/
for (i = 0; i < nintf; ++i) {
struct usb_interface *intf = cp->interface[i];
if (intf->dev.of_node &&
!of_device_is_available(intf->dev.of_node)) {
dev_info(&dev->dev, "skipping disabled interface %d\n",
intf->cur_altsetting->desc.bInterfaceNumber);
continue;
}
dev_dbg(&dev->dev,
"adding %s (config #%d, interface %d)\n",
dev_name(&intf->dev), configuration,
intf->cur_altsetting->desc.bInterfaceNumber);
device_enable_async_suspend(&intf->dev);
ret = device_add(&intf->dev);
if (ret != 0) {
dev_err(&dev->dev, "device_add(%s) --> %d\n",
dev_name(&intf->dev), ret);
continue;
}
create_intf_ep_devs(intf);
}
usb_autosuspend_device(dev);
return 0;
}
EXPORT_SYMBOL_GPL(usb_set_configuration);
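/*
 * Illustrative sketch (not part of this file): how a usbcore-level
 * caller might install a configuration chosen by bConfigurationValue.
 * choose_config_value() is a hypothetical policy helper; the locking
 * rules documented above still apply (process context, device lock
 * held via usb_lock_device(), bandwidth mutex not held).
 *
 *	static int install_config(struct usb_device *udev)
 *	{
 *		int value = choose_config_value(udev);	// hypothetical
 *		int err;
 *
 *		usb_lock_device(udev);
 *		err = usb_set_configuration(udev, value);
 *		usb_unlock_device(udev);
 *		return err;
 *	}
 */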
static LIST_HEAD(set_config_list);
static DEFINE_SPINLOCK(set_config_lock);
struct set_config_request {
struct usb_device *udev;
int config;
struct work_struct work;
struct list_head node;
};
/* Worker routine for usb_driver_set_configuration() */
static void driver_set_config_work(struct work_struct *work)
{
struct set_config_request *req =
container_of(work, struct set_config_request, work);
struct usb_device *udev = req->udev;
usb_lock_device(udev);
spin_lock(&set_config_lock);
list_del(&req->node);
spin_unlock(&set_config_lock);
if (req->config >= -1) /* Is req still valid? */
usb_set_configuration(udev, req->config);
usb_unlock_device(udev);
usb_put_dev(udev);
kfree(req);
}
/* Cancel pending Set-Config requests for a device whose configuration
* was just changed
*/
static void cancel_async_set_config(struct usb_device *udev)
{
struct set_config_request *req;
spin_lock(&set_config_lock);
list_for_each_entry(req, &set_config_list, node) {
if (req->udev == udev)
req->config = -999; /* Mark as cancelled */
}
spin_unlock(&set_config_lock);
}
/**
* usb_driver_set_configuration - Provide a way for drivers to change device configurations
* @udev: the device whose configuration is being updated
* @config: the configuration being chosen.
* Context: In process context, must be able to sleep
*
* Device interface drivers are not allowed to change device configurations.
* This is because changing configurations will destroy the interface the
* driver is bound to and create new ones; it would be like a floppy-disk
* driver telling the computer to replace the floppy-disk drive with a
* tape drive!
*
* Still, in certain specialized circumstances the need may arise. This
* routine gets around the normal restrictions by using a work thread to
* submit the change-config request.
*
* Return: 0 if the request was successfully queued, error code otherwise.
* The caller has no way to know whether the queued request will eventually
* succeed.
*/
int usb_driver_set_configuration(struct usb_device *udev, int config)
{
struct set_config_request *req;
req = kmalloc(sizeof(*req), GFP_KERNEL);
if (!req)
return -ENOMEM;
req->udev = udev;
req->config = config;
INIT_WORK(&req->work, driver_set_config_work);
spin_lock(&set_config_lock);
list_add(&req->node, &set_config_list);
spin_unlock(&set_config_lock);
usb_get_dev(udev);
schedule_work(&req->work);
return 0;
}
EXPORT_SYMBOL_GPL(usb_driver_set_configuration);
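/*
 * Illustrative sketch (not part of this file): an interface driver that
 * wants a different configuration must queue the change with
 * usb_driver_set_configuration() rather than call
 * usb_set_configuration() itself, which would destroy the very
 * interface it is bound to. WANTED_CONFIG is a hypothetical constant.
 *
 *	static int example_probe(struct usb_interface *intf,
 *				 const struct usb_device_id *id)
 *	{
 *		struct usb_device *udev = interface_to_usbdev(intf);
 *
 *		if (udev->actconfig->desc.bConfigurationValue != WANTED_CONFIG)
 *			return usb_driver_set_configuration(udev,
 *							    WANTED_CONFIG);
 *		// ... normal setup for the wanted configuration ...
 *		return 0;
 *	}
 */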
/**
* cdc_parse_cdc_header - parse the extra headers present in CDC devices
* @hdr: the place to put the results of the parsing
* @intf: the interface for which parsing is requested
* @buffer: pointer to the extra headers to be parsed
* @buflen: length of the extra headers
*
* This evaluates the extra headers present in CDC devices which
* bind the interfaces for data and control and provide details
* about the capabilities of the device.
*
* Return: number of descriptors parsed or -EINVAL
* if the header is contradictory beyond salvage
*/
int cdc_parse_cdc_header(struct usb_cdc_parsed_header *hdr,
struct usb_interface *intf,
u8 *buffer,
int buflen)
{
/* duplicates are ignored */
struct usb_cdc_union_desc *union_header = NULL;
/* duplicates are not tolerated */
struct usb_cdc_header_desc *header = NULL;
struct usb_cdc_ether_desc *ether = NULL;
struct usb_cdc_mdlm_detail_desc *detail = NULL;
struct usb_cdc_mdlm_desc *desc = NULL;
unsigned int elength;
int cnt = 0;
memset(hdr, 0x00, sizeof(struct usb_cdc_parsed_header));
hdr->phonet_magic_present = false;
while (buflen > 0) {
elength = buffer[0];
if (!elength) {
dev_err(&intf->dev, "skipping garbage byte\n");
elength = 1;
goto next_desc;
}
if ((buflen < elength) || (elength < 3)) {
dev_err(&intf->dev, "invalid descriptor buffer length\n");
break;
}
if (buffer[1] != USB_DT_CS_INTERFACE) {
dev_err(&intf->dev, "skipping garbage\n");
goto next_desc;
}
switch (buffer[2]) {
case USB_CDC_UNION_TYPE: /* we've found it */
if (elength < sizeof(struct usb_cdc_union_desc))
goto next_desc;
if (union_header) {
dev_err(&intf->dev, "More than one union descriptor, skipping ...\n");
goto next_desc;
}
union_header = (struct usb_cdc_union_desc *)buffer;
break;
case USB_CDC_COUNTRY_TYPE:
if (elength < sizeof(struct usb_cdc_country_functional_desc))
goto next_desc;
hdr->usb_cdc_country_functional_desc =
(struct usb_cdc_country_functional_desc *)buffer;
break;
case USB_CDC_HEADER_TYPE:
if (elength != sizeof(struct usb_cdc_header_desc))
goto next_desc;
if (header)
return -EINVAL;
header = (struct usb_cdc_header_desc *)buffer;
break;
case USB_CDC_ACM_TYPE:
if (elength < sizeof(struct usb_cdc_acm_descriptor))
goto next_desc;
hdr->usb_cdc_acm_descriptor =
(struct usb_cdc_acm_descriptor *)buffer;
break;
case USB_CDC_ETHERNET_TYPE:
if (elength != sizeof(struct usb_cdc_ether_desc))
goto next_desc;
if (ether)
return -EINVAL;
ether = (struct usb_cdc_ether_desc *)buffer;
break;
case USB_CDC_CALL_MANAGEMENT_TYPE:
if (elength < sizeof(struct usb_cdc_call_mgmt_descriptor))
goto next_desc;
hdr->usb_cdc_call_mgmt_descriptor =
(struct usb_cdc_call_mgmt_descriptor *)buffer;
break;
case USB_CDC_DMM_TYPE:
if (elength < sizeof(struct usb_cdc_dmm_desc))
goto next_desc;
hdr->usb_cdc_dmm_desc =
(struct usb_cdc_dmm_desc *)buffer;
break;
case USB_CDC_MDLM_TYPE:
if (elength < sizeof(struct usb_cdc_mdlm_desc))
goto next_desc;
if (desc)
return -EINVAL;
desc = (struct usb_cdc_mdlm_desc *)buffer;
break;
case USB_CDC_MDLM_DETAIL_TYPE:
if (elength < sizeof(struct usb_cdc_mdlm_detail_desc))
goto next_desc;
if (detail)
return -EINVAL;
detail = (struct usb_cdc_mdlm_detail_desc *)buffer;
break;
case USB_CDC_NCM_TYPE:
if (elength < sizeof(struct usb_cdc_ncm_desc))
goto next_desc;
hdr->usb_cdc_ncm_desc = (struct usb_cdc_ncm_desc *)buffer;
break;
case USB_CDC_MBIM_TYPE:
if (elength < sizeof(struct usb_cdc_mbim_desc))
goto next_desc;
hdr->usb_cdc_mbim_desc = (struct usb_cdc_mbim_desc *)buffer;
break;
case USB_CDC_MBIM_EXTENDED_TYPE:
if (elength < sizeof(struct usb_cdc_mbim_extended_desc))
break;
hdr->usb_cdc_mbim_extended_desc =
(struct usb_cdc_mbim_extended_desc *)buffer;
break;
case CDC_PHONET_MAGIC_NUMBER:
hdr->phonet_magic_present = true;
break;
default:
/*
* there are LOTS more CDC descriptors that
* could legitimately be found here.
*/
dev_dbg(&intf->dev, "Ignoring descriptor: type %02x, length %ud\n",
buffer[2], elength);
goto next_desc;
}
cnt++;
next_desc:
buflen -= elength;
buffer += elength;
}
hdr->usb_cdc_union_desc = union_header;
hdr->usb_cdc_header_desc = header;
hdr->usb_cdc_mdlm_detail_desc = detail;
hdr->usb_cdc_mdlm_desc = desc;
hdr->usb_cdc_ether_desc = ether;
return cnt;
}
EXPORT_SYMBOL(cdc_parse_cdc_header);
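/*
 * Illustrative sketch (not part of this file): a CDC class driver
 * typically feeds the class-specific descriptors trailing its
 * interface descriptor into cdc_parse_cdc_header() from probe() and
 * then consults the parsed header. Error handling is abbreviated.
 *
 *	static int example_cdc_probe(struct usb_interface *intf)
 *	{
 *		struct usb_cdc_parsed_header hdr;
 *		int ret;
 *
 *		ret = cdc_parse_cdc_header(&hdr, intf,
 *					   intf->altsetting->extra,
 *					   intf->altsetting->extralen);
 *		if (ret < 0)
 *			return ret;	// contradictory beyond salvage
 *		if (!hdr.usb_cdc_union_desc)
 *			return -EINVAL;	// this driver needs a union desc
 *		// hdr.usb_cdc_union_desc->bSlaveInterface0 names the
 *		// companion data interface
 *		return 0;
 *	}
 */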
| ./CrossVul/dataset_final_sorted/CWE-416/c/bad_3994_0 |
crossvul-cpp_data_bad_4655_0 | // SPDX-License-Identifier: GPL-2.0
/*
* linux/fs/namei.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
*/
/*
* Some corrections by tytso.
*/
/* [Feb 1997 T. Schoebel-Theuer] Complete rewrite of the pathname
* lookup logic.
*/
/* [Feb-Apr 2000, AV] Rewrite to the new namespace architecture.
*/
#include <linux/init.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/pagemap.h>
#include <linux/fsnotify.h>
#include <linux/personality.h>
#include <linux/security.h>
#include <linux/ima.h>
#include <linux/syscalls.h>
#include <linux/mount.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/file.h>
#include <linux/fcntl.h>
#include <linux/device_cgroup.h>
#include <linux/fs_struct.h>
#include <linux/posix_acl.h>
#include <linux/hash.h>
#include <linux/bitops.h>
#include <linux/init_task.h>
#include <linux/uaccess.h>
#include "internal.h"
#include "mount.h"
/* [Feb-1997 T. Schoebel-Theuer]
* Fundamental changes in the pathname lookup mechanisms (namei)
* were necessary because of omirr. The reason is that omirr needs
* to know the _real_ pathname, not the user-supplied one, in case
* of symlinks (and also when transname replacements occur).
*
* The new code replaces the old recursive symlink resolution with
* an iterative one (in case of non-nested symlink chains). It does
* this with calls to <fs>_follow_link().
* As a side effect, dir_namei(), _namei() and follow_link() are now
* replaced with a single function lookup_dentry() that can handle all
* the special cases of the former code.
*
* With the new dcache, the pathname is stored at each inode, at least as
* long as the refcount of the inode is positive. As a side effect, the
* size of the dcache depends on the inode cache and thus is dynamic.
*
* [29-Apr-1998 C. Scott Ananian] Updated above description of symlink
* resolution to correspond with current state of the code.
*
* Note that the symlink resolution is not *completely* iterative.
* There is still a significant amount of tail- and mid- recursion in
* the algorithm. Also, note that <fs>_readlink() is not used in
* lookup_dentry(): lookup_dentry() on the result of <fs>_readlink()
* may return different results than <fs>_follow_link(). Many virtual
* filesystems (including /proc) exhibit this behavior.
*/
/* [24-Feb-97 T. Schoebel-Theuer] Side effects caused by new implementation:
* New symlink semantics: when open() is called with flags O_CREAT | O_EXCL
* and the name already exists in form of a symlink, try to create the new
* name indicated by the symlink. The old code always complained that the
* name already exists, due to not following the symlink even if its target
* is nonexistent. The new semantics affects also mknod() and link() when
* the name is a symlink pointing to a non-existent name.
*
* I don't know which semantics is the right one, since I have no access
* to standards. But I found by trial that HP-UX 9.0 has the full "new"
* semantics implemented, while SunOS 4.1.1 and Solaris (SunOS 5.4) have the
* "old" one. Personally, I think the new semantics is much more logical.
* Note that "ln old new" where "new" is a symlink pointing to a non-existing
* file does succeed in both HP-UX and SunOs, but not in Solaris
* and in the old Linux semantics.
*/
/* [16-Dec-97 Kevin Buhr] For security reasons, we change some symlink
* semantics. See the comments in "open_namei" and "do_link" below.
*
* [10-Sep-98 Alan Modra] Another symlink change.
*/
/* [Feb-Apr 2000 AV] Complete rewrite. Rules for symlinks:
* inside the path - always follow.
* in the last component in creation/removal/renaming - never follow.
* if LOOKUP_FOLLOW passed - follow.
* if the pathname has trailing slashes - follow.
* otherwise - don't follow.
* (applied in that order).
*
* [Jun 2000 AV] Inconsistent behaviour of open() in case if flags==O_CREAT
* restored for 2.4. This is the last surviving part of old 4.2BSD bug.
* During the 2.4 we need to fix the userland stuff depending on it -
* hopefully we will be able to get rid of that wart in 2.5. So far only
* XEmacs seems to be relying on it...
*/
/*
* [Sep 2001 AV] Single-semaphore locking scheme (kudos to David Holland)
* implemented. Let's see if raised priority of ->s_vfs_rename_mutex gives
* any extra contention...
*/
/* In order to reduce some races, while at the same time doing additional
* checking and hopefully speeding things up, we copy filenames to the
 * kernel data space before using them.
*
* POSIX.1 2.4: an empty pathname is invalid (ENOENT).
* PATH_MAX includes the nul terminator --RR.
*/
#define EMBEDDED_NAME_MAX (PATH_MAX - offsetof(struct filename, iname))
struct filename *
getname_flags(const char __user *filename, int flags, int *empty)
{
struct filename *result;
char *kname;
int len;
result = audit_reusename(filename);
if (result)
return result;
result = __getname();
if (unlikely(!result))
return ERR_PTR(-ENOMEM);
/*
* First, try to embed the struct filename inside the names_cache
* allocation
*/
kname = (char *)result->iname;
result->name = kname;
len = strncpy_from_user(kname, filename, EMBEDDED_NAME_MAX);
if (unlikely(len < 0)) {
__putname(result);
return ERR_PTR(len);
}
/*
* Uh-oh. We have a name that's approaching PATH_MAX. Allocate a
* separate struct filename so we can dedicate the entire
* names_cache allocation for the pathname, and re-do the copy from
* userland.
*/
if (unlikely(len == EMBEDDED_NAME_MAX)) {
const size_t size = offsetof(struct filename, iname[1]);
kname = (char *)result;
/*
		 * size is chosen so that result->iname[0] is guaranteed to
		 * lie within the same object, and so that kname can never
		 * be equal to result->iname, no matter what.
*/
result = kzalloc(size, GFP_KERNEL);
if (unlikely(!result)) {
__putname(kname);
return ERR_PTR(-ENOMEM);
}
result->name = kname;
len = strncpy_from_user(kname, filename, PATH_MAX);
if (unlikely(len < 0)) {
__putname(kname);
kfree(result);
return ERR_PTR(len);
}
if (unlikely(len == PATH_MAX)) {
__putname(kname);
kfree(result);
return ERR_PTR(-ENAMETOOLONG);
}
}
result->refcnt = 1;
/* The empty path is special. */
if (unlikely(!len)) {
if (empty)
*empty = 1;
if (!(flags & LOOKUP_EMPTY)) {
putname(result);
return ERR_PTR(-ENOENT);
}
}
result->uptr = filename;
result->aname = NULL;
audit_getname(result);
return result;
}
struct filename *
getname(const char __user * filename)
{
return getname_flags(filename, 0, NULL);
}
struct filename *
getname_kernel(const char * filename)
{
struct filename *result;
int len = strlen(filename) + 1;
result = __getname();
if (unlikely(!result))
return ERR_PTR(-ENOMEM);
if (len <= EMBEDDED_NAME_MAX) {
result->name = (char *)result->iname;
} else if (len <= PATH_MAX) {
const size_t size = offsetof(struct filename, iname[1]);
struct filename *tmp;
tmp = kmalloc(size, GFP_KERNEL);
if (unlikely(!tmp)) {
__putname(result);
return ERR_PTR(-ENOMEM);
}
tmp->name = (char *)result;
result = tmp;
} else {
__putname(result);
return ERR_PTR(-ENAMETOOLONG);
}
memcpy((char *)result->name, filename, len);
result->uptr = NULL;
result->aname = NULL;
result->refcnt = 1;
audit_getname(result);
return result;
}
void putname(struct filename *name)
{
BUG_ON(name->refcnt <= 0);
if (--name->refcnt > 0)
return;
if (name->name != name->iname) {
__putname(name->name);
kfree(name);
} else
__putname(name);
}
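/*
 * Illustrative sketch (not part of this file): the usual
 * getname()/putname() pairing in a syscall that takes a pathname from
 * userspace. do_work() stands in for whatever the syscall really does.
 *
 *	long example_syscall(const char __user *pathname)
 *	{
 *		struct filename *name = getname(pathname);
 *		long err;
 *
 *		if (IS_ERR(name))
 *			return PTR_ERR(name);
 *		err = do_work(name);	// hypothetical
 *		putname(name);
 *		return err;
 *	}
 */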
static int check_acl(struct inode *inode, int mask)
{
#ifdef CONFIG_FS_POSIX_ACL
struct posix_acl *acl;
if (mask & MAY_NOT_BLOCK) {
acl = get_cached_acl_rcu(inode, ACL_TYPE_ACCESS);
if (!acl)
return -EAGAIN;
/* no ->get_acl() calls in RCU mode... */
if (is_uncached_acl(acl))
return -ECHILD;
return posix_acl_permission(inode, acl, mask & ~MAY_NOT_BLOCK);
}
acl = get_acl(inode, ACL_TYPE_ACCESS);
if (IS_ERR(acl))
return PTR_ERR(acl);
if (acl) {
int error = posix_acl_permission(inode, acl, mask);
posix_acl_release(acl);
return error;
}
#endif
return -EAGAIN;
}
/*
* This does the basic permission checking
*/
static int acl_permission_check(struct inode *inode, int mask)
{
unsigned int mode = inode->i_mode;
if (likely(uid_eq(current_fsuid(), inode->i_uid)))
mode >>= 6;
else {
if (IS_POSIXACL(inode) && (mode & S_IRWXG)) {
int error = check_acl(inode, mask);
if (error != -EAGAIN)
return error;
}
if (in_group_p(inode->i_gid))
mode >>= 3;
}
/*
* If the DACs are ok we don't need any capability check.
*/
if ((mask & ~mode & (MAY_READ | MAY_WRITE | MAY_EXEC)) == 0)
return 0;
return -EACCES;
}
/**
* generic_permission - check for access rights on a Posix-like filesystem
* @inode: inode to check access rights for
* @mask: right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC, ...)
*
* Used to check for read/write/execute permissions on a file.
* We use "fsuid" for this, letting us set arbitrary permissions
* for filesystem access without changing the "normal" uids which
* are used for other things.
*
* generic_permission is rcu-walk aware. It returns -ECHILD in case an rcu-walk
 * request cannot be satisfied (e.g. requires blocking or too much complexity).
* It would then be called again in ref-walk mode.
*/
int generic_permission(struct inode *inode, int mask)
{
int ret;
/*
* Do the basic permission checks.
*/
ret = acl_permission_check(inode, mask);
if (ret != -EACCES)
return ret;
if (S_ISDIR(inode->i_mode)) {
/* DACs are overridable for directories */
if (!(mask & MAY_WRITE))
if (capable_wrt_inode_uidgid(inode,
CAP_DAC_READ_SEARCH))
return 0;
if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
return 0;
return -EACCES;
}
/*
* Searching includes executable on directories, else just read.
*/
mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
if (mask == MAY_READ)
if (capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
return 0;
/*
* Read/write DACs are always overridable.
* Executable DACs are overridable when there is
* at least one exec bit set.
*/
if (!(mask & MAY_EXEC) || (inode->i_mode & S_IXUGO))
if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
return 0;
return -EACCES;
}
EXPORT_SYMBOL(generic_permission);
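/*
 * Illustrative sketch (not part of this file): a filesystem layering
 * its own veto over the standard POSIX checks would wire up a
 * ->permission() like this. examplefs_is_frozen() is a hypothetical
 * predicate, assumed here to be safe to call without blocking so that
 * rcu-walk (MAY_NOT_BLOCK) needs no special casing.
 *
 *	static int examplefs_permission(struct inode *inode, int mask)
 *	{
 *		if (examplefs_is_frozen(inode))
 *			return -EACCES;
 *		return generic_permission(inode, mask);
 *	}
 */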
/*
* We _really_ want to just do "generic_permission()" without
* even looking at the inode->i_op values. So we keep a cache
* flag in inode->i_opflags, that says "this has not special
* permission function, use the fast case".
*/
static inline int do_inode_permission(struct inode *inode, int mask)
{
if (unlikely(!(inode->i_opflags & IOP_FASTPERM))) {
if (likely(inode->i_op->permission))
return inode->i_op->permission(inode, mask);
/* This gets set once for the inode lifetime */
spin_lock(&inode->i_lock);
inode->i_opflags |= IOP_FASTPERM;
spin_unlock(&inode->i_lock);
}
return generic_permission(inode, mask);
}
/**
* sb_permission - Check superblock-level permissions
* @sb: Superblock of inode to check permission on
* @inode: Inode to check permission on
* @mask: Right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC)
*
* Separate out file-system wide checks from inode-specific permission checks.
*/
static int sb_permission(struct super_block *sb, struct inode *inode, int mask)
{
if (unlikely(mask & MAY_WRITE)) {
umode_t mode = inode->i_mode;
/* Nobody gets write access to a read-only fs. */
if (sb_rdonly(sb) && (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)))
return -EROFS;
}
return 0;
}
/**
* inode_permission - Check for access rights to a given inode
* @inode: Inode to check permission on
* @mask: Right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC)
*
* Check for read/write/execute permissions on an inode. We use fs[ug]id for
* this, letting us set arbitrary permissions for filesystem access without
* changing the "normal" UIDs which are used for other things.
*
* When checking for MAY_APPEND, MAY_WRITE must also be set in @mask.
*/
int inode_permission(struct inode *inode, int mask)
{
int retval;
retval = sb_permission(inode->i_sb, inode, mask);
if (retval)
return retval;
if (unlikely(mask & MAY_WRITE)) {
/*
* Nobody gets write access to an immutable file.
*/
if (IS_IMMUTABLE(inode))
return -EPERM;
/*
* Updating mtime will likely cause i_uid and i_gid to be
* written back improperly if their true value is unknown
* to the vfs.
*/
if (HAS_UNMAPPED_ID(inode))
return -EACCES;
}
retval = do_inode_permission(inode, mask);
if (retval)
return retval;
retval = devcgroup_inode_permission(inode, mask);
if (retval)
return retval;
return security_inode_permission(inode, mask);
}
EXPORT_SYMBOL(inode_permission);
/**
* path_get - get a reference to a path
* @path: path to get the reference to
*
* Given a path increment the reference count to the dentry and the vfsmount.
*/
void path_get(const struct path *path)
{
mntget(path->mnt);
dget(path->dentry);
}
EXPORT_SYMBOL(path_get);
/**
* path_put - put a reference to a path
* @path: path to put the reference to
*
* Given a path decrement the reference count to the dentry and the vfsmount.
*/
void path_put(const struct path *path)
{
dput(path->dentry);
mntput(path->mnt);
}
EXPORT_SYMBOL(path_put);
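/*
 * Illustrative sketch (not part of this file): path_get()/path_put()
 * bracket the lifetime of any copied struct path:
 *
 *	struct path copy = *src;	// src: a path someone else pins
 *
 *	path_get(&copy);		// pin dentry + vfsmount ourselves
 *	// ... use copy.mnt / copy.dentry ...
 *	path_put(&copy);		// drop both references
 */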
#define EMBEDDED_LEVELS 2
struct nameidata {
struct path path;
struct qstr last;
struct path root;
struct inode *inode; /* path.dentry.d_inode */
unsigned int flags;
unsigned seq, m_seq;
int last_type;
unsigned depth;
int total_link_count;
struct saved {
struct path link;
struct delayed_call done;
const char *name;
unsigned seq;
} *stack, internal[EMBEDDED_LEVELS];
struct filename *name;
struct nameidata *saved;
struct inode *link_inode;
unsigned root_seq;
int dfd;
} __randomize_layout;
static void set_nameidata(struct nameidata *p, int dfd, struct filename *name)
{
struct nameidata *old = current->nameidata;
p->stack = p->internal;
p->dfd = dfd;
p->name = name;
p->total_link_count = old ? old->total_link_count : 0;
p->saved = old;
current->nameidata = p;
}
static void restore_nameidata(void)
{
struct nameidata *now = current->nameidata, *old = now->saved;
current->nameidata = old;
if (old)
old->total_link_count = now->total_link_count;
if (now->stack != now->internal)
kfree(now->stack);
}
static int __nd_alloc_stack(struct nameidata *nd)
{
struct saved *p;
	if (nd->flags & LOOKUP_RCU) {
		p = kmalloc_array(MAXSYMLINKS, sizeof(struct saved),
				  GFP_ATOMIC);
		if (unlikely(!p))
			return -ECHILD;
	} else {
		p = kmalloc_array(MAXSYMLINKS, sizeof(struct saved),
				  GFP_KERNEL);
		if (unlikely(!p))
			return -ENOMEM;
	}
memcpy(p, nd->internal, sizeof(nd->internal));
nd->stack = p;
return 0;
}
/**
* path_connected - Verify that a path->dentry is below path->mnt.mnt_root
 * @path: path to verify
*
* Rename can sometimes move a file or directory outside of a bind
* mount, path_connected allows those cases to be detected.
*/
static bool path_connected(const struct path *path)
{
struct vfsmount *mnt = path->mnt;
struct super_block *sb = mnt->mnt_sb;
/* Bind mounts and multi-root filesystems can have disconnected paths */
if (!(sb->s_iflags & SB_I_MULTIROOT) && (mnt->mnt_root == sb->s_root))
return true;
return is_subdir(path->dentry, mnt->mnt_root);
}
static inline int nd_alloc_stack(struct nameidata *nd)
{
if (likely(nd->depth != EMBEDDED_LEVELS))
return 0;
if (likely(nd->stack != nd->internal))
return 0;
return __nd_alloc_stack(nd);
}
static void drop_links(struct nameidata *nd)
{
int i = nd->depth;
while (i--) {
struct saved *last = nd->stack + i;
do_delayed_call(&last->done);
clear_delayed_call(&last->done);
}
}
static void terminate_walk(struct nameidata *nd)
{
drop_links(nd);
if (!(nd->flags & LOOKUP_RCU)) {
int i;
path_put(&nd->path);
for (i = 0; i < nd->depth; i++)
path_put(&nd->stack[i].link);
if (nd->flags & LOOKUP_ROOT_GRABBED) {
path_put(&nd->root);
nd->flags &= ~LOOKUP_ROOT_GRABBED;
}
} else {
nd->flags &= ~LOOKUP_RCU;
rcu_read_unlock();
}
nd->depth = 0;
}
/* path_put is needed afterwards regardless of success or failure */
static bool legitimize_path(struct nameidata *nd,
struct path *path, unsigned seq)
{
int res = __legitimize_mnt(path->mnt, nd->m_seq);
if (unlikely(res)) {
if (res > 0)
path->mnt = NULL;
path->dentry = NULL;
return false;
}
if (unlikely(!lockref_get_not_dead(&path->dentry->d_lockref))) {
path->dentry = NULL;
return false;
}
return !read_seqcount_retry(&path->dentry->d_seq, seq);
}
static bool legitimize_links(struct nameidata *nd)
{
int i;
for (i = 0; i < nd->depth; i++) {
struct saved *last = nd->stack + i;
if (unlikely(!legitimize_path(nd, &last->link, last->seq))) {
drop_links(nd);
nd->depth = i + 1;
return false;
}
}
return true;
}
static bool legitimize_root(struct nameidata *nd)
{
if (!nd->root.mnt || (nd->flags & LOOKUP_ROOT))
return true;
nd->flags |= LOOKUP_ROOT_GRABBED;
return legitimize_path(nd, &nd->root, nd->root_seq);
}
/*
* Path walking has 2 modes, rcu-walk and ref-walk (see
* Documentation/filesystems/path-lookup.txt). In situations when we can't
* continue in RCU mode, we attempt to drop out of rcu-walk mode and grab
* normal reference counts on dentries and vfsmounts to transition to ref-walk
* mode. Refcounts are grabbed at the last known good point before rcu-walk
* got stuck, so ref-walk may continue from there. If this is not successful
* (eg. a seqcount has changed), then failure is returned and it's up to caller
* to restart the path walk from the beginning in ref-walk mode.
*/
/**
* unlazy_walk - try to switch to ref-walk mode.
* @nd: nameidata pathwalk data
* Returns: 0 on success, -ECHILD on failure
*
* unlazy_walk attempts to legitimize the current nd->path and nd->root
* for ref-walk mode.
* Must be called from rcu-walk context.
* Nothing should touch nameidata between unlazy_walk() failure and
* terminate_walk().
*/
static int unlazy_walk(struct nameidata *nd)
{
struct dentry *parent = nd->path.dentry;
BUG_ON(!(nd->flags & LOOKUP_RCU));
nd->flags &= ~LOOKUP_RCU;
if (unlikely(!legitimize_links(nd)))
goto out1;
if (unlikely(!legitimize_path(nd, &nd->path, nd->seq)))
goto out;
if (unlikely(!legitimize_root(nd)))
goto out;
rcu_read_unlock();
BUG_ON(nd->inode != parent->d_inode);
return 0;
out1:
nd->path.mnt = NULL;
nd->path.dentry = NULL;
out:
rcu_read_unlock();
return -ECHILD;
}
/**
* unlazy_child - try to switch to ref-walk mode.
* @nd: nameidata pathwalk data
* @dentry: child of nd->path.dentry
* @seq: seq number to check dentry against
* Returns: 0 on success, -ECHILD on failure
*
* unlazy_child attempts to legitimize the current nd->path, nd->root and dentry
* for ref-walk mode. @dentry must be a path found by a do_lookup call on
* @nd. Must be called from rcu-walk context.
* Nothing should touch nameidata between unlazy_child() failure and
* terminate_walk().
*/
static int unlazy_child(struct nameidata *nd, struct dentry *dentry, unsigned seq)
{
BUG_ON(!(nd->flags & LOOKUP_RCU));
nd->flags &= ~LOOKUP_RCU;
if (unlikely(!legitimize_links(nd)))
goto out2;
if (unlikely(!legitimize_mnt(nd->path.mnt, nd->m_seq)))
goto out2;
if (unlikely(!lockref_get_not_dead(&nd->path.dentry->d_lockref)))
goto out1;
/*
* We need to move both the parent and the dentry from the RCU domain
* to be properly refcounted. And the sequence number in the dentry
* validates *both* dentry counters, since we checked the sequence
* number of the parent after we got the child sequence number. So we
	 * know the parent must still be valid if the child sequence
	 * number is valid.
*/
if (unlikely(!lockref_get_not_dead(&dentry->d_lockref)))
goto out;
if (unlikely(read_seqcount_retry(&dentry->d_seq, seq)))
goto out_dput;
/*
* Sequence counts matched. Now make sure that the root is
* still valid and get it if required.
*/
if (unlikely(!legitimize_root(nd)))
goto out_dput;
rcu_read_unlock();
return 0;
out2:
nd->path.mnt = NULL;
out1:
nd->path.dentry = NULL;
out:
rcu_read_unlock();
return -ECHILD;
out_dput:
rcu_read_unlock();
dput(dentry);
return -ECHILD;
}
static inline int d_revalidate(struct dentry *dentry, unsigned int flags)
{
if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE))
return dentry->d_op->d_revalidate(dentry, flags);
else
return 1;
}
/**
* complete_walk - successful completion of path walk
* @nd: pointer nameidata
*
* If we had been in RCU mode, drop out of it and legitimize nd->path.
* Revalidate the final result, unless we'd already done that during
* the path walk or the filesystem doesn't ask for it. Return 0 on
* success, -error on failure. In case of failure caller does not
* need to drop nd->path.
*/
static int complete_walk(struct nameidata *nd)
{
struct dentry *dentry = nd->path.dentry;
int status;
if (nd->flags & LOOKUP_RCU) {
if (!(nd->flags & LOOKUP_ROOT))
nd->root.mnt = NULL;
if (unlikely(unlazy_walk(nd)))
return -ECHILD;
}
if (likely(!(nd->flags & LOOKUP_JUMPED)))
return 0;
if (likely(!(dentry->d_flags & DCACHE_OP_WEAK_REVALIDATE)))
return 0;
status = dentry->d_op->d_weak_revalidate(dentry, nd->flags);
if (status > 0)
return 0;
if (!status)
status = -ESTALE;
return status;
}
static void set_root(struct nameidata *nd)
{
struct fs_struct *fs = current->fs;
if (nd->flags & LOOKUP_RCU) {
unsigned seq;
do {
seq = read_seqcount_begin(&fs->seq);
nd->root = fs->root;
nd->root_seq = __read_seqcount_begin(&nd->root.dentry->d_seq);
} while (read_seqcount_retry(&fs->seq, seq));
} else {
get_fs_root(fs, &nd->root);
nd->flags |= LOOKUP_ROOT_GRABBED;
}
}
static void path_put_conditional(struct path *path, struct nameidata *nd)
{
dput(path->dentry);
if (path->mnt != nd->path.mnt)
mntput(path->mnt);
}
static inline void path_to_nameidata(const struct path *path,
struct nameidata *nd)
{
if (!(nd->flags & LOOKUP_RCU)) {
dput(nd->path.dentry);
if (nd->path.mnt != path->mnt)
mntput(nd->path.mnt);
}
nd->path.mnt = path->mnt;
nd->path.dentry = path->dentry;
}
static int nd_jump_root(struct nameidata *nd)
{
if (nd->flags & LOOKUP_RCU) {
struct dentry *d;
nd->path = nd->root;
d = nd->path.dentry;
nd->inode = d->d_inode;
nd->seq = nd->root_seq;
if (unlikely(read_seqcount_retry(&d->d_seq, nd->seq)))
return -ECHILD;
} else {
path_put(&nd->path);
nd->path = nd->root;
path_get(&nd->path);
nd->inode = nd->path.dentry->d_inode;
}
nd->flags |= LOOKUP_JUMPED;
return 0;
}
/*
* Helper to directly jump to a known parsed path from ->get_link,
* caller must have taken a reference to path beforehand.
*/
void nd_jump_link(struct path *path)
{
struct nameidata *nd = current->nameidata;
path_put(&nd->path);
nd->path = *path;
nd->inode = nd->path.dentry->d_inode;
nd->flags |= LOOKUP_JUMPED;
}
static inline void put_link(struct nameidata *nd)
{
struct saved *last = nd->stack + --nd->depth;
do_delayed_call(&last->done);
if (!(nd->flags & LOOKUP_RCU))
path_put(&last->link);
}
int sysctl_protected_symlinks __read_mostly = 0;
int sysctl_protected_hardlinks __read_mostly = 0;
int sysctl_protected_fifos __read_mostly;
int sysctl_protected_regular __read_mostly;
/**
* may_follow_link - Check symlink following for unsafe situations
* @nd: nameidata pathwalk data
*
* In the case of the sysctl_protected_symlinks sysctl being enabled,
* CAP_DAC_OVERRIDE needs to be specifically ignored if the symlink is
* in a sticky world-writable directory. This is to protect privileged
* processes from failing races against path names that may change out
* from under them by way of other users creating malicious symlinks.
* It will permit symlinks to be followed only when outside a sticky
* world-writable directory, or when the uid of the symlink and follower
* match, or when the directory owner matches the symlink's owner.
*
* Returns 0 if following the symlink is allowed, -ve on error.
*/
static inline int may_follow_link(struct nameidata *nd)
{
const struct inode *inode;
const struct inode *parent;
kuid_t puid;
if (!sysctl_protected_symlinks)
return 0;
/* Allowed if owner and follower match. */
inode = nd->link_inode;
if (uid_eq(current_cred()->fsuid, inode->i_uid))
return 0;
/* Allowed if parent directory not sticky and world-writable. */
parent = nd->inode;
if ((parent->i_mode & (S_ISVTX|S_IWOTH)) != (S_ISVTX|S_IWOTH))
return 0;
/* Allowed if parent directory and link owner match. */
puid = parent->i_uid;
if (uid_valid(puid) && uid_eq(puid, inode->i_uid))
return 0;
if (nd->flags & LOOKUP_RCU)
return -ECHILD;
audit_inode(nd->name, nd->stack[0].link.dentry, 0);
audit_log_path_denied(AUDIT_ANOM_LINK, "follow_link");
return -EACCES;
}
/**
* safe_hardlink_source - Check for safe hardlink conditions
* @inode: the source inode to hardlink from
*
 * Return false if at least one of the following conditions holds:
* - inode is not a regular file
* - inode is setuid
* - inode is setgid and group-exec
* - access failure for read and write
*
* Otherwise returns true.
*/
static bool safe_hardlink_source(struct inode *inode)
{
umode_t mode = inode->i_mode;
/* Special files should not get pinned to the filesystem. */
if (!S_ISREG(mode))
return false;
/* Setuid files should not get pinned to the filesystem. */
if (mode & S_ISUID)
return false;
/* Executable setgid files should not get pinned to the filesystem. */
if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))
return false;
/* Hardlinking to unreadable or unwritable sources is dangerous. */
if (inode_permission(inode, MAY_READ | MAY_WRITE))
return false;
return true;
}
/**
* may_linkat - Check permissions for creating a hardlink
* @link: the source to hardlink from
*
* Block hardlink when all of:
* - sysctl_protected_hardlinks enabled
* - fsuid does not match inode
* - hardlink source is unsafe (see safe_hardlink_source() above)
* - not CAP_FOWNER in a namespace with the inode owner uid mapped
*
* Returns 0 if successful, -ve on error.
*/
static int may_linkat(struct path *link)
{
struct inode *inode = link->dentry->d_inode;
/* Inode writeback is not safe when the uid or gid are invalid. */
if (!uid_valid(inode->i_uid) || !gid_valid(inode->i_gid))
return -EOVERFLOW;
if (!sysctl_protected_hardlinks)
return 0;
/* Source inode owner (or CAP_FOWNER) can hardlink all they like,
* otherwise, it must be a safe source.
*/
if (safe_hardlink_source(inode) || inode_owner_or_capable(inode))
return 0;
audit_log_path_denied(AUDIT_ANOM_LINK, "linkat");
return -EPERM;
}
/**
* may_create_in_sticky - Check whether an O_CREAT open in a sticky directory
* should be allowed, or not, on files that already
* exist.
* @dir: the sticky parent directory
* @inode: the inode of the file to open
*
* Block an O_CREAT open of a FIFO (or a regular file) when:
* - sysctl_protected_fifos (or sysctl_protected_regular) is enabled
* - the file already exists
* - we are in a sticky directory
* - we don't own the file
* - the owner of the directory doesn't own the file
* - the directory is world writable
* If the sysctl_protected_fifos (or sysctl_protected_regular) is set to 2
* the directory doesn't have to be world writable: being group writable will
* be enough.
*
* Returns 0 if the open is allowed, -ve on error.
*/
static int may_create_in_sticky(struct dentry * const dir,
struct inode * const inode)
{
if ((!sysctl_protected_fifos && S_ISFIFO(inode->i_mode)) ||
(!sysctl_protected_regular && S_ISREG(inode->i_mode)) ||
likely(!(dir->d_inode->i_mode & S_ISVTX)) ||
uid_eq(inode->i_uid, dir->d_inode->i_uid) ||
uid_eq(current_fsuid(), inode->i_uid))
return 0;
if (likely(dir->d_inode->i_mode & 0002) ||
(dir->d_inode->i_mode & 0020 &&
((sysctl_protected_fifos >= 2 && S_ISFIFO(inode->i_mode)) ||
(sysctl_protected_regular >= 2 && S_ISREG(inode->i_mode))))) {
const char *operation = S_ISFIFO(inode->i_mode) ?
"sticky_create_fifo" :
"sticky_create_regular";
audit_log_path_denied(AUDIT_ANOM_CREAT, operation);
return -EACCES;
}
return 0;
}
static __always_inline
const char *get_link(struct nameidata *nd)
{
struct saved *last = nd->stack + nd->depth - 1;
struct dentry *dentry = last->link.dentry;
struct inode *inode = nd->link_inode;
int error;
const char *res;
if (!(nd->flags & LOOKUP_RCU)) {
touch_atime(&last->link);
cond_resched();
} else if (atime_needs_update(&last->link, inode)) {
if (unlikely(unlazy_walk(nd)))
return ERR_PTR(-ECHILD);
touch_atime(&last->link);
}
error = security_inode_follow_link(dentry, inode,
nd->flags & LOOKUP_RCU);
if (unlikely(error))
return ERR_PTR(error);
nd->last_type = LAST_BIND;
res = READ_ONCE(inode->i_link);
if (!res) {
const char * (*get)(struct dentry *, struct inode *,
struct delayed_call *);
get = inode->i_op->get_link;
if (nd->flags & LOOKUP_RCU) {
res = get(NULL, inode, &last->done);
if (res == ERR_PTR(-ECHILD)) {
if (unlikely(unlazy_walk(nd)))
return ERR_PTR(-ECHILD);
res = get(dentry, inode, &last->done);
}
} else {
res = get(dentry, inode, &last->done);
}
if (IS_ERR_OR_NULL(res))
return res;
}
if (*res == '/') {
if (!nd->root.mnt)
set_root(nd);
if (unlikely(nd_jump_root(nd)))
return ERR_PTR(-ECHILD);
while (unlikely(*++res == '/'))
;
}
if (!*res)
res = NULL;
return res;
}
/*
* follow_up - Find the mountpoint of path's vfsmount
*
* Given a path, find the mountpoint of its source file system.
* Replace @path with the path of the mountpoint in the parent mount.
* Up is towards /.
*
* Return 1 if we went up a level and 0 if we were already at the
* root.
*/
int follow_up(struct path *path)
{
struct mount *mnt = real_mount(path->mnt);
struct mount *parent;
struct dentry *mountpoint;
read_seqlock_excl(&mount_lock);
parent = mnt->mnt_parent;
if (parent == mnt) {
read_sequnlock_excl(&mount_lock);
return 0;
}
mntget(&parent->mnt);
mountpoint = dget(mnt->mnt_mountpoint);
read_sequnlock_excl(&mount_lock);
dput(path->dentry);
path->dentry = mountpoint;
mntput(path->mnt);
path->mnt = &parent->mnt;
return 1;
}
EXPORT_SYMBOL(follow_up);
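/*
 * Illustrative sketch (not part of this file): looping on follow_up()
 * climbs the mount tree until the path sits in a mount that is not
 * itself mounted on anything, at which point follow_up() returns 0:
 *
 *	while (follow_up(&path))
 *		;	// path now lives in the topmost (root) mount
 */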
/*
* Perform an automount
* - return -EISDIR to tell follow_managed() to stop and return the path we
* were called with.
*/
static int follow_automount(struct path *path, struct nameidata *nd,
bool *need_mntput)
{
struct vfsmount *mnt;
int err;
if (!path->dentry->d_op || !path->dentry->d_op->d_automount)
return -EREMOTE;
/* We don't want to mount if someone's just doing a stat -
* unless they're stat'ing a directory and appended a '/' to
* the name.
*
* We do, however, want to mount if someone wants to open or
* create a file of any type under the mountpoint, wants to
* traverse through the mountpoint or wants to open the
* mounted directory. Also, autofs may mark negative dentries
* as being automount points. These will need the attentions
* of the daemon to instantiate them before they can be used.
*/
if (!(nd->flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY |
LOOKUP_OPEN | LOOKUP_CREATE | LOOKUP_AUTOMOUNT)) &&
path->dentry->d_inode)
return -EISDIR;
nd->total_link_count++;
if (nd->total_link_count >= 40)
return -ELOOP;
mnt = path->dentry->d_op->d_automount(path);
if (IS_ERR(mnt)) {
/*
* The filesystem is allowed to return -EISDIR here to indicate
* it doesn't want to automount. For instance, autofs would do
* this so that its userspace daemon can mount on this dentry.
*
* However, we can only permit this if it's a terminal point in
* the path being looked up; if it wasn't then the remainder of
* the path is inaccessible and we should say so.
*/
if (PTR_ERR(mnt) == -EISDIR && (nd->flags & LOOKUP_PARENT))
return -EREMOTE;
return PTR_ERR(mnt);
}
if (!mnt) /* mount collision */
return 0;
if (!*need_mntput) {
/* lock_mount() may release path->mnt on error */
mntget(path->mnt);
*need_mntput = true;
}
err = finish_automount(mnt, path);
switch (err) {
case -EBUSY:
/* Someone else made a mount here whilst we were busy */
return 0;
case 0:
path_put(path);
path->mnt = mnt;
path->dentry = dget(mnt->mnt_root);
return 0;
default:
return err;
}
}
/*
* Handle a dentry that is managed in some way.
* - Flagged for transit management (autofs)
* - Flagged as mountpoint
* - Flagged as automount point
*
* This may only be called in refwalk mode.
* On success path->dentry is known positive.
*
* Serialization is taken care of in namespace.c
*/
static int follow_managed(struct path *path, struct nameidata *nd)
{
struct vfsmount *mnt = path->mnt; /* held by caller, must be left alone */
unsigned flags;
bool need_mntput = false;
int ret = 0;
/* Given that we're not holding a lock here, we retain the value in a
* local variable for each dentry as we look at it so that we don't see
* the components of that value change under us */
while (flags = smp_load_acquire(&path->dentry->d_flags),
unlikely(flags & DCACHE_MANAGED_DENTRY)) {
/* Allow the filesystem to manage the transit without i_mutex
* being held. */
if (flags & DCACHE_MANAGE_TRANSIT) {
BUG_ON(!path->dentry->d_op);
BUG_ON(!path->dentry->d_op->d_manage);
ret = path->dentry->d_op->d_manage(path, false);
flags = smp_load_acquire(&path->dentry->d_flags);
if (ret < 0)
break;
}
/* Transit to a mounted filesystem. */
if (flags & DCACHE_MOUNTED) {
struct vfsmount *mounted = lookup_mnt(path);
if (mounted) {
dput(path->dentry);
if (need_mntput)
mntput(path->mnt);
path->mnt = mounted;
path->dentry = dget(mounted->mnt_root);
need_mntput = true;
continue;
}
/* Something is mounted on this dentry in another
* namespace and/or whatever was mounted there in this
* namespace got unmounted before lookup_mnt() could
* get it */
}
/* Handle an automount point */
if (flags & DCACHE_NEED_AUTOMOUNT) {
ret = follow_automount(path, nd, &need_mntput);
if (ret < 0)
break;
continue;
}
/* We didn't change the current path point */
break;
}
if (need_mntput && path->mnt == mnt)
mntput(path->mnt);
if (need_mntput)
nd->flags |= LOOKUP_JUMPED;
if (ret == -EISDIR || !ret)
ret = 1;
if (ret > 0 && unlikely(d_flags_negative(flags)))
ret = -ENOENT;
if (unlikely(ret < 0))
path_put_conditional(path, nd);
return ret;
}
int follow_down_one(struct path *path)
{
struct vfsmount *mounted;
mounted = lookup_mnt(path);
if (mounted) {
dput(path->dentry);
mntput(path->mnt);
path->mnt = mounted;
path->dentry = dget(mounted->mnt_root);
return 1;
}
return 0;
}
EXPORT_SYMBOL(follow_down_one);
static inline int managed_dentry_rcu(const struct path *path)
{
return (path->dentry->d_flags & DCACHE_MANAGE_TRANSIT) ?
path->dentry->d_op->d_manage(path, true) : 0;
}
/*
* Try to skip to top of mountpoint pile in rcuwalk mode. Fail if
* we meet a managed dentry that would need blocking.
*/
static bool __follow_mount_rcu(struct nameidata *nd, struct path *path,
struct inode **inode, unsigned *seqp)
{
for (;;) {
struct mount *mounted;
/*
* Don't forget we might have a non-mountpoint managed dentry
* that wants to block transit.
*/
switch (managed_dentry_rcu(path)) {
case -ECHILD:
default:
return false;
case -EISDIR:
return true;
case 0:
break;
}
if (!d_mountpoint(path->dentry))
return !(path->dentry->d_flags & DCACHE_NEED_AUTOMOUNT);
mounted = __lookup_mnt(path->mnt, path->dentry);
if (!mounted)
break;
path->mnt = &mounted->mnt;
path->dentry = mounted->mnt.mnt_root;
nd->flags |= LOOKUP_JUMPED;
*seqp = read_seqcount_begin(&path->dentry->d_seq);
/*
* Update the inode too. We don't need to re-check the
* dentry sequence number here after this d_inode read,
* because a mount-point is always pinned.
*/
*inode = path->dentry->d_inode;
}
return !read_seqretry(&mount_lock, nd->m_seq) &&
!(path->dentry->d_flags & DCACHE_NEED_AUTOMOUNT);
}
static int follow_dotdot_rcu(struct nameidata *nd)
{
struct inode *inode = nd->inode;
while (1) {
if (path_equal(&nd->path, &nd->root))
break;
if (nd->path.dentry != nd->path.mnt->mnt_root) {
struct dentry *old = nd->path.dentry;
struct dentry *parent = old->d_parent;
unsigned seq;
inode = parent->d_inode;
seq = read_seqcount_begin(&parent->d_seq);
if (unlikely(read_seqcount_retry(&old->d_seq, nd->seq)))
return -ECHILD;
nd->path.dentry = parent;
nd->seq = seq;
if (unlikely(!path_connected(&nd->path)))
return -ENOENT;
break;
} else {
struct mount *mnt = real_mount(nd->path.mnt);
struct mount *mparent = mnt->mnt_parent;
struct dentry *mountpoint = mnt->mnt_mountpoint;
struct inode *inode2 = mountpoint->d_inode;
unsigned seq = read_seqcount_begin(&mountpoint->d_seq);
if (unlikely(read_seqretry(&mount_lock, nd->m_seq)))
return -ECHILD;
if (&mparent->mnt == nd->path.mnt)
break;
/* we know that mountpoint was pinned */
nd->path.dentry = mountpoint;
nd->path.mnt = &mparent->mnt;
inode = inode2;
nd->seq = seq;
}
}
while (unlikely(d_mountpoint(nd->path.dentry))) {
struct mount *mounted;
mounted = __lookup_mnt(nd->path.mnt, nd->path.dentry);
if (unlikely(read_seqretry(&mount_lock, nd->m_seq)))
return -ECHILD;
if (!mounted)
break;
nd->path.mnt = &mounted->mnt;
nd->path.dentry = mounted->mnt.mnt_root;
inode = nd->path.dentry->d_inode;
nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
}
nd->inode = inode;
return 0;
}
/*
* Follow down to the covering mount currently visible to userspace. At each
* point, the filesystem owning that dentry may be queried as to whether the
* caller is permitted to proceed or not.
*/
int follow_down(struct path *path)
{
unsigned managed;
int ret;
while (managed = READ_ONCE(path->dentry->d_flags),
unlikely(managed & DCACHE_MANAGED_DENTRY)) {
/* Allow the filesystem to manage the transit without i_mutex
* being held.
*
* We indicate to the filesystem if someone is trying to mount
* something here. This gives autofs the chance to deny anyone
* other than its daemon the right to mount on its
* superstructure.
*
* The filesystem may sleep at this point.
*/
if (managed & DCACHE_MANAGE_TRANSIT) {
BUG_ON(!path->dentry->d_op);
BUG_ON(!path->dentry->d_op->d_manage);
ret = path->dentry->d_op->d_manage(path, false);
if (ret < 0)
return ret == -EISDIR ? 0 : ret;
}
/* Transit to a mounted filesystem. */
if (managed & DCACHE_MOUNTED) {
struct vfsmount *mounted = lookup_mnt(path);
if (!mounted)
break;
dput(path->dentry);
mntput(path->mnt);
path->mnt = mounted;
path->dentry = dget(mounted->mnt_root);
continue;
}
/* Don't handle automount points here */
break;
}
return 0;
}
EXPORT_SYMBOL(follow_down);
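/*
 * Illustrative sketch (not part of this file): stepping a held path
 * down onto whatever is mounted over it, as autofs-style callers do.
 * The references travel with the path, so one final path_put() is
 * enough whether or not a transit happened.
 *
 *	int err = follow_down(&path);	// may sleep; 0 on success
 *
 *	if (!err) {
 *		// path now refers to the visible covering mount, if any
 *	}
 *	path_put(&path);
 */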
/*
* Skip to top of mountpoint pile in refwalk mode for follow_dotdot()
*/
static void follow_mount(struct path *path)
{
while (d_mountpoint(path->dentry)) {
struct vfsmount *mounted = lookup_mnt(path);
if (!mounted)
break;
dput(path->dentry);
mntput(path->mnt);
path->mnt = mounted;
path->dentry = dget(mounted->mnt_root);
}
}
static int path_parent_directory(struct path *path)
{
struct dentry *old = path->dentry;
/* rare case of legitimate dget_parent()... */
path->dentry = dget_parent(path->dentry);
dput(old);
if (unlikely(!path_connected(path)))
return -ENOENT;
return 0;
}
static int follow_dotdot(struct nameidata *nd)
{
while(1) {
if (path_equal(&nd->path, &nd->root))
break;
if (nd->path.dentry != nd->path.mnt->mnt_root) {
int ret = path_parent_directory(&nd->path);
if (ret)
return ret;
break;
}
if (!follow_up(&nd->path))
break;
}
follow_mount(&nd->path);
nd->inode = nd->path.dentry->d_inode;
return 0;
}
/*
* This looks up the name in dcache and possibly revalidates the found dentry.
* NULL is returned if the dentry does not exist in the cache.
*/
static struct dentry *lookup_dcache(const struct qstr *name,
struct dentry *dir,
unsigned int flags)
{
struct dentry *dentry = d_lookup(dir, name);
if (dentry) {
int error = d_revalidate(dentry, flags);
if (unlikely(error <= 0)) {
if (!error)
d_invalidate(dentry);
dput(dentry);
return ERR_PTR(error);
}
}
return dentry;
}
/*
* Parent directory has inode locked exclusive. This is one
* and only case when ->lookup() gets called on non in-lookup
* dentries - as the matter of fact, this only gets called
* when directory is guaranteed to have no in-lookup children
* at all.
*/
static struct dentry *__lookup_hash(const struct qstr *name,
struct dentry *base, unsigned int flags)
{
struct dentry *dentry = lookup_dcache(name, base, flags);
struct dentry *old;
struct inode *dir = base->d_inode;
if (dentry)
return dentry;
/* Don't create child dentry for a dead directory. */
if (unlikely(IS_DEADDIR(dir)))
return ERR_PTR(-ENOENT);
dentry = d_alloc(base, name);
if (unlikely(!dentry))
return ERR_PTR(-ENOMEM);
old = dir->i_op->lookup(dir, dentry, flags);
if (unlikely(old)) {
dput(dentry);
dentry = old;
}
return dentry;
}
static int lookup_fast(struct nameidata *nd,
struct path *path, struct inode **inode,
unsigned *seqp)
{
struct vfsmount *mnt = nd->path.mnt;
struct dentry *dentry, *parent = nd->path.dentry;
int status = 1;
int err;
/*
* Rename seqlock is not required here because in the off chance
* of a false negative due to a concurrent rename, the caller is
* going to fall back to non-racy lookup.
*/
if (nd->flags & LOOKUP_RCU) {
unsigned seq;
bool negative;
dentry = __d_lookup_rcu(parent, &nd->last, &seq);
if (unlikely(!dentry)) {
if (unlazy_walk(nd))
return -ECHILD;
return 0;
}
/*
* This sequence count validates that the inode matches
* the dentry name information from lookup.
*/
*inode = d_backing_inode(dentry);
negative = d_is_negative(dentry);
if (unlikely(read_seqcount_retry(&dentry->d_seq, seq)))
return -ECHILD;
/*
* This sequence count validates that the parent had no
* changes while we did the lookup of the dentry above.
*
* The memory barrier in read_seqcount_begin of child is
* enough, we can use __read_seqcount_retry here.
*/
if (unlikely(__read_seqcount_retry(&parent->d_seq, nd->seq)))
return -ECHILD;
*seqp = seq;
status = d_revalidate(dentry, nd->flags);
if (likely(status > 0)) {
/*
* Note: do negative dentry check after revalidation in
* case that drops it.
*/
if (unlikely(negative))
return -ENOENT;
path->mnt = mnt;
path->dentry = dentry;
if (likely(__follow_mount_rcu(nd, path, inode, seqp)))
return 1;
}
if (unlazy_child(nd, dentry, seq))
return -ECHILD;
if (unlikely(status == -ECHILD))
/* we'd been told to redo it in non-rcu mode */
status = d_revalidate(dentry, nd->flags);
} else {
dentry = __d_lookup(parent, &nd->last);
if (unlikely(!dentry))
return 0;
status = d_revalidate(dentry, nd->flags);
}
if (unlikely(status <= 0)) {
if (!status)
d_invalidate(dentry);
dput(dentry);
return status;
}
path->mnt = mnt;
path->dentry = dentry;
err = follow_managed(path, nd);
if (likely(err > 0))
*inode = d_backing_inode(path->dentry);
return err;
}
/* Fast lookup failed, do it the slow way */
static struct dentry *__lookup_slow(const struct qstr *name,
struct dentry *dir,
unsigned int flags)
{
struct dentry *dentry, *old;
struct inode *inode = dir->d_inode;
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
/* Don't go there if it's already dead */
if (unlikely(IS_DEADDIR(inode)))
return ERR_PTR(-ENOENT);
again:
dentry = d_alloc_parallel(dir, name, &wq);
if (IS_ERR(dentry))
return dentry;
if (unlikely(!d_in_lookup(dentry))) {
int error = d_revalidate(dentry, flags);
if (unlikely(error <= 0)) {
if (!error) {
d_invalidate(dentry);
dput(dentry);
goto again;
}
dput(dentry);
dentry = ERR_PTR(error);
}
} else {
old = inode->i_op->lookup(inode, dentry, flags);
d_lookup_done(dentry);
if (unlikely(old)) {
dput(dentry);
dentry = old;
}
}
return dentry;
}
static struct dentry *lookup_slow(const struct qstr *name,
struct dentry *dir,
unsigned int flags)
{
struct inode *inode = dir->d_inode;
struct dentry *res;
inode_lock_shared(inode);
res = __lookup_slow(name, dir, flags);
inode_unlock_shared(inode);
return res;
}
static inline int may_lookup(struct nameidata *nd)
{
if (nd->flags & LOOKUP_RCU) {
int err = inode_permission(nd->inode, MAY_EXEC|MAY_NOT_BLOCK);
if (err != -ECHILD)
return err;
if (unlazy_walk(nd))
return -ECHILD;
}
return inode_permission(nd->inode, MAY_EXEC);
}
static inline int handle_dots(struct nameidata *nd, int type)
{
if (type == LAST_DOTDOT) {
if (!nd->root.mnt)
set_root(nd);
if (nd->flags & LOOKUP_RCU) {
return follow_dotdot_rcu(nd);
} else
return follow_dotdot(nd);
}
return 0;
}
static int pick_link(struct nameidata *nd, struct path *link,
struct inode *inode, unsigned seq)
{
int error;
struct saved *last;
if (unlikely(nd->total_link_count++ >= MAXSYMLINKS)) {
path_to_nameidata(link, nd);
return -ELOOP;
}
if (!(nd->flags & LOOKUP_RCU)) {
if (link->mnt == nd->path.mnt)
mntget(link->mnt);
}
error = nd_alloc_stack(nd);
if (unlikely(error)) {
if (error == -ECHILD) {
if (unlikely(!legitimize_path(nd, link, seq))) {
drop_links(nd);
nd->depth = 0;
nd->flags &= ~LOOKUP_RCU;
nd->path.mnt = NULL;
nd->path.dentry = NULL;
rcu_read_unlock();
} else if (likely(unlazy_walk(nd)) == 0)
error = nd_alloc_stack(nd);
}
if (error) {
path_put(link);
return error;
}
}
last = nd->stack + nd->depth++;
last->link = *link;
clear_delayed_call(&last->done);
nd->link_inode = inode;
last->seq = seq;
return 1;
}
enum {WALK_FOLLOW = 1, WALK_MORE = 2};
/*
* Do we need to follow links? We _really_ want to be able
* to do this check without having to look at inode->i_op,
* so we keep a cache of "no, this doesn't need follow_link"
* for the common case.
*/
static inline int step_into(struct nameidata *nd, struct path *path,
int flags, struct inode *inode, unsigned seq)
{
if (!(flags & WALK_MORE) && nd->depth)
put_link(nd);
if (likely(!d_is_symlink(path->dentry)) ||
!(flags & WALK_FOLLOW || nd->flags & LOOKUP_FOLLOW)) {
/* not a symlink or should not follow */
path_to_nameidata(path, nd);
nd->inode = inode;
nd->seq = seq;
return 0;
}
/* make sure that d_is_symlink above matches inode */
if (nd->flags & LOOKUP_RCU) {
if (read_seqcount_retry(&path->dentry->d_seq, seq))
return -ECHILD;
}
return pick_link(nd, path, inode, seq);
}
static int walk_component(struct nameidata *nd, int flags)
{
struct path path;
struct inode *inode;
unsigned seq;
int err;
/*
* "." and ".." are special - ".." especially so because it has
* to be able to know about the current root directory and
* parent relationships.
*/
if (unlikely(nd->last_type != LAST_NORM)) {
err = handle_dots(nd, nd->last_type);
if (!(flags & WALK_MORE) && nd->depth)
put_link(nd);
return err;
}
err = lookup_fast(nd, &path, &inode, &seq);
if (unlikely(err <= 0)) {
if (err < 0)
return err;
path.dentry = lookup_slow(&nd->last, nd->path.dentry,
nd->flags);
if (IS_ERR(path.dentry))
return PTR_ERR(path.dentry);
path.mnt = nd->path.mnt;
err = follow_managed(&path, nd);
if (unlikely(err < 0))
return err;
seq = 0; /* we are already out of RCU mode */
inode = d_backing_inode(path.dentry);
}
return step_into(nd, &path, flags, inode, seq);
}
/*
* We can do the critical dentry name comparison and hashing
* operations one word at a time, but we are limited to:
*
* - Architectures with fast unaligned word accesses. We could
* do a "get_unaligned()" if this helps and is sufficiently
* fast.
*
* - non-CONFIG_DEBUG_PAGEALLOC configurations (so that we
* do not trap on the (extremely unlikely) case of a page
* crossing operation).
*
* - Furthermore, we need an efficient 64-bit compile for the
* 64-bit case in order to generate the "number of bytes in
* the final mask". Again, that could be replaced with an
* efficient population count instruction or similar.
*/
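/*
 * Illustrative userspace sketch (hedged: wrapped in "#if 0", never part
 * of the kernel build) of what "masking the final word" means on a
 * little-endian 64-bit machine. The helper is a simplified stand-in for
 * the kernel's bytemask_from_count() and only handles 0..7 tail bytes.
 */
#if 0
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* keep only the low 'count' bytes of a little-endian word (count < 8) */
static uint64_t demo_bytemask_from_count(unsigned int count)
{
	return (1ULL << (count * 8)) - 1;
}

int main(void)
{
	char name[8] = "foobar";	/* 6 payload bytes, zero padded */
	uint64_t word;

	memcpy(&word, name, sizeof(word)); /* stands in for load_unaligned_zeropad() */
	word &= demo_bytemask_from_count(6);
	printf("masked word: %016llx\n", (unsigned long long)word);
	return 0;
}
#endif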
#ifdef CONFIG_DCACHE_WORD_ACCESS
#include <asm/word-at-a-time.h>
#ifdef HASH_MIX
/* Architecture provides HASH_MIX and fold_hash() in <asm/hash.h> */
#elif defined(CONFIG_64BIT)
/*
* Register pressure in the mixing function is an issue, particularly
* on 32-bit x86, but almost any function requires one state value and
* one temporary. Instead, use a function designed for two state values
* and no temporaries.
*
* This function cannot create a collision in only two iterations, so
* we have two iterations to achieve avalanche. In those two iterations,
* we have six layers of mixing, which is enough to spread one bit's
* influence out to 2^6 = 64 state bits.
*
* Rotate constants are scored by considering either 64 one-bit input
* deltas or 64*63/2 = 2016 two-bit input deltas, and finding the
* probability of that delta causing a change to each of the 128 output
* bits, using a sample of random initial states.
*
* The Shannon entropy of the computed probabilities is then summed
* to produce a score. Ideally, any input change has a 50% chance of
* toggling any given output bit.
*
* Mixing scores (in bits) for (12,45):
* Input delta: 1-bit 2-bit
* 1 round: 713.3 42542.6
* 2 rounds: 2753.7 140389.8
* 3 rounds: 5954.1 233458.2
* 4 rounds: 7862.6 256672.2
* Perfect: 8192 258048
* (64*128) (64*63/2 * 128)
*/
#define HASH_MIX(x, y, a) \
( x ^= (a), \
y ^= x, x = rol64(x,12),\
x += y, y = rol64(y,45),\
y *= 9 )
/*
* Fold two longs into one 32-bit hash value. This must be fast, but
* latency isn't quite as critical, as there is a fair bit of additional
* work done before the hash value is used.
*/
static inline unsigned int fold_hash(unsigned long x, unsigned long y)
{
y ^= x * GOLDEN_RATIO_64;
y *= GOLDEN_RATIO_64;
return y >> 32;
}
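/*
 * Minimal userspace sketch of the pipeline above (hedged: wrapped in
 * "#if 0", not compiled into the kernel; GOLDEN_RATIO_64 is the
 * constant from include/linux/hash.h). Two 8-byte words go through the
 * HASH_MIX() recurrence, then the fold_hash() step reduces the two
 * state words to 32 bits.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define GOLDEN_RATIO_64 0x61C8864680B583EBull

static uint64_t rol64(uint64_t w, unsigned int s)
{
	return (w << s) | (w >> (64 - s));
}

int main(void)
{
	/* "/usr/foo" and "bar" as little-endian words, zero salt */
	uint64_t words[2] = { 0x6f6f662f7273752full, 0x0000000000726162ull };
	uint64_t x = 0, y = 0;
	int i;

	for (i = 0; i < 2; i++) {	/* expansion of HASH_MIX(x, y, words[i]) */
		x ^= words[i];
		y ^= x; x = rol64(x, 12);
		x += y; y = rol64(y, 45);
		y *= 9;
	}
	y ^= x * GOLDEN_RATIO_64;	/* fold_hash(x, y) */
	y *= GOLDEN_RATIO_64;
	printf("hash = %08x\n", (unsigned int)(y >> 32));
	return 0;
}
#endif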
#else /* 32-bit case */
/*
* Mixing scores (in bits) for (7,20):
* Input delta: 1-bit 2-bit
* 1 round: 330.3 9201.6
* 2 rounds: 1246.4 25475.4
* 3 rounds: 1907.1 31295.1
* 4 rounds: 2042.3 31718.6
* Perfect: 2048 31744
* (32*64) (32*31/2 * 64)
*/
#define HASH_MIX(x, y, a) \
( x ^= (a), \
y ^= x, x = rol32(x, 7),\
x += y, y = rol32(y,20),\
y *= 9 )
static inline unsigned int fold_hash(unsigned long x, unsigned long y)
{
/* Use arch-optimized multiply if one exists */
return __hash_32(y ^ __hash_32(x));
}
#endif
/*
* Return the hash of a string of known length. This is carefully
* designed to match hash_name(), which is the more critical function.
* In particular, we must end by hashing a final word containing 0..7
* payload bytes, to match the way that hash_name() iterates until it
* finds the delimiter after the name.
*/
unsigned int full_name_hash(const void *salt, const char *name, unsigned int len)
{
unsigned long a, x = 0, y = (unsigned long)salt;
for (;;) {
if (!len)
goto done;
a = load_unaligned_zeropad(name);
if (len < sizeof(unsigned long))
break;
HASH_MIX(x, y, a);
name += sizeof(unsigned long);
len -= sizeof(unsigned long);
}
x ^= a & bytemask_from_count(len);
done:
return fold_hash(x, y);
}
EXPORT_SYMBOL(full_name_hash);
/* Return the "hash_len" (hash and length) of a null-terminated string */
u64 hashlen_string(const void *salt, const char *name)
{
unsigned long a = 0, x = 0, y = (unsigned long)salt;
unsigned long adata, mask, len;
const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
len = 0;
goto inside;
do {
HASH_MIX(x, y, a);
len += sizeof(unsigned long);
inside:
a = load_unaligned_zeropad(name+len);
} while (!has_zero(a, &adata, &constants));
adata = prep_zero_mask(a, adata, &constants);
mask = create_zero_mask(adata);
x ^= a & zero_bytemask(mask);
return hashlen_create(fold_hash(x, y), len + find_zero(mask));
}
EXPORT_SYMBOL(hashlen_string);
/*
* Calculate the length and hash of the path component, and
* return the "hash_len" as the result.
*/
static inline u64 hash_name(const void *salt, const char *name)
{
unsigned long a = 0, b, x = 0, y = (unsigned long)salt;
unsigned long adata, bdata, mask, len;
const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
len = 0;
goto inside;
do {
HASH_MIX(x, y, a);
len += sizeof(unsigned long);
inside:
a = load_unaligned_zeropad(name+len);
b = a ^ REPEAT_BYTE('/');
} while (!(has_zero(a, &adata, &constants) | has_zero(b, &bdata, &constants)));
adata = prep_zero_mask(a, adata, &constants);
bdata = prep_zero_mask(b, bdata, &constants);
mask = create_zero_mask(adata | bdata);
x ^= a & zero_bytemask(mask);
return hashlen_create(fold_hash(x, y), len + find_zero(mask));
}
#else /* !CONFIG_DCACHE_WORD_ACCESS: Slow, byte-at-a-time version */
/* Return the hash of a string of known length */
unsigned int full_name_hash(const void *salt, const char *name, unsigned int len)
{
unsigned long hash = init_name_hash(salt);
while (len--)
hash = partial_name_hash((unsigned char)*name++, hash);
return end_name_hash(hash);
}
EXPORT_SYMBOL(full_name_hash);
/* Return the "hash_len" (hash and length) of a null-terminated string */
u64 hashlen_string(const void *salt, const char *name)
{
unsigned long hash = init_name_hash(salt);
unsigned long len = 0, c;
c = (unsigned char)*name;
while (c) {
len++;
hash = partial_name_hash(c, hash);
c = (unsigned char)name[len];
}
return hashlen_create(end_name_hash(hash), len);
}
EXPORT_SYMBOL(hashlen_string);
/*
* We know there's a real path component here of at least
* one character.
*/
static inline u64 hash_name(const void *salt, const char *name)
{
unsigned long hash = init_name_hash(salt);
unsigned long len = 0, c;
c = (unsigned char)*name;
do {
len++;
hash = partial_name_hash(c, hash);
c = (unsigned char)name[len];
} while (c && c != '/');
return hashlen_create(end_name_hash(hash), len);
}
#endif
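/*
 * In both variants the u64 "hash_len" packs the component length into
 * the high 32 bits and the hash into the low 32; the helpers in
 * include/linux/stringhash.h are essentially:
 *
 *	hashlen_hash(hashlen)     -> (u32)(hashlen)
 *	hashlen_len(hashlen)      -> (u32)((hashlen) >> 32)
 *	hashlen_create(hash, len) -> ((u64)(len) << 32) | (u32)(hash)
 *
 * which is why link_path_walk() below can advance the name pointer by
 * hashlen_len(hash_len) without a separate strlen().
 */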
/*
* Name resolution.
* This is the basic name resolution function, turning a pathname into
* the final dentry. We expect the starting point (nd->path) to be
* positive and a directory.
*
* Returns 0 and nd will have valid dentry and mnt on success.
* Returns error and drops reference to input namei data on failure.
*/
static int link_path_walk(const char *name, struct nameidata *nd)
{
int err;
if (IS_ERR(name))
return PTR_ERR(name);
while (*name=='/')
name++;
if (!*name)
return 0;
/* At this point we know we have a real path component. */
for(;;) {
u64 hash_len;
int type;
err = may_lookup(nd);
if (err)
return err;
hash_len = hash_name(nd->path.dentry, name);
type = LAST_NORM;
if (name[0] == '.') switch (hashlen_len(hash_len)) {
case 2:
if (name[1] == '.') {
type = LAST_DOTDOT;
nd->flags |= LOOKUP_JUMPED;
}
break;
case 1:
type = LAST_DOT;
}
if (likely(type == LAST_NORM)) {
struct dentry *parent = nd->path.dentry;
nd->flags &= ~LOOKUP_JUMPED;
if (unlikely(parent->d_flags & DCACHE_OP_HASH)) {
struct qstr this = { { .hash_len = hash_len }, .name = name };
err = parent->d_op->d_hash(parent, &this);
if (err < 0)
return err;
hash_len = this.hash_len;
name = this.name;
}
}
nd->last.hash_len = hash_len;
nd->last.name = name;
nd->last_type = type;
name += hashlen_len(hash_len);
if (!*name)
goto OK;
/*
* If it wasn't NUL, we know it was '/'. Skip that
* slash, and continue until no more slashes.
*/
do {
name++;
} while (unlikely(*name == '/'));
if (unlikely(!*name)) {
OK:
/* pathname body, done */
if (!nd->depth)
return 0;
name = nd->stack[nd->depth - 1].name;
/* trailing symlink, done */
if (!name)
return 0;
/* last component of nested symlink */
err = walk_component(nd, WALK_FOLLOW);
} else {
/* not the last component */
err = walk_component(nd, WALK_FOLLOW | WALK_MORE);
}
if (err < 0)
return err;
if (err) {
const char *s = get_link(nd);
if (IS_ERR(s))
return PTR_ERR(s);
err = 0;
if (unlikely(!s)) {
/* jumped */
put_link(nd);
} else {
nd->stack[nd->depth - 1].name = name;
name = s;
continue;
}
}
if (unlikely(!d_can_lookup(nd->path.dentry))) {
if (nd->flags & LOOKUP_RCU) {
if (unlazy_walk(nd))
return -ECHILD;
}
return -ENOTDIR;
}
}
}
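/*
 * Userspace sketch ("#if 0", illustrative only) of the component split
 * and the slash-skipping loop above: "//usr//local/bin/" yields the
 * components "usr", "local" and "bin"; in the real code a trailing
 * slash remains detectable afterwards via nd->last.name[nd->last.len].
 */
#if 0
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *name = "//usr//local/bin/";

	while (*name == '/')
		name++;
	while (*name) {
		size_t len = strcspn(name, "/");

		printf("component: %.*s\n", (int)len, name);
		name += len;
		if (!*name)
			break;		/* pathname body done */
		do {			/* skip slashes, as link_path_walk() does */
			name++;
		} while (*name == '/');
	}
	return 0;
}
#endif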
/* must be paired with terminate_walk() */
static const char *path_init(struct nameidata *nd, unsigned flags)
{
const char *s = nd->name->name;
if (!*s)
flags &= ~LOOKUP_RCU;
if (flags & LOOKUP_RCU)
rcu_read_lock();
nd->last_type = LAST_ROOT; /* if there are only slashes... */
nd->flags = flags | LOOKUP_JUMPED | LOOKUP_PARENT;
nd->depth = 0;
if (flags & LOOKUP_ROOT) {
struct dentry *root = nd->root.dentry;
struct inode *inode = root->d_inode;
if (*s && unlikely(!d_can_lookup(root)))
return ERR_PTR(-ENOTDIR);
nd->path = nd->root;
nd->inode = inode;
if (flags & LOOKUP_RCU) {
nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
nd->root_seq = nd->seq;
nd->m_seq = read_seqbegin(&mount_lock);
} else {
path_get(&nd->path);
}
return s;
}
nd->root.mnt = NULL;
nd->path.mnt = NULL;
nd->path.dentry = NULL;
nd->m_seq = read_seqbegin(&mount_lock);
if (*s == '/') {
set_root(nd);
if (likely(!nd_jump_root(nd)))
return s;
return ERR_PTR(-ECHILD);
} else if (nd->dfd == AT_FDCWD) {
if (flags & LOOKUP_RCU) {
struct fs_struct *fs = current->fs;
unsigned seq;
do {
seq = read_seqcount_begin(&fs->seq);
nd->path = fs->pwd;
nd->inode = nd->path.dentry->d_inode;
nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
} while (read_seqcount_retry(&fs->seq, seq));
} else {
get_fs_pwd(current->fs, &nd->path);
nd->inode = nd->path.dentry->d_inode;
}
return s;
} else {
/* Caller must check execute permissions on the starting path component */
struct fd f = fdget_raw(nd->dfd);
struct dentry *dentry;
if (!f.file)
return ERR_PTR(-EBADF);
dentry = f.file->f_path.dentry;
if (*s && unlikely(!d_can_lookup(dentry))) {
fdput(f);
return ERR_PTR(-ENOTDIR);
}
nd->path = f.file->f_path;
if (flags & LOOKUP_RCU) {
nd->inode = nd->path.dentry->d_inode;
nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
} else {
path_get(&nd->path);
nd->inode = nd->path.dentry->d_inode;
}
fdput(f);
return s;
}
}
static const char *trailing_symlink(struct nameidata *nd)
{
const char *s;
int error = may_follow_link(nd);
if (unlikely(error))
return ERR_PTR(error);
nd->flags |= LOOKUP_PARENT;
nd->stack[0].name = NULL;
s = get_link(nd);
return s ? s : "";
}
static inline int lookup_last(struct nameidata *nd)
{
if (nd->last_type == LAST_NORM && nd->last.name[nd->last.len])
nd->flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY;
nd->flags &= ~LOOKUP_PARENT;
return walk_component(nd, 0);
}
static int handle_lookup_down(struct nameidata *nd)
{
struct path path = nd->path;
struct inode *inode = nd->inode;
unsigned seq = nd->seq;
int err;
if (nd->flags & LOOKUP_RCU) {
/*
* don't bother with unlazy_walk on failure - we are
* at the very beginning of walk, so we lose nothing
* if we simply redo everything in non-RCU mode
*/
if (unlikely(!__follow_mount_rcu(nd, &path, &inode, &seq)))
return -ECHILD;
} else {
dget(path.dentry);
err = follow_managed(&path, nd);
if (unlikely(err < 0))
return err;
inode = d_backing_inode(path.dentry);
seq = 0;
}
path_to_nameidata(&path, nd);
nd->inode = inode;
nd->seq = seq;
return 0;
}
/* Returns 0 and nd will be valid on success; returns an error otherwise. */
static int path_lookupat(struct nameidata *nd, unsigned flags, struct path *path)
{
const char *s = path_init(nd, flags);
int err;
if (unlikely(flags & LOOKUP_DOWN) && !IS_ERR(s)) {
err = handle_lookup_down(nd);
if (unlikely(err < 0))
s = ERR_PTR(err);
}
while (!(err = link_path_walk(s, nd))
&& ((err = lookup_last(nd)) > 0)) {
s = trailing_symlink(nd);
}
if (!err)
err = complete_walk(nd);
if (!err && nd->flags & LOOKUP_DIRECTORY)
if (!d_can_lookup(nd->path.dentry))
err = -ENOTDIR;
if (!err) {
*path = nd->path;
nd->path.mnt = NULL;
nd->path.dentry = NULL;
}
terminate_walk(nd);
return err;
}
int filename_lookup(int dfd, struct filename *name, unsigned flags,
struct path *path, struct path *root)
{
int retval;
struct nameidata nd;
if (IS_ERR(name))
return PTR_ERR(name);
if (unlikely(root)) {
nd.root = *root;
flags |= LOOKUP_ROOT;
}
set_nameidata(&nd, dfd, name);
retval = path_lookupat(&nd, flags | LOOKUP_RCU, path);
if (unlikely(retval == -ECHILD))
retval = path_lookupat(&nd, flags, path);
if (unlikely(retval == -ESTALE))
retval = path_lookupat(&nd, flags | LOOKUP_REVAL, path);
if (likely(!retval))
audit_inode(name, path->dentry, 0);
restore_nameidata();
putname(name);
return retval;
}
/* Returns 0 and nd will be valid on success; returns an error otherwise. */
static int path_parentat(struct nameidata *nd, unsigned flags,
struct path *parent)
{
const char *s = path_init(nd, flags);
int err = link_path_walk(s, nd);
if (!err)
err = complete_walk(nd);
if (!err) {
*parent = nd->path;
nd->path.mnt = NULL;
nd->path.dentry = NULL;
}
terminate_walk(nd);
return err;
}
static struct filename *filename_parentat(int dfd, struct filename *name,
unsigned int flags, struct path *parent,
struct qstr *last, int *type)
{
int retval;
struct nameidata nd;
if (IS_ERR(name))
return name;
set_nameidata(&nd, dfd, name);
retval = path_parentat(&nd, flags | LOOKUP_RCU, parent);
if (unlikely(retval == -ECHILD))
retval = path_parentat(&nd, flags, parent);
if (unlikely(retval == -ESTALE))
retval = path_parentat(&nd, flags | LOOKUP_REVAL, parent);
if (likely(!retval)) {
*last = nd.last;
*type = nd.last_type;
audit_inode(name, parent->dentry, AUDIT_INODE_PARENT);
} else {
putname(name);
name = ERR_PTR(retval);
}
restore_nameidata();
return name;
}
/* does lookup, returns the object with parent locked */
struct dentry *kern_path_locked(const char *name, struct path *path)
{
struct filename *filename;
struct dentry *d;
struct qstr last;
int type;
filename = filename_parentat(AT_FDCWD, getname_kernel(name), 0, path,
&last, &type);
if (IS_ERR(filename))
return ERR_CAST(filename);
if (unlikely(type != LAST_NORM)) {
path_put(path);
putname(filename);
return ERR_PTR(-EINVAL);
}
inode_lock_nested(path->dentry->d_inode, I_MUTEX_PARENT);
d = __lookup_hash(&last, path->dentry, 0);
if (IS_ERR(d)) {
inode_unlock(path->dentry->d_inode);
path_put(path);
}
putname(filename);
return d;
}
int kern_path(const char *name, unsigned int flags, struct path *path)
{
return filename_lookup(AT_FDCWD, getname_kernel(name),
flags, path, NULL);
}
EXPORT_SYMBOL(kern_path);
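/*
 * Typical in-kernel usage sketch ("#if 0", illustrative fragment): the
 * caller owns the reference on success and must path_put() it.
 */
#if 0
	struct path path;
	int err = kern_path("/dev/null", LOOKUP_FOLLOW, &path);

	if (!err) {
		/* ... use path.mnt / path.dentry ... */
		path_put(&path);	/* drop the reference kern_path() took */
	}
#endif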
/**
* vfs_path_lookup - lookup a file path relative to a dentry-vfsmount pair
* @dentry: pointer to dentry of the base directory
* @mnt: pointer to vfs mount of the base directory
* @name: pointer to file name
* @flags: lookup flags
* @path: pointer to struct path to fill
*/
int vfs_path_lookup(struct dentry *dentry, struct vfsmount *mnt,
const char *name, unsigned int flags,
struct path *path)
{
struct path root = {.mnt = mnt, .dentry = dentry};
/* the first argument of filename_lookup() is ignored with root */
return filename_lookup(AT_FDCWD, getname_kernel(name),
flags , path, &root);
}
EXPORT_SYMBOL(vfs_path_lookup);
static int lookup_one_len_common(const char *name, struct dentry *base,
int len, struct qstr *this)
{
this->name = name;
this->len = len;
this->hash = full_name_hash(base, name, len);
if (!len)
return -EACCES;
if (unlikely(name[0] == '.')) {
if (len < 2 || (len == 2 && name[1] == '.'))
return -EACCES;
}
while (len--) {
unsigned int c = *(const unsigned char *)name++;
if (c == '/' || c == '\0')
return -EACCES;
}
/*
* See if the low-level filesystem might want
* to use its own hash..
*/
if (base->d_flags & DCACHE_OP_HASH) {
int err = base->d_op->d_hash(base, this);
if (err < 0)
return err;
}
return inode_permission(base->d_inode, MAY_EXEC);
}
/**
* try_lookup_one_len - filesystem helper to lookup single pathname component
* @name: pathname component to lookup
* @base: base directory to lookup from
* @len: length of @name, in bytes
*
* Look up a dentry by name in the dcache, returning NULL if it does not
* currently exist. The function does not try to create a dentry.
*
* Note that this routine is purely a helper for filesystem usage and should
* not be called by generic code.
*
* The caller must hold base->i_mutex.
*/
struct dentry *try_lookup_one_len(const char *name, struct dentry *base, int len)
{
struct qstr this;
int err;
WARN_ON_ONCE(!inode_is_locked(base->d_inode));
err = lookup_one_len_common(name, base, len, &this);
if (err)
return ERR_PTR(err);
return lookup_dcache(&this, base, 0);
}
EXPORT_SYMBOL(try_lookup_one_len);
/**
* lookup_one_len - filesystem helper to lookup single pathname component
* @name: pathname component to lookup
* @base: base directory to lookup from
* @len: length of @name, in bytes
*
* Note that this routine is purely a helper for filesystem usage and should
* not be called by generic code.
*
* The caller must hold base->i_mutex.
*/
struct dentry *lookup_one_len(const char *name, struct dentry *base, int len)
{
struct dentry *dentry;
struct qstr this;
int err;
WARN_ON_ONCE(!inode_is_locked(base->d_inode));
err = lookup_one_len_common(name, base, len, &this);
if (err)
return ERR_PTR(err);
dentry = lookup_dcache(&this, base, 0);
return dentry ? dentry : __lookup_slow(&this, base, 0);
}
EXPORT_SYMBOL(lookup_one_len);
/**
* lookup_one_len_unlocked - filesystem helper to lookup single pathname component
* @name: pathname component to lookup
* @base: base directory to lookup from
* @len: length of @name, in bytes
*
* Note that this routine is purely a helper for filesystem usage and should
* not be called by generic code.
*
* Unlike lookup_one_len, it should be called without the parent
* i_mutex held, and will take the i_mutex itself if necessary.
*/
struct dentry *lookup_one_len_unlocked(const char *name,
struct dentry *base, int len)
{
struct qstr this;
int err;
struct dentry *ret;
err = lookup_one_len_common(name, base, len, &this);
if (err)
return ERR_PTR(err);
ret = lookup_dcache(&this, base, 0);
if (!ret)
ret = lookup_slow(&this, base, 0);
return ret;
}
EXPORT_SYMBOL(lookup_one_len_unlocked);
/*
* Like lookup_one_len_unlocked(), except that it yields ERR_PTR(-ENOENT)
* on negatives. Returns known positive or ERR_PTR(); that's what
* most of the users want. Note that pinned negative with unlocked parent
* _can_ become positive at any time, so callers of lookup_one_len_unlocked()
* need to be very careful; pinned positives have ->d_inode stable, so
* this one avoids such problems.
*/
struct dentry *lookup_positive_unlocked(const char *name,
struct dentry *base, int len)
{
struct dentry *ret = lookup_one_len_unlocked(name, base, len);
if (!IS_ERR(ret) && d_flags_negative(smp_load_acquire(&ret->d_flags))) {
dput(ret);
ret = ERR_PTR(-ENOENT);
}
return ret;
}
EXPORT_SYMBOL(lookup_positive_unlocked);
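/*
 * Usage sketch ("#if 0", illustrative fragment; 'parent' is a
 * hypothetical parent dentry, not locked): the -ENOENT mapping spares
 * callers a d_is_negative() check, and ->d_inode of the result is
 * stable because pinned positives stay positive.
 */
#if 0
	struct dentry *d = lookup_positive_unlocked("victim", parent, 6);

	if (IS_ERR(d))
		return PTR_ERR(d);	/* negatives arrive here as -ENOENT */
	/* d->d_inode cannot become NULL while we hold the reference */
	dput(d);
#endif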
#ifdef CONFIG_UNIX98_PTYS
int path_pts(struct path *path)
{
/* Find something mounted on "pts" in the same directory as
* the input path.
*/
struct dentry *child, *parent;
struct qstr this;
int ret;
ret = path_parent_directory(path);
if (ret)
return ret;
parent = path->dentry;
this.name = "pts";
this.len = 3;
child = d_hash_and_lookup(parent, &this);
if (!child)
return -ENOENT;
path->dentry = child;
dput(parent);
follow_mount(path);
return 0;
}
#endif
int user_path_at_empty(int dfd, const char __user *name, unsigned flags,
struct path *path, int *empty)
{
return filename_lookup(dfd, getname_flags(name, flags, empty),
flags, path, NULL);
}
EXPORT_SYMBOL(user_path_at_empty);
/**
* path_mountpoint - look up a path to be umounted
* @nd: lookup context
* @flags: lookup flags
* @path: pointer to container for result
*
* Look up the given name, but don't attempt to revalidate the last component.
* Returns 0 and "path" will be valid on success; Returns error otherwise.
*/
static int
path_mountpoint(struct nameidata *nd, unsigned flags, struct path *path)
{
const char *s = path_init(nd, flags);
int err;
while (!(err = link_path_walk(s, nd)) &&
(err = lookup_last(nd)) > 0) {
s = trailing_symlink(nd);
}
if (!err && (nd->flags & LOOKUP_RCU))
err = unlazy_walk(nd);
if (!err)
err = handle_lookup_down(nd);
if (!err) {
*path = nd->path;
nd->path.mnt = NULL;
nd->path.dentry = NULL;
}
terminate_walk(nd);
return err;
}
static int
filename_mountpoint(int dfd, struct filename *name, struct path *path,
unsigned int flags)
{
struct nameidata nd;
int error;
if (IS_ERR(name))
return PTR_ERR(name);
set_nameidata(&nd, dfd, name);
error = path_mountpoint(&nd, flags | LOOKUP_RCU, path);
if (unlikely(error == -ECHILD))
error = path_mountpoint(&nd, flags, path);
if (unlikely(error == -ESTALE))
error = path_mountpoint(&nd, flags | LOOKUP_REVAL, path);
if (likely(!error))
audit_inode(name, path->dentry, AUDIT_INODE_NOEVAL);
restore_nameidata();
putname(name);
return error;
}
/**
* user_path_mountpoint_at - lookup a path from userland in order to umount it
* @dfd: directory file descriptor
* @name: pathname from userland
* @flags: lookup flags
* @path: pointer to container to hold result
*
* A umount is a special case for path walking. We're not actually interested
* in the inode in this situation, and ESTALE errors can be a problem. We
* simply want to track down the dentry and vfsmount attached at the mountpoint
* and avoid revalidating the last component.
*
* Returns 0 and populates "path" on success.
*/
int
user_path_mountpoint_at(int dfd, const char __user *name, unsigned int flags,
struct path *path)
{
return filename_mountpoint(dfd, getname(name), path, flags);
}
int
kern_path_mountpoint(int dfd, const char *name, struct path *path,
unsigned int flags)
{
return filename_mountpoint(dfd, getname_kernel(name), path, flags);
}
EXPORT_SYMBOL(kern_path_mountpoint);
int __check_sticky(struct inode *dir, struct inode *inode)
{
kuid_t fsuid = current_fsuid();
if (uid_eq(inode->i_uid, fsuid))
return 0;
if (uid_eq(dir->i_uid, fsuid))
return 0;
return !capable_wrt_inode_uidgid(inode, CAP_FOWNER);
}
EXPORT_SYMBOL(__check_sticky);
/*
* Check whether we can remove a link victim from directory dir, check
* whether the type of victim is right.
* 1. We can't do it if dir is read-only (done in permission())
* 2. We should have write and exec permissions on dir
* 3. We can't remove anything from append-only dir
* 4. We can't do anything with immutable dir (done in permission())
* 5. If the sticky bit on dir is set we should either
* a. be owner of dir, or
* b. be owner of victim, or
* c. have CAP_FOWNER capability
* (this is what keeps a world-writable, sticky /tmp safe: users cannot
* remove each other's files there)
* 6. If the victim is append-only or immutable we can't do anything with
* links pointing to it.
* 7. If the victim has an unknown uid or gid we can't change the inode.
* 8. If we were asked to remove a directory and victim isn't one - ENOTDIR.
* 9. If we were asked to remove a non-directory and victim isn't one - EISDIR.
* 10. We can't remove a root or mountpoint.
* 11. We don't allow removal of NFS sillyrenamed files; it's handled by
* nfs_async_unlink().
*/
static int may_delete(struct inode *dir, struct dentry *victim, bool isdir)
{
struct inode *inode = d_backing_inode(victim);
int error;
if (d_is_negative(victim))
return -ENOENT;
BUG_ON(!inode);
BUG_ON(victim->d_parent->d_inode != dir);
/* Inode writeback is not safe when the uid or gid are invalid. */
if (!uid_valid(inode->i_uid) || !gid_valid(inode->i_gid))
return -EOVERFLOW;
audit_inode_child(dir, victim, AUDIT_TYPE_CHILD_DELETE);
error = inode_permission(dir, MAY_WRITE | MAY_EXEC);
if (error)
return error;
if (IS_APPEND(dir))
return -EPERM;
if (check_sticky(dir, inode) || IS_APPEND(inode) ||
IS_IMMUTABLE(inode) || IS_SWAPFILE(inode) || HAS_UNMAPPED_ID(inode))
return -EPERM;
if (isdir) {
if (!d_is_dir(victim))
return -ENOTDIR;
if (IS_ROOT(victim))
return -EBUSY;
} else if (d_is_dir(victim))
return -EISDIR;
if (IS_DEADDIR(dir))
return -ENOENT;
if (victim->d_flags & DCACHE_NFSFS_RENAMED)
return -EBUSY;
return 0;
}
/* Check whether we can create an object with dentry child in directory
* dir.
* 1. We can't do it if child already exists (open has special treatment for
* this case, but since we are inlined it's OK)
* 2. We can't do it if dir is read-only (done in permission())
* 3. We can't do it if the fs can't represent the fsuid or fsgid.
* 4. We should have write and exec permissions on dir
* 5. We can't do it if dir is immutable (done in permission())
*/
static inline int may_create(struct inode *dir, struct dentry *child)
{
struct user_namespace *s_user_ns;
audit_inode_child(dir, child, AUDIT_TYPE_CHILD_CREATE);
if (child->d_inode)
return -EEXIST;
if (IS_DEADDIR(dir))
return -ENOENT;
s_user_ns = dir->i_sb->s_user_ns;
if (!kuid_has_mapping(s_user_ns, current_fsuid()) ||
!kgid_has_mapping(s_user_ns, current_fsgid()))
return -EOVERFLOW;
return inode_permission(dir, MAY_WRITE | MAY_EXEC);
}
/*
* p1 and p2 should be directories on the same fs.
*/
struct dentry *lock_rename(struct dentry *p1, struct dentry *p2)
{
struct dentry *p;
if (p1 == p2) {
inode_lock_nested(p1->d_inode, I_MUTEX_PARENT);
return NULL;
}
mutex_lock(&p1->d_sb->s_vfs_rename_mutex);
p = d_ancestor(p2, p1);
if (p) {
inode_lock_nested(p2->d_inode, I_MUTEX_PARENT);
inode_lock_nested(p1->d_inode, I_MUTEX_CHILD);
return p;
}
p = d_ancestor(p1, p2);
if (p) {
inode_lock_nested(p1->d_inode, I_MUTEX_PARENT);
inode_lock_nested(p2->d_inode, I_MUTEX_CHILD);
return p;
}
inode_lock_nested(p1->d_inode, I_MUTEX_PARENT);
inode_lock_nested(p2->d_inode, I_MUTEX_PARENT2);
return NULL;
}
EXPORT_SYMBOL(lock_rename);
void unlock_rename(struct dentry *p1, struct dentry *p2)
{
inode_unlock(p1->d_inode);
if (p1 != p2) {
inode_unlock(p2->d_inode);
mutex_unlock(&p1->d_sb->s_vfs_rename_mutex);
}
}
EXPORT_SYMBOL(unlock_rename);
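/*
 * A minimal sketch of the caller pattern ("#if 0"; condensed, with
 * error handling trimmed, from the rename path later in this file):
 * the returned "trap" dentry is the common ancestor, and neither
 * source nor target may be that ancestor.
 */
#if 0
static int rename_lock_sketch(struct dentry *old_dir, struct dentry *old_dentry,
			      struct dentry *new_dir, struct dentry *new_dentry)
{
	struct dentry *trap = lock_rename(new_dir, old_dir);
	int error = -EINVAL;

	/* source must not be an ancestor of target, nor vice versa */
	if (old_dentry != trap && new_dentry != trap)
		error = 0;	/* safe to call vfs_rename() at this point */
	unlock_rename(new_dir, old_dir);
	return error;
}
#endif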
int vfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
bool want_excl)
{
int error = may_create(dir, dentry);
if (error)
return error;
if (!dir->i_op->create)
return -EACCES; /* shouldn't it be ENOSYS? */
mode &= S_IALLUGO;
mode |= S_IFREG;
error = security_inode_create(dir, dentry, mode);
if (error)
return error;
error = dir->i_op->create(dir, dentry, mode, want_excl);
if (!error)
fsnotify_create(dir, dentry);
return error;
}
EXPORT_SYMBOL(vfs_create);
int vfs_mkobj(struct dentry *dentry, umode_t mode,
int (*f)(struct dentry *, umode_t, void *),
void *arg)
{
struct inode *dir = dentry->d_parent->d_inode;
int error = may_create(dir, dentry);
if (error)
return error;
mode &= S_IALLUGO;
mode |= S_IFREG;
error = security_inode_create(dir, dentry, mode);
if (error)
return error;
error = f(dentry, mode, arg);
if (!error)
fsnotify_create(dir, dentry);
return error;
}
EXPORT_SYMBOL(vfs_mkobj);
bool may_open_dev(const struct path *path)
{
return !(path->mnt->mnt_flags & MNT_NODEV) &&
!(path->mnt->mnt_sb->s_iflags & SB_I_NODEV);
}
static int may_open(const struct path *path, int acc_mode, int flag)
{
struct dentry *dentry = path->dentry;
struct inode *inode = dentry->d_inode;
int error;
if (!inode)
return -ENOENT;
switch (inode->i_mode & S_IFMT) {
case S_IFLNK:
return -ELOOP;
case S_IFDIR:
if (acc_mode & MAY_WRITE)
return -EISDIR;
break;
case S_IFBLK:
case S_IFCHR:
if (!may_open_dev(path))
return -EACCES;
/*FALLTHRU*/
case S_IFIFO:
case S_IFSOCK:
flag &= ~O_TRUNC;
break;
}
error = inode_permission(inode, MAY_OPEN | acc_mode);
if (error)
return error;
/*
* An append-only file must be opened in append mode for writing.
*/
if (IS_APPEND(inode)) {
if ((flag & O_ACCMODE) != O_RDONLY && !(flag & O_APPEND))
return -EPERM;
if (flag & O_TRUNC)
return -EPERM;
}
/* O_NOATIME can only be set by the owner or superuser */
if (flag & O_NOATIME && !inode_owner_or_capable(inode))
return -EPERM;
return 0;
}
static int handle_truncate(struct file *filp)
{
const struct path *path = &filp->f_path;
struct inode *inode = path->dentry->d_inode;
int error = get_write_access(inode);
if (error)
return error;
/*
* Refuse to truncate files with mandatory locks held on them.
*/
error = locks_verify_locked(filp);
if (!error)
error = security_path_truncate(path);
if (!error) {
error = do_truncate(path->dentry, 0,
ATTR_MTIME|ATTR_CTIME|ATTR_OPEN,
filp);
}
put_write_access(inode);
return error;
}
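/*
 * Note on the helper below: open(2) encodes the access mode as
 * O_RDONLY=0 / O_WRONLY=1 / O_RDWR=2, and Linux reserves accmode 3 as
 * "check read and write permission, but return a descriptor usable
 * only for ioctl()" (see open(2)). The decrement folds that special
 * case into O_RDWR before the flags reach ->atomic_open().
 */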
static inline int open_to_namei_flags(int flag)
{
if ((flag & O_ACCMODE) == 3)
flag--;
return flag;
}
static int may_o_create(const struct path *dir, struct dentry *dentry, umode_t mode)
{
struct user_namespace *s_user_ns;
int error = security_path_mknod(dir, dentry, mode, 0);
if (error)
return error;
s_user_ns = dir->dentry->d_sb->s_user_ns;
if (!kuid_has_mapping(s_user_ns, current_fsuid()) ||
!kgid_has_mapping(s_user_ns, current_fsgid()))
return -EOVERFLOW;
error = inode_permission(dir->dentry->d_inode, MAY_WRITE | MAY_EXEC);
if (error)
return error;
return security_inode_create(dir->dentry->d_inode, dentry, mode);
}
/*
* Attempt to atomically look up, create and open a file from a negative
* dentry.
*
* Returns 0 if successful. The file will have been created and attached to
* @file by the filesystem calling finish_open().
*
* If the file was looked up only or didn't need creating, FMODE_OPENED won't
* be set. The caller will need to perform the open themselves. @path will
* have been updated to point to the new dentry. This may be negative.
*
* Returns an error code otherwise.
*/
static int atomic_open(struct nameidata *nd, struct dentry *dentry,
struct path *path, struct file *file,
const struct open_flags *op,
int open_flag, umode_t mode)
{
struct dentry *const DENTRY_NOT_SET = (void *) -1UL;
struct inode *dir = nd->path.dentry->d_inode;
int error;
if (!(~open_flag & (O_EXCL | O_CREAT))) /* both O_EXCL and O_CREAT */
open_flag &= ~O_TRUNC;
if (nd->flags & LOOKUP_DIRECTORY)
open_flag |= O_DIRECTORY;
file->f_path.dentry = DENTRY_NOT_SET;
file->f_path.mnt = nd->path.mnt;
error = dir->i_op->atomic_open(dir, dentry, file,
open_to_namei_flags(open_flag), mode);
d_lookup_done(dentry);
if (!error) {
if (file->f_mode & FMODE_OPENED) {
/*
* We didn't have the inode before the open, so check open
* permission here.
*/
int acc_mode = op->acc_mode;
if (file->f_mode & FMODE_CREATED) {
WARN_ON(!(open_flag & O_CREAT));
fsnotify_create(dir, dentry);
acc_mode = 0;
}
error = may_open(&file->f_path, acc_mode, open_flag);
if (WARN_ON(error > 0))
error = -EINVAL;
} else if (WARN_ON(file->f_path.dentry == DENTRY_NOT_SET)) {
error = -EIO;
} else {
if (file->f_path.dentry) {
dput(dentry);
dentry = file->f_path.dentry;
}
if (file->f_mode & FMODE_CREATED)
fsnotify_create(dir, dentry);
if (unlikely(d_is_negative(dentry))) {
error = -ENOENT;
} else {
path->dentry = dentry;
path->mnt = nd->path.mnt;
return 0;
}
}
}
dput(dentry);
return error;
}
/*
* Look up and maybe create and open the last component.
*
* Must be called with parent locked (exclusive in O_CREAT case).
*
* Returns 0 on success, that is, if
* the file was successfully atomically created (if necessary) and opened, or
* the file was not completely opened at this time, though lookups and
* creations were performed.
* These cases are distinguished by the presence of FMODE_OPENED on file->f_mode.
* In the latter case the dentry returned in @path might be negative if O_CREAT
* hadn't been specified.
*
* An error code is returned on failure.
*/
static int lookup_open(struct nameidata *nd, struct path *path,
struct file *file,
const struct open_flags *op,
bool got_write)
{
struct dentry *dir = nd->path.dentry;
struct inode *dir_inode = dir->d_inode;
int open_flag = op->open_flag;
struct dentry *dentry;
int error, create_error = 0;
umode_t mode = op->mode;
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
if (unlikely(IS_DEADDIR(dir_inode)))
return -ENOENT;
file->f_mode &= ~FMODE_CREATED;
dentry = d_lookup(dir, &nd->last);
for (;;) {
if (!dentry) {
dentry = d_alloc_parallel(dir, &nd->last, &wq);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
}
if (d_in_lookup(dentry))
break;
error = d_revalidate(dentry, nd->flags);
if (likely(error > 0))
break;
if (error)
goto out_dput;
d_invalidate(dentry);
dput(dentry);
dentry = NULL;
}
if (dentry->d_inode) {
/* Cached positive dentry: will open in f_op->open */
goto out_no_open;
}
/*
* Checking write permission is tricky, because we don't know if we are
* going to actually need it: O_CREAT opens should work as long as the
* file exists. But checking existence breaks atomicity. The trick is
* to check access and if not granted clear O_CREAT from the flags.
*
* Another problem is returning the "right" error value (e.g. for an
* O_EXCL open we want to return EEXIST not EROFS).
*/
if (open_flag & O_CREAT) {
if (!IS_POSIXACL(dir->d_inode))
mode &= ~current_umask();
if (unlikely(!got_write)) {
create_error = -EROFS;
open_flag &= ~O_CREAT;
if (open_flag & (O_EXCL | O_TRUNC))
goto no_open;
/* No side effects, safe to clear O_CREAT */
} else {
create_error = may_o_create(&nd->path, dentry, mode);
if (create_error) {
open_flag &= ~O_CREAT;
if (open_flag & O_EXCL)
goto no_open;
}
}
} else if ((open_flag & (O_TRUNC|O_WRONLY|O_RDWR)) &&
unlikely(!got_write)) {
/*
* No O_CREAT -> atomicity not a requirement -> fall
* back to lookup + open
*/
goto no_open;
}
if (dir_inode->i_op->atomic_open) {
error = atomic_open(nd, dentry, path, file, op, open_flag,
mode);
if (unlikely(error == -ENOENT) && create_error)
error = create_error;
return error;
}
no_open:
if (d_in_lookup(dentry)) {
struct dentry *res = dir_inode->i_op->lookup(dir_inode, dentry,
nd->flags);
d_lookup_done(dentry);
if (unlikely(res)) {
if (IS_ERR(res)) {
error = PTR_ERR(res);
goto out_dput;
}
dput(dentry);
dentry = res;
}
}
/* Negative dentry, just create the file */
if (!dentry->d_inode && (open_flag & O_CREAT)) {
file->f_mode |= FMODE_CREATED;
audit_inode_child(dir_inode, dentry, AUDIT_TYPE_CHILD_CREATE);
if (!dir_inode->i_op->create) {
error = -EACCES;
goto out_dput;
}
error = dir_inode->i_op->create(dir_inode, dentry, mode,
open_flag & O_EXCL);
if (error)
goto out_dput;
fsnotify_create(dir_inode, dentry);
}
if (unlikely(create_error) && !dentry->d_inode) {
error = create_error;
goto out_dput;
}
out_no_open:
path->dentry = dentry;
path->mnt = nd->path.mnt;
return 0;
out_dput:
dput(dentry);
return error;
}
/*
* Handle the last step of open()
*/
static int do_last(struct nameidata *nd,
struct file *file, const struct open_flags *op)
{
struct dentry *dir = nd->path.dentry;
int open_flag = op->open_flag;
bool will_truncate = (open_flag & O_TRUNC) != 0;
bool got_write = false;
int acc_mode = op->acc_mode;
unsigned seq;
struct inode *inode;
struct path path;
int error;
nd->flags &= ~LOOKUP_PARENT;
nd->flags |= op->intent;
if (nd->last_type != LAST_NORM) {
error = handle_dots(nd, nd->last_type);
if (unlikely(error))
return error;
goto finish_open;
}
if (!(open_flag & O_CREAT)) {
if (nd->last.name[nd->last.len])
nd->flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY;
/* we _can_ be in RCU mode here */
error = lookup_fast(nd, &path, &inode, &seq);
if (likely(error > 0))
goto finish_lookup;
if (error < 0)
return error;
BUG_ON(nd->inode != dir->d_inode);
BUG_ON(nd->flags & LOOKUP_RCU);
} else {
/* create side of things */
/*
* This will *only* deal with leaving RCU mode - LOOKUP_JUMPED
* has been cleared when we got to the last component we are
* about to look up
*/
error = complete_walk(nd);
if (error)
return error;
audit_inode(nd->name, dir, AUDIT_INODE_PARENT);
/* trailing slashes? */
if (unlikely(nd->last.name[nd->last.len]))
return -EISDIR;
}
if (open_flag & (O_CREAT | O_TRUNC | O_WRONLY | O_RDWR)) {
error = mnt_want_write(nd->path.mnt);
if (!error)
got_write = true;
/*
* do _not_ fail yet - we might not need that or fail with
* a different error; let lookup_open() decide; we'll be
* dropping this one anyway.
*/
}
if (open_flag & O_CREAT)
inode_lock(dir->d_inode);
else
inode_lock_shared(dir->d_inode);
error = lookup_open(nd, &path, file, op, got_write);
if (open_flag & O_CREAT)
inode_unlock(dir->d_inode);
else
inode_unlock_shared(dir->d_inode);
if (error)
goto out;
if (file->f_mode & FMODE_OPENED) {
if ((file->f_mode & FMODE_CREATED) ||
!S_ISREG(file_inode(file)->i_mode))
will_truncate = false;
audit_inode(nd->name, file->f_path.dentry, 0);
goto opened;
}
if (file->f_mode & FMODE_CREATED) {
/* Don't check for write permission, don't truncate */
open_flag &= ~O_TRUNC;
will_truncate = false;
acc_mode = 0;
path_to_nameidata(&path, nd);
goto finish_open_created;
}
/*
* If atomic_open() acquired write access it is dropped now due to
* possible mount and symlink following (this might be optimized away if
* necessary...)
*/
if (got_write) {
mnt_drop_write(nd->path.mnt);
got_write = false;
}
error = follow_managed(&path, nd);
if (unlikely(error < 0))
return error;
/*
* create/update audit record if it already exists.
*/
audit_inode(nd->name, path.dentry, 0);
if (unlikely((open_flag & (O_EXCL | O_CREAT)) == (O_EXCL | O_CREAT))) {
path_to_nameidata(&path, nd);
return -EEXIST;
}
seq = 0; /* out of RCU mode, so the value doesn't matter */
inode = d_backing_inode(path.dentry);
finish_lookup:
error = step_into(nd, &path, 0, inode, seq);
if (unlikely(error))
return error;
finish_open:
/* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */
error = complete_walk(nd);
if (error)
return error;
audit_inode(nd->name, nd->path.dentry, 0);
if (open_flag & O_CREAT) {
error = -EISDIR;
if (d_is_dir(nd->path.dentry))
goto out;
error = may_create_in_sticky(dir,
d_backing_inode(nd->path.dentry));
if (unlikely(error))
goto out;
}
error = -ENOTDIR;
if ((nd->flags & LOOKUP_DIRECTORY) && !d_can_lookup(nd->path.dentry))
goto out;
if (!d_is_reg(nd->path.dentry))
will_truncate = false;
if (will_truncate) {
error = mnt_want_write(nd->path.mnt);
if (error)
goto out;
got_write = true;
}
finish_open_created:
error = may_open(&nd->path, acc_mode, open_flag);
if (error)
goto out;
BUG_ON(file->f_mode & FMODE_OPENED); /* once it's opened, it's opened */
error = vfs_open(&nd->path, file);
if (error)
goto out;
opened:
error = ima_file_check(file, op->acc_mode);
if (!error && will_truncate)
error = handle_truncate(file);
out:
if (unlikely(error > 0)) {
WARN_ON(1);
error = -EINVAL;
}
if (got_write)
mnt_drop_write(nd->path.mnt);
return error;
}
struct dentry *vfs_tmpfile(struct dentry *dentry, umode_t mode, int open_flag)
{
struct dentry *child = NULL;
struct inode *dir = dentry->d_inode;
struct inode *inode;
int error;
/* we want directory to be writable */
error = inode_permission(dir, MAY_WRITE | MAY_EXEC);
if (error)
goto out_err;
error = -EOPNOTSUPP;
if (!dir->i_op->tmpfile)
goto out_err;
error = -ENOMEM;
child = d_alloc(dentry, &slash_name);
if (unlikely(!child))
goto out_err;
error = dir->i_op->tmpfile(dir, child, mode);
if (error)
goto out_err;
error = -ENOENT;
inode = child->d_inode;
if (unlikely(!inode))
goto out_err;
if (!(open_flag & O_EXCL)) {
spin_lock(&inode->i_lock);
inode->i_state |= I_LINKABLE;
spin_unlock(&inode->i_lock);
}
ima_post_create_tmpfile(inode);
return child;
out_err:
dput(child);
return ERR_PTR(error);
}
EXPORT_SYMBOL(vfs_tmpfile);
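/*
 * Userspace sketch ("#if 0", illustrative; see also open(2)) of the
 * I_LINKABLE dance above: an O_TMPFILE open without O_EXCL leaves the
 * inode linkable, so it can be given a name later via linkat() on the
 * /proc/self/fd symlink.
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char path[64];
	int fd = open("/tmp", O_TMPFILE | O_RDWR, 0600);

	if (fd < 0)
		return 1;
	snprintf(path, sizeof(path), "/proc/self/fd/%d", fd);
	/* materialize the unnamed inode; fails if O_EXCL had been used */
	if (linkat(AT_FDCWD, path, AT_FDCWD, "/tmp/now-visible",
		   AT_SYMLINK_FOLLOW) != 0)
		perror("linkat");
	close(fd);
	return 0;
}
#endif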
static int do_tmpfile(struct nameidata *nd, unsigned flags,
const struct open_flags *op,
struct file *file)
{
struct dentry *child;
struct path path;
int error = path_lookupat(nd, flags | LOOKUP_DIRECTORY, &path);
if (unlikely(error))
return error;
error = mnt_want_write(path.mnt);
if (unlikely(error))
goto out;
child = vfs_tmpfile(path.dentry, op->mode, op->open_flag);
error = PTR_ERR(child);
if (IS_ERR(child))
goto out2;
dput(path.dentry);
path.dentry = child;
audit_inode(nd->name, child, 0);
/* Don't check for other permissions, the inode was just created */
error = may_open(&path, 0, op->open_flag);
if (error)
goto out2;
file->f_path.mnt = path.mnt;
error = finish_open(file, child, NULL);
out2:
mnt_drop_write(path.mnt);
out:
path_put(&path);
return error;
}
static int do_o_path(struct nameidata *nd, unsigned flags, struct file *file)
{
struct path path;
int error = path_lookupat(nd, flags, &path);
if (!error) {
audit_inode(nd->name, path.dentry, 0);
error = vfs_open(&path, file);
path_put(&path);
}
return error;
}
static struct file *path_openat(struct nameidata *nd,
const struct open_flags *op, unsigned flags)
{
struct file *file;
int error;
file = alloc_empty_file(op->open_flag, current_cred());
if (IS_ERR(file))
return file;
if (unlikely(file->f_flags & __O_TMPFILE)) {
error = do_tmpfile(nd, flags, op, file);
} else if (unlikely(file->f_flags & O_PATH)) {
error = do_o_path(nd, flags, file);
} else {
const char *s = path_init(nd, flags);
while (!(error = link_path_walk(s, nd)) &&
(error = do_last(nd, file, op)) > 0) {
nd->flags &= ~(LOOKUP_OPEN|LOOKUP_CREATE|LOOKUP_EXCL);
s = trailing_symlink(nd);
}
terminate_walk(nd);
}
if (likely(!error)) {
if (likely(file->f_mode & FMODE_OPENED))
return file;
WARN_ON(1);
error = -EINVAL;
}
fput(file);
if (error == -EOPENSTALE) {
if (flags & LOOKUP_RCU)
error = -ECHILD;
else
error = -ESTALE;
}
return ERR_PTR(error);
}
struct file *do_filp_open(int dfd, struct filename *pathname,
const struct open_flags *op)
{
struct nameidata nd;
int flags = op->lookup_flags;
struct file *filp;
set_nameidata(&nd, dfd, pathname);
filp = path_openat(&nd, op, flags | LOOKUP_RCU);
if (unlikely(filp == ERR_PTR(-ECHILD)))
filp = path_openat(&nd, op, flags);
if (unlikely(filp == ERR_PTR(-ESTALE)))
filp = path_openat(&nd, op, flags | LOOKUP_REVAL);
restore_nameidata();
return filp;
}
struct file *do_file_open_root(struct dentry *dentry, struct vfsmount *mnt,
const char *name, const struct open_flags *op)
{
struct nameidata nd;
struct file *file;
struct filename *filename;
int flags = op->lookup_flags | LOOKUP_ROOT;
nd.root.mnt = mnt;
nd.root.dentry = dentry;
if (d_is_symlink(dentry) && op->intent & LOOKUP_OPEN)
return ERR_PTR(-ELOOP);
filename = getname_kernel(name);
if (IS_ERR(filename))
return ERR_CAST(filename);
set_nameidata(&nd, -1, filename);
file = path_openat(&nd, op, flags | LOOKUP_RCU);
if (unlikely(file == ERR_PTR(-ECHILD)))
file = path_openat(&nd, op, flags);
if (unlikely(file == ERR_PTR(-ESTALE)))
file = path_openat(&nd, op, flags | LOOKUP_REVAL);
restore_nameidata();
putname(filename);
return file;
}
static struct dentry *filename_create(int dfd, struct filename *name,
struct path *path, unsigned int lookup_flags)
{
struct dentry *dentry = ERR_PTR(-EEXIST);
struct qstr last;
int type;
int err2;
int error;
bool is_dir = (lookup_flags & LOOKUP_DIRECTORY);
/*
* Note that only LOOKUP_REVAL and LOOKUP_DIRECTORY matter here. Any
* other flags passed in are ignored!
*/
lookup_flags &= LOOKUP_REVAL;
name = filename_parentat(dfd, name, lookup_flags, path, &last, &type);
if (IS_ERR(name))
return ERR_CAST(name);
/*
* Yucky last component or no last component at all?
* (foo/., foo/.., /////)
*/
if (unlikely(type != LAST_NORM))
goto out;
/* don't fail immediately if it's r/o, at least try to report other errors */
err2 = mnt_want_write(path->mnt);
/*
* Do the final lookup.
*/
lookup_flags |= LOOKUP_CREATE | LOOKUP_EXCL;
inode_lock_nested(path->dentry->d_inode, I_MUTEX_PARENT);
dentry = __lookup_hash(&last, path->dentry, lookup_flags);
if (IS_ERR(dentry))
goto unlock;
error = -EEXIST;
if (d_is_positive(dentry))
goto fail;
/*
* Special case - lookup gave negative, but... we had foo/bar/
* From the vfs_mknod() POV we just have a negative dentry -
* all is fine. Let's be bastards - you had / on the end, you've
* been asking for a (non-existent) directory. -ENOENT for you.
*/
if (unlikely(!is_dir && last.name[last.len])) {
error = -ENOENT;
goto fail;
}
if (unlikely(err2)) {
error = err2;
goto fail;
}
putname(name);
return dentry;
fail:
dput(dentry);
dentry = ERR_PTR(error);
unlock:
inode_unlock(path->dentry->d_inode);
if (!err2)
mnt_drop_write(path->mnt);
out:
path_put(path);
putname(name);
return dentry;
}
struct dentry *kern_path_create(int dfd, const char *pathname,
struct path *path, unsigned int lookup_flags)
{
return filename_create(dfd, getname_kernel(pathname),
path, lookup_flags);
}
EXPORT_SYMBOL(kern_path_create);
void done_path_create(struct path *path, struct dentry *dentry)
{
dput(dentry);
inode_unlock(path->dentry->d_inode);
mnt_drop_write(path->mnt);
path_put(path);
}
EXPORT_SYMBOL(done_path_create);
inline struct dentry *user_path_create(int dfd, const char __user *pathname,
struct path *path, unsigned int lookup_flags)
{
return filename_create(dfd, getname(pathname), path, lookup_flags);
}
EXPORT_SYMBOL(user_path_create);
int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
{
int error = may_create(dir, dentry);
if (error)
return error;
if ((S_ISCHR(mode) || S_ISBLK(mode)) && !capable(CAP_MKNOD))
return -EPERM;
if (!dir->i_op->mknod)
return -EPERM;
error = devcgroup_inode_mknod(mode, dev);
if (error)
return error;
error = security_inode_mknod(dir, dentry, mode, dev);
if (error)
return error;
error = dir->i_op->mknod(dir, dentry, mode, dev);
if (!error)
fsnotify_create(dir, dentry);
return error;
}
EXPORT_SYMBOL(vfs_mknod);
static int may_mknod(umode_t mode)
{
switch (mode & S_IFMT) {
case S_IFREG:
case S_IFCHR:
case S_IFBLK:
case S_IFIFO:
case S_IFSOCK:
case 0: /* zero mode translates to S_IFREG */
return 0;
case S_IFDIR:
return -EPERM;
default:
return -EINVAL;
}
}
long do_mknodat(int dfd, const char __user *filename, umode_t mode,
unsigned int dev)
{
struct dentry *dentry;
struct path path;
int error;
unsigned int lookup_flags = 0;
error = may_mknod(mode);
if (error)
return error;
retry:
dentry = user_path_create(dfd, filename, &path, lookup_flags);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
if (!IS_POSIXACL(path.dentry->d_inode))
mode &= ~current_umask();
error = security_path_mknod(&path, dentry, mode, dev);
if (error)
goto out;
switch (mode & S_IFMT) {
case 0: case S_IFREG:
error = vfs_create(path.dentry->d_inode,dentry,mode,true);
if (!error)
ima_post_path_mknod(dentry);
break;
case S_IFCHR: case S_IFBLK:
error = vfs_mknod(path.dentry->d_inode,dentry,mode,
new_decode_dev(dev));
break;
case S_IFIFO: case S_IFSOCK:
error = vfs_mknod(path.dentry->d_inode,dentry,mode,0);
break;
}
out:
done_path_create(&path, dentry);
if (retry_estale(error, lookup_flags)) {
lookup_flags |= LOOKUP_REVAL;
goto retry;
}
return error;
}
SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
unsigned int, dev)
{
return do_mknodat(dfd, filename, mode, dev);
}
SYSCALL_DEFINE3(mknod, const char __user *, filename, umode_t, mode, unsigned, dev)
{
return do_mknodat(AT_FDCWD, filename, mode, dev);
}
int vfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
int error = may_create(dir, dentry);
unsigned max_links = dir->i_sb->s_max_links;
if (error)
return error;
if (!dir->i_op->mkdir)
return -EPERM;
mode &= (S_IRWXUGO|S_ISVTX);
error = security_inode_mkdir(dir, dentry, mode);
if (error)
return error;
if (max_links && dir->i_nlink >= max_links)
return -EMLINK;
error = dir->i_op->mkdir(dir, dentry, mode);
if (!error)
fsnotify_mkdir(dir, dentry);
return error;
}
EXPORT_SYMBOL(vfs_mkdir);
long do_mkdirat(int dfd, const char __user *pathname, umode_t mode)
{
struct dentry *dentry;
struct path path;
int error;
unsigned int lookup_flags = LOOKUP_DIRECTORY;
retry:
dentry = user_path_create(dfd, pathname, &path, lookup_flags);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
if (!IS_POSIXACL(path.dentry->d_inode))
mode &= ~current_umask();
error = security_path_mkdir(&path, dentry, mode);
if (!error)
error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
done_path_create(&path, dentry);
if (retry_estale(error, lookup_flags)) {
lookup_flags |= LOOKUP_REVAL;
goto retry;
}
return error;
}
SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, umode_t, mode)
{
return do_mkdirat(dfd, pathname, mode);
}
SYSCALL_DEFINE2(mkdir, const char __user *, pathname, umode_t, mode)
{
return do_mkdirat(AT_FDCWD, pathname, mode);
}
int vfs_rmdir(struct inode *dir, struct dentry *dentry)
{
int error = may_delete(dir, dentry, 1);
if (error)
return error;
if (!dir->i_op->rmdir)
return -EPERM;
dget(dentry);
inode_lock(dentry->d_inode);
error = -EBUSY;
if (is_local_mountpoint(dentry))
goto out;
error = security_inode_rmdir(dir, dentry);
if (error)
goto out;
error = dir->i_op->rmdir(dir, dentry);
if (error)
goto out;
shrink_dcache_parent(dentry);
dentry->d_inode->i_flags |= S_DEAD;
dont_mount(dentry);
detach_mounts(dentry);
fsnotify_rmdir(dir, dentry);
out:
inode_unlock(dentry->d_inode);
dput(dentry);
if (!error)
d_delete(dentry);
return error;
}
EXPORT_SYMBOL(vfs_rmdir);
long do_rmdir(int dfd, const char __user *pathname)
{
int error = 0;
struct filename *name;
struct dentry *dentry;
struct path path;
struct qstr last;
int type;
unsigned int lookup_flags = 0;
retry:
name = filename_parentat(dfd, getname(pathname), lookup_flags,
&path, &last, &type);
if (IS_ERR(name))
return PTR_ERR(name);
switch (type) {
case LAST_DOTDOT:
error = -ENOTEMPTY;
goto exit1;
case LAST_DOT:
error = -EINVAL;
goto exit1;
case LAST_ROOT:
error = -EBUSY;
goto exit1;
}
error = mnt_want_write(path.mnt);
if (error)
goto exit1;
inode_lock_nested(path.dentry->d_inode, I_MUTEX_PARENT);
dentry = __lookup_hash(&last, path.dentry, lookup_flags);
error = PTR_ERR(dentry);
if (IS_ERR(dentry))
goto exit2;
if (!dentry->d_inode) {
error = -ENOENT;
goto exit3;
}
error = security_path_rmdir(&path, dentry);
if (error)
goto exit3;
error = vfs_rmdir(path.dentry->d_inode, dentry);
exit3:
dput(dentry);
exit2:
inode_unlock(path.dentry->d_inode);
mnt_drop_write(path.mnt);
exit1:
path_put(&path);
putname(name);
if (retry_estale(error, lookup_flags)) {
lookup_flags |= LOOKUP_REVAL;
goto retry;
}
return error;
}
SYSCALL_DEFINE1(rmdir, const char __user *, pathname)
{
return do_rmdir(AT_FDCWD, pathname);
}
/**
* vfs_unlink - unlink a filesystem object
* @dir: parent directory
* @dentry: victim
* @delegated_inode: returns victim inode, if the inode is delegated.
*
* The caller must hold dir->i_mutex.
*
* If vfs_unlink discovers a delegation, it will return -EWOULDBLOCK and
* return a reference to the inode in delegated_inode. The caller
* should then break the delegation on that inode and retry. Because
* breaking a delegation may take a long time, the caller should drop
* dir->i_mutex before doing so.
*
* Alternatively, a caller may pass NULL for delegated_inode. This may
* be appropriate for callers that expect the underlying filesystem not
* to be NFS exported.
*/
int vfs_unlink(struct inode *dir, struct dentry *dentry, struct inode **delegated_inode)
{
struct inode *target = dentry->d_inode;
int error = may_delete(dir, dentry, 0);
if (error)
return error;
if (!dir->i_op->unlink)
return -EPERM;
inode_lock(target);
if (is_local_mountpoint(dentry))
error = -EBUSY;
else {
error = security_inode_unlink(dir, dentry);
if (!error) {
error = try_break_deleg(target, delegated_inode);
if (error)
goto out;
error = dir->i_op->unlink(dir, dentry);
if (!error) {
dont_mount(dentry);
detach_mounts(dentry);
fsnotify_unlink(dir, dentry);
}
}
}
out:
inode_unlock(target);
/* We don't d_delete() NFS sillyrenamed files--they still exist. */
if (!error && !(dentry->d_flags & DCACHE_NFSFS_RENAMED)) {
fsnotify_link_count(target);
d_delete(dentry);
}
return error;
}
EXPORT_SYMBOL(vfs_unlink);
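/*
 * Condensed sketch ("#if 0", fragment; 'dir' and 'victim' are
 * hypothetical stand-ins) of the delegation-aware retry loop the
 * kernel-doc above describes -- do_unlinkat() below is the real thing:
 */
#if 0
	struct inode *delegated_inode = NULL;
	int error;
retry:
	inode_lock_nested(dir->d_inode, I_MUTEX_PARENT);
	error = vfs_unlink(dir->d_inode, victim, &delegated_inode);
	inode_unlock(dir->d_inode);
	if (delegated_inode) {
		error = break_deleg_wait(&delegated_inode);
		if (!error)
			goto retry;	/* delegation broken, try again */
	}
#endif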
/*
* Make sure that the actual truncation of the file will occur outside its
* directory's i_mutex. Truncate can take a long time if there is a lot of
* writeout happening, and we don't want to prevent access to the directory
* while waiting on the I/O.
*/
long do_unlinkat(int dfd, struct filename *name)
{
int error;
struct dentry *dentry;
struct path path;
struct qstr last;
int type;
struct inode *inode = NULL;
struct inode *delegated_inode = NULL;
unsigned int lookup_flags = 0;
retry:
name = filename_parentat(dfd, name, lookup_flags, &path, &last, &type);
if (IS_ERR(name))
return PTR_ERR(name);
error = -EISDIR;
if (type != LAST_NORM)
goto exit1;
error = mnt_want_write(path.mnt);
if (error)
goto exit1;
retry_deleg:
inode_lock_nested(path.dentry->d_inode, I_MUTEX_PARENT);
dentry = __lookup_hash(&last, path.dentry, lookup_flags);
error = PTR_ERR(dentry);
if (!IS_ERR(dentry)) {
/* Why not before? Because we want correct error value */
if (last.name[last.len])
goto slashes;
inode = dentry->d_inode;
if (d_is_negative(dentry))
goto slashes;
ihold(inode);
error = security_path_unlink(&path, dentry);
if (error)
goto exit2;
error = vfs_unlink(path.dentry->d_inode, dentry, &delegated_inode);
exit2:
dput(dentry);
}
inode_unlock(path.dentry->d_inode);
if (inode)
iput(inode); /* truncate the inode here */
inode = NULL;
if (delegated_inode) {
error = break_deleg_wait(&delegated_inode);
if (!error)
goto retry_deleg;
}
mnt_drop_write(path.mnt);
exit1:
path_put(&path);
if (retry_estale(error, lookup_flags)) {
lookup_flags |= LOOKUP_REVAL;
inode = NULL;
goto retry;
}
putname(name);
return error;
slashes:
if (d_is_negative(dentry))
error = -ENOENT;
else if (d_is_dir(dentry))
error = -EISDIR;
else
error = -ENOTDIR;
goto exit2;
}
SYSCALL_DEFINE3(unlinkat, int, dfd, const char __user *, pathname, int, flag)
{
if ((flag & ~AT_REMOVEDIR) != 0)
return -EINVAL;
if (flag & AT_REMOVEDIR)
return do_rmdir(dfd, pathname);
return do_unlinkat(dfd, getname(pathname));
}
SYSCALL_DEFINE1(unlink, const char __user *, pathname)
{
return do_unlinkat(AT_FDCWD, getname(pathname));
}
int vfs_symlink(struct inode *dir, struct dentry *dentry, const char *oldname)
{
int error = may_create(dir, dentry);
if (error)
return error;
if (!dir->i_op->symlink)
return -EPERM;
error = security_inode_symlink(dir, dentry, oldname);
if (error)
return error;
error = dir->i_op->symlink(dir, dentry, oldname);
if (!error)
fsnotify_create(dir, dentry);
return error;
}
EXPORT_SYMBOL(vfs_symlink);
long do_symlinkat(const char __user *oldname, int newdfd,
const char __user *newname)
{
int error;
struct filename *from;
struct dentry *dentry;
struct path path;
unsigned int lookup_flags = 0;
from = getname(oldname);
if (IS_ERR(from))
return PTR_ERR(from);
retry:
dentry = user_path_create(newdfd, newname, &path, lookup_flags);
error = PTR_ERR(dentry);
if (IS_ERR(dentry))
goto out_putname;
error = security_path_symlink(&path, dentry, from->name);
if (!error)
error = vfs_symlink(path.dentry->d_inode, dentry, from->name);
done_path_create(&path, dentry);
if (retry_estale(error, lookup_flags)) {
lookup_flags |= LOOKUP_REVAL;
goto retry;
}
out_putname:
putname(from);
return error;
}
SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
int, newdfd, const char __user *, newname)
{
return do_symlinkat(oldname, newdfd, newname);
}
SYSCALL_DEFINE2(symlink, const char __user *, oldname, const char __user *, newname)
{
return do_symlinkat(oldname, AT_FDCWD, newname);
}
/**
* vfs_link - create a new link
* @old_dentry: object to be linked
* @dir: new parent
* @new_dentry: where to create the new link
* @delegated_inode: returns inode needing a delegation break
*
* The caller must hold dir->i_mutex
*
* If vfs_link discovers a delegation on the to-be-linked file in need
* of breaking, it will return -EWOULDBLOCK and return a reference to the
* inode in delegated_inode. The caller should then break the delegation
* and retry. Because breaking a delegation may take a long time, the
* caller should drop the i_mutex before doing so.
*
* Alternatively, a caller may pass NULL for delegated_inode. This may
* be appropriate for callers that expect the underlying filesystem not
* to be NFS exported.
*/
int vfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry, struct inode **delegated_inode)
{
struct inode *inode = old_dentry->d_inode;
unsigned max_links = dir->i_sb->s_max_links;
int error;
if (!inode)
return -ENOENT;
error = may_create(dir, new_dentry);
if (error)
return error;
if (dir->i_sb != inode->i_sb)
return -EXDEV;
/*
* A link to an append-only or immutable file cannot be created.
*/
if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
return -EPERM;
/*
* Updating the link count will likely cause i_uid and i_gid to
* be written back improperly if their true value is unknown to
* the vfs.
*/
if (HAS_UNMAPPED_ID(inode))
return -EPERM;
if (!dir->i_op->link)
return -EPERM;
if (S_ISDIR(inode->i_mode))
return -EPERM;
error = security_inode_link(old_dentry, dir, new_dentry);
if (error)
return error;
inode_lock(inode);
/* Make sure we don't allow creating hardlink to an unlinked file */
if (inode->i_nlink == 0 && !(inode->i_state & I_LINKABLE))
error = -ENOENT;
else if (max_links && inode->i_nlink >= max_links)
error = -EMLINK;
else {
error = try_break_deleg(inode, delegated_inode);
if (!error)
error = dir->i_op->link(old_dentry, dir, new_dentry);
}
if (!error && (inode->i_state & I_LINKABLE)) {
spin_lock(&inode->i_lock);
inode->i_state &= ~I_LINKABLE;
spin_unlock(&inode->i_lock);
}
inode_unlock(inode);
if (!error)
fsnotify_link(dir, inode, new_dentry);
return error;
}
EXPORT_SYMBOL(vfs_link);
/*
* Hardlinks are often used in delicate situations. We avoid
* security-related surprises by not following symlinks on the
* newname. --KAB
*
* We don't follow them on the oldname either to be compatible
* with linux 2.0, and to avoid hard-linking to directories
* and other special files. --ADM
*/
int do_linkat(int olddfd, const char __user *oldname, int newdfd,
const char __user *newname, int flags)
{
struct dentry *new_dentry;
struct path old_path, new_path;
struct inode *delegated_inode = NULL;
int how = 0;
int error;
if ((flags & ~(AT_SYMLINK_FOLLOW | AT_EMPTY_PATH)) != 0)
return -EINVAL;
/*
* To use null names we require CAP_DAC_READ_SEARCH.
* This ensures that not everyone will be able to create
* a hardlink using the passed file descriptor.
*/
if (flags & AT_EMPTY_PATH) {
if (!capable(CAP_DAC_READ_SEARCH))
return -ENOENT;
how = LOOKUP_EMPTY;
}
if (flags & AT_SYMLINK_FOLLOW)
how |= LOOKUP_FOLLOW;
retry:
error = user_path_at(olddfd, oldname, how, &old_path);
if (error)
return error;
new_dentry = user_path_create(newdfd, newname, &new_path,
(how & LOOKUP_REVAL));
error = PTR_ERR(new_dentry);
if (IS_ERR(new_dentry))
goto out;
error = -EXDEV;
if (old_path.mnt != new_path.mnt)
goto out_dput;
error = may_linkat(&old_path);
if (unlikely(error))
goto out_dput;
error = security_path_link(old_path.dentry, &new_path, new_dentry);
if (error)
goto out_dput;
error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry, &delegated_inode);
out_dput:
done_path_create(&new_path, new_dentry);
if (delegated_inode) {
error = break_deleg_wait(&delegated_inode);
if (!error) {
path_put(&old_path);
goto retry;
}
}
if (retry_estale(error, how)) {
path_put(&old_path);
how |= LOOKUP_REVAL;
goto retry;
}
out:
path_put(&old_path);
return error;
}
SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
int, newdfd, const char __user *, newname, int, flags)
{
return do_linkat(olddfd, oldname, newdfd, newname, flags);
}
SYSCALL_DEFINE2(link, const char __user *, oldname, const char __user *, newname)
{
return do_linkat(AT_FDCWD, oldname, AT_FDCWD, newname, 0);
}
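/*
 * Minimal userspace sketch driving do_linkat() above, kept under #if 0.
 * Without AT_SYMLINK_FOLLOW the oldname is not dereferenced (the
 * LOOKUP_FOLLOW branch stays untaken), so a symlink given as oldname is
 * hard-linked itself; AT_EMPTY_PATH additionally requires
 * CAP_DAC_READ_SEARCH, as enforced above. Paths are hypothetical.
 */
#if 0
#include <fcntl.h>    /* AT_FDCWD, AT_SYMLINK_FOLLOW */
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    /* Link to the target of "maybe-a-symlink" rather than to the
     * symlink inode itself. */
    if (linkat(AT_FDCWD, "maybe-a-symlink", AT_FDCWD, "hardlink",
               AT_SYMLINK_FOLLOW) != 0) {
        perror("linkat");
        return 1;
    }
    return 0;
}
#endif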
/**
* vfs_rename - rename a filesystem object
* @old_dir: parent of source
* @old_dentry: source
* @new_dir: parent of destination
* @new_dentry: destination
* @delegated_inode: returns an inode needing a delegation break
* @flags: rename flags
*
* The caller must hold multiple mutexes--see lock_rename().
*
* If vfs_rename discovers a delegation in need of breaking at either
* the source or destination, it will return -EWOULDBLOCK and return a
* reference to the inode in delegated_inode. The caller should then
* break the delegation and retry. Because breaking a delegation may
* take a long time, the caller should drop all locks before doing
* so.
*
* Alternatively, a caller may pass NULL for delegated_inode. This may
* be appropriate for callers that expect the underlying filesystem not
* to be NFS exported.
*
* The worst of all namespace operations - renaming directory. "Perverted"
* doesn't even start to describe it. Somebody in UCB had a heck of a trip...
* Problems:
*
* a) we can get into loop creation.
* b) race potential - two innocent renames can create a loop together.
* That's where 4.4 screws up. Current fix: serialization on
* sb->s_vfs_rename_mutex. We might be more accurate, but that's another
* story.
* c) we have to lock _four_ objects - parents and victim (if it exists),
* and source (if it is not a directory).
* And that - after we got ->i_mutex on parents (until then we don't know
* whether the target exists). Solution: try to be smart with locking
* order for inodes. We rely on the fact that tree topology may change
* only under ->s_vfs_rename_mutex _and_ that parent of the object we
* move will be locked. Thus we can rank directories by the tree
* (ancestors first) and rank all non-directories after them.
* That works since everybody except rename does "lock parent, lookup,
* lock child" and rename is under ->s_vfs_rename_mutex.
* HOWEVER, it relies on the assumption that any object with ->lookup()
* has no more than 1 dentry. If "hybrid" objects ever appear,
* we'd better make sure that there's no link(2) for them.
* d) conversion from fhandle to dentry may come at the wrong moment - when
* we are removing the target. Solution: we will have to grab ->i_mutex
* in the fhandle_to_dentry code. [FIXME - current nfsfh.c relies on
* ->i_mutex on parents, which works but leads to some truly excessive
* locking].
*/
int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry,
struct inode **delegated_inode, unsigned int flags)
{
int error;
bool is_dir = d_is_dir(old_dentry);
struct inode *source = old_dentry->d_inode;
struct inode *target = new_dentry->d_inode;
bool new_is_dir = false;
unsigned max_links = new_dir->i_sb->s_max_links;
struct name_snapshot old_name;
if (source == target)
return 0;
error = may_delete(old_dir, old_dentry, is_dir);
if (error)
return error;
if (!target) {
error = may_create(new_dir, new_dentry);
} else {
new_is_dir = d_is_dir(new_dentry);
if (!(flags & RENAME_EXCHANGE))
error = may_delete(new_dir, new_dentry, is_dir);
else
error = may_delete(new_dir, new_dentry, new_is_dir);
}
if (error)
return error;
if (!old_dir->i_op->rename)
return -EPERM;
/*
* If we are going to change the parent - check write permissions,
* we'll need to flip '..'.
*/
if (new_dir != old_dir) {
if (is_dir) {
error = inode_permission(source, MAY_WRITE);
if (error)
return error;
}
if ((flags & RENAME_EXCHANGE) && new_is_dir) {
error = inode_permission(target, MAY_WRITE);
if (error)
return error;
}
}
error = security_inode_rename(old_dir, old_dentry, new_dir, new_dentry,
flags);
if (error)
return error;
take_dentry_name_snapshot(&old_name, old_dentry);
dget(new_dentry);
if (!is_dir || (flags & RENAME_EXCHANGE))
lock_two_nondirectories(source, target);
else if (target)
inode_lock(target);
error = -EBUSY;
if (is_local_mountpoint(old_dentry) || is_local_mountpoint(new_dentry))
goto out;
if (max_links && new_dir != old_dir) {
error = -EMLINK;
if (is_dir && !new_is_dir && new_dir->i_nlink >= max_links)
goto out;
if ((flags & RENAME_EXCHANGE) && !is_dir && new_is_dir &&
old_dir->i_nlink >= max_links)
goto out;
}
if (!is_dir) {
error = try_break_deleg(source, delegated_inode);
if (error)
goto out;
}
if (target && !new_is_dir) {
error = try_break_deleg(target, delegated_inode);
if (error)
goto out;
}
error = old_dir->i_op->rename(old_dir, old_dentry,
new_dir, new_dentry, flags);
if (error)
goto out;
if (!(flags & RENAME_EXCHANGE) && target) {
if (is_dir) {
shrink_dcache_parent(new_dentry);
target->i_flags |= S_DEAD;
}
dont_mount(new_dentry);
detach_mounts(new_dentry);
}
if (!(old_dir->i_sb->s_type->fs_flags & FS_RENAME_DOES_D_MOVE)) {
if (!(flags & RENAME_EXCHANGE))
d_move(old_dentry, new_dentry);
else
d_exchange(old_dentry, new_dentry);
}
out:
if (!is_dir || (flags & RENAME_EXCHANGE))
unlock_two_nondirectories(source, target);
else if (target)
inode_unlock(target);
dput(new_dentry);
if (!error) {
fsnotify_move(old_dir, new_dir, &old_name.name, is_dir,
!(flags & RENAME_EXCHANGE) ? target : NULL, old_dentry);
if (flags & RENAME_EXCHANGE) {
fsnotify_move(new_dir, old_dir, &old_dentry->d_name,
new_is_dir, NULL, new_dentry);
}
}
release_dentry_name_snapshot(&old_name);
return error;
}
EXPORT_SYMBOL(vfs_rename);
static int do_renameat2(int olddfd, const char __user *oldname, int newdfd,
const char __user *newname, unsigned int flags)
{
struct dentry *old_dentry, *new_dentry;
struct dentry *trap;
struct path old_path, new_path;
struct qstr old_last, new_last;
int old_type, new_type;
struct inode *delegated_inode = NULL;
struct filename *from;
struct filename *to;
unsigned int lookup_flags = 0, target_flags = LOOKUP_RENAME_TARGET;
bool should_retry = false;
int error;
if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
return -EINVAL;
if ((flags & (RENAME_NOREPLACE | RENAME_WHITEOUT)) &&
(flags & RENAME_EXCHANGE))
return -EINVAL;
if ((flags & RENAME_WHITEOUT) && !capable(CAP_MKNOD))
return -EPERM;
if (flags & RENAME_EXCHANGE)
target_flags = 0;
retry:
from = filename_parentat(olddfd, getname(oldname), lookup_flags,
&old_path, &old_last, &old_type);
if (IS_ERR(from)) {
error = PTR_ERR(from);
goto exit;
}
to = filename_parentat(newdfd, getname(newname), lookup_flags,
&new_path, &new_last, &new_type);
if (IS_ERR(to)) {
error = PTR_ERR(to);
goto exit1;
}
error = -EXDEV;
if (old_path.mnt != new_path.mnt)
goto exit2;
error = -EBUSY;
if (old_type != LAST_NORM)
goto exit2;
if (flags & RENAME_NOREPLACE)
error = -EEXIST;
if (new_type != LAST_NORM)
goto exit2;
error = mnt_want_write(old_path.mnt);
if (error)
goto exit2;
retry_deleg:
trap = lock_rename(new_path.dentry, old_path.dentry);
old_dentry = __lookup_hash(&old_last, old_path.dentry, lookup_flags);
error = PTR_ERR(old_dentry);
if (IS_ERR(old_dentry))
goto exit3;
/* source must exist */
error = -ENOENT;
if (d_is_negative(old_dentry))
goto exit4;
new_dentry = __lookup_hash(&new_last, new_path.dentry, lookup_flags | target_flags);
error = PTR_ERR(new_dentry);
if (IS_ERR(new_dentry))
goto exit4;
error = -EEXIST;
if ((flags & RENAME_NOREPLACE) && d_is_positive(new_dentry))
goto exit5;
if (flags & RENAME_EXCHANGE) {
error = -ENOENT;
if (d_is_negative(new_dentry))
goto exit5;
if (!d_is_dir(new_dentry)) {
error = -ENOTDIR;
if (new_last.name[new_last.len])
goto exit5;
}
}
/* unless the source is a directory, trailing slashes give -ENOTDIR */
if (!d_is_dir(old_dentry)) {
error = -ENOTDIR;
if (old_last.name[old_last.len])
goto exit5;
if (!(flags & RENAME_EXCHANGE) && new_last.name[new_last.len])
goto exit5;
}
/* source should not be ancestor of target */
error = -EINVAL;
if (old_dentry == trap)
goto exit5;
/* target should not be an ancestor of source */
if (!(flags & RENAME_EXCHANGE))
error = -ENOTEMPTY;
if (new_dentry == trap)
goto exit5;
error = security_path_rename(&old_path, old_dentry,
&new_path, new_dentry, flags);
if (error)
goto exit5;
error = vfs_rename(old_path.dentry->d_inode, old_dentry,
new_path.dentry->d_inode, new_dentry,
&delegated_inode, flags);
exit5:
dput(new_dentry);
exit4:
dput(old_dentry);
exit3:
unlock_rename(new_path.dentry, old_path.dentry);
if (delegated_inode) {
error = break_deleg_wait(&delegated_inode);
if (!error)
goto retry_deleg;
}
mnt_drop_write(old_path.mnt);
exit2:
if (retry_estale(error, lookup_flags))
should_retry = true;
path_put(&new_path);
putname(to);
exit1:
path_put(&old_path);
putname(from);
if (should_retry) {
should_retry = false;
lookup_flags |= LOOKUP_REVAL;
goto retry;
}
exit:
return error;
}
SYSCALL_DEFINE5(renameat2, int, olddfd, const char __user *, oldname,
int, newdfd, const char __user *, newname, unsigned int, flags)
{
return do_renameat2(olddfd, oldname, newdfd, newname, flags);
}
SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
int, newdfd, const char __user *, newname)
{
return do_renameat2(olddfd, oldname, newdfd, newname, 0);
}
SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newname)
{
return do_renameat2(AT_FDCWD, oldname, AT_FDCWD, newname, 0);
}
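/*
 * Minimal userspace sketch of the RENAME_NOREPLACE path of do_renameat2()
 * above, which fails with -EEXIST instead of silently replacing a
 * positive target. Invoked via syscall(2) since older libcs lack a
 * renameat2() wrapper; exact header choice may vary by libc version, and
 * the file names are hypothetical. Kept under #if 0.
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>        /* AT_FDCWD */
#include <linux/fs.h>     /* RENAME_NOREPLACE, RENAME_EXCHANGE */
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
    /* Move "old" to "new" only if "new" does not already exist. */
    if (syscall(SYS_renameat2, AT_FDCWD, "old", AT_FDCWD, "new",
                RENAME_NOREPLACE) != 0) {
        perror("renameat2");    /* EEXIST if "new" was present */
        return 1;
    }
    return 0;
}
#endif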
int vfs_whiteout(struct inode *dir, struct dentry *dentry)
{
int error = may_create(dir, dentry);
if (error)
return error;
if (!dir->i_op->mknod)
return -EPERM;
return dir->i_op->mknod(dir, dentry,
S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV);
}
EXPORT_SYMBOL(vfs_whiteout);
int readlink_copy(char __user *buffer, int buflen, const char *link)
{
int len = PTR_ERR(link);
if (IS_ERR(link))
goto out;
len = strlen(link);
if (len > (unsigned) buflen)
len = buflen;
if (copy_to_user(buffer, link, len))
len = -EFAULT;
out:
return len;
}
/**
* vfs_readlink - copy symlink body into userspace buffer
* @dentry: dentry on which to get symbolic link
* @buffer: user memory pointer
* @buflen: size of buffer
*
* Does not touch atime. That's up to the caller if necessary.
*
* Does not call security hook.
*/
int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen)
{
struct inode *inode = d_inode(dentry);
DEFINE_DELAYED_CALL(done);
const char *link;
int res;
if (unlikely(!(inode->i_opflags & IOP_DEFAULT_READLINK))) {
if (unlikely(inode->i_op->readlink))
return inode->i_op->readlink(dentry, buffer, buflen);
if (!d_is_symlink(dentry))
return -EINVAL;
spin_lock(&inode->i_lock);
inode->i_opflags |= IOP_DEFAULT_READLINK;
spin_unlock(&inode->i_lock);
}
link = READ_ONCE(inode->i_link);
if (!link) {
link = inode->i_op->get_link(dentry, inode, &done);
if (IS_ERR(link))
return PTR_ERR(link);
}
res = readlink_copy(buffer, buflen, link);
do_delayed_call(&done);
return res;
}
EXPORT_SYMBOL(vfs_readlink);
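/*
 * Minimal userspace sketch of reading a symlink body through the
 * vfs_readlink()/readlink_copy() path above, kept under #if 0. Note that
 * readlink_copy() truncates silently at buflen and never NUL-terminates,
 * so the caller must reserve a byte and terminate. "link" is a
 * hypothetical path.
 */
#if 0
#include <fcntl.h>    /* AT_FDCWD */
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    char buf[256];
    ssize_t n = readlinkat(AT_FDCWD, "link", buf, sizeof(buf) - 1);

    if (n < 0) {
        perror("readlinkat");
        return 1;
    }
    buf[n] = '\0';    /* the kernel did not terminate the string */
    printf("link -> %s\n", buf);
    return 0;
}
#endif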
/**
* vfs_get_link - get symlink body
* @dentry: dentry on which to get symbolic link
* @done: caller needs to free returned data with this
*
* Calls security hook and i_op->get_link() on the supplied inode.
*
* It does not touch atime. That's up to the caller if necessary.
*
* Does not work on "special" symlinks like /proc/$$/fd/N
*/
const char *vfs_get_link(struct dentry *dentry, struct delayed_call *done)
{
const char *res = ERR_PTR(-EINVAL);
struct inode *inode = d_inode(dentry);
if (d_is_symlink(dentry)) {
res = ERR_PTR(security_inode_readlink(dentry));
if (!res)
res = inode->i_op->get_link(dentry, inode, done);
}
return res;
}
EXPORT_SYMBOL(vfs_get_link);
/* get the link contents into pagecache */
const char *page_get_link(struct dentry *dentry, struct inode *inode,
struct delayed_call *callback)
{
char *kaddr;
struct page *page;
struct address_space *mapping = inode->i_mapping;
if (!dentry) {
page = find_get_page(mapping, 0);
if (!page)
return ERR_PTR(-ECHILD);
if (!PageUptodate(page)) {
put_page(page);
return ERR_PTR(-ECHILD);
}
} else {
page = read_mapping_page(mapping, 0, NULL);
if (IS_ERR(page))
return (char*)page;
}
set_delayed_call(callback, page_put_link, page);
BUG_ON(mapping_gfp_mask(mapping) & __GFP_HIGHMEM);
kaddr = page_address(page);
nd_terminate_link(kaddr, inode->i_size, PAGE_SIZE - 1);
return kaddr;
}
EXPORT_SYMBOL(page_get_link);
void page_put_link(void *arg)
{
put_page(arg);
}
EXPORT_SYMBOL(page_put_link);
int page_readlink(struct dentry *dentry, char __user *buffer, int buflen)
{
DEFINE_DELAYED_CALL(done);
int res = readlink_copy(buffer, buflen,
page_get_link(dentry, d_inode(dentry),
&done));
do_delayed_call(&done);
return res;
}
EXPORT_SYMBOL(page_readlink);
/*
* The nofs argument instructs pagecache_write_begin to pass AOP_FLAG_NOFS
*/
int __page_symlink(struct inode *inode, const char *symname, int len, int nofs)
{
struct address_space *mapping = inode->i_mapping;
struct page *page;
void *fsdata;
int err;
unsigned int flags = 0;
if (nofs)
flags |= AOP_FLAG_NOFS;
retry:
err = pagecache_write_begin(NULL, mapping, 0, len-1,
flags, &page, &fsdata);
if (err)
goto fail;
memcpy(page_address(page), symname, len-1);
err = pagecache_write_end(NULL, mapping, 0, len-1, len-1,
page, fsdata);
if (err < 0)
goto fail;
if (err < len-1)
goto retry;
mark_inode_dirty(inode);
return 0;
fail:
return err;
}
EXPORT_SYMBOL(__page_symlink);
int page_symlink(struct inode *inode, const char *symname, int len)
{
return __page_symlink(inode, symname, len,
!mapping_gfp_constraint(inode->i_mapping, __GFP_FS));
}
EXPORT_SYMBOL(page_symlink);
const struct inode_operations page_symlink_inode_operations = {
.get_link = page_get_link,
};
EXPORT_SYMBOL(page_symlink_inode_operations);
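/*
 * Hypothetical kernel-side sketch, kept under #if 0: how a simple
 * pagecache-backed filesystem typically wires the helpers above when
 * creating a symlink, in the style of ext2-like filesystems. The
 * foofs_* helpers are made-up stand-ins, not a real API.
 */
#if 0
static int foofs_symlink(struct inode *dir, struct dentry *dentry,
                         const char *symname)
{
    struct inode *inode = foofs_new_inode(dir, S_IFLNK | 0777);
    int err;

    if (IS_ERR(inode))
        return PTR_ERR(inode);

    /* Keep the pagecache in lowmem so page_get_link()'s
     * BUG_ON(__GFP_HIGHMEM) above is satisfied. */
    inode_nohighmem(inode);
    inode->i_op = &page_symlink_inode_operations;

    /* The length conventionally includes the trailing NUL. */
    err = page_symlink(inode, symname, strlen(symname) + 1);
    if (err)
        return err;
    return foofs_add_link(dentry, inode);    /* hypothetical helper */
}
#endif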
| ./CrossVul/dataset_final_sorted/CWE-416/c/bad_4655_0 |
crossvul-cpp_data_good_5021_4 | /*
* common UDP/RAW code
* Linux INET6 implementation
*
* Authors:
* Pedro Roque <roque@di.fc.ul.pt>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in6.h>
#include <linux/ipv6.h>
#include <linux/route.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <net/ipv6.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <net/transp_v6.h>
#include <net/ip6_route.h>
#include <net/tcp_states.h>
#include <net/dsfield.h>
#include <linux/errqueue.h>
#include <asm/uaccess.h>
static bool ipv6_mapped_addr_any(const struct in6_addr *a)
{
return ipv6_addr_v4mapped(a) && (a->s6_addr32[3] == 0);
}
static int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
struct inet_sock *inet = inet_sk(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
struct in6_addr *daddr, *final_p, final;
struct dst_entry *dst;
struct flowi6 fl6;
struct ip6_flowlabel *flowlabel = NULL;
struct ipv6_txoptions *opt;
int addr_type;
int err;
if (usin->sin6_family == AF_INET) {
if (__ipv6_only_sock(sk))
return -EAFNOSUPPORT;
err = __ip4_datagram_connect(sk, uaddr, addr_len);
goto ipv4_connected;
}
if (addr_len < SIN6_LEN_RFC2133)
return -EINVAL;
if (usin->sin6_family != AF_INET6)
return -EAFNOSUPPORT;
memset(&fl6, 0, sizeof(fl6));
if (np->sndflow) {
fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
if (!flowlabel)
return -EINVAL;
}
}
addr_type = ipv6_addr_type(&usin->sin6_addr);
if (addr_type == IPV6_ADDR_ANY) {
/*
* connect to self
*/
usin->sin6_addr.s6_addr[15] = 0x01;
}
daddr = &usin->sin6_addr;
if (addr_type == IPV6_ADDR_MAPPED) {
struct sockaddr_in sin;
if (__ipv6_only_sock(sk)) {
err = -ENETUNREACH;
goto out;
}
sin.sin_family = AF_INET;
sin.sin_addr.s_addr = daddr->s6_addr32[3];
sin.sin_port = usin->sin6_port;
err = __ip4_datagram_connect(sk,
(struct sockaddr *) &sin,
sizeof(sin));
ipv4_connected:
if (err)
goto out;
ipv6_addr_set_v4mapped(inet->inet_daddr, &sk->sk_v6_daddr);
if (ipv6_addr_any(&np->saddr) ||
ipv6_mapped_addr_any(&np->saddr))
ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
if (ipv6_addr_any(&sk->sk_v6_rcv_saddr) ||
ipv6_mapped_addr_any(&sk->sk_v6_rcv_saddr)) {
ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
&sk->sk_v6_rcv_saddr);
if (sk->sk_prot->rehash)
sk->sk_prot->rehash(sk);
}
goto out;
}
if (__ipv6_addr_needs_scope_id(addr_type)) {
if (addr_len >= sizeof(struct sockaddr_in6) &&
usin->sin6_scope_id) {
if (sk->sk_bound_dev_if &&
sk->sk_bound_dev_if != usin->sin6_scope_id) {
err = -EINVAL;
goto out;
}
sk->sk_bound_dev_if = usin->sin6_scope_id;
}
if (!sk->sk_bound_dev_if && (addr_type & IPV6_ADDR_MULTICAST))
sk->sk_bound_dev_if = np->mcast_oif;
/* Connect to link-local address requires an interface */
if (!sk->sk_bound_dev_if) {
err = -EINVAL;
goto out;
}
}
sk->sk_v6_daddr = *daddr;
np->flow_label = fl6.flowlabel;
inet->inet_dport = usin->sin6_port;
/*
* Check for a route to the destination and obtain the
* destination cache for it.
*/
fl6.flowi6_proto = sk->sk_protocol;
fl6.daddr = sk->sk_v6_daddr;
fl6.saddr = np->saddr;
fl6.flowi6_oif = sk->sk_bound_dev_if;
fl6.flowi6_mark = sk->sk_mark;
fl6.fl6_dport = inet->inet_dport;
fl6.fl6_sport = inet->inet_sport;
if (!fl6.flowi6_oif && (addr_type&IPV6_ADDR_MULTICAST))
fl6.flowi6_oif = np->mcast_oif;
security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
rcu_read_lock();
opt = flowlabel ? flowlabel->opt : rcu_dereference(np->opt);
final_p = fl6_update_dst(&fl6, opt, &final);
rcu_read_unlock();
dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
err = 0;
if (IS_ERR(dst)) {
err = PTR_ERR(dst);
goto out;
}
/* source address lookup done in ip6_dst_lookup */
if (ipv6_addr_any(&np->saddr))
np->saddr = fl6.saddr;
if (ipv6_addr_any(&sk->sk_v6_rcv_saddr)) {
sk->sk_v6_rcv_saddr = fl6.saddr;
inet->inet_rcv_saddr = LOOPBACK4_IPV6;
if (sk->sk_prot->rehash)
sk->sk_prot->rehash(sk);
}
ip6_dst_store(sk, dst,
ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr) ?
&sk->sk_v6_daddr : NULL,
#ifdef CONFIG_IPV6_SUBTREES
ipv6_addr_equal(&fl6.saddr, &np->saddr) ?
&np->saddr :
#endif
NULL);
sk->sk_state = TCP_ESTABLISHED;
sk_set_txhash(sk);
out:
fl6_sock_release(flowlabel);
return err;
}
int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
int res;
lock_sock(sk);
res = __ip6_datagram_connect(sk, uaddr, addr_len);
release_sock(sk);
return res;
}
EXPORT_SYMBOL_GPL(ip6_datagram_connect);
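/*
 * Minimal userspace sketch, kept under #if 0: connecting a UDPv6 socket,
 * which lands in __ip6_datagram_connect() above -- the kernel resolves a
 * route, fixes the source address and moves the socket to
 * TCP_ESTABLISHED so later send(2)/recv(2) need no address. The address
 * and port are hypothetical documentation values.
 */
#if 0
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
    struct sockaddr_in6 dst;
    int fd = socket(AF_INET6, SOCK_DGRAM, 0);

    if (fd < 0) {
        perror("socket");
        return 1;
    }
    memset(&dst, 0, sizeof(dst));
    dst.sin6_family = AF_INET6;
    dst.sin6_port = htons(4242);
    inet_pton(AF_INET6, "2001:db8::1", &dst.sin6_addr);

    if (connect(fd, (struct sockaddr *)&dst, sizeof(dst)) != 0) {
        perror("connect");
        close(fd);
        return 1;
    }
    close(fd);
    return 0;
}
#endif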
int ip6_datagram_connect_v6_only(struct sock *sk, struct sockaddr *uaddr,
int addr_len)
{
DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, uaddr);
if (sin6->sin6_family != AF_INET6)
return -EAFNOSUPPORT;
return ip6_datagram_connect(sk, uaddr, addr_len);
}
EXPORT_SYMBOL_GPL(ip6_datagram_connect_v6_only);
void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
__be16 port, u32 info, u8 *payload)
{
struct ipv6_pinfo *np = inet6_sk(sk);
struct icmp6hdr *icmph = icmp6_hdr(skb);
struct sock_exterr_skb *serr;
if (!np->recverr)
return;
skb = skb_clone(skb, GFP_ATOMIC);
if (!skb)
return;
skb->protocol = htons(ETH_P_IPV6);
serr = SKB_EXT_ERR(skb);
serr->ee.ee_errno = err;
serr->ee.ee_origin = SO_EE_ORIGIN_ICMP6;
serr->ee.ee_type = icmph->icmp6_type;
serr->ee.ee_code = icmph->icmp6_code;
serr->ee.ee_pad = 0;
serr->ee.ee_info = info;
serr->ee.ee_data = 0;
serr->addr_offset = (u8 *)&(((struct ipv6hdr *)(icmph + 1))->daddr) -
skb_network_header(skb);
serr->port = port;
__skb_pull(skb, payload - skb->data);
skb_reset_transport_header(skb);
if (sock_queue_err_skb(sk, skb))
kfree_skb(skb);
}
void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info)
{
const struct ipv6_pinfo *np = inet6_sk(sk);
struct sock_exterr_skb *serr;
struct ipv6hdr *iph;
struct sk_buff *skb;
if (!np->recverr)
return;
skb = alloc_skb(sizeof(struct ipv6hdr), GFP_ATOMIC);
if (!skb)
return;
skb->protocol = htons(ETH_P_IPV6);
skb_put(skb, sizeof(struct ipv6hdr));
skb_reset_network_header(skb);
iph = ipv6_hdr(skb);
iph->daddr = fl6->daddr;
serr = SKB_EXT_ERR(skb);
serr->ee.ee_errno = err;
serr->ee.ee_origin = SO_EE_ORIGIN_LOCAL;
serr->ee.ee_type = 0;
serr->ee.ee_code = 0;
serr->ee.ee_pad = 0;
serr->ee.ee_info = info;
serr->ee.ee_data = 0;
serr->addr_offset = (u8 *)&iph->daddr - skb_network_header(skb);
serr->port = fl6->fl6_dport;
__skb_pull(skb, skb_tail_pointer(skb) - skb->data);
skb_reset_transport_header(skb);
if (sock_queue_err_skb(sk, skb))
kfree_skb(skb);
}
void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu)
{
struct ipv6_pinfo *np = inet6_sk(sk);
struct ipv6hdr *iph;
struct sk_buff *skb;
struct ip6_mtuinfo *mtu_info;
if (!np->rxopt.bits.rxpmtu)
return;
skb = alloc_skb(sizeof(struct ipv6hdr), GFP_ATOMIC);
if (!skb)
return;
skb_put(skb, sizeof(struct ipv6hdr));
skb_reset_network_header(skb);
iph = ipv6_hdr(skb);
iph->daddr = fl6->daddr;
mtu_info = IP6CBMTU(skb);
mtu_info->ip6m_mtu = mtu;
mtu_info->ip6m_addr.sin6_family = AF_INET6;
mtu_info->ip6m_addr.sin6_port = 0;
mtu_info->ip6m_addr.sin6_flowinfo = 0;
mtu_info->ip6m_addr.sin6_scope_id = fl6->flowi6_oif;
mtu_info->ip6m_addr.sin6_addr = ipv6_hdr(skb)->daddr;
__skb_pull(skb, skb_tail_pointer(skb) - skb->data);
skb_reset_transport_header(skb);
skb = xchg(&np->rxpmtu, skb);
kfree_skb(skb);
}
/* For some errors we have valid addr_offset even with zero payload and
* zero port. Also, addr_offset should be supported if port is set.
*/
static inline bool ipv6_datagram_support_addr(struct sock_exterr_skb *serr)
{
return serr->ee.ee_origin == SO_EE_ORIGIN_ICMP6 ||
serr->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
serr->ee.ee_origin == SO_EE_ORIGIN_LOCAL || serr->port;
}
/* IPv6 supports cmsg on all origins aside from SO_EE_ORIGIN_LOCAL.
*
* At one point, excluding local errors was a quick test to identify icmp/icmp6
* errors. This is no longer true, but the test remained, so the v6 stack,
* unlike v4, also honors cmsg requests on all wifi and timestamp errors.
*
* Timestamp code paths do not initialize the fields expected by cmsg:
* the PKTINFO fields in skb->cb[]. Fill those in here.
*/
static bool ip6_datagram_support_cmsg(struct sk_buff *skb,
struct sock_exterr_skb *serr)
{
if (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
serr->ee.ee_origin == SO_EE_ORIGIN_ICMP6)
return true;
if (serr->ee.ee_origin == SO_EE_ORIGIN_LOCAL)
return false;
if (!skb->dev)
return false;
if (skb->protocol == htons(ETH_P_IPV6))
IP6CB(skb)->iif = skb->dev->ifindex;
else
PKTINFO_SKB_CB(skb)->ipi_ifindex = skb->dev->ifindex;
return true;
}
/*
* Handle MSG_ERRQUEUE
*/
int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
{
struct ipv6_pinfo *np = inet6_sk(sk);
struct sock_exterr_skb *serr;
struct sk_buff *skb;
DECLARE_SOCKADDR(struct sockaddr_in6 *, sin, msg->msg_name);
struct {
struct sock_extended_err ee;
struct sockaddr_in6 offender;
} errhdr;
int err;
int copied;
err = -EAGAIN;
skb = sock_dequeue_err_skb(sk);
if (!skb)
goto out;
copied = skb->len;
if (copied > len) {
msg->msg_flags |= MSG_TRUNC;
copied = len;
}
err = skb_copy_datagram_msg(skb, 0, msg, copied);
if (err)
goto out_free_skb;
sock_recv_timestamp(msg, sk, skb);
serr = SKB_EXT_ERR(skb);
if (sin && ipv6_datagram_support_addr(serr)) {
const unsigned char *nh = skb_network_header(skb);
sin->sin6_family = AF_INET6;
sin->sin6_flowinfo = 0;
sin->sin6_port = serr->port;
if (skb->protocol == htons(ETH_P_IPV6)) {
const struct ipv6hdr *ip6h = container_of((struct in6_addr *)(nh + serr->addr_offset),
struct ipv6hdr, daddr);
sin->sin6_addr = ip6h->daddr;
if (np->sndflow)
sin->sin6_flowinfo = ip6_flowinfo(ip6h);
sin->sin6_scope_id =
ipv6_iface_scope_id(&sin->sin6_addr,
IP6CB(skb)->iif);
} else {
ipv6_addr_set_v4mapped(*(__be32 *)(nh + serr->addr_offset),
&sin->sin6_addr);
sin->sin6_scope_id = 0;
}
*addr_len = sizeof(*sin);
}
memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err));
sin = &errhdr.offender;
memset(sin, 0, sizeof(*sin));
if (ip6_datagram_support_cmsg(skb, serr)) {
sin->sin6_family = AF_INET6;
if (np->rxopt.all)
ip6_datagram_recv_common_ctl(sk, msg, skb);
if (skb->protocol == htons(ETH_P_IPV6)) {
sin->sin6_addr = ipv6_hdr(skb)->saddr;
if (np->rxopt.all)
ip6_datagram_recv_specific_ctl(sk, msg, skb);
sin->sin6_scope_id =
ipv6_iface_scope_id(&sin->sin6_addr,
IP6CB(skb)->iif);
} else {
ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr,
&sin->sin6_addr);
if (inet_sk(sk)->cmsg_flags)
ip_cmsg_recv(msg, skb);
}
}
put_cmsg(msg, SOL_IPV6, IPV6_RECVERR, sizeof(errhdr), &errhdr);
/* Now we could try to dump the offending packet's options */
msg->msg_flags |= MSG_ERRQUEUE;
err = copied;
out_free_skb:
kfree_skb(skb);
out:
return err;
}
EXPORT_SYMBOL_GPL(ipv6_recv_error);
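/*
 * Minimal userspace sketch, kept under #if 0: draining one entry from
 * the error queue serviced by ipv6_recv_error() above. The socket must
 * have IPV6_RECVERR enabled first (the np->recverr gate), e.g. via
 * setsockopt(fd, IPPROTO_IPV6, IPV6_RECVERR, &one, sizeof(one)).
 */
#if 0
#include <errno.h>
#include <linux/errqueue.h>    /* struct sock_extended_err */
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

static int drain_one_error(int fd)
{
    char cbuf[512], payload[512];
    struct sockaddr_in6 addr;
    struct iovec iov = { payload, sizeof(payload) };
    struct msghdr msg;
    struct cmsghdr *cm;

    memset(&msg, 0, sizeof(msg));
    msg.msg_name = &addr;          /* filled from serr->addr_offset */
    msg.msg_namelen = sizeof(addr);
    msg.msg_iov = &iov;
    msg.msg_iovlen = 1;
    msg.msg_control = cbuf;
    msg.msg_controllen = sizeof(cbuf);

    if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
        return -errno;             /* EAGAIN when the queue is empty */

    for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
        if (cm->cmsg_level == SOL_IPV6 &&
            cm->cmsg_type == IPV6_RECVERR) {
            struct sock_extended_err *ee =
                (struct sock_extended_err *)CMSG_DATA(cm);

            printf("errno=%u origin=%u type=%u code=%u\n",
                   ee->ee_errno, ee->ee_origin,
                   ee->ee_type, ee->ee_code);
        }
    }
    return 0;
}
#endif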
/*
* Handle IPV6_RECVPATHMTU
*/
int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len,
int *addr_len)
{
struct ipv6_pinfo *np = inet6_sk(sk);
struct sk_buff *skb;
struct ip6_mtuinfo mtu_info;
DECLARE_SOCKADDR(struct sockaddr_in6 *, sin, msg->msg_name);
int err;
int copied;
err = -EAGAIN;
skb = xchg(&np->rxpmtu, NULL);
if (!skb)
goto out;
copied = skb->len;
if (copied > len) {
msg->msg_flags |= MSG_TRUNC;
copied = len;
}
err = skb_copy_datagram_msg(skb, 0, msg, copied);
if (err)
goto out_free_skb;
sock_recv_timestamp(msg, sk, skb);
memcpy(&mtu_info, IP6CBMTU(skb), sizeof(mtu_info));
if (sin) {
sin->sin6_family = AF_INET6;
sin->sin6_flowinfo = 0;
sin->sin6_port = 0;
sin->sin6_scope_id = mtu_info.ip6m_addr.sin6_scope_id;
sin->sin6_addr = mtu_info.ip6m_addr.sin6_addr;
*addr_len = sizeof(*sin);
}
put_cmsg(msg, SOL_IPV6, IPV6_PATHMTU, sizeof(mtu_info), &mtu_info);
err = copied;
out_free_skb:
kfree_skb(skb);
out:
return err;
}
void ip6_datagram_recv_common_ctl(struct sock *sk, struct msghdr *msg,
struct sk_buff *skb)
{
struct ipv6_pinfo *np = inet6_sk(sk);
bool is_ipv6 = skb->protocol == htons(ETH_P_IPV6);
if (np->rxopt.bits.rxinfo) {
struct in6_pktinfo src_info;
if (is_ipv6) {
src_info.ipi6_ifindex = IP6CB(skb)->iif;
src_info.ipi6_addr = ipv6_hdr(skb)->daddr;
} else {
src_info.ipi6_ifindex =
PKTINFO_SKB_CB(skb)->ipi_ifindex;
ipv6_addr_set_v4mapped(ip_hdr(skb)->daddr,
&src_info.ipi6_addr);
}
if (src_info.ipi6_ifindex >= 0)
put_cmsg(msg, SOL_IPV6, IPV6_PKTINFO,
sizeof(src_info), &src_info);
}
}
void ip6_datagram_recv_specific_ctl(struct sock *sk, struct msghdr *msg,
struct sk_buff *skb)
{
struct ipv6_pinfo *np = inet6_sk(sk);
struct inet6_skb_parm *opt = IP6CB(skb);
unsigned char *nh = skb_network_header(skb);
if (np->rxopt.bits.rxhlim) {
int hlim = ipv6_hdr(skb)->hop_limit;
put_cmsg(msg, SOL_IPV6, IPV6_HOPLIMIT, sizeof(hlim), &hlim);
}
if (np->rxopt.bits.rxtclass) {
int tclass = ipv6_get_dsfield(ipv6_hdr(skb));
put_cmsg(msg, SOL_IPV6, IPV6_TCLASS, sizeof(tclass), &tclass);
}
if (np->rxopt.bits.rxflow) {
__be32 flowinfo = ip6_flowinfo((struct ipv6hdr *)nh);
if (flowinfo)
put_cmsg(msg, SOL_IPV6, IPV6_FLOWINFO, sizeof(flowinfo), &flowinfo);
}
/* HbH is allowed only once */
if (np->rxopt.bits.hopopts && (opt->flags & IP6SKB_HOPBYHOP)) {
u8 *ptr = nh + sizeof(struct ipv6hdr);
put_cmsg(msg, SOL_IPV6, IPV6_HOPOPTS, (ptr[1]+1)<<3, ptr);
}
if (opt->lastopt &&
(np->rxopt.bits.dstopts || np->rxopt.bits.srcrt)) {
/*
* Silly enough, but we need to reparse in order to
* report extension headers (except for HbH)
* in the order they appear.
*
* Also note that IPV6_RECVRTHDRDSTOPTS is NOT
* (and WILL NOT be) defined because
* IPV6_RECVDSTOPTS is more generic. --yoshfuji
*/
unsigned int off = sizeof(struct ipv6hdr);
u8 nexthdr = ipv6_hdr(skb)->nexthdr;
while (off <= opt->lastopt) {
unsigned int len;
u8 *ptr = nh + off;
switch (nexthdr) {
case IPPROTO_DSTOPTS:
nexthdr = ptr[0];
len = (ptr[1] + 1) << 3;
if (np->rxopt.bits.dstopts)
put_cmsg(msg, SOL_IPV6, IPV6_DSTOPTS, len, ptr);
break;
case IPPROTO_ROUTING:
nexthdr = ptr[0];
len = (ptr[1] + 1) << 3;
if (np->rxopt.bits.srcrt)
put_cmsg(msg, SOL_IPV6, IPV6_RTHDR, len, ptr);
break;
case IPPROTO_AH:
nexthdr = ptr[0];
len = (ptr[1] + 2) << 2;
break;
default:
nexthdr = ptr[0];
len = (ptr[1] + 1) << 3;
break;
}
off += len;
}
}
/* socket options in old style */
if (np->rxopt.bits.rxoinfo) {
struct in6_pktinfo src_info;
src_info.ipi6_ifindex = opt->iif;
src_info.ipi6_addr = ipv6_hdr(skb)->daddr;
put_cmsg(msg, SOL_IPV6, IPV6_2292PKTINFO, sizeof(src_info), &src_info);
}
if (np->rxopt.bits.rxohlim) {
int hlim = ipv6_hdr(skb)->hop_limit;
put_cmsg(msg, SOL_IPV6, IPV6_2292HOPLIMIT, sizeof(hlim), &hlim);
}
if (np->rxopt.bits.ohopopts && (opt->flags & IP6SKB_HOPBYHOP)) {
u8 *ptr = nh + sizeof(struct ipv6hdr);
put_cmsg(msg, SOL_IPV6, IPV6_2292HOPOPTS, (ptr[1]+1)<<3, ptr);
}
if (np->rxopt.bits.odstopts && opt->dst0) {
u8 *ptr = nh + opt->dst0;
put_cmsg(msg, SOL_IPV6, IPV6_2292DSTOPTS, (ptr[1]+1)<<3, ptr);
}
if (np->rxopt.bits.osrcrt && opt->srcrt) {
struct ipv6_rt_hdr *rthdr = (struct ipv6_rt_hdr *)(nh + opt->srcrt);
put_cmsg(msg, SOL_IPV6, IPV6_2292RTHDR, (rthdr->hdrlen+1) << 3, rthdr);
}
if (np->rxopt.bits.odstopts && opt->dst1) {
u8 *ptr = nh + opt->dst1;
put_cmsg(msg, SOL_IPV6, IPV6_2292DSTOPTS, (ptr[1]+1)<<3, ptr);
}
if (np->rxopt.bits.rxorigdstaddr) {
struct sockaddr_in6 sin6;
__be16 *ports = (__be16 *) skb_transport_header(skb);
if (skb_transport_offset(skb) + 4 <= skb->len) {
/* All current transport protocols have the port numbers in the
* first four bytes of the transport header and this function is
* written with this assumption in mind.
*/
sin6.sin6_family = AF_INET6;
sin6.sin6_addr = ipv6_hdr(skb)->daddr;
sin6.sin6_port = ports[1];
sin6.sin6_flowinfo = 0;
sin6.sin6_scope_id =
ipv6_iface_scope_id(&ipv6_hdr(skb)->daddr,
opt->iif);
put_cmsg(msg, SOL_IPV6, IPV6_ORIGDSTADDR, sizeof(sin6), &sin6);
}
}
}
void ip6_datagram_recv_ctl(struct sock *sk, struct msghdr *msg,
struct sk_buff *skb)
{
ip6_datagram_recv_common_ctl(sk, msg, skb);
ip6_datagram_recv_specific_ctl(sk, msg, skb);
}
EXPORT_SYMBOL_GPL(ip6_datagram_recv_ctl);
int ip6_datagram_send_ctl(struct net *net, struct sock *sk,
struct msghdr *msg, struct flowi6 *fl6,
struct ipv6_txoptions *opt,
int *hlimit, int *tclass, int *dontfrag)
{
struct in6_pktinfo *src_info;
struct cmsghdr *cmsg;
struct ipv6_rt_hdr *rthdr;
struct ipv6_opt_hdr *hdr;
int len;
int err = 0;
for_each_cmsghdr(cmsg, msg) {
int addr_type;
if (!CMSG_OK(msg, cmsg)) {
err = -EINVAL;
goto exit_f;
}
if (cmsg->cmsg_level != SOL_IPV6)
continue;
switch (cmsg->cmsg_type) {
case IPV6_PKTINFO:
case IPV6_2292PKTINFO:
{
struct net_device *dev = NULL;
if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct in6_pktinfo))) {
err = -EINVAL;
goto exit_f;
}
src_info = (struct in6_pktinfo *)CMSG_DATA(cmsg);
if (src_info->ipi6_ifindex) {
if (fl6->flowi6_oif &&
src_info->ipi6_ifindex != fl6->flowi6_oif)
return -EINVAL;
fl6->flowi6_oif = src_info->ipi6_ifindex;
}
addr_type = __ipv6_addr_type(&src_info->ipi6_addr);
rcu_read_lock();
if (fl6->flowi6_oif) {
dev = dev_get_by_index_rcu(net, fl6->flowi6_oif);
if (!dev) {
rcu_read_unlock();
return -ENODEV;
}
} else if (addr_type & IPV6_ADDR_LINKLOCAL) {
rcu_read_unlock();
return -EINVAL;
}
if (addr_type != IPV6_ADDR_ANY) {
int strict = __ipv6_addr_src_scope(addr_type) <= IPV6_ADDR_SCOPE_LINKLOCAL;
if (!(inet_sk(sk)->freebind || inet_sk(sk)->transparent) &&
!ipv6_chk_addr(net, &src_info->ipi6_addr,
strict ? dev : NULL, 0) &&
!ipv6_chk_acast_addr_src(net, dev,
&src_info->ipi6_addr))
err = -EINVAL;
else
fl6->saddr = src_info->ipi6_addr;
}
rcu_read_unlock();
if (err)
goto exit_f;
break;
}
case IPV6_FLOWINFO:
if (cmsg->cmsg_len < CMSG_LEN(4)) {
err = -EINVAL;
goto exit_f;
}
if (fl6->flowlabel&IPV6_FLOWINFO_MASK) {
if ((fl6->flowlabel^*(__be32 *)CMSG_DATA(cmsg))&~IPV6_FLOWINFO_MASK) {
err = -EINVAL;
goto exit_f;
}
}
fl6->flowlabel = IPV6_FLOWINFO_MASK & *(__be32 *)CMSG_DATA(cmsg);
break;
case IPV6_2292HOPOPTS:
case IPV6_HOPOPTS:
if (opt->hopopt || cmsg->cmsg_len < CMSG_LEN(sizeof(struct ipv6_opt_hdr))) {
err = -EINVAL;
goto exit_f;
}
hdr = (struct ipv6_opt_hdr *)CMSG_DATA(cmsg);
len = ((hdr->hdrlen + 1) << 3);
if (cmsg->cmsg_len < CMSG_LEN(len)) {
err = -EINVAL;
goto exit_f;
}
if (!ns_capable(net->user_ns, CAP_NET_RAW)) {
err = -EPERM;
goto exit_f;
}
opt->opt_nflen += len;
opt->hopopt = hdr;
break;
case IPV6_2292DSTOPTS:
if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct ipv6_opt_hdr))) {
err = -EINVAL;
goto exit_f;
}
hdr = (struct ipv6_opt_hdr *)CMSG_DATA(cmsg);
len = ((hdr->hdrlen + 1) << 3);
if (cmsg->cmsg_len < CMSG_LEN(len)) {
err = -EINVAL;
goto exit_f;
}
if (!ns_capable(net->user_ns, CAP_NET_RAW)) {
err = -EPERM;
goto exit_f;
}
if (opt->dst1opt) {
err = -EINVAL;
goto exit_f;
}
opt->opt_flen += len;
opt->dst1opt = hdr;
break;
case IPV6_DSTOPTS:
case IPV6_RTHDRDSTOPTS:
if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct ipv6_opt_hdr))) {
err = -EINVAL;
goto exit_f;
}
hdr = (struct ipv6_opt_hdr *)CMSG_DATA(cmsg);
len = ((hdr->hdrlen + 1) << 3);
if (cmsg->cmsg_len < CMSG_LEN(len)) {
err = -EINVAL;
goto exit_f;
}
if (!ns_capable(net->user_ns, CAP_NET_RAW)) {
err = -EPERM;
goto exit_f;
}
if (cmsg->cmsg_type == IPV6_DSTOPTS) {
opt->opt_flen += len;
opt->dst1opt = hdr;
} else {
opt->opt_nflen += len;
opt->dst0opt = hdr;
}
break;
case IPV6_2292RTHDR:
case IPV6_RTHDR:
if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct ipv6_rt_hdr))) {
err = -EINVAL;
goto exit_f;
}
rthdr = (struct ipv6_rt_hdr *)CMSG_DATA(cmsg);
switch (rthdr->type) {
#if IS_ENABLED(CONFIG_IPV6_MIP6)
case IPV6_SRCRT_TYPE_2:
if (rthdr->hdrlen != 2 ||
rthdr->segments_left != 1) {
err = -EINVAL;
goto exit_f;
}
break;
#endif
default:
err = -EINVAL;
goto exit_f;
}
len = ((rthdr->hdrlen + 1) << 3);
if (cmsg->cmsg_len < CMSG_LEN(len)) {
err = -EINVAL;
goto exit_f;
}
/* segments left must also match */
if ((rthdr->hdrlen >> 1) != rthdr->segments_left) {
err = -EINVAL;
goto exit_f;
}
opt->opt_nflen += len;
opt->srcrt = rthdr;
if (cmsg->cmsg_type == IPV6_2292RTHDR && opt->dst1opt) {
int dsthdrlen = ((opt->dst1opt->hdrlen+1)<<3);
opt->opt_nflen += dsthdrlen;
opt->dst0opt = opt->dst1opt;
opt->dst1opt = NULL;
opt->opt_flen -= dsthdrlen;
}
break;
case IPV6_2292HOPLIMIT:
case IPV6_HOPLIMIT:
if (cmsg->cmsg_len != CMSG_LEN(sizeof(int))) {
err = -EINVAL;
goto exit_f;
}
*hlimit = *(int *)CMSG_DATA(cmsg);
if (*hlimit < -1 || *hlimit > 0xff) {
err = -EINVAL;
goto exit_f;
}
break;
case IPV6_TCLASS:
{
int tc;
err = -EINVAL;
if (cmsg->cmsg_len != CMSG_LEN(sizeof(int)))
goto exit_f;
tc = *(int *)CMSG_DATA(cmsg);
if (tc < -1 || tc > 0xff)
goto exit_f;
err = 0;
*tclass = tc;
break;
}
case IPV6_DONTFRAG:
{
int df;
err = -EINVAL;
if (cmsg->cmsg_len != CMSG_LEN(sizeof(int)))
goto exit_f;
df = *(int *)CMSG_DATA(cmsg);
if (df < 0 || df > 1)
goto exit_f;
err = 0;
*dontfrag = df;
break;
}
default:
net_dbg_ratelimited("invalid cmsg type: %d\n",
cmsg->cmsg_type);
err = -EINVAL;
goto exit_f;
}
}
exit_f:
return err;
}
EXPORT_SYMBOL_GPL(ip6_datagram_send_ctl);
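/*
 * Minimal userspace sketch, kept under #if 0: attaching an IPV6_PKTINFO
 * control message to a single sendmsg(2), which is parsed by the
 * IPV6_PKTINFO case of ip6_datagram_send_ctl() above. glibc exposes
 * struct in6_pktinfo under _GNU_SOURCE.
 */
#if 0
#define _GNU_SOURCE
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>

static ssize_t send_with_pktinfo(int fd, const struct sockaddr_in6 *dst,
                                 const void *buf, size_t len, int ifindex)
{
    char cbuf[CMSG_SPACE(sizeof(struct in6_pktinfo))];
    struct iovec iov = { (void *)buf, len };
    struct msghdr msg;
    struct cmsghdr *cm;
    struct in6_pktinfo *pi;

    memset(&msg, 0, sizeof(msg));
    memset(cbuf, 0, sizeof(cbuf));
    msg.msg_name = (void *)dst;
    msg.msg_namelen = sizeof(*dst);
    msg.msg_iov = &iov;
    msg.msg_iovlen = 1;
    msg.msg_control = cbuf;
    msg.msg_controllen = sizeof(cbuf);

    cm = CMSG_FIRSTHDR(&msg);
    cm->cmsg_level = SOL_IPV6;
    cm->cmsg_type = IPV6_PKTINFO;
    cm->cmsg_len = CMSG_LEN(sizeof(*pi));
    pi = (struct in6_pktinfo *)CMSG_DATA(cm);
    pi->ipi6_ifindex = ifindex;    /* pins flowi6_oif in the kernel */
    /* ipi6_addr stays :: so the kernel still picks the source */

    return sendmsg(fd, &msg, 0);
}
#endif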
void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
__u16 srcp, __u16 destp, int bucket)
{
const struct in6_addr *dest, *src;
dest = &sp->sk_v6_daddr;
src = &sp->sk_v6_rcv_saddr;
seq_printf(seq,
"%5d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
"%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d\n",
bucket,
src->s6_addr32[0], src->s6_addr32[1],
src->s6_addr32[2], src->s6_addr32[3], srcp,
dest->s6_addr32[0], dest->s6_addr32[1],
dest->s6_addr32[2], dest->s6_addr32[3], destp,
sp->sk_state,
sk_wmem_alloc_get(sp),
sk_rmem_alloc_get(sp),
0, 0L, 0,
from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
0,
sock_i_ino(sp),
atomic_read(&sp->sk_refcnt), sp,
atomic_read(&sp->sk_drops));
}
| ./CrossVul/dataset_final_sorted/CWE-416/c/good_5021_4 |
crossvul-cpp_data_bad_2550_0 | /*
* fs/timerfd.c
*
* Copyright (C) 2007 Davide Libenzi <davidel@xmailserver.org>
*
*
* Thanks to Thomas Gleixner for code reviews and useful comments.
*
*/
#include <linux/alarmtimer.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/time.h>
#include <linux/hrtimer.h>
#include <linux/anon_inodes.h>
#include <linux/timerfd.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <linux/rcupdate.h>
struct timerfd_ctx {
union {
struct hrtimer tmr;
struct alarm alarm;
} t;
ktime_t tintv;
ktime_t moffs;
wait_queue_head_t wqh;
u64 ticks;
int clockid;
short unsigned expired;
short unsigned settime_flags; /* to show in fdinfo */
struct rcu_head rcu;
struct list_head clist;
bool might_cancel;
};
static LIST_HEAD(cancel_list);
static DEFINE_SPINLOCK(cancel_lock);
static inline bool isalarm(struct timerfd_ctx *ctx)
{
return ctx->clockid == CLOCK_REALTIME_ALARM ||
ctx->clockid == CLOCK_BOOTTIME_ALARM;
}
/*
* This gets called when the timer event triggers. We set the "expired"
* flag, but we do not re-arm the timer (in case it's necessary,
* tintv != 0) until the timer is accessed.
*/
static void timerfd_triggered(struct timerfd_ctx *ctx)
{
unsigned long flags;
spin_lock_irqsave(&ctx->wqh.lock, flags);
ctx->expired = 1;
ctx->ticks++;
wake_up_locked(&ctx->wqh);
spin_unlock_irqrestore(&ctx->wqh.lock, flags);
}
static enum hrtimer_restart timerfd_tmrproc(struct hrtimer *htmr)
{
struct timerfd_ctx *ctx = container_of(htmr, struct timerfd_ctx,
t.tmr);
timerfd_triggered(ctx);
return HRTIMER_NORESTART;
}
static enum alarmtimer_restart timerfd_alarmproc(struct alarm *alarm,
ktime_t now)
{
struct timerfd_ctx *ctx = container_of(alarm, struct timerfd_ctx,
t.alarm);
timerfd_triggered(ctx);
return ALARMTIMER_NORESTART;
}
/*
* Called when the clock was set to cancel the timers in the cancel
* list. This will wake up processes waiting on these timers. The
* wake-up requires ctx->ticks to be non-zero, therefore we increment
* it before calling wake_up_locked().
*/
void timerfd_clock_was_set(void)
{
ktime_t moffs = ktime_mono_to_real(0);
struct timerfd_ctx *ctx;
unsigned long flags;
rcu_read_lock();
list_for_each_entry_rcu(ctx, &cancel_list, clist) {
if (!ctx->might_cancel)
continue;
spin_lock_irqsave(&ctx->wqh.lock, flags);
if (ctx->moffs != moffs) {
ctx->moffs = KTIME_MAX;
ctx->ticks++;
wake_up_locked(&ctx->wqh);
}
spin_unlock_irqrestore(&ctx->wqh.lock, flags);
}
rcu_read_unlock();
}
static void timerfd_remove_cancel(struct timerfd_ctx *ctx)
{
if (ctx->might_cancel) {
ctx->might_cancel = false;
spin_lock(&cancel_lock);
list_del_rcu(&ctx->clist);
spin_unlock(&cancel_lock);
}
}
static bool timerfd_canceled(struct timerfd_ctx *ctx)
{
if (!ctx->might_cancel || ctx->moffs != KTIME_MAX)
return false;
ctx->moffs = ktime_mono_to_real(0);
return true;
}
static void timerfd_setup_cancel(struct timerfd_ctx *ctx, int flags)
{
if ((ctx->clockid == CLOCK_REALTIME ||
ctx->clockid == CLOCK_REALTIME_ALARM) &&
(flags & TFD_TIMER_ABSTIME) && (flags & TFD_TIMER_CANCEL_ON_SET)) {
if (!ctx->might_cancel) {
ctx->might_cancel = true;
spin_lock(&cancel_lock);
list_add_rcu(&ctx->clist, &cancel_list);
spin_unlock(&cancel_lock);
}
} else if (ctx->might_cancel) {
timerfd_remove_cancel(ctx);
}
}
static ktime_t timerfd_get_remaining(struct timerfd_ctx *ctx)
{
ktime_t remaining;
if (isalarm(ctx))
remaining = alarm_expires_remaining(&ctx->t.alarm);
else
remaining = hrtimer_expires_remaining_adjusted(&ctx->t.tmr);
return remaining < 0 ? 0 : remaining;
}
static int timerfd_setup(struct timerfd_ctx *ctx, int flags,
const struct itimerspec *ktmr)
{
enum hrtimer_mode htmode;
ktime_t texp;
int clockid = ctx->clockid;
htmode = (flags & TFD_TIMER_ABSTIME) ?
HRTIMER_MODE_ABS : HRTIMER_MODE_REL;
texp = timespec_to_ktime(ktmr->it_value);
ctx->expired = 0;
ctx->ticks = 0;
ctx->tintv = timespec_to_ktime(ktmr->it_interval);
if (isalarm(ctx)) {
alarm_init(&ctx->t.alarm,
ctx->clockid == CLOCK_REALTIME_ALARM ?
ALARM_REALTIME : ALARM_BOOTTIME,
timerfd_alarmproc);
} else {
hrtimer_init(&ctx->t.tmr, clockid, htmode);
hrtimer_set_expires(&ctx->t.tmr, texp);
ctx->t.tmr.function = timerfd_tmrproc;
}
if (texp != 0) {
if (isalarm(ctx)) {
if (flags & TFD_TIMER_ABSTIME)
alarm_start(&ctx->t.alarm, texp);
else
alarm_start_relative(&ctx->t.alarm, texp);
} else {
hrtimer_start(&ctx->t.tmr, texp, htmode);
}
if (timerfd_canceled(ctx))
return -ECANCELED;
}
ctx->settime_flags = flags & TFD_SETTIME_FLAGS;
return 0;
}
static int timerfd_release(struct inode *inode, struct file *file)
{
struct timerfd_ctx *ctx = file->private_data;
timerfd_remove_cancel(ctx);
if (isalarm(ctx))
alarm_cancel(&ctx->t.alarm);
else
hrtimer_cancel(&ctx->t.tmr);
kfree_rcu(ctx, rcu);
return 0;
}
static unsigned int timerfd_poll(struct file *file, poll_table *wait)
{
struct timerfd_ctx *ctx = file->private_data;
unsigned int events = 0;
unsigned long flags;
poll_wait(file, &ctx->wqh, wait);
spin_lock_irqsave(&ctx->wqh.lock, flags);
if (ctx->ticks)
events |= POLLIN;
spin_unlock_irqrestore(&ctx->wqh.lock, flags);
return events;
}
static ssize_t timerfd_read(struct file *file, char __user *buf, size_t count,
loff_t *ppos)
{
struct timerfd_ctx *ctx = file->private_data;
ssize_t res;
u64 ticks = 0;
if (count < sizeof(ticks))
return -EINVAL;
spin_lock_irq(&ctx->wqh.lock);
if (file->f_flags & O_NONBLOCK)
res = -EAGAIN;
else
res = wait_event_interruptible_locked_irq(ctx->wqh, ctx->ticks);
/*
* If the clock has changed, we do not care about the
* ticks and we do not rearm the timer. Userspace must
* reevaluate anyway.
*/
if (timerfd_canceled(ctx)) {
ctx->ticks = 0;
ctx->expired = 0;
res = -ECANCELED;
}
if (ctx->ticks) {
ticks = ctx->ticks;
if (ctx->expired && ctx->tintv) {
/*
* If tintv != 0, this is a periodic timer that
* needs to be re-armed. We avoid doing it in the timer
* callback to prevent DoS attacks that specify a very
* short timer period.
*/
if (isalarm(ctx)) {
ticks += alarm_forward_now(
&ctx->t.alarm, ctx->tintv) - 1;
alarm_restart(&ctx->t.alarm);
} else {
ticks += hrtimer_forward_now(&ctx->t.tmr,
ctx->tintv) - 1;
hrtimer_restart(&ctx->t.tmr);
}
}
ctx->expired = 0;
ctx->ticks = 0;
}
spin_unlock_irq(&ctx->wqh.lock);
if (ticks)
res = put_user(ticks, (u64 __user *) buf) ? -EFAULT : sizeof(ticks);
return res;
}
#ifdef CONFIG_PROC_FS
static void timerfd_show(struct seq_file *m, struct file *file)
{
struct timerfd_ctx *ctx = file->private_data;
struct itimerspec t;
spin_lock_irq(&ctx->wqh.lock);
t.it_value = ktime_to_timespec(timerfd_get_remaining(ctx));
t.it_interval = ktime_to_timespec(ctx->tintv);
spin_unlock_irq(&ctx->wqh.lock);
seq_printf(m,
"clockid: %d\n"
"ticks: %llu\n"
"settime flags: 0%o\n"
"it_value: (%llu, %llu)\n"
"it_interval: (%llu, %llu)\n",
ctx->clockid,
(unsigned long long)ctx->ticks,
ctx->settime_flags,
(unsigned long long)t.it_value.tv_sec,
(unsigned long long)t.it_value.tv_nsec,
(unsigned long long)t.it_interval.tv_sec,
(unsigned long long)t.it_interval.tv_nsec);
}
#else
#define timerfd_show NULL
#endif
#ifdef CONFIG_CHECKPOINT_RESTORE
static long timerfd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct timerfd_ctx *ctx = file->private_data;
int ret = 0;
switch (cmd) {
case TFD_IOC_SET_TICKS: {
u64 ticks;
if (copy_from_user(&ticks, (u64 __user *)arg, sizeof(ticks)))
return -EFAULT;
if (!ticks)
return -EINVAL;
spin_lock_irq(&ctx->wqh.lock);
if (!timerfd_canceled(ctx)) {
ctx->ticks = ticks;
wake_up_locked(&ctx->wqh);
} else
ret = -ECANCELED;
spin_unlock_irq(&ctx->wqh.lock);
break;
}
default:
ret = -ENOTTY;
break;
}
return ret;
}
#else
#define timerfd_ioctl NULL
#endif
static const struct file_operations timerfd_fops = {
.release = timerfd_release,
.poll = timerfd_poll,
.read = timerfd_read,
.llseek = noop_llseek,
.show_fdinfo = timerfd_show,
.unlocked_ioctl = timerfd_ioctl,
};
static int timerfd_fget(int fd, struct fd *p)
{
struct fd f = fdget(fd);
if (!f.file)
return -EBADF;
if (f.file->f_op != &timerfd_fops) {
fdput(f);
return -EINVAL;
}
*p = f;
return 0;
}
SYSCALL_DEFINE2(timerfd_create, int, clockid, int, flags)
{
int ufd;
struct timerfd_ctx *ctx;
/* Check the TFD_* constants for consistency. */
BUILD_BUG_ON(TFD_CLOEXEC != O_CLOEXEC);
BUILD_BUG_ON(TFD_NONBLOCK != O_NONBLOCK);
if ((flags & ~TFD_CREATE_FLAGS) ||
(clockid != CLOCK_MONOTONIC &&
clockid != CLOCK_REALTIME &&
clockid != CLOCK_REALTIME_ALARM &&
clockid != CLOCK_BOOTTIME &&
clockid != CLOCK_BOOTTIME_ALARM))
return -EINVAL;
if (!capable(CAP_WAKE_ALARM) &&
(clockid == CLOCK_REALTIME_ALARM ||
clockid == CLOCK_BOOTTIME_ALARM))
return -EPERM;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
init_waitqueue_head(&ctx->wqh);
ctx->clockid = clockid;
if (isalarm(ctx))
alarm_init(&ctx->t.alarm,
ctx->clockid == CLOCK_REALTIME_ALARM ?
ALARM_REALTIME : ALARM_BOOTTIME,
timerfd_alarmproc);
else
hrtimer_init(&ctx->t.tmr, clockid, HRTIMER_MODE_ABS);
ctx->moffs = ktime_mono_to_real(0);
ufd = anon_inode_getfd("[timerfd]", &timerfd_fops, ctx,
O_RDWR | (flags & TFD_SHARED_FCNTL_FLAGS));
if (ufd < 0)
kfree(ctx);
return ufd;
}
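/*
 * Minimal userspace sketch, kept under #if 0: the common timerfd life
 * cycle against the syscalls above -- create, arm a periodic timer, then
 * block in read(2) until timerfd_read() reports the accumulated
 * expiration count.
 */
#if 0
#include <stdint.h>
#include <stdio.h>
#include <sys/timerfd.h>
#include <unistd.h>

int main(void)
{
    struct itimerspec its = {
        .it_value    = { .tv_sec = 1 },    /* first expiry in 1s */
        .it_interval = { .tv_sec = 1 },    /* periodic: tintv != 0 */
    };
    uint64_t ticks;
    int fd = timerfd_create(CLOCK_MONOTONIC, TFD_CLOEXEC);

    if (fd < 0 || timerfd_settime(fd, 0, &its, NULL) != 0) {
        perror("timerfd");
        return 1;
    }
    /* Blocks until ctx->ticks is non-zero, then resets it to 0. */
    if (read(fd, &ticks, sizeof(ticks)) == sizeof(ticks))
        printf("expirations: %llu\n", (unsigned long long)ticks);
    close(fd);
    return 0;
}
#endif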
static int do_timerfd_settime(int ufd, int flags,
const struct itimerspec *new,
struct itimerspec *old)
{
struct fd f;
struct timerfd_ctx *ctx;
int ret;
if ((flags & ~TFD_SETTIME_FLAGS) ||
!timespec_valid(&new->it_value) ||
!timespec_valid(&new->it_interval))
return -EINVAL;
ret = timerfd_fget(ufd, &f);
if (ret)
return ret;
ctx = f.file->private_data;
if (!capable(CAP_WAKE_ALARM) && isalarm(ctx)) {
fdput(f);
return -EPERM;
}
timerfd_setup_cancel(ctx, flags);
/*
* We need to stop the existing timer before reprogramming
* it to the new values.
*/
for (;;) {
spin_lock_irq(&ctx->wqh.lock);
if (isalarm(ctx)) {
if (alarm_try_to_cancel(&ctx->t.alarm) >= 0)
break;
} else {
if (hrtimer_try_to_cancel(&ctx->t.tmr) >= 0)
break;
}
spin_unlock_irq(&ctx->wqh.lock);
cpu_relax();
}
/*
* If the timer is expired and it's periodic, we need to advance it
* because the caller may want to know the previous expiration time.
* We do not update "ticks" and "expired" since the timer will be
* re-programmed in the following timerfd_setup() call.
*/
if (ctx->expired && ctx->tintv) {
if (isalarm(ctx))
alarm_forward_now(&ctx->t.alarm, ctx->tintv);
else
hrtimer_forward_now(&ctx->t.tmr, ctx->tintv);
}
old->it_value = ktime_to_timespec(timerfd_get_remaining(ctx));
old->it_interval = ktime_to_timespec(ctx->tintv);
/*
* Re-program the timer to the new value ...
*/
ret = timerfd_setup(ctx, flags, new);
spin_unlock_irq(&ctx->wqh.lock);
fdput(f);
return ret;
}
static int do_timerfd_gettime(int ufd, struct itimerspec *t)
{
struct fd f;
struct timerfd_ctx *ctx;
int ret = timerfd_fget(ufd, &f);
if (ret)
return ret;
ctx = f.file->private_data;
spin_lock_irq(&ctx->wqh.lock);
if (ctx->expired && ctx->tintv) {
ctx->expired = 0;
if (isalarm(ctx)) {
ctx->ticks +=
alarm_forward_now(
&ctx->t.alarm, ctx->tintv) - 1;
alarm_restart(&ctx->t.alarm);
} else {
ctx->ticks +=
hrtimer_forward_now(&ctx->t.tmr, ctx->tintv)
- 1;
hrtimer_restart(&ctx->t.tmr);
}
}
t->it_value = ktime_to_timespec(timerfd_get_remaining(ctx));
t->it_interval = ktime_to_timespec(ctx->tintv);
spin_unlock_irq(&ctx->wqh.lock);
fdput(f);
return 0;
}
SYSCALL_DEFINE4(timerfd_settime, int, ufd, int, flags,
const struct itimerspec __user *, utmr,
struct itimerspec __user *, otmr)
{
struct itimerspec new, old;
int ret;
if (copy_from_user(&new, utmr, sizeof(new)))
return -EFAULT;
ret = do_timerfd_settime(ufd, flags, &new, &old);
if (ret)
return ret;
if (otmr && copy_to_user(otmr, &old, sizeof(old)))
return -EFAULT;
return ret;
}
SYSCALL_DEFINE2(timerfd_gettime, int, ufd, struct itimerspec __user *, otmr)
{
struct itimerspec kotmr;
int ret = do_timerfd_gettime(ufd, &kotmr);
if (ret)
return ret;
return copy_to_user(otmr, &kotmr, sizeof(kotmr)) ? -EFAULT : 0;
}
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(timerfd_settime, int, ufd, int, flags,
const struct compat_itimerspec __user *, utmr,
struct compat_itimerspec __user *, otmr)
{
struct itimerspec new, old;
int ret;
if (get_compat_itimerspec(&new, utmr))
return -EFAULT;
ret = do_timerfd_settime(ufd, flags, &new, &old);
if (ret)
return ret;
if (otmr && put_compat_itimerspec(otmr, &old))
return -EFAULT;
return ret;
}
COMPAT_SYSCALL_DEFINE2(timerfd_gettime, int, ufd,
struct compat_itimerspec __user *, otmr)
{
struct itimerspec kotmr;
int ret = do_timerfd_gettime(ufd, &kotmr);
if (ret)
return ret;
return put_compat_itimerspec(otmr, &kotmr) ? -EFAULT : 0;
}
#endif
| ./CrossVul/dataset_final_sorted/CWE-416/c/bad_2550_0 |
crossvul-cpp_data_bad_5376_0 | /*
*
* drivers/staging/android/ion/ion.c
*
* Copyright (C) 2011 Google, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/device.h>
#include <linux/err.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/idr.h>
#include "ion.h"
#include "ion_priv.h"
#include "compat_ion.h"
/**
* struct ion_device - the metadata of the ion device node
* @dev: the actual misc device
* @buffers: an rb tree of all the existing buffers
* @buffer_lock: lock protecting the tree of buffers
* @lock: rwsem protecting the tree of heaps and clients
* @heaps: list of all the heaps in the system
* @clients: rb tree of all the clients of the device
*/
struct ion_device {
struct miscdevice dev;
struct rb_root buffers;
struct mutex buffer_lock;
struct rw_semaphore lock;
struct plist_head heaps;
long (*custom_ioctl)(struct ion_client *client, unsigned int cmd,
unsigned long arg);
struct rb_root clients;
struct dentry *debug_root;
struct dentry *heaps_debug_root;
struct dentry *clients_debug_root;
};
/**
* struct ion_client - a process/hw block local address space
* @node: node in the tree of all clients
* @dev: backpointer to ion device
* @handles: an rb tree of all the handles in this client
* @idr: an idr space for allocating handle ids
* @lock: lock protecting the tree of handles
* @name: used for debugging
* @display_name: used for debugging (unique version of @name)
* @display_serial: used for debugging (to make display_name unique)
* @task: used for debugging
*
* A client represents a list of buffers this client may access.
* The mutex stored here is used to protect both the handles tree
* and the handles themselves, and should be held while modifying either.
*/
struct ion_client {
struct rb_node node;
struct ion_device *dev;
struct rb_root handles;
struct idr idr;
struct mutex lock;
const char *name;
char *display_name;
int display_serial;
struct task_struct *task;
pid_t pid;
struct dentry *debug_root;
};
/**
* ion_handle - a client local reference to a buffer
* @ref: reference count
* @client: back pointer to the client the buffer resides in
* @buffer: pointer to the buffer
* @node: node in the client's handle rbtree
* @kmap_cnt: count of times this client has mapped to kernel
* @id: client-unique id allocated by client->idr
*
* Modifications to node, map_cnt or mapping should be protected by the
* lock in the client. Other fields are never changed after initialization.
*/
struct ion_handle {
struct kref ref;
struct ion_client *client;
struct ion_buffer *buffer;
struct rb_node node;
unsigned int kmap_cnt;
int id;
};
bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
{
return (buffer->flags & ION_FLAG_CACHED) &&
!(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
}
bool ion_buffer_cached(struct ion_buffer *buffer)
{
return !!(buffer->flags & ION_FLAG_CACHED);
}
static inline struct page *ion_buffer_page(struct page *page)
{
return (struct page *)((unsigned long)page & ~(1UL));
}
static inline bool ion_buffer_page_is_dirty(struct page *page)
{
return !!((unsigned long)page & 1UL);
}
static inline void ion_buffer_page_dirty(struct page **page)
{
*page = (struct page *)((unsigned long)(*page) | 1UL);
}
static inline void ion_buffer_page_clean(struct page **page)
{
*page = (struct page *)((unsigned long)(*page) & ~(1UL));
}
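/*
 * Hypothetical sketch, kept under #if 0 and not taken from the original
 * driver: the helpers above keep a dirty flag in bit 0 of each struct
 * page pointer, which is free because the pointers are at least
 * word-aligned. A flush path would use them roughly like this.
 */
#if 0
static void example_mark_dirty(struct ion_buffer *buffer, pgoff_t idx)
{
    /* Takes the address of the slot so the tag lands in the array. */
    ion_buffer_page_dirty(buffer->pages + idx);
}

static void example_flush_dirty(struct ion_buffer *buffer, pgoff_t idx)
{
    if (!ion_buffer_page_is_dirty(buffer->pages[idx]))
        return;
    /* Strip the tag before dereferencing the real struct page. */
    flush_dcache_page(ion_buffer_page(buffer->pages[idx]));
    ion_buffer_page_clean(buffer->pages + idx);
}
#endif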
/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
struct ion_buffer *buffer)
{
struct rb_node **p = &dev->buffers.rb_node;
struct rb_node *parent = NULL;
struct ion_buffer *entry;
while (*p) {
parent = *p;
entry = rb_entry(parent, struct ion_buffer, node);
if (buffer < entry) {
p = &(*p)->rb_left;
} else if (buffer > entry) {
p = &(*p)->rb_right;
} else {
pr_err("%s: buffer already found.\n", __func__);
BUG();
}
}
rb_link_node(&buffer->node, parent, p);
rb_insert_color(&buffer->node, &dev->buffers);
}
/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
struct ion_device *dev,
unsigned long len,
unsigned long align,
unsigned long flags)
{
struct ion_buffer *buffer;
struct sg_table *table;
struct scatterlist *sg;
int i, ret;
buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
if (!buffer)
return ERR_PTR(-ENOMEM);
buffer->heap = heap;
buffer->flags = flags;
kref_init(&buffer->ref);
ret = heap->ops->allocate(heap, buffer, len, align, flags);
if (ret) {
if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
goto err2;
ion_heap_freelist_drain(heap, 0);
ret = heap->ops->allocate(heap, buffer, len, align,
flags);
if (ret)
goto err2;
}
buffer->dev = dev;
buffer->size = len;
table = heap->ops->map_dma(heap, buffer);
if (WARN_ONCE(table == NULL,
"heap->ops->map_dma should return ERR_PTR on error"))
table = ERR_PTR(-EINVAL);
if (IS_ERR(table)) {
ret = -EINVAL;
goto err1;
}
buffer->sg_table = table;
if (ion_buffer_fault_user_mappings(buffer)) {
int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
struct scatterlist *sg;
int i, j, k = 0;
buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
if (!buffer->pages) {
ret = -ENOMEM;
goto err;
}
for_each_sg(table->sgl, sg, table->nents, i) {
struct page *page = sg_page(sg);
for (j = 0; j < sg->length / PAGE_SIZE; j++)
buffer->pages[k++] = page++;
}
}
buffer->dev = dev;
buffer->size = len;
INIT_LIST_HEAD(&buffer->vmas);
mutex_init(&buffer->lock);
/*
* this will set up dma addresses for the sglist -- it is not
* technically correct as per the dma api -- a specific
* device isn't really taking ownership here. However, in practice on
* our systems the only dma_address space is physical addresses.
* Additionally, we can't afford the overhead of invalidating every
* allocation via dma_map_sg. The implicit contract here is that
* memory coming from the heaps is ready for dma, ie if it has a
* cached mapping that mapping has been invalidated
*/
for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
sg_dma_address(sg) = sg_phys(sg);
sg_dma_len(sg) = sg->length;
}
mutex_lock(&dev->buffer_lock);
ion_buffer_add(dev, buffer);
mutex_unlock(&dev->buffer_lock);
return buffer;
err:
heap->ops->unmap_dma(heap, buffer);
err1:
heap->ops->free(buffer);
err2:
kfree(buffer);
return ERR_PTR(ret);
}
void ion_buffer_destroy(struct ion_buffer *buffer)
{
if (WARN_ON(buffer->kmap_cnt > 0))
buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
buffer->heap->ops->unmap_dma(buffer->heap, buffer);
buffer->heap->ops->free(buffer);
vfree(buffer->pages);
kfree(buffer);
}
static void _ion_buffer_destroy(struct kref *kref)
{
struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
struct ion_heap *heap = buffer->heap;
struct ion_device *dev = buffer->dev;
mutex_lock(&dev->buffer_lock);
rb_erase(&buffer->node, &dev->buffers);
mutex_unlock(&dev->buffer_lock);
if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
ion_heap_freelist_add(heap, buffer);
else
ion_buffer_destroy(buffer);
}
static void ion_buffer_get(struct ion_buffer *buffer)
{
kref_get(&buffer->ref);
}
static int ion_buffer_put(struct ion_buffer *buffer)
{
return kref_put(&buffer->ref, _ion_buffer_destroy);
}
static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
{
mutex_lock(&buffer->lock);
buffer->handle_count++;
mutex_unlock(&buffer->lock);
}
static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
{
/*
* when a buffer is removed from a handle, if it is not in
* any other handles, copy the taskcomm and the pid of the
* process it's being removed from into the buffer. At this
* point there will be no way to track what processes this buffer is
* being used by; it only exists as a dma_buf file descriptor.
* The taskcomm and pid can provide a debug hint as to where this fd
* is in the system
*/
mutex_lock(&buffer->lock);
buffer->handle_count--;
BUG_ON(buffer->handle_count < 0);
if (!buffer->handle_count) {
struct task_struct *task;
task = current->group_leader;
get_task_comm(buffer->task_comm, task);
buffer->pid = task_pid_nr(task);
}
mutex_unlock(&buffer->lock);
}
static struct ion_handle *ion_handle_create(struct ion_client *client,
struct ion_buffer *buffer)
{
struct ion_handle *handle;
handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
if (!handle)
return ERR_PTR(-ENOMEM);
kref_init(&handle->ref);
RB_CLEAR_NODE(&handle->node);
handle->client = client;
ion_buffer_get(buffer);
ion_buffer_add_to_handle(buffer);
handle->buffer = buffer;
return handle;
}
static void ion_handle_kmap_put(struct ion_handle *);
static void ion_handle_destroy(struct kref *kref)
{
struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
struct ion_client *client = handle->client;
struct ion_buffer *buffer = handle->buffer;
mutex_lock(&buffer->lock);
while (handle->kmap_cnt)
ion_handle_kmap_put(handle);
mutex_unlock(&buffer->lock);
idr_remove(&client->idr, handle->id);
if (!RB_EMPTY_NODE(&handle->node))
rb_erase(&handle->node, &client->handles);
ion_buffer_remove_from_handle(buffer);
ion_buffer_put(buffer);
kfree(handle);
}
struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
return handle->buffer;
}
static void ion_handle_get(struct ion_handle *handle)
{
kref_get(&handle->ref);
}
static int ion_handle_put(struct ion_handle *handle)
{
struct ion_client *client = handle->client;
int ret;
mutex_lock(&client->lock);
ret = kref_put(&handle->ref, ion_handle_destroy);
mutex_unlock(&client->lock);
return ret;
}
static struct ion_handle *ion_handle_lookup(struct ion_client *client,
struct ion_buffer *buffer)
{
struct rb_node *n = client->handles.rb_node;
while (n) {
struct ion_handle *entry = rb_entry(n, struct ion_handle, node);
if (buffer < entry->buffer)
n = n->rb_left;
else if (buffer > entry->buffer)
n = n->rb_right;
else
return entry;
}
return ERR_PTR(-EINVAL);
}
static struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
int id)
{
struct ion_handle *handle;
mutex_lock(&client->lock);
handle = idr_find(&client->idr, id);
if (handle)
ion_handle_get(handle);
mutex_unlock(&client->lock);
return handle ? handle : ERR_PTR(-EINVAL);
}
static bool ion_handle_validate(struct ion_client *client,
struct ion_handle *handle)
{
WARN_ON(!mutex_is_locked(&client->lock));
return idr_find(&client->idr, handle->id) == handle;
}
static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
int id;
struct rb_node **p = &client->handles.rb_node;
struct rb_node *parent = NULL;
struct ion_handle *entry;
id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
if (id < 0)
return id;
handle->id = id;
while (*p) {
parent = *p;
entry = rb_entry(parent, struct ion_handle, node);
if (handle->buffer < entry->buffer)
p = &(*p)->rb_left;
else if (handle->buffer > entry->buffer)
p = &(*p)->rb_right;
else
WARN(1, "%s: buffer already found.", __func__);
}
rb_link_node(&handle->node, parent, p);
rb_insert_color(&handle->node, &client->handles);
return 0;
}
struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
size_t align, unsigned int heap_id_mask,
unsigned int flags)
{
struct ion_handle *handle;
struct ion_device *dev = client->dev;
struct ion_buffer *buffer = NULL;
struct ion_heap *heap;
int ret;
pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
len, align, heap_id_mask, flags);
/*
* traverse the list of heaps available in this system in priority
* order. If the heap type is supported by the client, and matches the
* request of the caller allocate from it. Repeat until allocate has
* succeeded or all heaps have been tried
*/
len = PAGE_ALIGN(len);
if (!len)
return ERR_PTR(-EINVAL);
down_read(&dev->lock);
plist_for_each_entry(heap, &dev->heaps, node) {
/* if the caller didn't specify this heap id */
if (!((1 << heap->id) & heap_id_mask))
continue;
buffer = ion_buffer_create(heap, dev, len, align, flags);
if (!IS_ERR(buffer))
break;
}
up_read(&dev->lock);
if (buffer == NULL)
return ERR_PTR(-ENODEV);
if (IS_ERR(buffer))
return ERR_CAST(buffer);
handle = ion_handle_create(client, buffer);
/*
* ion_buffer_create will create a buffer with a ref_cnt of 1,
* and ion_handle_create will take a second reference, drop one here
*/
ion_buffer_put(buffer);
if (IS_ERR(handle))
return handle;
mutex_lock(&client->lock);
ret = ion_handle_add(client, handle);
mutex_unlock(&client->lock);
if (ret) {
ion_handle_put(handle);
handle = ERR_PTR(ret);
}
return handle;
}
EXPORT_SYMBOL(ion_alloc);
void ion_free(struct ion_client *client, struct ion_handle *handle)
{
bool valid_handle;
BUG_ON(client != handle->client);
mutex_lock(&client->lock);
valid_handle = ion_handle_validate(client, handle);
if (!valid_handle) {
WARN(1, "%s: invalid handle passed to free.\n", __func__);
mutex_unlock(&client->lock);
return;
}
mutex_unlock(&client->lock);
ion_handle_put(handle);
}
EXPORT_SYMBOL(ion_free);
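/*
 * Usage sketch for the allocation API above. This is illustrative, not
 * taken from an in-tree caller; the client name, size and heap id are
 * assumptions.
 *
 *	struct ion_client *client = ion_client_create(idev, "my-driver");
 *	struct ion_handle *handle;
 *
 *	handle = ion_alloc(client, SZ_64K, PAGE_SIZE,
 *			   1 << my_heap_id, ION_FLAG_CACHED);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	...
 *	ion_free(client, handle);
 *	ion_client_destroy(client);
 *
 * Note the reference dance in ion_alloc(): the buffer is created with one
 * reference, ion_handle_create() takes a second, and ion_buffer_put()
 * drops the first, leaving the handle as the sole owner.
 */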
int ion_phys(struct ion_client *client, struct ion_handle *handle,
ion_phys_addr_t *addr, size_t *len)
{
struct ion_buffer *buffer;
int ret;
mutex_lock(&client->lock);
if (!ion_handle_validate(client, handle)) {
mutex_unlock(&client->lock);
return -EINVAL;
}
buffer = handle->buffer;
if (!buffer->heap->ops->phys) {
pr_err("%s: ion_phys is not implemented by this heap (name=%s, type=%d).\n",
__func__, buffer->heap->name, buffer->heap->type);
mutex_unlock(&client->lock);
return -ENODEV;
}
mutex_unlock(&client->lock);
ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
return ret;
}
EXPORT_SYMBOL(ion_phys);
static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
void *vaddr;
if (buffer->kmap_cnt) {
buffer->kmap_cnt++;
return buffer->vaddr;
}
vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
if (WARN_ONCE(vaddr == NULL,
"heap->ops->map_kernel should return ERR_PTR on error"))
return ERR_PTR(-EINVAL);
if (IS_ERR(vaddr))
return vaddr;
buffer->vaddr = vaddr;
buffer->kmap_cnt++;
return vaddr;
}
static void *ion_handle_kmap_get(struct ion_handle *handle)
{
struct ion_buffer *buffer = handle->buffer;
void *vaddr;
if (handle->kmap_cnt) {
handle->kmap_cnt++;
return buffer->vaddr;
}
vaddr = ion_buffer_kmap_get(buffer);
if (IS_ERR(vaddr))
return vaddr;
handle->kmap_cnt++;
return vaddr;
}
static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
buffer->kmap_cnt--;
if (!buffer->kmap_cnt) {
buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
buffer->vaddr = NULL;
}
}
static void ion_handle_kmap_put(struct ion_handle *handle)
{
struct ion_buffer *buffer = handle->buffer;
if (!handle->kmap_cnt) {
WARN(1, "%s: Double unmap detected! bailing...\n", __func__);
return;
}
handle->kmap_cnt--;
if (!handle->kmap_cnt)
ion_buffer_kmap_put(buffer);
}
void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
struct ion_buffer *buffer;
void *vaddr;
mutex_lock(&client->lock);
if (!ion_handle_validate(client, handle)) {
pr_err("%s: invalid handle passed to map_kernel.\n",
__func__);
mutex_unlock(&client->lock);
return ERR_PTR(-EINVAL);
}
buffer = handle->buffer;
if (!handle->buffer->heap->ops->map_kernel) {
pr_err("%s: map_kernel is not implemented by this heap.\n",
__func__);
mutex_unlock(&client->lock);
return ERR_PTR(-ENODEV);
}
mutex_lock(&buffer->lock);
vaddr = ion_handle_kmap_get(handle);
mutex_unlock(&buffer->lock);
mutex_unlock(&client->lock);
return vaddr;
}
EXPORT_SYMBOL(ion_map_kernel);
void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
struct ion_buffer *buffer;
mutex_lock(&client->lock);
buffer = handle->buffer;
mutex_lock(&buffer->lock);
ion_handle_kmap_put(handle);
mutex_unlock(&buffer->lock);
mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_kernel);
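/*
 * Kernel-mapping usage sketch (client and handle obtained as above;
 * "size" is assumed to be the allocation length):
 *
 *	void *vaddr = ion_map_kernel(client, handle);
 *
 *	if (!IS_ERR(vaddr)) {
 *		memset(vaddr, 0, size);
 *		ion_unmap_kernel(client, handle);
 *	}
 *
 * kmap_cnt is tracked per handle and per buffer, so the heap's
 * map_kernel()/unmap_kernel() ops run only on the first map and the
 * last unmap, and balanced nested map/unmap pairs are safe.
 */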
static struct mutex debugfs_mutex;
static struct rb_root *ion_root_client;
static int is_client_alive(struct ion_client *client)
{
struct rb_node *node;
struct ion_client *tmp;
struct ion_device *dev;
node = ion_root_client->rb_node;
dev = container_of(ion_root_client, struct ion_device, clients);
down_read(&dev->lock);
while (node) {
tmp = rb_entry(node, struct ion_client, node);
if (client < tmp) {
node = node->rb_left;
} else if (client > tmp) {
node = node->rb_right;
} else {
up_read(&dev->lock);
return 1;
}
}
up_read(&dev->lock);
return 0;
}
static int ion_debug_client_show(struct seq_file *s, void *unused)
{
struct ion_client *client = s->private;
struct rb_node *n;
size_t sizes[ION_NUM_HEAP_IDS] = {0};
const char *names[ION_NUM_HEAP_IDS] = {NULL};
int i;
mutex_lock(&debugfs_mutex);
if (!is_client_alive(client)) {
seq_printf(s, "ion_client 0x%p dead, can't dump its buffers\n",
client);
mutex_unlock(&debugfs_mutex);
return 0;
}
mutex_lock(&client->lock);
for (n = rb_first(&client->handles); n; n = rb_next(n)) {
struct ion_handle *handle = rb_entry(n, struct ion_handle,
node);
unsigned int id = handle->buffer->heap->id;
if (!names[id])
names[id] = handle->buffer->heap->name;
sizes[id] += handle->buffer->size;
}
mutex_unlock(&client->lock);
mutex_unlock(&debugfs_mutex);
seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
if (!names[i])
continue;
seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
}
return 0;
}
static int ion_debug_client_open(struct inode *inode, struct file *file)
{
return single_open(file, ion_debug_client_show, inode->i_private);
}
static const struct file_operations debug_client_fops = {
.open = ion_debug_client_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static int ion_get_client_serial(const struct rb_root *root,
const unsigned char *name)
{
int serial = -1;
struct rb_node *node;
for (node = rb_first(root); node; node = rb_next(node)) {
struct ion_client *client = rb_entry(node, struct ion_client,
node);
if (strcmp(client->name, name))
continue;
serial = max(serial, client->display_serial);
}
return serial + 1;
}
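/*
 * Example: if two clients named "camera" already exist with
 * display_serial 0 and 1, ion_get_client_serial(&dev->clients, "camera")
 * returns 2 and the new client's debugfs entry becomes "camera-2".
 */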
struct ion_client *ion_client_create(struct ion_device *dev,
const char *name)
{
struct ion_client *client;
struct task_struct *task;
struct rb_node **p;
struct rb_node *parent = NULL;
struct ion_client *entry;
pid_t pid;
if (!name) {
pr_err("%s: Name cannot be null\n", __func__);
return ERR_PTR(-EINVAL);
}
get_task_struct(current->group_leader);
task_lock(current->group_leader);
pid = task_pid_nr(current->group_leader);
/*
* don't bother to store task struct for kernel threads,
* they can't be killed anyway
*/
if (current->group_leader->flags & PF_KTHREAD) {
put_task_struct(current->group_leader);
task = NULL;
} else {
task = current->group_leader;
}
task_unlock(current->group_leader);
client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
if (!client)
goto err_put_task_struct;
client->dev = dev;
client->handles = RB_ROOT;
idr_init(&client->idr);
mutex_init(&client->lock);
client->task = task;
client->pid = pid;
client->name = kstrdup(name, GFP_KERNEL);
if (!client->name)
goto err_free_client;
down_write(&dev->lock);
client->display_serial = ion_get_client_serial(&dev->clients, name);
client->display_name = kasprintf(
GFP_KERNEL, "%s-%d", name, client->display_serial);
if (!client->display_name) {
up_write(&dev->lock);
goto err_free_client_name;
}
p = &dev->clients.rb_node;
while (*p) {
parent = *p;
entry = rb_entry(parent, struct ion_client, node);
if (client < entry)
p = &(*p)->rb_left;
else if (client > entry)
p = &(*p)->rb_right;
}
rb_link_node(&client->node, parent, p);
rb_insert_color(&client->node, &dev->clients);
client->debug_root = debugfs_create_file(client->display_name, 0664,
dev->clients_debug_root,
client, &debug_client_fops);
if (!client->debug_root) {
char buf[256], *path;
path = dentry_path(dev->clients_debug_root, buf, 256);
pr_err("Failed to create client debugfs at %s/%s\n",
path, client->display_name);
}
up_write(&dev->lock);
return client;
err_free_client_name:
kfree(client->name);
err_free_client:
kfree(client);
err_put_task_struct:
if (task)
put_task_struct(current->group_leader);
return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ion_client_create);
void ion_client_destroy(struct ion_client *client)
{
struct ion_device *dev = client->dev;
struct rb_node *n;
pr_debug("%s: %d\n", __func__, __LINE__);
mutex_lock(&debugfs_mutex);
while ((n = rb_first(&client->handles))) {
struct ion_handle *handle = rb_entry(n, struct ion_handle,
node);
ion_handle_destroy(&handle->ref);
}
idr_destroy(&client->idr);
down_write(&dev->lock);
if (client->task)
put_task_struct(client->task);
rb_erase(&client->node, &dev->clients);
debugfs_remove_recursive(client->debug_root);
up_write(&dev->lock);
kfree(client->display_name);
kfree(client->name);
kfree(client);
mutex_unlock(&debugfs_mutex);
}
EXPORT_SYMBOL(ion_client_destroy);
struct sg_table *ion_sg_table(struct ion_client *client,
struct ion_handle *handle)
{
struct ion_buffer *buffer;
struct sg_table *table;
mutex_lock(&client->lock);
if (!ion_handle_validate(client, handle)) {
pr_err("%s: invalid handle passed to map_dma.\n",
__func__);
mutex_unlock(&client->lock);
return ERR_PTR(-EINVAL);
}
buffer = handle->buffer;
table = buffer->sg_table;
mutex_unlock(&client->lock);
return table;
}
EXPORT_SYMBOL(ion_sg_table);
static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
struct device *dev,
enum dma_data_direction direction);
static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
enum dma_data_direction direction)
{
struct dma_buf *dmabuf = attachment->dmabuf;
struct ion_buffer *buffer = dmabuf->priv;
ion_buffer_sync_for_device(buffer, attachment->dev, direction);
return buffer->sg_table;
}
static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
struct sg_table *table,
enum dma_data_direction direction)
{
}
void ion_pages_sync_for_device(struct device *dev, struct page *page,
size_t size, enum dma_data_direction dir)
{
struct scatterlist sg;
sg_init_table(&sg, 1);
sg_set_page(&sg, page, size, 0);
/*
* This is not correct - sg_dma_address needs a dma_addr_t that is valid
* for the targeted device, but this works on the currently targeted
* hardware.
*/
sg_dma_address(&sg) = page_to_phys(page);
dma_sync_sg_for_device(dev, &sg, 1, dir);
}
struct ion_vma_list {
struct list_head list;
struct vm_area_struct *vma;
};
static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
struct device *dev,
enum dma_data_direction dir)
{
struct ion_vma_list *vma_list;
int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
int i;
pr_debug("%s: syncing for device %s\n", __func__,
dev ? dev_name(dev) : "null");
if (!ion_buffer_fault_user_mappings(buffer))
return;
mutex_lock(&buffer->lock);
for (i = 0; i < pages; i++) {
struct page *page = buffer->pages[i];
if (ion_buffer_page_is_dirty(page))
ion_pages_sync_for_device(dev, ion_buffer_page(page),
PAGE_SIZE, dir);
ion_buffer_page_clean(buffer->pages + i);
}
list_for_each_entry(vma_list, &buffer->vmas, list) {
struct vm_area_struct *vma = vma_list->vma;
zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
NULL);
}
mutex_unlock(&buffer->lock);
}
static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct ion_buffer *buffer = vma->vm_private_data;
unsigned long pfn;
int ret;
mutex_lock(&buffer->lock);
ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);
pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
mutex_unlock(&buffer->lock);
if (ret)
return VM_FAULT_ERROR;
return VM_FAULT_NOPAGE;
}
static void ion_vm_open(struct vm_area_struct *vma)
{
struct ion_buffer *buffer = vma->vm_private_data;
struct ion_vma_list *vma_list;
vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
if (!vma_list)
return;
vma_list->vma = vma;
mutex_lock(&buffer->lock);
list_add(&vma_list->list, &buffer->vmas);
mutex_unlock(&buffer->lock);
pr_debug("%s: adding %p\n", __func__, vma);
}
static void ion_vm_close(struct vm_area_struct *vma)
{
struct ion_buffer *buffer = vma->vm_private_data;
struct ion_vma_list *vma_list, *tmp;
pr_debug("%s\n", __func__);
mutex_lock(&buffer->lock);
list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
if (vma_list->vma != vma)
continue;
list_del(&vma_list->list);
kfree(vma_list);
pr_debug("%s: deleting %p\n", __func__, vma);
break;
}
mutex_unlock(&buffer->lock);
}
static const struct vm_operations_struct ion_vma_ops = {
.open = ion_vm_open,
.close = ion_vm_close,
.fault = ion_vm_fault,
};
static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
struct ion_buffer *buffer = dmabuf->priv;
int ret = 0;
if (!buffer->heap->ops->map_user) {
pr_err("%s: this heap does not define a method for mapping to userspace\n",
__func__);
return -EINVAL;
}
if (ion_buffer_fault_user_mappings(buffer)) {
vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
VM_DONTDUMP;
vma->vm_private_data = buffer;
vma->vm_ops = &ion_vma_ops;
ion_vm_open(vma);
return 0;
}
if (!(buffer->flags & ION_FLAG_CACHED))
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
mutex_lock(&buffer->lock);
/* now map it to userspace */
ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
mutex_unlock(&buffer->lock);
if (ret)
pr_err("%s: failure mapping buffer to userspace\n",
__func__);
return ret;
}
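/*
 * Userspace view of the mmap path above (hypothetical fd obtained via
 * ION_IOC_SHARE; length and protections are illustrative):
 *
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, buf_fd, 0);
 *
 * For buffers with faulting user mappings the PTEs are populated lazily
 * by ion_vm_fault(); otherwise the heap's map_user() op maps the whole
 * range here, with write-combine protections for uncached buffers.
 */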
static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
struct ion_buffer *buffer = dmabuf->priv;
ion_buffer_put(buffer);
}
static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
struct ion_buffer *buffer = dmabuf->priv;
return buffer->vaddr + offset * PAGE_SIZE;
}
static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
void *ptr)
{
}
static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
size_t len,
enum dma_data_direction direction)
{
struct ion_buffer *buffer = dmabuf->priv;
void *vaddr;
if (!buffer->heap->ops->map_kernel) {
pr_err("%s: map kernel is not implemented by this heap.\n",
__func__);
return -ENODEV;
}
mutex_lock(&buffer->lock);
vaddr = ion_buffer_kmap_get(buffer);
mutex_unlock(&buffer->lock);
return PTR_ERR_OR_ZERO(vaddr);
}
static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
size_t len,
enum dma_data_direction direction)
{
struct ion_buffer *buffer = dmabuf->priv;
mutex_lock(&buffer->lock);
ion_buffer_kmap_put(buffer);
mutex_unlock(&buffer->lock);
}
static struct dma_buf_ops dma_buf_ops = {
.map_dma_buf = ion_map_dma_buf,
.unmap_dma_buf = ion_unmap_dma_buf,
.mmap = ion_mmap,
.release = ion_dma_buf_release,
.begin_cpu_access = ion_dma_buf_begin_cpu_access,
.end_cpu_access = ion_dma_buf_end_cpu_access,
.kmap_atomic = ion_dma_buf_kmap,
.kunmap_atomic = ion_dma_buf_kunmap,
.kmap = ion_dma_buf_kmap,
.kunmap = ion_dma_buf_kunmap,
};
struct dma_buf *ion_share_dma_buf(struct ion_client *client,
struct ion_handle *handle)
{
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
struct ion_buffer *buffer;
struct dma_buf *dmabuf;
bool valid_handle;
mutex_lock(&client->lock);
valid_handle = ion_handle_validate(client, handle);
if (!valid_handle) {
WARN(1, "%s: invalid handle passed to share.\n", __func__);
mutex_unlock(&client->lock);
return ERR_PTR(-EINVAL);
}
buffer = handle->buffer;
ion_buffer_get(buffer);
mutex_unlock(&client->lock);
exp_info.ops = &dma_buf_ops;
exp_info.size = buffer->size;
exp_info.flags = O_RDWR;
exp_info.priv = buffer;
dmabuf = dma_buf_export(&exp_info);
if (IS_ERR(dmabuf)) {
ion_buffer_put(buffer);
return dmabuf;
}
return dmabuf;
}
EXPORT_SYMBOL(ion_share_dma_buf);
int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
{
struct dma_buf *dmabuf;
int fd;
dmabuf = ion_share_dma_buf(client, handle);
if (IS_ERR(dmabuf))
return PTR_ERR(dmabuf);
fd = dma_buf_fd(dmabuf, O_CLOEXEC);
if (fd < 0)
dma_buf_put(dmabuf);
return fd;
}
EXPORT_SYMBOL(ion_share_dma_buf_fd);
struct ion_handle *ion_import_dma_buf(struct ion_client *client,
struct dma_buf *dmabuf)
{
struct ion_buffer *buffer;
struct ion_handle *handle;
int ret;
/* if this memory came from ion */
if (dmabuf->ops != &dma_buf_ops) {
pr_err("%s: can not import dmabuf from another exporter\n",
__func__);
return ERR_PTR(-EINVAL);
}
buffer = dmabuf->priv;
mutex_lock(&client->lock);
/* if a handle exists for this buffer just take a reference to it */
handle = ion_handle_lookup(client, buffer);
if (!IS_ERR(handle)) {
ion_handle_get(handle);
mutex_unlock(&client->lock);
goto end;
}
handle = ion_handle_create(client, buffer);
if (IS_ERR(handle)) {
mutex_unlock(&client->lock);
goto end;
}
ret = ion_handle_add(client, handle);
mutex_unlock(&client->lock);
if (ret) {
ion_handle_put(handle);
handle = ERR_PTR(ret);
}
end:
return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf);
struct ion_handle *ion_import_dma_buf_fd(struct ion_client *client, int fd)
{
struct dma_buf *dmabuf;
struct ion_handle *handle;
dmabuf = dma_buf_get(fd);
if (IS_ERR(dmabuf))
return ERR_CAST(dmabuf);
handle = ion_import_dma_buf(client, dmabuf);
dma_buf_put(dmabuf);
return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf_fd);
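/*
 * Round-trip sketch for sharing between two clients of the same device
 * (transporting the fd, e.g. over a unix socket, is elided):
 *
 *	int fd = ion_share_dma_buf_fd(client_a, handle_a);
 *	...
 *	struct ion_handle *handle_b = ion_import_dma_buf_fd(client_b, fd);
 *
 * Importing a buffer the client already holds does not create a second
 * handle: ion_handle_lookup() finds the existing one and only its
 * reference count is bumped.
 */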
static int ion_sync_for_device(struct ion_client *client, int fd)
{
struct dma_buf *dmabuf;
struct ion_buffer *buffer;
dmabuf = dma_buf_get(fd);
if (IS_ERR(dmabuf))
return PTR_ERR(dmabuf);
/* if this memory came from ion */
if (dmabuf->ops != &dma_buf_ops) {
pr_err("%s: can not sync dmabuf from another exporter\n",
__func__);
dma_buf_put(dmabuf);
return -EINVAL;
}
buffer = dmabuf->priv;
dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
buffer->sg_table->nents, DMA_BIDIRECTIONAL);
dma_buf_put(dmabuf);
return 0;
}
/* fix up the cases where the ioctl direction bits are incorrect */
static unsigned int ion_ioctl_dir(unsigned int cmd)
{
switch (cmd) {
case ION_IOC_SYNC:
case ION_IOC_FREE:
case ION_IOC_CUSTOM:
return _IOC_WRITE;
default:
return _IOC_DIR(cmd);
}
}
static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct ion_client *client = filp->private_data;
struct ion_device *dev = client->dev;
struct ion_handle *cleanup_handle = NULL;
int ret = 0;
unsigned int dir;
union {
struct ion_fd_data fd;
struct ion_allocation_data allocation;
struct ion_handle_data handle;
struct ion_custom_data custom;
} data;
dir = ion_ioctl_dir(cmd);
if (_IOC_SIZE(cmd) > sizeof(data))
return -EINVAL;
if (dir & _IOC_WRITE)
if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
return -EFAULT;
switch (cmd) {
case ION_IOC_ALLOC:
{
struct ion_handle *handle;
handle = ion_alloc(client, data.allocation.len,
data.allocation.align,
data.allocation.heap_id_mask,
data.allocation.flags);
if (IS_ERR(handle))
return PTR_ERR(handle);
data.allocation.handle = handle->id;
cleanup_handle = handle;
break;
}
case ION_IOC_FREE:
{
struct ion_handle *handle;
handle = ion_handle_get_by_id(client, data.handle.handle);
if (IS_ERR(handle))
return PTR_ERR(handle);
ion_free(client, handle);
ion_handle_put(handle);
break;
}
case ION_IOC_SHARE:
case ION_IOC_MAP:
{
struct ion_handle *handle;
handle = ion_handle_get_by_id(client, data.handle.handle);
if (IS_ERR(handle))
return PTR_ERR(handle);
data.fd.fd = ion_share_dma_buf_fd(client, handle);
ion_handle_put(handle);
if (data.fd.fd < 0)
ret = data.fd.fd;
break;
}
case ION_IOC_IMPORT:
{
struct ion_handle *handle;
handle = ion_import_dma_buf_fd(client, data.fd.fd);
if (IS_ERR(handle))
ret = PTR_ERR(handle);
else
data.handle.handle = handle->id;
break;
}
case ION_IOC_SYNC:
{
ret = ion_sync_for_device(client, data.fd.fd);
break;
}
case ION_IOC_CUSTOM:
{
if (!dev->custom_ioctl)
return -ENOTTY;
ret = dev->custom_ioctl(client, data.custom.cmd,
data.custom.arg);
break;
}
default:
return -ENOTTY;
}
if (dir & _IOC_READ) {
if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
if (cleanup_handle)
ion_free(client, cleanup_handle);
return -EFAULT;
}
}
return ret;
}
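/*
 * Userspace sketch of the ION_IOC_ALLOC path handled above (the struct
 * layout comes from the ion uapi header; sizes and the heap mask are
 * illustrative):
 *
 *	struct ion_allocation_data req = {
 *		.len = 4096,
 *		.align = 4096,
 *		.heap_id_mask = 1 << 0,
 *		.flags = 0,
 *	};
 *	int dev_fd = open("/dev/ion", O_RDONLY);
 *
 *	if (ioctl(dev_fd, ION_IOC_ALLOC, &req) == 0)
 *		... req.handle now holds the new handle id ...
 *
 * On a failed copy_to_user() the cleanup_handle logic above frees the
 * just-allocated handle so nothing leaks.
 */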
static int ion_release(struct inode *inode, struct file *file)
{
struct ion_client *client = file->private_data;
pr_debug("%s: %d\n", __func__, __LINE__);
ion_client_destroy(client);
return 0;
}
static int ion_open(struct inode *inode, struct file *file)
{
struct miscdevice *miscdev = file->private_data;
struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
struct ion_client *client;
char debug_name[64];
pr_debug("%s: %d\n", __func__, __LINE__);
snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
client = ion_client_create(dev, debug_name);
if (IS_ERR(client))
return PTR_ERR(client);
file->private_data = client;
return 0;
}
static const struct file_operations ion_fops = {
.owner = THIS_MODULE,
.open = ion_open,
.release = ion_release,
.unlocked_ioctl = ion_ioctl,
.compat_ioctl = compat_ion_ioctl,
};
static size_t ion_debug_heap_total(struct ion_client *client,
unsigned int id)
{
size_t size = 0;
struct rb_node *n;
mutex_lock(&client->lock);
for (n = rb_first(&client->handles); n; n = rb_next(n)) {
struct ion_handle *handle = rb_entry(n,
struct ion_handle,
node);
if (handle->buffer->heap->id == id)
size += handle->buffer->size;
}
mutex_unlock(&client->lock);
return size;
}
static int ion_debug_heap_show(struct seq_file *s, void *unused)
{
struct ion_heap *heap = s->private;
struct ion_device *dev = heap->dev;
struct rb_node *n;
size_t total_size = 0;
size_t total_orphaned_size = 0;
seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
seq_puts(s, "----------------------------------------------------\n");
mutex_lock(&debugfs_mutex);
for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
struct ion_client *client = rb_entry(n, struct ion_client,
node);
size_t size = ion_debug_heap_total(client, heap->id);
if (!size)
continue;
if (client->task) {
char task_comm[TASK_COMM_LEN];
get_task_comm(task_comm, client->task);
seq_printf(s, "%16s %16u %16zu\n", task_comm,
client->pid, size);
} else {
seq_printf(s, "%16s %16u %16zu\n", client->name,
client->pid, size);
}
}
mutex_unlock(&debugfs_mutex);
seq_puts(s, "----------------------------------------------------\n");
seq_puts(s, "orphaned allocations (info is from last known client):\n");
mutex_lock(&dev->buffer_lock);
for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
node);
if (buffer->heap->id != heap->id)
continue;
total_size += buffer->size;
if (!buffer->handle_count) {
seq_printf(s, "%16s %16u %16zu %d %d\n",
buffer->task_comm, buffer->pid,
buffer->size, buffer->kmap_cnt,
atomic_read(&buffer->ref.refcount));
total_orphaned_size += buffer->size;
}
}
mutex_unlock(&dev->buffer_lock);
seq_puts(s, "----------------------------------------------------\n");
seq_printf(s, "%16s %16zu\n", "total orphaned",
total_orphaned_size);
seq_printf(s, "%16s %16zu\n", "total ", total_size);
if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
seq_printf(s, "%16s %16zu\n", "deferred free",
heap->free_list_size);
seq_puts(s, "----------------------------------------------------\n");
if (heap->debug_show)
heap->debug_show(heap, s, unused);
return 0;
}
static int ion_debug_heap_open(struct inode *inode, struct file *file)
{
return single_open(file, ion_debug_heap_show, inode->i_private);
}
static const struct file_operations debug_heap_fops = {
.open = ion_debug_heap_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static int debug_shrink_set(void *data, u64 val)
{
struct ion_heap *heap = data;
struct shrink_control sc;
int objs;
sc.gfp_mask = -1;
sc.nr_to_scan = val;
if (!val) {
objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
sc.nr_to_scan = objs;
}
heap->shrinker.scan_objects(&heap->shrinker, &sc);
return 0;
}
static int debug_shrink_get(void *data, u64 *val)
{
struct ion_heap *heap = data;
struct shrink_control sc;
int objs;
sc.gfp_mask = -1;
sc.nr_to_scan = 0;
objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
*val = objs;
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
debug_shrink_set, "%llu\n");
void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
struct dentry *debug_file;
if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
!heap->ops->unmap_dma)
pr_err("%s: can not add heap with invalid ops struct.\n",
__func__);
spin_lock_init(&heap->free_lock);
heap->free_list_size = 0;
if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
ion_heap_init_deferred_free(heap);
if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
ion_heap_init_shrinker(heap);
heap->dev = dev;
down_write(&dev->lock);
/*
* use negative heap->id to reverse the priority -- when traversing
* the list later, attempt higher id numbers first
*/
plist_node_init(&heap->node, -heap->id);
plist_add(&heap->node, &dev->heaps);
debug_file = debugfs_create_file(heap->name, 0664,
dev->heaps_debug_root, heap,
&debug_heap_fops);
if (!debug_file) {
char buf[256], *path;
path = dentry_path(dev->heaps_debug_root, buf, 256);
pr_err("Failed to create heap debugfs at %s/%s\n",
path, heap->name);
}
if (heap->shrinker.count_objects && heap->shrinker.scan_objects) {
char debug_name[64];
snprintf(debug_name, 64, "%s_shrink", heap->name);
debug_file = debugfs_create_file(
debug_name, 0644, dev->heaps_debug_root, heap,
&debug_shrink_fops);
if (!debug_file) {
char buf[256], *path;
path = dentry_path(dev->heaps_debug_root, buf, 256);
pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
path, debug_name);
}
}
up_write(&dev->lock);
}
EXPORT_SYMBOL(ion_device_add_heap);
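/*
 * Example of the negative-priority trick above: heaps registered with
 * ids 0, 1 and 2 receive plist priorities 0, -1 and -2, so
 * plist_for_each_entry() in ion_alloc() tries the id-2 heap first and
 * the id-0 heap last.
 */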
struct ion_device *ion_device_create(long (*custom_ioctl)
(struct ion_client *client,
unsigned int cmd,
unsigned long arg))
{
struct ion_device *idev;
int ret;
idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
if (!idev)
return ERR_PTR(-ENOMEM);
idev->dev.minor = MISC_DYNAMIC_MINOR;
idev->dev.name = "ion";
idev->dev.fops = &ion_fops;
idev->dev.parent = NULL;
ret = misc_register(&idev->dev);
if (ret) {
pr_err("ion: failed to register misc device.\n");
kfree(idev);
return ERR_PTR(ret);
}
idev->debug_root = debugfs_create_dir("ion", NULL);
if (!idev->debug_root) {
pr_err("ion: failed to create debugfs root directory.\n");
goto debugfs_done;
}
idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
if (!idev->heaps_debug_root) {
pr_err("ion: failed to create debugfs heaps directory.\n");
goto debugfs_done;
}
idev->clients_debug_root = debugfs_create_dir("clients",
idev->debug_root);
if (!idev->clients_debug_root)
pr_err("ion: failed to create debugfs clients directory.\n");
debugfs_done:
idev->custom_ioctl = custom_ioctl;
idev->buffers = RB_ROOT;
mutex_init(&idev->buffer_lock);
init_rwsem(&idev->lock);
plist_head_init(&idev->heaps);
idev->clients = RB_ROOT;
ion_root_client = &idev->clients;
mutex_init(&debugfs_mutex);
return idev;
}
EXPORT_SYMBOL(ion_device_create);
void ion_device_destroy(struct ion_device *dev)
{
misc_deregister(&dev->dev);
debugfs_remove_recursive(dev->debug_root);
/* XXX need to free the heaps and clients ? */
kfree(dev);
}
EXPORT_SYMBOL(ion_device_destroy);
void __init ion_reserve(struct ion_platform_data *data)
{
int i;
for (i = 0; i < data->nr; i++) {
if (data->heaps[i].size == 0)
continue;
if (data->heaps[i].base == 0) {
phys_addr_t paddr;
paddr = memblock_alloc_base(data->heaps[i].size,
data->heaps[i].align,
MEMBLOCK_ALLOC_ANYWHERE);
if (!paddr) {
pr_err("%s: error allocating memblock for heap %d\n",
__func__, i);
continue;
}
data->heaps[i].base = paddr;
} else {
int ret = memblock_reserve(data->heaps[i].base,
data->heaps[i].size);
if (ret)
pr_err("memblock reserve of %zx@%lx failed\n",
data->heaps[i].size,
data->heaps[i].base);
}
pr_info("%s: %s reserved base %lx size %zu\n", __func__,
data->heaps[i].name,
data->heaps[i].base,
data->heaps[i].size);
}
}
| ./CrossVul/dataset_final_sorted/CWE-416/c/bad_5376_0 |
crossvul-cpp_data_bad_4020_2 | /* exif-mnote-data-olympus.c
*
* Copyright (c) 2002, 2003 Lutz Mueller <lutz@users.sourceforge.net>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
* Boston, MA 02110-1301 USA.
*/
#include <config.h>
#include "exif-mnote-data-olympus.h"
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <libexif/exif-utils.h>
#include <libexif/exif-data.h>
/* Uncomment this to fix a problem with Sanyo MakerNotes. It's probably best
* not to in most cases because it seems to only affect the thumbnail tag
* which is duplicated in IFD 1, and fixing the offset could actually cause
* problems with other software that expects the broken form.
*/
/*#define EXIF_OVERCOME_SANYO_OFFSET_BUG */
#define CHECKOVERFLOW(offset,datasize,structsize) (( offset >= datasize) || (structsize > datasize) || (offset > datasize - structsize ))
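/* Worked example of why the macro is written this way: with
 * datasize == 16 and structsize == 12, a corrupt 32-bit offset of
 * 0xFFFFFFF8 would make a naive "offset + structsize > datasize" test
 * wrap to 4 and pass; comparing offset against datasize - structsize
 * (after checking structsize <= datasize) keeps every subtraction and
 * comparison overflow-free.
 */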
static enum OlympusVersion
exif_mnote_data_olympus_identify_variant (const unsigned char *buf,
unsigned int buf_size);
static void
exif_mnote_data_olympus_clear (ExifMnoteDataOlympus *n)
{
ExifMnoteData *d = (ExifMnoteData *) n;
unsigned int i;
if (!n) return;
if (n->entries) {
for (i = 0; i < n->count; i++)
if (n->entries[i].data) {
exif_mem_free (d->mem, n->entries[i].data);
n->entries[i].data = NULL;
}
exif_mem_free (d->mem, n->entries);
n->entries = NULL;
n->count = 0;
}
}
static void
exif_mnote_data_olympus_free (ExifMnoteData *n)
{
if (!n) return;
exif_mnote_data_olympus_clear ((ExifMnoteDataOlympus *) n);
}
static char *
exif_mnote_data_olympus_get_value (ExifMnoteData *d, unsigned int i, char *val, unsigned int maxlen)
{
ExifMnoteDataOlympus *n = (ExifMnoteDataOlympus *) d;
if (!d || !val) return NULL;
if (i >= n->count) return NULL;
/*
exif_log (d->log, EXIF_LOG_CODE_DEBUG, "ExifMnoteDataOlympus",
"Querying value for tag '%s'...",
mnote_olympus_tag_get_name (n->entries[i].tag));
*/
return mnote_olympus_entry_get_value (&n->entries[i], val, maxlen);
}
/**
* @brief save the MnoteData from ne to buf
*
* @param ne extract the data from this structure
* @param *buf write the mnoteData to this buffer (buffer will be allocated)
* @param buf_size receives the size of the allocated buffer
*/
static void
exif_mnote_data_olympus_save (ExifMnoteData *ne,
unsigned char **buf, unsigned int *buf_size)
{
ExifMnoteDataOlympus *n = (ExifMnoteDataOlympus *) ne;
size_t i, o, s, doff, base = 0, o2 = 6 + 2;
size_t datao = 0;
unsigned char *t;
size_t ts;
if (!n || !buf || !buf_size) return;
/*
* Allocate enough memory for all entries and the number of entries.
*/
*buf_size = 6 + 2 + 2 + n->count * 12;
switch (n->version) {
case olympusV1:
case sanyoV1:
case epsonV1:
*buf = exif_mem_alloc (ne->mem, *buf_size);
if (!*buf) {
EXIF_LOG_NO_MEMORY(ne->log, "ExifMnoteDataOlympus", *buf_size);
return;
}
/* Write the header and the number of entries. */
strcpy ((char *)*buf, n->version==sanyoV1?"SANYO":
(n->version==epsonV1?"EPSON":"OLYMP"));
exif_set_short (*buf + 6, n->order, (ExifShort) 1);
datao = n->offset;
break;
case olympusV2:
*buf_size += 8-6 + 4;
*buf = exif_mem_alloc (ne->mem, *buf_size);
if (!*buf) {
EXIF_LOG_NO_MEMORY(ne->log, "ExifMnoteDataOlympus", *buf_size);
return;
}
/* Write the header and the number of entries. */
strcpy ((char *)*buf, "OLYMPUS");
exif_set_short (*buf + 8, n->order, (ExifShort) (
(n->order == EXIF_BYTE_ORDER_INTEL) ?
('I' << 8) | 'I' :
('M' << 8) | 'M'));
exif_set_short (*buf + 10, n->order, (ExifShort) 3);
o2 += 4;
break;
case nikonV1:
base = MNOTE_NIKON1_TAG_BASE;
/* v1 has offsets based to main IFD, not makernote IFD */
datao += n->offset + 10;
/* subtract the size here, so the increment in the next case will not harm us */
*buf_size -= 8 + 2;
/* Fall through to nikonV2 handler */
case nikonV2:
/* Write out V0 files in V2 format */
case nikonV0:
*buf_size += 8 + 2;
*buf_size += 4; /* Next IFD pointer */
*buf = exif_mem_alloc (ne->mem, *buf_size);
if (!*buf) {
EXIF_LOG_NO_MEMORY(ne->log, "ExifMnoteDataOlympus", *buf_size);
return;
}
/* Write the header and the number of entries. */
strcpy ((char *)*buf, "Nikon");
(*buf)[6] = n->version;
if (n->version != nikonV1) {
exif_set_short (*buf + 10, n->order, (ExifShort) (
(n->order == EXIF_BYTE_ORDER_INTEL) ?
('I' << 8) | 'I' :
('M' << 8) | 'M'));
exif_set_short (*buf + 12, n->order, (ExifShort) 0x2A);
exif_set_long (*buf + 14, n->order, (ExifShort) 8);
o2 += 2 + 8;
}
datao -= 10;
/* Reset next IFD pointer */
exif_set_long (*buf + o2 + 2 + n->count * 12, n->order, 0);
break;
default:
return;
}
exif_set_short (*buf + o2, n->order, (ExifShort) n->count);
o2 += 2;
/* Save each entry */
for (i = 0; i < n->count; i++) {
o = o2 + i * 12;
exif_set_short (*buf + o + 0, n->order,
(ExifShort) (n->entries[i].tag - base));
exif_set_short (*buf + o + 2, n->order,
(ExifShort) n->entries[i].format);
exif_set_long (*buf + o + 4, n->order,
n->entries[i].components);
o += 8;
s = exif_format_get_size (n->entries[i].format) *
n->entries[i].components;
if (s > 65536) {
/* Corrupt data: EXIF data size is limited to the
* maximum size of a JPEG segment (64 kb).
*/
continue;
}
if (s > 4) {
doff = *buf_size;
ts = *buf_size + s;
t = exif_mem_realloc (ne->mem, *buf,
sizeof (char) * ts);
if (!t) {
EXIF_LOG_NO_MEMORY(ne->log, "ExifMnoteDataOlympus", ts);
return;
}
*buf = t;
*buf_size = ts;
exif_set_long (*buf + o, n->order, datao + doff);
} else
doff = o;
/* Write the data. */
if (n->entries[i].data) {
memcpy (*buf + doff, n->entries[i].data, s);
} else {
/* Most certainly damaged input file */
memset (*buf + doff, 0, s);
}
}
}
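/*
 * Worked size example for the function above: an olympusV2 note with
 * n->count == 2 starts at 6 + 2 + 2 + 2*12 = 34 bytes, grows by
 * (8-6) + 4 = 6 bytes for the longer "OLYMPUS\0" signature and extra
 * header words (40 total), and any entry whose data exceeds 4 bytes is
 * then appended to the end via exif_mem_realloc().
 */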
static void
exif_mnote_data_olympus_load (ExifMnoteData *en,
const unsigned char *buf, unsigned int buf_size)
{
ExifMnoteDataOlympus *n = (ExifMnoteDataOlympus *) en;
ExifShort c;
size_t i, tcount, o, o2, datao = 6, base = 0;
if (!n || !buf || !buf_size) {
exif_log (en->log, EXIF_LOG_CODE_CORRUPT_DATA,
"ExifMnoteDataOlympus", "Short MakerNote");
return;
}
o2 = 6 + n->offset; /* Start of interesting data */
if (CHECKOVERFLOW(o2,buf_size,10)) {
exif_log (en->log, EXIF_LOG_CODE_CORRUPT_DATA,
"ExifMnoteDataOlympus", "Short MakerNote");
return;
}
/*
* Olympus headers start with "OLYMP" and need to have at least
* a size of 22 bytes (6 for 'OLYMP', 2 other bytes, 2 for the
* number of entries, and 12 for one entry).
*
* Sanyo format is identical and uses identical tags except that
* header starts with "SANYO".
*
* Epson format is identical and uses identical tags except that
* header starts with "EPSON".
*
* Nikon headers start with "Nikon" (6 bytes including '\0'),
* version number (1 or 2).
*
* Version 1 continues with 0, 1, 0, number_of_tags,
* or just with number_of_tags (models D1H, D1X...).
*
* Version 2 continues with an unknown byte (0 or 10),
* two unknown bytes (0), "MM" or "II", another byte 0 and
* lastly 0x2A.
*/
n->version = exif_mnote_data_olympus_identify_variant(buf+o2, buf_size-o2);
switch (n->version) {
case olympusV1:
case sanyoV1:
case epsonV1:
exif_log (en->log, EXIF_LOG_CODE_DEBUG, "ExifMnoteDataOlympus",
"Parsing Olympus/Sanyo/Epson maker note v1...");
/* The number of entries is at position 8. */
if (buf[o2 + 6] == 1)
n->order = EXIF_BYTE_ORDER_INTEL;
else if (buf[o2 + 6 + 1] == 1)
n->order = EXIF_BYTE_ORDER_MOTOROLA;
o2 += 8;
c = exif_get_short (buf + o2, n->order);
if ((!(c & 0xFF)) && (c > 0x500)) {
if (n->order == EXIF_BYTE_ORDER_INTEL) {
n->order = EXIF_BYTE_ORDER_MOTOROLA;
} else {
n->order = EXIF_BYTE_ORDER_INTEL;
}
}
break;
case olympusV2:
/* Olympus S760, S770 */
datao = o2;
o2 += 8;
if (CHECKOVERFLOW(o2,buf_size,4)) return;
exif_log (en->log, EXIF_LOG_CODE_DEBUG, "ExifMnoteDataOlympus",
"Parsing Olympus maker note v2 (0x%02x, %02x, %02x, %02x)...",
buf[o2 + 0], buf[o2 + 1], buf[o2 + 2], buf[o2 + 3]);
if ((buf[o2] == 'I') && (buf[o2 + 1] == 'I'))
n->order = EXIF_BYTE_ORDER_INTEL;
else if ((buf[o2] == 'M') && (buf[o2 + 1] == 'M'))
n->order = EXIF_BYTE_ORDER_MOTOROLA;
/* The number of entries is at position 8+4. */
o2 += 4;
break;
case nikonV1:
o2 += 6;
exif_log (en->log, EXIF_LOG_CODE_DEBUG, "ExifMnoteDataOlympus",
"Parsing Nikon maker note v1 (0x%02x, %02x, %02x, "
"%02x)...",
buf[o2 + 0], buf[o2 + 1], buf[o2 + 2], buf[o2 + 3]);
/* Skip version number */
o2 += 1;
/* Skip an unknown byte (00 or 0A). */
o2 += 1;
base = MNOTE_NIKON1_TAG_BASE;
/* Fix endianness, if needed */
c = exif_get_short (buf + o2, n->order);
if ((!(c & 0xFF)) && (c > 0x500)) {
if (n->order == EXIF_BYTE_ORDER_INTEL) {
n->order = EXIF_BYTE_ORDER_MOTOROLA;
} else {
n->order = EXIF_BYTE_ORDER_INTEL;
}
}
break;
case nikonV2:
o2 += 6;
if (CHECKOVERFLOW(o2,buf_size,12)) return;
exif_log (en->log, EXIF_LOG_CODE_DEBUG, "ExifMnoteDataOlympus",
"Parsing Nikon maker note v2 (0x%02x, %02x, %02x, "
"%02x, %02x, %02x, %02x, %02x)...",
buf[o2 + 0], buf[o2 + 1], buf[o2 + 2], buf[o2 + 3],
buf[o2 + 4], buf[o2 + 5], buf[o2 + 6], buf[o2 + 7]);
/* Skip version number */
o2 += 1;
/* Skip an unknown byte (00 or 0A). */
o2 += 1;
/* Skip 2 unknown bytes (00 00). */
o2 += 2;
/*
* Byte order. From here the data offset
* gets calculated.
*/
datao = o2;
if (!strncmp ((char *)&buf[o2], "II", 2))
n->order = EXIF_BYTE_ORDER_INTEL;
else if (!strncmp ((char *)&buf[o2], "MM", 2))
n->order = EXIF_BYTE_ORDER_MOTOROLA;
else {
exif_log (en->log, EXIF_LOG_CODE_DEBUG,
"ExifMnoteDataOlympus", "Unknown "
"byte order '%c%c'", buf[o2],
buf[o2 + 1]);
return;
}
o2 += 2;
/* Skip 2 unknown bytes (00 2A). */
o2 += 2;
/* Go to where the number of entries is. */
o2 = datao + exif_get_long (buf + o2, n->order);
break;
case nikonV0:
exif_log (en->log, EXIF_LOG_CODE_DEBUG, "ExifMnoteDataOlympus",
"Parsing Nikon maker note v0 (0x%02x, %02x, %02x, "
"%02x, %02x, %02x, %02x, %02x)...",
buf[o2 + 0], buf[o2 + 1], buf[o2 + 2], buf[o2 + 3],
buf[o2 + 4], buf[o2 + 5], buf[o2 + 6], buf[o2 + 7]);
/* 00 1b is # of entries in Motorola order - the rest should also be in MM order */
n->order = EXIF_BYTE_ORDER_MOTOROLA;
break;
default:
exif_log (en->log, EXIF_LOG_CODE_DEBUG, "ExifMnoteDataOlympus",
"Unknown Olympus variant %i.", n->version);
return;
}
/* Sanity check the offset */
if (CHECKOVERFLOW(o2,buf_size,2)) {
exif_log (en->log, EXIF_LOG_CODE_CORRUPT_DATA,
"ExifMnoteOlympus", "Short MakerNote");
return;
}
/* Read the number of tags */
c = exif_get_short (buf + o2, n->order);
o2 += 2;
/* Remove any old entries */
exif_mnote_data_olympus_clear (n);
/* Reserve enough space for all the possible MakerNote tags */
n->entries = exif_mem_alloc (en->mem, sizeof (MnoteOlympusEntry) * c);
if (!n->entries) {
EXIF_LOG_NO_MEMORY(en->log, "ExifMnoteOlympus", sizeof (MnoteOlympusEntry) * c);
return;
}
/* Parse all c entries, storing ones that are successfully parsed */
tcount = 0;
for (i = c, o = o2; i; --i, o += 12) {
size_t s;
if (CHECKOVERFLOW(o, buf_size, 12)) {
exif_log (en->log, EXIF_LOG_CODE_CORRUPT_DATA,
"ExifMnoteOlympus", "Short MakerNote");
break;
}
n->entries[tcount].tag = exif_get_short (buf + o, n->order) + base;
n->entries[tcount].format = exif_get_short (buf + o + 2, n->order);
n->entries[tcount].components = exif_get_long (buf + o + 4, n->order);
n->entries[tcount].order = n->order;
exif_log (en->log, EXIF_LOG_CODE_DEBUG, "ExifMnoteOlympus",
"Loading entry 0x%x ('%s')...", n->entries[tcount].tag,
mnote_olympus_tag_get_name (n->entries[tcount].tag));
/* exif_log (en->log, EXIF_LOG_CODE_DEBUG, "ExifMnoteOlympus",
"0x%x %d %ld*(%d)",
n->entries[tcount].tag,
n->entries[tcount].format,
n->entries[tcount].components,
(int)exif_format_get_size(n->entries[tcount].format)); */
/* Check if we overflow the multiplication. Use buf_size as the max size for integer overflow detection,
* we will check the buffer sizes closer later. */
if (exif_format_get_size (n->entries[tcount].format) &&
buf_size / exif_format_get_size (n->entries[tcount].format) < n->entries[tcount].components
) {
exif_log (en->log, EXIF_LOG_CODE_CORRUPT_DATA, "ExifMnoteOlympus", "Tag size overflow detected (%u * %lu)", exif_format_get_size (n->entries[tcount].format), n->entries[tcount].components);
continue;
}
/*
* Size? If bigger than 4 bytes, the actual data is not
* in the entry but somewhere else (offset).
*/
s = exif_format_get_size (n->entries[tcount].format) *
n->entries[tcount].components;
n->entries[tcount].size = s;
if (s) {
size_t dataofs = o + 8;
if (s > 4) {
/* The data in this case is merely a pointer */
dataofs = exif_get_long (buf + dataofs, n->order) + datao;
#ifdef EXIF_OVERCOME_SANYO_OFFSET_BUG
/* Some Sanyo models (e.g. VPC-C5, C40) suffer from a bug when
* writing the offset for the MNOTE_OLYMPUS_TAG_THUMBNAILIMAGE
* tag in its MakerNote. The offset is actually the absolute
* position in the file instead of the position within the IFD.
*/
if (dataofs > (buf_size - s) && n->version == sanyoV1) {
/* fix pointer */
dataofs -= datao + 6;
exif_log (en->log, EXIF_LOG_CODE_DEBUG,
"ExifMnoteOlympus",
"Inconsistent thumbnail tag offset; attempting to recover");
}
#endif
}
if (CHECKOVERFLOW(dataofs, buf_size, s)) {
exif_log (en->log, EXIF_LOG_CODE_DEBUG,
"ExifMnoteOlympus",
"Tag data past end of buffer (%u > %u)",
(unsigned)(dataofs + s), buf_size);
continue;
}
n->entries[tcount].data = exif_mem_alloc (en->mem, s);
if (!n->entries[tcount].data) {
EXIF_LOG_NO_MEMORY(en->log, "ExifMnoteOlympus", s);
continue;
}
memcpy (n->entries[tcount].data, buf + dataofs, s);
}
/* Tag was successfully parsed */
++tcount;
}
/* Store the count of successfully parsed tags */
n->count = tcount;
}
static unsigned int
exif_mnote_data_olympus_count (ExifMnoteData *n)
{
return n ? ((ExifMnoteDataOlympus *) n)->count : 0;
}
static unsigned int
exif_mnote_data_olympus_get_id (ExifMnoteData *d, unsigned int n)
{
ExifMnoteDataOlympus *note = (ExifMnoteDataOlympus *) d;
if (!note) return 0;
if (note->count <= n) return 0;
return note->entries[n].tag;
}
static const char *
exif_mnote_data_olympus_get_name (ExifMnoteData *d, unsigned int i)
{
ExifMnoteDataOlympus *n = (ExifMnoteDataOlympus *) d;
if (!n) return NULL;
if (i >= n->count) return NULL;
return mnote_olympus_tag_get_name (n->entries[i].tag);
}
static const char *
exif_mnote_data_olympus_get_title (ExifMnoteData *d, unsigned int i)
{
ExifMnoteDataOlympus *n = (ExifMnoteDataOlympus *) d;
if (!n) return NULL;
if (i >= n->count) return NULL;
return mnote_olympus_tag_get_title (n->entries[i].tag);
}
static const char *
exif_mnote_data_olympus_get_description (ExifMnoteData *d, unsigned int i)
{
ExifMnoteDataOlympus *n = (ExifMnoteDataOlympus *) d;
if (!n) return NULL;
if (i >= n->count) return NULL;
return mnote_olympus_tag_get_description (n->entries[i].tag);
}
static void
exif_mnote_data_olympus_set_byte_order (ExifMnoteData *d, ExifByteOrder o)
{
ExifByteOrder o_orig;
ExifMnoteDataOlympus *n = (ExifMnoteDataOlympus *) d;
unsigned int i;
if (!n) return;
o_orig = n->order;
n->order = o;
for (i = 0; i < n->count; i++) {
if (n->entries[i].components && (n->entries[i].size/n->entries[i].components < exif_format_get_size (n->entries[i].format)))
continue;
n->entries[i].order = o;
exif_array_set_byte_order (n->entries[i].format, n->entries[i].data,
n->entries[i].components, o_orig, o);
}
}
static void
exif_mnote_data_olympus_set_offset (ExifMnoteData *n, unsigned int o)
{
if (n) ((ExifMnoteDataOlympus *) n)->offset = o;
}
static enum OlympusVersion
exif_mnote_data_olympus_identify_variant (const unsigned char *buf,
unsigned int buf_size)
{
/* Olympus, Nikon, Sanyo, Epson */
if (buf_size >= 8) {
/* Match the terminating NUL character, too */
if (!memcmp (buf, "OLYMPUS", 8))
return olympusV2;
else if (!memcmp (buf, "OLYMP", 6))
return olympusV1;
else if (!memcmp (buf, "SANYO", 6))
return sanyoV1;
else if (!memcmp (buf, "EPSON", 6))
return epsonV1;
else if (!memcmp (buf, "Nikon", 6)) {
switch (buf[6]) {
case 1: return nikonV1;
case 2: return nikonV2;
default: return 0; /* Unrecognized Nikon variant */
}
}
}
/* Another variant of Nikon */
if ((buf_size >= 2) && (buf[0] == 0x00) && (buf[1] == 0x1b)) {
return nikonV0;
}
return unrecognized;
}
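/* Example of why the 8-byte test comes first: a note beginning
 * "OLYMPUS\0" matches the olympusV2 memcmp exactly, while plain
 * "OLYMP\0..." differs at byte 5 ('\0' vs 'U'), fails that test and
 * falls through to the 6-byte olympusV1 match. Testing the shorter
 * prefix first would misclassify every olympusV2 note.
 */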
int
exif_mnote_data_olympus_identify (const ExifData *ed, const ExifEntry *e)
{
int variant = exif_mnote_data_olympus_identify_variant(e->data, e->size);
if (variant == nikonV0) {
/* This variant needs some extra checking with the Make */
char value[5];
ExifEntry *em = exif_data_get_entry (ed, EXIF_TAG_MAKE);
variant = unrecognized;
if (em) {
const char *v = exif_entry_get_value (em, value, sizeof(value));
if (v && (!strncmp (v, "Nikon", sizeof(value)) ||
!strncmp (v, "NIKON", sizeof(value)) ))
/* When saved, this variant will be written out like the
* alternative nikonV2 form above instead
*/
variant = nikonV0;
}
}
return variant;
}
ExifMnoteData *
exif_mnote_data_olympus_new (ExifMem *mem)
{
ExifMnoteData *d;
if (!mem) return NULL;
d = exif_mem_alloc (mem, sizeof (ExifMnoteDataOlympus));
if (!d) return NULL;
exif_mnote_data_construct (d, mem);
/* Set up function pointers */
d->methods.free = exif_mnote_data_olympus_free;
d->methods.set_byte_order = exif_mnote_data_olympus_set_byte_order;
d->methods.set_offset = exif_mnote_data_olympus_set_offset;
d->methods.load = exif_mnote_data_olympus_load;
d->methods.save = exif_mnote_data_olympus_save;
d->methods.count = exif_mnote_data_olympus_count;
d->methods.get_id = exif_mnote_data_olympus_get_id;
d->methods.get_name = exif_mnote_data_olympus_get_name;
d->methods.get_title = exif_mnote_data_olympus_get_title;
d->methods.get_description = exif_mnote_data_olympus_get_description;
d->methods.get_value = exif_mnote_data_olympus_get_value;
return d;
}
| ./CrossVul/dataset_final_sorted/CWE-416/c/bad_4020_2 |
crossvul-cpp_data_good_3348_5 | /*
Copyright (c) 2014. The YARA Authors. All Rights Reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <assert.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#if _WIN32 || __CYGWIN__
#define PRIu64 "I64d"
#else
#include <inttypes.h>
#endif
#include <yara/mem.h>
#include <yara/error.h>
#include <yara/object.h>
#include <yara/exec.h>
#include <yara/utils.h>
int yr_object_create(
int8_t type,
const char* identifier,
YR_OBJECT* parent,
YR_OBJECT** object)
{
YR_OBJECT* obj;
int i;
size_t object_size = 0;
assert(parent != NULL || object != NULL);
switch (type)
{
case OBJECT_TYPE_STRUCTURE:
object_size = sizeof(YR_OBJECT_STRUCTURE);
break;
case OBJECT_TYPE_ARRAY:
object_size = sizeof(YR_OBJECT_ARRAY);
break;
case OBJECT_TYPE_DICTIONARY:
object_size = sizeof(YR_OBJECT_DICTIONARY);
break;
case OBJECT_TYPE_INTEGER:
object_size = sizeof(YR_OBJECT_INTEGER);
break;
case OBJECT_TYPE_FLOAT:
object_size = sizeof(YR_OBJECT_DOUBLE);
break;
case OBJECT_TYPE_STRING:
object_size = sizeof(YR_OBJECT_STRING);
break;
case OBJECT_TYPE_FUNCTION:
object_size = sizeof(YR_OBJECT_FUNCTION);
break;
default:
assert(FALSE);
}
obj = (YR_OBJECT*) yr_malloc(object_size);
if (obj == NULL)
return ERROR_INSUFFICIENT_MEMORY;
obj->type = type;
obj->identifier = yr_strdup(identifier);
obj->parent = parent;
obj->data = NULL;
switch(type)
{
case OBJECT_TYPE_STRUCTURE:
((YR_OBJECT_STRUCTURE*) obj)->members = NULL;
break;
case OBJECT_TYPE_ARRAY:
((YR_OBJECT_ARRAY*) obj)->items = NULL;
((YR_OBJECT_ARRAY*) obj)->prototype_item = NULL;
break;
case OBJECT_TYPE_DICTIONARY:
((YR_OBJECT_DICTIONARY*) obj)->items = NULL;
((YR_OBJECT_DICTIONARY*) obj)->prototype_item = NULL;
break;
case OBJECT_TYPE_INTEGER:
((YR_OBJECT_INTEGER*) obj)->value = UNDEFINED;
break;
case OBJECT_TYPE_FLOAT:
((YR_OBJECT_DOUBLE*) obj)->value = NAN;
break;
case OBJECT_TYPE_STRING:
((YR_OBJECT_STRING*) obj)->value = NULL;
break;
case OBJECT_TYPE_FUNCTION:
((YR_OBJECT_FUNCTION*) obj)->return_obj = NULL;
for (i = 0; i < MAX_OVERLOADED_FUNCTIONS; i++)
{
((YR_OBJECT_FUNCTION*) obj)->prototypes[i].arguments_fmt = NULL;
((YR_OBJECT_FUNCTION*) obj)->prototypes[i].code = NULL;
}
break;
}
if (obj->identifier == NULL)
{
yr_free(obj);
return ERROR_INSUFFICIENT_MEMORY;
}
if (parent != NULL)
{
assert(parent->type == OBJECT_TYPE_STRUCTURE ||
parent->type == OBJECT_TYPE_ARRAY ||
parent->type == OBJECT_TYPE_DICTIONARY ||
parent->type == OBJECT_TYPE_FUNCTION);
switch(parent->type)
{
case OBJECT_TYPE_STRUCTURE:
FAIL_ON_ERROR_WITH_CLEANUP(
yr_object_structure_set_member(parent, obj),
{
yr_free((void*) obj->identifier);
yr_free(obj);
});
break;
case OBJECT_TYPE_ARRAY:
((YR_OBJECT_ARRAY*) parent)->prototype_item = obj;
break;
case OBJECT_TYPE_DICTIONARY:
((YR_OBJECT_DICTIONARY*) parent)->prototype_item = obj;
break;
case OBJECT_TYPE_FUNCTION:
((YR_OBJECT_FUNCTION*) parent)->return_obj = obj;
break;
}
}
if (object != NULL)
*object = obj;
return ERROR_SUCCESS;
}
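/*
 * Usage sketch (identifiers are illustrative, not from a real module):
 *
 *   YR_OBJECT* pe;
 *   YR_OBJECT* machine;
 *
 *   yr_object_create(OBJECT_TYPE_STRUCTURE, "pe", NULL, &pe);
 *   yr_object_create(OBJECT_TYPE_INTEGER, "machine", pe, &machine);
 *
 * Because the second call passes a structure parent, the new integer is
 * linked in via yr_object_structure_set_member(), and destroying "pe"
 * later frees "machine" as well.
 */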
int yr_object_function_create(
const char* identifier,
const char* arguments_fmt,
const char* return_fmt,
YR_MODULE_FUNC code,
YR_OBJECT* parent,
YR_OBJECT** function)
{
YR_OBJECT* return_obj;
YR_OBJECT* o = NULL;
YR_OBJECT_FUNCTION* f = NULL;
int8_t return_type;
int i;
switch (*return_fmt)
{
case 'i':
return_type = OBJECT_TYPE_INTEGER;
break;
case 's':
return_type = OBJECT_TYPE_STRING;
break;
case 'f':
return_type = OBJECT_TYPE_FLOAT;
break;
default:
return ERROR_INVALID_FORMAT;
}
if (parent != NULL)
{
// The parent of a function must be a structure.
assert(parent->type == OBJECT_TYPE_STRUCTURE);
// Try to find if the structure already has a function
// with that name. In that case this is a function overload.
f = (YR_OBJECT_FUNCTION*) yr_object_lookup_field(parent, identifier);
// Overloaded functions must have the same return type.
if (f != NULL && return_type != f->return_obj->type)
return ERROR_WRONG_RETURN_TYPE;
}
if (f == NULL) // Function doesn't exist yet
{
FAIL_ON_ERROR(
yr_object_create(
OBJECT_TYPE_FUNCTION,
identifier,
parent,
&o));
FAIL_ON_ERROR_WITH_CLEANUP(
yr_object_create(
return_type,
"result",
o,
&return_obj),
yr_object_destroy(o));
f = (YR_OBJECT_FUNCTION*) o;
}
for (i = 0; i < MAX_OVERLOADED_FUNCTIONS; i++)
{
if (f->prototypes[i].arguments_fmt == NULL)
{
f->prototypes[i].arguments_fmt = arguments_fmt;
f->prototypes[i].code = code;
break;
}
}
if (function != NULL)
*function = (YR_OBJECT*) f;
return ERROR_SUCCESS;
}
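/*
 * Overload sketch (the identifier, format strings and the *_impl
 * callbacks are hypothetical):
 *
 *   yr_object_function_create(
 *       "checksum", "ii", "i", checksum_impl, module_struct, NULL);
 *   yr_object_function_create(
 *       "checksum", "s", "i", checksum_str_impl, module_struct, NULL);
 *
 * The second call finds the existing function on the parent structure
 * and fills a second prototype slot; it would fail with
 * ERROR_WRONG_RETURN_TYPE if the return format differed from "i".
 */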
int yr_object_from_external_variable(
YR_EXTERNAL_VARIABLE* external,
YR_OBJECT** object)
{
YR_OBJECT* obj;
int result;
uint8_t obj_type = 0;
switch(external->type)
{
case EXTERNAL_VARIABLE_TYPE_INTEGER:
case EXTERNAL_VARIABLE_TYPE_BOOLEAN:
obj_type = OBJECT_TYPE_INTEGER;
break;
case EXTERNAL_VARIABLE_TYPE_FLOAT:
obj_type = OBJECT_TYPE_FLOAT;
break;
case EXTERNAL_VARIABLE_TYPE_STRING:
case EXTERNAL_VARIABLE_TYPE_MALLOC_STRING:
obj_type = OBJECT_TYPE_STRING;
break;
default:
assert(FALSE);
}
result = yr_object_create(
obj_type,
external->identifier,
NULL,
&obj);
if (result == ERROR_SUCCESS)
{
switch(external->type)
{
case EXTERNAL_VARIABLE_TYPE_INTEGER:
case EXTERNAL_VARIABLE_TYPE_BOOLEAN:
yr_object_set_integer(external->value.i, obj, NULL);
break;
case EXTERNAL_VARIABLE_TYPE_FLOAT:
yr_object_set_float(external->value.f, obj, NULL);
break;
case EXTERNAL_VARIABLE_TYPE_STRING:
case EXTERNAL_VARIABLE_TYPE_MALLOC_STRING:
yr_object_set_string(
external->value.s, strlen(external->value.s), obj, NULL);
break;
}
*object = obj;
}
return result;
}
void yr_object_destroy(
YR_OBJECT* object)
{
YR_STRUCTURE_MEMBER* member;
YR_STRUCTURE_MEMBER* next_member;
YR_ARRAY_ITEMS* array_items;
YR_DICTIONARY_ITEMS* dict_items;
SIZED_STRING* str;
int i;
if (object == NULL)
return;
switch(object->type)
{
case OBJECT_TYPE_STRUCTURE:
member = ((YR_OBJECT_STRUCTURE*) object)->members;
while (member != NULL)
{
next_member = member->next;
yr_object_destroy(member->object);
yr_free(member);
member = next_member;
}
break;
case OBJECT_TYPE_STRING:
str = ((YR_OBJECT_STRING*) object)->value;
if (str != NULL)
yr_free(str);
break;
case OBJECT_TYPE_ARRAY:
if (((YR_OBJECT_ARRAY*) object)->prototype_item != NULL)
yr_object_destroy(((YR_OBJECT_ARRAY*) object)->prototype_item);
array_items = ((YR_OBJECT_ARRAY*) object)->items;
if (array_items != NULL)
{
for (i = 0; i < array_items->count; i++)
if (array_items->objects[i] != NULL)
yr_object_destroy(array_items->objects[i]);
}
yr_free(array_items);
break;
case OBJECT_TYPE_DICTIONARY:
if (((YR_OBJECT_DICTIONARY*) object)->prototype_item != NULL)
yr_object_destroy(((YR_OBJECT_DICTIONARY*) object)->prototype_item);
dict_items = ((YR_OBJECT_DICTIONARY*) object)->items;
if (dict_items != NULL)
{
for (i = 0; i < dict_items->used; i++)
{
if (dict_items->objects[i].key != NULL)
yr_free(dict_items->objects[i].key);
if (dict_items->objects[i].obj != NULL)
yr_object_destroy(dict_items->objects[i].obj);
}
}
yr_free(dict_items);
break;
case OBJECT_TYPE_FUNCTION:
yr_object_destroy(((YR_OBJECT_FUNCTION*) object)->return_obj);
break;
}
yr_free((void*) object->identifier);
yr_free(object);
}
YR_OBJECT* yr_object_lookup_field(
YR_OBJECT* object,
const char* field_name)
{
YR_STRUCTURE_MEMBER* member;
assert(object != NULL);
assert(object->type == OBJECT_TYPE_STRUCTURE);
member = ((YR_OBJECT_STRUCTURE*) object)->members;
while (member != NULL)
{
if (strcmp(member->object->identifier, field_name) == 0)
return member->object;
member = member->next;
}
return NULL;
}
YR_OBJECT* _yr_object_lookup(
YR_OBJECT* object,
int flags,
const char* pattern,
va_list args)
{
YR_OBJECT* obj = object;
const char* p = pattern;
const char* key = NULL;
char str[256];
int i;
int index = -1;
while (obj != NULL)
{
i = 0;
while(*p != '\0' && *p != '.' && *p != '[' && i < sizeof(str) - 1)
{
str[i++] = *p++;
}
str[i] = '\0';
if (obj->type != OBJECT_TYPE_STRUCTURE)
return NULL;
obj = yr_object_lookup_field(obj, str);
if (obj == NULL)
return NULL;
if (*p == '[')
{
p++;
if (*p == '%')
{
p++;
switch(*p++)
{
case 'i':
index = va_arg(args, int);
break;
case 's':
key = va_arg(args, const char*);
break;
default:
return NULL;
}
}
else if (*p >= '0' && *p <= '9')
{
index = (int) strtol(p, (char**) &p, 10);
}
else if (*p == '"')
{
i = 0;
p++; // skip the opening quotation mark
while (*p != '"' && *p != '\0' && i < sizeof(str))
str[i++] = *p++;
str[i] = '\0';
p++; // skip the closing quotation mark
key = str;
}
else
{
return NULL;
}
assert(*p == ']');
p++;
assert(*p == '.' || *p == '\0');
switch(obj->type)
{
case OBJECT_TYPE_ARRAY:
assert(index != -1);
obj = yr_object_array_get_item(obj, flags, index);
break;
case OBJECT_TYPE_DICTIONARY:
assert(key != NULL);
obj = yr_object_dict_get_item(obj, flags, key);
break;
}
}
if (*p == '\0')
break;
p++;
}
return obj;
}
YR_OBJECT* yr_object_lookup(
YR_OBJECT* object,
int flags,
const char* pattern,
...)
{
YR_OBJECT* result;
va_list args;
va_start(args, pattern);
result = _yr_object_lookup(object, flags, pattern, args);
va_end(args);
return result;
}
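/* Usage sketch (illustrative): the pattern grammar parsed by
   _yr_object_lookup above accepts dotted field names plus three index
   forms inside brackets. The field names here are hypothetical:

     yr_object_lookup(root, 0, "pe.number_of_sections");       // plain field
     yr_object_lookup(root, 0, "pe.sections[3].name");         // literal index
     yr_object_lookup(root, 0, "pe.sections[%i].name", i);     // index from vararg
     yr_object_lookup(root, 0, "pe.version_info[\"key\"]");    // literal dict key
     yr_object_lookup(root, 0, "pe.version_info[%s]", key);    // key from vararg

   Passing OBJECT_CREATE as flags makes array/dictionary lookups clone the
   prototype item on a miss instead of returning NULL. */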
int yr_object_copy(
YR_OBJECT* object,
YR_OBJECT** object_copy)
{
YR_OBJECT* copy;
YR_OBJECT* o;
YR_STRUCTURE_MEMBER* structure_member;
YR_OBJECT_FUNCTION* func;
YR_OBJECT_FUNCTION* func_copy;
int i;
*object_copy = NULL;
FAIL_ON_ERROR(yr_object_create(
object->type,
object->identifier,
NULL,
&copy));
switch(object->type)
{
case OBJECT_TYPE_INTEGER:
((YR_OBJECT_INTEGER*) copy)->value = ((YR_OBJECT_INTEGER*) object)->value;
break;
case OBJECT_TYPE_STRING:
if (((YR_OBJECT_STRING*) object)->value != NULL)
{
((YR_OBJECT_STRING*) copy)->value = sized_string_dup(
((YR_OBJECT_STRING*) object)->value);
}
else
{
((YR_OBJECT_STRING*) copy)->value = NULL;
}
break;
case OBJECT_TYPE_FLOAT:
((YR_OBJECT_DOUBLE*) copy)->value = ((YR_OBJECT_DOUBLE*) object)->value;
break;
case OBJECT_TYPE_FUNCTION:
func = (YR_OBJECT_FUNCTION*) object;
func_copy = (YR_OBJECT_FUNCTION*) copy;
FAIL_ON_ERROR_WITH_CLEANUP(
yr_object_copy(func->return_obj, &func_copy->return_obj),
yr_object_destroy(copy));
for (i = 0; i < MAX_OVERLOADED_FUNCTIONS; i++)
func_copy->prototypes[i] = func->prototypes[i];
break;
case OBJECT_TYPE_STRUCTURE:
structure_member = ((YR_OBJECT_STRUCTURE*) object)->members;
while (structure_member != NULL)
{
FAIL_ON_ERROR_WITH_CLEANUP(
yr_object_copy(structure_member->object, &o),
yr_object_destroy(copy));
FAIL_ON_ERROR_WITH_CLEANUP(
yr_object_structure_set_member(copy, o),
yr_free(o);
yr_object_destroy(copy));
structure_member = structure_member->next;
}
break;
case OBJECT_TYPE_ARRAY:
yr_object_copy(
((YR_OBJECT_ARRAY *) object)->prototype_item,
&o);
((YR_OBJECT_ARRAY *)copy)->prototype_item = o;
break;
case OBJECT_TYPE_DICTIONARY:
yr_object_copy(
((YR_OBJECT_DICTIONARY *) object)->prototype_item,
&o);
((YR_OBJECT_DICTIONARY *)copy)->prototype_item = o;
break;
default:
assert(FALSE);
}
*object_copy = copy;
return ERROR_SUCCESS;
}
int yr_object_structure_set_member(
YR_OBJECT* object,
YR_OBJECT* member)
{
YR_STRUCTURE_MEMBER* sm;
assert(object->type == OBJECT_TYPE_STRUCTURE);
// Check if the object already has a member with the same identifier
if (yr_object_lookup_field(object, member->identifier) != NULL)
return ERROR_DUPLICATED_STRUCTURE_MEMBER;
sm = (YR_STRUCTURE_MEMBER*) yr_malloc(sizeof(YR_STRUCTURE_MEMBER));
if (sm == NULL)
return ERROR_INSUFFICIENT_MEMORY;
member->parent = object;
sm->object = member;
sm->next = ((YR_OBJECT_STRUCTURE*) object)->members;
((YR_OBJECT_STRUCTURE*) object)->members = sm;
return ERROR_SUCCESS;
}
YR_OBJECT* yr_object_array_get_item(
YR_OBJECT* object,
int flags,
int index)
{
YR_OBJECT* result = NULL;
YR_OBJECT_ARRAY* array;
assert(object->type == OBJECT_TYPE_ARRAY);
if (index < 0)
return NULL;
array = (YR_OBJECT_ARRAY*) object;
if (array->items != NULL && array->items->count > index)
result = array->items->objects[index];
if (result == NULL && flags & OBJECT_CREATE)
{
yr_object_copy(array->prototype_item, &result);
if (result != NULL)
yr_object_array_set_item(object, result, index);
}
return result;
}
int yr_object_array_set_item(
YR_OBJECT* object,
YR_OBJECT* item,
int index)
{
YR_OBJECT_ARRAY* array;
int i;
int count;
assert(index >= 0);
assert(object->type == OBJECT_TYPE_ARRAY);
array = ((YR_OBJECT_ARRAY*) object);
if (array->items == NULL)
{
count = yr_max(64, (index + 1) * 2);
array->items = (YR_ARRAY_ITEMS*) yr_malloc(
sizeof(YR_ARRAY_ITEMS) + count * sizeof(YR_OBJECT*));
if (array->items == NULL)
return ERROR_INSUFFICIENT_MEMORY;
memset(array->items->objects, 0, count * sizeof(YR_OBJECT*));
array->items->count = count;
}
else if (index >= array->items->count)
{
// grow to at least cover the requested index, not just double
count = yr_max((index + 1) * 2, array->items->count * 2);
array->items = (YR_ARRAY_ITEMS*) yr_realloc(
array->items,
sizeof(YR_ARRAY_ITEMS) + count * sizeof(YR_OBJECT*));
if (array->items == NULL)
return ERROR_INSUFFICIENT_MEMORY;
for (i = array->items->count; i < count; i++)
array->items->objects[i] = NULL;
array->items->count = count;
}
item->parent = object;
array->items->objects[index] = item;
return ERROR_SUCCESS;
}
YR_OBJECT* yr_object_dict_get_item(
YR_OBJECT* object,
int flags,
const char* key)
{
int i;
YR_OBJECT* result = NULL;
YR_OBJECT_DICTIONARY* dict;
assert(object->type == OBJECT_TYPE_DICTIONARY);
dict = (YR_OBJECT_DICTIONARY*) object;
if (dict->items != NULL)
{
for (i = 0; i < dict->items->used; i++)
{
if (strcmp(dict->items->objects[i].key, key) == 0)
result = dict->items->objects[i].obj;
}
}
if (result == NULL && flags & OBJECT_CREATE)
{
yr_object_copy(dict->prototype_item, &result);
if (result != NULL)
yr_object_dict_set_item(object, result, key);
}
return result;
}
int yr_object_dict_set_item(
YR_OBJECT* object,
YR_OBJECT* item,
const char* key)
{
YR_OBJECT_DICTIONARY* dict;
int i;
int count;
assert(object->type == OBJECT_TYPE_DICTIONARY);
dict = ((YR_OBJECT_DICTIONARY*) object);
if (dict->items == NULL)
{
count = 64;
dict->items = (YR_DICTIONARY_ITEMS*) yr_malloc(
sizeof(YR_DICTIONARY_ITEMS) + count * sizeof(dict->items->objects[0]));
if (dict->items == NULL)
return ERROR_INSUFFICIENT_MEMORY;
memset(dict->items->objects, 0, count * sizeof(dict->items->objects[0]));
dict->items->free = count;
dict->items->used = 0;
}
else if (dict->items->free == 0)
{
count = dict->items->used * 2;
dict->items = (YR_DICTIONARY_ITEMS*) yr_realloc(
dict->items,
sizeof(YR_DICTIONARY_ITEMS) + count * sizeof(dict->items->objects[0]));
if (dict->items == NULL)
return ERROR_INSUFFICIENT_MEMORY;
for (i = dict->items->used; i < count; i++)
{
dict->items->objects[i].key = NULL;
dict->items->objects[i].obj = NULL;
}
dict->items->free = dict->items->used;
}
item->parent = object;
dict->items->objects[dict->items->used].key = yr_strdup(key);
dict->items->objects[dict->items->used].obj = item;
dict->items->used++;
dict->items->free--;
return ERROR_SUCCESS;
}
int yr_object_has_undefined_value(
YR_OBJECT* object,
const char* field,
...)
{
YR_OBJECT* field_obj;
va_list args;
va_start(args, field);
if (field != NULL)
field_obj = _yr_object_lookup(object, 0, field, args);
else
field_obj = object;
va_end(args);
if (field_obj == NULL)
return TRUE;
switch(field_obj->type)
{
case OBJECT_TYPE_FLOAT:
return isnan(((YR_OBJECT_DOUBLE*) field_obj)->value);
case OBJECT_TYPE_STRING:
return ((YR_OBJECT_STRING*) field_obj)->value == NULL;
case OBJECT_TYPE_INTEGER:
return ((YR_OBJECT_INTEGER*) field_obj)->value == UNDEFINED;
}
return FALSE;
}
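/* Note (added for clarity): "undefined" is encoded with a per-type sentinel,
   matching the initialization done in yr_object_create:

     integer -> value == UNDEFINED
     float   -> value is NaN (hence the isnan() test above)
     string  -> value == NULL

   Any lookup miss (field_obj == NULL) also counts as undefined. */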
int64_t yr_object_get_integer(
YR_OBJECT* object,
const char* field,
...)
{
YR_OBJECT* integer_obj;
va_list args;
va_start(args, field);
if (field != NULL)
integer_obj = _yr_object_lookup(object, 0, field, args);
else
integer_obj = object;
va_end(args);
if (integer_obj == NULL)
return UNDEFINED;
assertf(integer_obj->type == OBJECT_TYPE_INTEGER,
"type of \"%s\" is not integer\n", field);
return ((YR_OBJECT_INTEGER*) integer_obj)->value;
}
double yr_object_get_float(
YR_OBJECT* object,
const char* field,
...)
{
YR_OBJECT* double_obj;
va_list args;
va_start(args, field);
if (field != NULL)
double_obj = _yr_object_lookup(object, 0, field, args);
else
double_obj = object;
va_end(args);
if (double_obj == NULL)
return NAN;
assertf(double_obj->type == OBJECT_TYPE_FLOAT,
"type of \"%s\" is not double\n", field);
return ((YR_OBJECT_DOUBLE*) double_obj)->value;
}
SIZED_STRING* yr_object_get_string(
YR_OBJECT* object,
const char* field,
...)
{
YR_OBJECT* string_obj;
va_list args;
va_start(args, field);
if (field != NULL)
string_obj = _yr_object_lookup(object, 0, field, args);
else
string_obj = object;
va_end(args);
if (string_obj == NULL)
return NULL;
assertf(string_obj->type == OBJECT_TYPE_STRING,
"type of \"%s\" is not string\n", field);
return ((YR_OBJECT_STRING*) string_obj)->value;
}
int yr_object_set_integer(
int64_t value,
YR_OBJECT* object,
const char* field,
...)
{
YR_OBJECT* integer_obj;
va_list args;
va_start(args, field);
if (field != NULL)
integer_obj = _yr_object_lookup(
object, OBJECT_CREATE, field, args);
else
integer_obj = object;
va_end(args);
assert(integer_obj != NULL);
assert(integer_obj->type == OBJECT_TYPE_INTEGER);
((YR_OBJECT_INTEGER*) integer_obj)->value = value;
return ERROR_SUCCESS;
}
int yr_object_set_float(
double value,
YR_OBJECT* object,
const char* field,
...)
{
YR_OBJECT* double_obj;
va_list args;
va_start(args, field);
if (field != NULL)
double_obj = _yr_object_lookup(
object, OBJECT_CREATE, field, args);
else
double_obj = object;
va_end(args);
assert(double_obj != NULL);
assert(double_obj->type == OBJECT_TYPE_FLOAT);
((YR_OBJECT_DOUBLE*) double_obj)->value = value;
return ERROR_SUCCESS;
}
int yr_object_set_string(
const char* value,
size_t len,
YR_OBJECT* object,
const char* field,
...)
{
YR_OBJECT_STRING* string_obj;
va_list args;
va_start(args, field);
if (field != NULL)
string_obj = (YR_OBJECT_STRING*) _yr_object_lookup(
object, OBJECT_CREATE, field, args);
else
string_obj = (YR_OBJECT_STRING*) object;
va_end(args);
assert(string_obj != NULL);
assert(string_obj->type == OBJECT_TYPE_STRING);
if (string_obj->value != NULL)
yr_free(string_obj->value);
if (value != NULL)
{
string_obj->value = (SIZED_STRING*) yr_malloc(len + sizeof(SIZED_STRING));
if (string_obj->value == NULL)
return ERROR_INSUFFICIENT_MEMORY;
string_obj->value->length = (uint32_t) len;
string_obj->value->flags = 0;
memcpy(string_obj->value->c_string, value, len);
string_obj->value->c_string[len] = '\0';
}
else
{
string_obj->value = NULL;
}
return ERROR_SUCCESS;
}
YR_OBJECT* yr_object_get_root(
YR_OBJECT* object)
{
YR_OBJECT* o = object;
while (o->parent != NULL)
o = o->parent;
return o;
}
YR_API void yr_object_print_data(
YR_OBJECT* object,
int indent,
int print_identifier)
{
YR_DICTIONARY_ITEMS* dict_items;
YR_ARRAY_ITEMS* array_items;
YR_STRUCTURE_MEMBER* member;
char indent_spaces[32];
int i;
indent = yr_min(indent, sizeof(indent_spaces) - 1);
memset(indent_spaces, '\t', indent);
indent_spaces[indent] = '\0';
if (print_identifier && object->type != OBJECT_TYPE_FUNCTION)
printf("%s%s", indent_spaces, object->identifier);
switch(object->type)
{
case OBJECT_TYPE_INTEGER:
if (((YR_OBJECT_INTEGER*) object)->value != UNDEFINED)
printf(" = %" PRIu64, ((YR_OBJECT_INTEGER*) object)->value);
else
printf(" = UNDEFINED");
break;
case OBJECT_TYPE_STRING:
if (((YR_OBJECT_STRING*) object)->value != NULL)
{
size_t l;
printf(" = \"");
for (l = 0; l < ((YR_OBJECT_STRING*) object)->value->length; l++)
{
char c = ((YR_OBJECT_STRING*) object)->value->c_string[l];
if (isprint((unsigned char) c))
printf("%c", c);
else
printf("\\x%02x", (unsigned char) c);
}
printf("\"");
}
else
{
printf(" = UNDEFINED");
}
break;
case OBJECT_TYPE_STRUCTURE:
member = ((YR_OBJECT_STRUCTURE*) object)->members;
while (member != NULL)
{
if (member->object->type != OBJECT_TYPE_FUNCTION)
{
printf("\n");
yr_object_print_data(member->object, indent + 1, 1);
}
member = member->next;
}
break;
case OBJECT_TYPE_ARRAY:
array_items = ((YR_OBJECT_ARRAY*) object)->items;
if (array_items != NULL)
{
for (i = 0; i < array_items->count; i++)
{
if (array_items->objects[i] != NULL)
{
printf("\n%s\t[%d]", indent_spaces, i);
yr_object_print_data(array_items->objects[i], indent + 1, 0);
}
}
}
break;
case OBJECT_TYPE_DICTIONARY:
dict_items = ((YR_OBJECT_DICTIONARY*) object)->items;
if (dict_items != NULL)
{
for (i = 0; i < dict_items->used; i++)
{
printf("\n%s\t%s", indent_spaces, dict_items->objects[i].key);
yr_object_print_data(dict_items->objects[i].obj, indent + 1, 0);
}
}
break;
}
}
/*
* GPAC - Multimedia Framework C SDK
*
* Authors: Jean Le Feuvre
* Copyright (c) Telecom ParisTech 2000-2012
* All rights reserved
*
* This file is part of GPAC / ISO Media File Format sub-project
*
* GPAC is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* GPAC is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
#include <gpac/internal/isomedia_dev.h>
#ifndef GPAC_DISABLE_ISOM
void co64_del(GF_Box *s)
{
GF_ChunkLargeOffsetBox *ptr;
ptr = (GF_ChunkLargeOffsetBox *) s;
if (ptr == NULL) return;
if (ptr->offsets) gf_free(ptr->offsets);
gf_free(ptr);
}
GF_Err co64_Read(GF_Box *s,GF_BitStream *bs)
{
u32 entries;
GF_ChunkLargeOffsetBox *ptr = (GF_ChunkLargeOffsetBox *) s;
ptr->nb_entries = gf_bs_read_u32(bs);
ISOM_DECREASE_SIZE(ptr, 4)
if (ptr->nb_entries > ptr->size / 8) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid number of entries %d in co64\n", ptr->nb_entries));
return GF_ISOM_INVALID_FILE;
}
ptr->offsets = (u64 *) gf_malloc(ptr->nb_entries * sizeof(u64) );
if (ptr->offsets == NULL) return GF_OUT_OF_MEM;
ptr->alloc_size = ptr->nb_entries;
for (entries = 0; entries < ptr->nb_entries; entries++) {
ptr->offsets[entries] = gf_bs_read_u64(bs);
}
return GF_OK;
}
GF_Box *co64_New()
{
ISOM_DECL_BOX_ALLOC(GF_ChunkLargeOffsetBox, GF_ISOM_BOX_TYPE_CO64);
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err co64_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
u32 i;
GF_ChunkLargeOffsetBox *ptr = (GF_ChunkLargeOffsetBox *) s;
e = gf_isom_full_box_write(s, bs);
if (e) return e;
gf_bs_write_u32(bs, ptr->nb_entries);
for (i = 0; i < ptr->nb_entries; i++ ) {
gf_bs_write_u64(bs, ptr->offsets[i]);
}
return GF_OK;
}
GF_Err co64_Size(GF_Box *s)
{
GF_ChunkLargeOffsetBox *ptr = (GF_ChunkLargeOffsetBox *) s;
ptr->size += 4 + (8 * ptr->nb_entries);
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
void cprt_del(GF_Box *s)
{
GF_CopyrightBox *ptr = (GF_CopyrightBox *) s;
if (ptr == NULL) return;
if (ptr->notice)
gf_free(ptr->notice);
gf_free(ptr);
}
GF_Box *chpl_New()
{
ISOM_DECL_BOX_ALLOC(GF_ChapterListBox, GF_ISOM_BOX_TYPE_CHPL);
tmp->list = gf_list_new();
tmp->version = 1;
return (GF_Box *)tmp;
}
void chpl_del(GF_Box *s)
{
GF_ChapterListBox *ptr = (GF_ChapterListBox *) s;
if (ptr == NULL) return;
while (gf_list_count(ptr->list)) {
GF_ChapterEntry *ce = (GF_ChapterEntry *)gf_list_get(ptr->list, 0);
if (ce->name) gf_free(ce->name);
gf_free(ce);
gf_list_rem(ptr->list, 0);
}
gf_list_del(ptr->list);
gf_free(ptr);
}
/*this is using chpl format according to some NeroRecode samples*/
GF_Err chpl_Read(GF_Box *s,GF_BitStream *bs)
{
GF_ChapterEntry *ce;
u32 nb_chaps, len, i, count;
GF_ChapterListBox *ptr = (GF_ChapterListBox *)s;
/*reserved or ???*/
gf_bs_read_u32(bs);
nb_chaps = gf_bs_read_u8(bs);
count = 0;
while (nb_chaps) {
GF_SAFEALLOC(ce, GF_ChapterEntry);
if (!ce) return GF_OUT_OF_MEM;
ce->start_time = gf_bs_read_u64(bs);
len = gf_bs_read_u8(bs);
if (len) {
ce->name = (char *)gf_malloc(sizeof(char)*(len+1));
if (!ce->name) { gf_free(ce); return GF_OUT_OF_MEM; }
gf_bs_read_data(bs, ce->name, len);
ce->name[len] = 0;
} else {
ce->name = gf_strdup("");
}
for (i=0; i<count; i++) {
GF_ChapterEntry *ace = (GF_ChapterEntry *) gf_list_get(ptr->list, i);
if (ace->start_time >= ce->start_time) {
gf_list_insert(ptr->list, ce, i);
ce = NULL;
break;
}
}
if (ce) gf_list_add(ptr->list, ce);
count++;
nb_chaps--;
}
return GF_OK;
}
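/* Payload layout implied by the parser above (informal sketch, per the
   NeroRecode-style 'chpl' samples):

     u32 reserved
     u8  chapter_count
     then, per chapter:
       u64 start_time
       u8  name_length
       u8  name[name_length]   (not NUL-terminated on disk)

   Entries are kept sorted by start_time via the insertion loop above. */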
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err chpl_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
u32 count, i;
GF_ChapterListBox *ptr = (GF_ChapterListBox *) s;
e = gf_isom_full_box_write(s, bs);
if (e) return e;
count = gf_list_count(ptr->list);
gf_bs_write_u32(bs, 0);
gf_bs_write_u8(bs, count);
for (i=0; i<count; i++) {
u32 len;
GF_ChapterEntry *ce = (GF_ChapterEntry *)gf_list_get(ptr->list, i);
gf_bs_write_u64(bs, ce->start_time);
if (ce->name) {
len = (u32) strlen(ce->name);
if (len>255) len = 255;
gf_bs_write_u8(bs, len);
gf_bs_write_data(bs, ce->name, len);
} else {
gf_bs_write_u8(bs, 0);
}
}
return GF_OK;
}
GF_Err chpl_Size(GF_Box *s)
{
u32 count, i;
GF_ChapterListBox *ptr = (GF_ChapterListBox *)s;
ptr->size += 5;
count = gf_list_count(ptr->list);
for (i=0; i<count; i++) {
GF_ChapterEntry *ce = (GF_ChapterEntry *)gf_list_get(ptr->list, i);
ptr->size += 9; /*64bit time stamp + 8bit str len*/
if (ce->name) ptr->size += strlen(ce->name);
}
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
GF_Err cprt_Read(GF_Box *s,GF_BitStream *bs)
{
GF_CopyrightBox *ptr = (GF_CopyrightBox *)s;
gf_bs_read_int(bs, 1);
//the spec is unclear here, just says "the value 0 is interpreted as undetermined"
ptr->packedLanguageCode[0] = gf_bs_read_int(bs, 5);
ptr->packedLanguageCode[1] = gf_bs_read_int(bs, 5);
ptr->packedLanguageCode[2] = gf_bs_read_int(bs, 5);
ISOM_DECREASE_SIZE(ptr, 2);
//but before or after compaction ?? We assume before
if (ptr->packedLanguageCode[0] || ptr->packedLanguageCode[1] || ptr->packedLanguageCode[2]) {
ptr->packedLanguageCode[0] += 0x60;
ptr->packedLanguageCode[1] += 0x60;
ptr->packedLanguageCode[2] += 0x60;
} else {
ptr->packedLanguageCode[0] = 'u';
ptr->packedLanguageCode[1] = 'n';
ptr->packedLanguageCode[2] = 'd';
}
if (ptr->size) {
u32 bytesToRead = (u32) ptr->size;
ptr->notice = (char*)gf_malloc(bytesToRead * sizeof(char));
if (ptr->notice == NULL) return GF_OUT_OF_MEM;
gf_bs_read_data(bs, ptr->notice, bytesToRead);
}
return GF_OK;
}
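/* Worked example (added for clarity): the language code is packed as three
   5-bit values, each letter stored as (c - 0x60). For "und":

     'u' - 0x60 = 0x15,  'n' - 0x60 = 0x0E,  'd' - 0x60 = 0x04

   so the reader above adds 0x60 back, and an all-zero field is mapped to
   the conventional "und" (undetermined). */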
GF_Box *cprt_New()
{
ISOM_DECL_BOX_ALLOC(GF_CopyrightBox, GF_ISOM_BOX_TYPE_CPRT);
tmp->packedLanguageCode[0] = 'u';
tmp->packedLanguageCode[1] = 'n';
tmp->packedLanguageCode[2] = 'd';
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err cprt_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_CopyrightBox *ptr = (GF_CopyrightBox *) s;
e = gf_isom_full_box_write(s, bs);
if (e) return e;
gf_bs_write_int(bs, 0, 1);
if (ptr->packedLanguageCode[0]) {
gf_bs_write_int(bs, ptr->packedLanguageCode[0] - 0x60, 5);
gf_bs_write_int(bs, ptr->packedLanguageCode[1] - 0x60, 5);
gf_bs_write_int(bs, ptr->packedLanguageCode[2] - 0x60, 5);
} else {
gf_bs_write_int(bs, 0, 15);
}
if (ptr->notice) {
gf_bs_write_data(bs, ptr->notice, (u32) (strlen(ptr->notice) + 1) );
}
return GF_OK;
}
GF_Err cprt_Size(GF_Box *s)
{
GF_CopyrightBox *ptr = (GF_CopyrightBox *)s;
ptr->size += 2;
if (ptr->notice)
ptr->size += strlen(ptr->notice) + 1;
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
void kind_del(GF_Box *s)
{
GF_KindBox *ptr = (GF_KindBox *) s;
if (ptr == NULL) return;
if (ptr->schemeURI) gf_free(ptr->schemeURI);
if (ptr->value) gf_free(ptr->value);
gf_free(ptr);
}
GF_Err kind_Read(GF_Box *s,GF_BitStream *bs)
{
GF_KindBox *ptr = (GF_KindBox *)s;
if (ptr->size) {
u32 bytesToRead = (u32) ptr->size;
char *data;
u32 schemeURIlen;
data = (char*)gf_malloc(bytesToRead * sizeof(char));
if (data == NULL) return GF_OUT_OF_MEM;
gf_bs_read_data(bs, data, bytesToRead);
/*safety check in case the string is not null-terminated*/
if (data[bytesToRead-1]) {
char *str = (char*)gf_malloc((u32) bytesToRead + 1);
if (!str) { gf_free(data); return GF_OUT_OF_MEM; }
memcpy(str, data, (u32) bytesToRead);
str[bytesToRead] = 0;
gf_free(data);
data = str;
bytesToRead++;
}
ptr->schemeURI = gf_strdup(data);
schemeURIlen = (u32) strlen(data);
if (bytesToRead > schemeURIlen+1) {
/* read the value */
char *data_value = data + schemeURIlen +1;
ptr->value = gf_strdup(data_value);
}
gf_free(data);
}
return GF_OK;
}
GF_Box *kind_New()
{
ISOM_DECL_BOX_ALLOC(GF_KindBox, GF_ISOM_BOX_TYPE_KIND);
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err kind_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_KindBox *ptr = (GF_KindBox *) s;
e = gf_isom_full_box_write(s, bs);
if (e) return e;
gf_bs_write_data(bs, ptr->schemeURI, (u32) (strlen(ptr->schemeURI) + 1 ));
if (ptr->value) {
gf_bs_write_data(bs, ptr->value, (u32) (strlen(ptr->value) + 1) );
}
return GF_OK;
}
GF_Err kind_Size(GF_Box *s)
{
GF_KindBox *ptr = (GF_KindBox *)s;
ptr->size += strlen(ptr->schemeURI) + 1;
if (ptr->value) {
ptr->size += strlen(ptr->value) + 1;
}
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
void ctts_del(GF_Box *s)
{
GF_CompositionOffsetBox *ptr = (GF_CompositionOffsetBox *)s;
if (ptr->entries) gf_free(ptr->entries);
gf_free(ptr);
}
GF_Err ctts_Read(GF_Box *s, GF_BitStream *bs)
{
u32 i;
u32 sampleCount;
GF_CompositionOffsetBox *ptr = (GF_CompositionOffsetBox *)s;
ptr->nb_entries = gf_bs_read_u32(bs);
ISOM_DECREASE_SIZE(ptr, 4);
if (ptr->nb_entries > ptr->size / 8) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid number of entries %d in ctts\n", ptr->nb_entries));
return GF_ISOM_INVALID_FILE;
}
ptr->alloc_size = ptr->nb_entries;
ptr->entries = (GF_DttsEntry *)gf_malloc(sizeof(GF_DttsEntry)*ptr->alloc_size);
if (!ptr->entries) return GF_OUT_OF_MEM;
sampleCount = 0;
for (i=0; i<ptr->nb_entries; i++) {
ptr->entries[i].sampleCount = gf_bs_read_u32(bs);
if (ptr->version)
ptr->entries[i].decodingOffset = gf_bs_read_int(bs, 32);
else
ptr->entries[i].decodingOffset = (s32) gf_bs_read_u32(bs);
sampleCount += ptr->entries[i].sampleCount;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
ptr->w_LastSampleNumber = sampleCount;
#endif
return GF_OK;
}
GF_Box *ctts_New()
{
ISOM_DECL_BOX_ALLOC(GF_CompositionOffsetBox, GF_ISOM_BOX_TYPE_CTTS);
return (GF_Box *) tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err ctts_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
u32 i;
GF_CompositionOffsetBox *ptr = (GF_CompositionOffsetBox *)s;
e = gf_isom_full_box_write(s, bs);
if (e) return e;
gf_bs_write_u32(bs, ptr->nb_entries);
for (i=0; i<ptr->nb_entries; i++ ) {
gf_bs_write_u32(bs, ptr->entries[i].sampleCount);
if (ptr->version) {
gf_bs_write_int(bs, ptr->entries[i].decodingOffset, 32);
} else {
gf_bs_write_u32(bs, (u32) ptr->entries[i].decodingOffset);
}
}
return GF_OK;
}
GF_Err ctts_Size(GF_Box *s)
{
GF_CompositionOffsetBox *ptr = (GF_CompositionOffsetBox *) s;
ptr->size += 4 + (8 * ptr->nb_entries);
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
void cslg_del(GF_Box *s)
{
GF_CompositionToDecodeBox *ptr = (GF_CompositionToDecodeBox *)s;
if (ptr == NULL) return;
gf_free(ptr);
return;
}
GF_Err cslg_Read(GF_Box *s, GF_BitStream *bs)
{
GF_CompositionToDecodeBox *ptr = (GF_CompositionToDecodeBox *)s;
ptr->compositionToDTSShift = gf_bs_read_int(bs, 32);
ptr->leastDecodeToDisplayDelta = gf_bs_read_int(bs, 32);
ptr->greatestDecodeToDisplayDelta = gf_bs_read_int(bs, 32);
ptr->compositionStartTime = gf_bs_read_int(bs, 32);
ptr->compositionEndTime = gf_bs_read_int(bs, 32);
return GF_OK;
}
GF_Box *cslg_New()
{
ISOM_DECL_BOX_ALLOC(GF_CompositionToDecodeBox, GF_ISOM_BOX_TYPE_CSLG);
return (GF_Box *) tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err cslg_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_CompositionToDecodeBox *ptr = (GF_CompositionToDecodeBox *)s;
e = gf_isom_full_box_write(s, bs);
if (e) return e;
gf_bs_write_int(bs, ptr->compositionToDTSShift, 32);
gf_bs_write_int(bs, ptr->leastDecodeToDisplayDelta, 32);
gf_bs_write_int(bs, ptr->greatestDecodeToDisplayDelta, 32);
gf_bs_write_int(bs, ptr->compositionStartTime, 32);
gf_bs_write_int(bs, ptr->compositionEndTime, 32);
return GF_OK;
}
GF_Err cslg_Size(GF_Box *s)
{
GF_CompositionToDecodeBox *ptr = (GF_CompositionToDecodeBox *)s;
ptr->size += 20;
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
void ccst_del(GF_Box *s)
{
GF_CodingConstraintsBox *ptr = (GF_CodingConstraintsBox *)s;
if (ptr) gf_free(ptr);
return;
}
GF_Err ccst_Read(GF_Box *s, GF_BitStream *bs)
{
GF_CodingConstraintsBox *ptr = (GF_CodingConstraintsBox *)s;
ISOM_DECREASE_SIZE(ptr, 4);
ptr->all_ref_pics_intra = gf_bs_read_int(bs, 1);
ptr->intra_pred_used = gf_bs_read_int(bs, 1);
ptr->max_ref_per_pic = gf_bs_read_int(bs, 4);
ptr->reserved = gf_bs_read_int(bs, 26);
return GF_OK;
}
GF_Box *ccst_New()
{
ISOM_DECL_BOX_ALLOC(GF_CodingConstraintsBox, GF_ISOM_BOX_TYPE_CCST);
return (GF_Box *) tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err ccst_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_CodingConstraintsBox *ptr = (GF_CodingConstraintsBox *)s;
e = gf_isom_full_box_write(s, bs);
if (e) return e;
gf_bs_write_int(bs, ptr->all_ref_pics_intra, 1);
gf_bs_write_int(bs, ptr->intra_pred_used, 1);
gf_bs_write_int(bs, ptr->max_ref_per_pic, 4);
gf_bs_write_int(bs, 0, 26);
return GF_OK;
}
GF_Err ccst_Size(GF_Box *s)
{
GF_CodingConstraintsBox *ptr = (GF_CodingConstraintsBox *)s;
ptr->size += 4;
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
void url_del(GF_Box *s)
{
GF_DataEntryURLBox *ptr = (GF_DataEntryURLBox *)s;
if (ptr == NULL) return;
if (ptr->location) gf_free(ptr->location);
gf_free(ptr);
return;
}
GF_Err url_Read(GF_Box *s, GF_BitStream *bs)
{
GF_DataEntryURLBox *ptr = (GF_DataEntryURLBox *)s;
if (ptr->size) {
ptr->location = (char*)gf_malloc((u32) ptr->size);
if (! ptr->location) return GF_OUT_OF_MEM;
gf_bs_read_data(bs, ptr->location, (u32)ptr->size);
}
return GF_OK;
}
GF_Box *url_New()
{
ISOM_DECL_BOX_ALLOC(GF_DataEntryURLBox, GF_ISOM_BOX_TYPE_URL);
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err url_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_DataEntryURLBox *ptr = (GF_DataEntryURLBox *)s;
e = gf_isom_full_box_write(s, bs);
if (e) return e;
//the flag set indicates we have a string (WE HAVE TO for URLs)
if ( !(ptr->flags & 1)) {
if (ptr->location) {
gf_bs_write_data(bs, ptr->location, (u32)strlen(ptr->location) + 1);
}
}
return GF_OK;
}
GF_Err url_Size(GF_Box *s)
{
GF_DataEntryURLBox *ptr = (GF_DataEntryURLBox *)s;
if ( !(ptr->flags & 1)) {
if (ptr->location) ptr->size += 1 + strlen(ptr->location);
}
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
void urn_del(GF_Box *s)
{
GF_DataEntryURNBox *ptr = (GF_DataEntryURNBox *)s;
if (ptr == NULL) return;
if (ptr->location) gf_free(ptr->location);
if (ptr->nameURN) gf_free(ptr->nameURN);
gf_free(ptr);
}
GF_Err urn_Read(GF_Box *s, GF_BitStream *bs)
{
u32 i, to_read;
char *tmpName;
GF_DataEntryURNBox *ptr = (GF_DataEntryURNBox *)s;
if (! ptr->size ) return GF_OK;
//the payload holds an URN string optionally followed by a URL string; split them on the first NUL
to_read = (u32) ptr->size;
tmpName = (char*)gf_malloc(sizeof(char) * to_read);
if (!tmpName) return GF_OUT_OF_MEM;
//get the data
gf_bs_read_data(bs, tmpName, to_read);
//then get the break
i = 0;
while ( (i < to_read) && (tmpName[i] != 0) ) {
i++;
}
//check the data is consistent
if (i == to_read) {
gf_free(tmpName);
return GF_ISOM_INVALID_FILE;
}
//the NUL is the last byte: URN only, URL is not specified
if (i == to_read - 1) {
ptr->nameURN = tmpName;
ptr->location = NULL;
return GF_OK;
}
//OK, this has both URN and URL
ptr->nameURN = (char*)gf_malloc(sizeof(char) * (i+1));
if (!ptr->nameURN) {
gf_free(tmpName);
return GF_OUT_OF_MEM;
}
memcpy(ptr->nameURN, tmpName, i + 1);
if (tmpName[to_read - 1] != 0) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] urn box contains invalid location field\n" ));
}
else {
ptr->location = (char*)gf_malloc(sizeof(char) * (to_read - i - 1));
if (!ptr->location) {
gf_free(tmpName);
gf_free(ptr->nameURN);
ptr->nameURN = NULL;
return GF_OUT_OF_MEM;
}
memcpy(ptr->location, tmpName + i + 1, (to_read - i - 1));
}
gf_free(tmpName);
return GF_OK;
}
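/* Payload layout handled above (informal sketch): the box body is an URN
   string, optionally followed by a URL string, both NUL-terminated:

     "urn:example:name\0http://example.com/file\0"

   The scan for the first NUL splits the two; a NUL as the very last byte
   means the URL part is absent. The URN/URL values shown are hypothetical. */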
GF_Box *urn_New()
{
ISOM_DECL_BOX_ALLOC(GF_DataEntryURNBox, GF_ISOM_BOX_TYPE_URN);
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err urn_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_DataEntryURNBox *ptr = (GF_DataEntryURNBox *)s;
e = gf_isom_full_box_write(s, bs);
if (e) return e;
//the flag set indicates we have a string (WE HAVE TO for URLs)
if ( !(ptr->flags & 1)) {
//to check, the spec says: First name, then location
if (ptr->nameURN) {
gf_bs_write_data(bs, ptr->nameURN, (u32)strlen(ptr->nameURN) + 1);
}
if (ptr->location) {
gf_bs_write_data(bs, ptr->location, (u32)strlen(ptr->location) + 1);
}
}
return GF_OK;
}
GF_Err urn_Size(GF_Box *s)
{
GF_DataEntryURNBox *ptr = (GF_DataEntryURNBox *)s;
if ( !(ptr->flags & 1)) {
if (ptr->nameURN) ptr->size += 1 + strlen(ptr->nameURN);
if (ptr->location) ptr->size += 1 + strlen(ptr->location);
}
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
void unkn_del(GF_Box *s)
{
GF_UnknownBox *ptr = (GF_UnknownBox *) s;
if (!s) return;
if (ptr->data) gf_free(ptr->data);
gf_free(ptr);
}
GF_Err unkn_Read(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
u32 bytesToRead, sub_size, sub_a;
GF_BitStream *sub_bs;
GF_UnknownBox *ptr = (GF_UnknownBox *)s;
if (ptr->size > 0xFFFFFFFF) return GF_ISOM_INVALID_FILE;
bytesToRead = (u32) (ptr->size);
if (!bytesToRead) return GF_OK;
if (bytesToRead>1000000) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] Unknown box %s (0x%08X) with payload larger than 1 MBytes, ignoring\n", gf_4cc_to_str(ptr->type), ptr->type ));
gf_bs_skip_bytes(bs, bytesToRead); //dataSize is not set yet at this point
return GF_OK;
}
ptr->data = (char*)gf_malloc(bytesToRead);
if (ptr->data == NULL ) return GF_OUT_OF_MEM;
ptr->dataSize = bytesToRead;
gf_bs_read_data(bs, ptr->data, ptr->dataSize);
//try to parse container boxes, check if next 8 bytes match a subbox
sub_bs = gf_bs_new(ptr->data, ptr->dataSize, GF_BITSTREAM_READ);
sub_size = gf_bs_read_u32(sub_bs);
sub_a = gf_bs_read_u8(sub_bs);
e = (sub_size && (sub_size <= ptr->dataSize)) ? GF_OK : GF_NOT_SUPPORTED;
if (! isalnum(sub_a)) e = GF_NOT_SUPPORTED;
sub_a = gf_bs_read_u8(sub_bs);
if (! isalnum(sub_a)) e = GF_NOT_SUPPORTED;
sub_a = gf_bs_read_u8(sub_bs);
if (! isalnum(sub_a)) e = GF_NOT_SUPPORTED;
sub_a = gf_bs_read_u8(sub_bs);
if (! isalnum(sub_a)) e = GF_NOT_SUPPORTED;
if (e == GF_OK) {
gf_bs_seek(sub_bs, 0);
e = gf_isom_box_array_read(s, sub_bs, gf_isom_box_add_default);
}
gf_bs_del(sub_bs);
if (e==GF_OK) {
gf_free(ptr->data);
ptr->data = NULL;
ptr->dataSize = 0;
} else if (s->other_boxes) {
gf_isom_box_array_del(s->other_boxes);
s->other_boxes=NULL;
}
return GF_OK;
}
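/* Heuristic used above (added for clarity): an unknown box may actually be
   an unregistered container. The first 8 bytes of the payload are tested as
   a candidate child header: a 32-bit size that fits within the payload,
   followed by four alphanumeric FourCC bytes. Only if both hold is the
   payload re-parsed as a box array; otherwise it stays opaque data. */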
GF_Box *unkn_New()
{
ISOM_DECL_BOX_ALLOC(GF_UnknownBox, GF_ISOM_BOX_TYPE_UNKNOWN);
return (GF_Box *) tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err unkn_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
u32 type = s->type;
GF_UnknownBox *ptr = (GF_UnknownBox *)s;
if (!s) return GF_BAD_PARAM;
ptr->type = ptr->original_4cc;
e = gf_isom_box_write_header(s, bs);
ptr->type = type;
if (e) return e;
if (ptr->dataSize && ptr->data) {
gf_bs_write_data(bs, ptr->data, ptr->dataSize);
}
return GF_OK;
}
GF_Err unkn_Size(GF_Box *s)
{
GF_UnknownBox *ptr = (GF_UnknownBox *)s;
if (ptr->dataSize && ptr->data) {
ptr->size += ptr->dataSize;
}
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
void def_cont_box_del(GF_Box *s)
{
if (s) gf_free(s);
}
GF_Err def_cont_box_Read(GF_Box *s, GF_BitStream *bs)
{
return gf_isom_box_array_read(s, bs, gf_isom_box_add_default);
}
GF_Box *def_cont_box_New()
{
ISOM_DECL_BOX_ALLOC(GF_Box, 0);
return (GF_Box *) tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err def_cont_box_Write(GF_Box *s, GF_BitStream *bs)
{
return gf_isom_box_write_header(s, bs);
}
GF_Err def_cont_box_Size(GF_Box *s)
{
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
void uuid_del(GF_Box *s)
{
GF_UnknownUUIDBox *ptr = (GF_UnknownUUIDBox *) s;
if (!s) return;
if (ptr->data) gf_free(ptr->data);
gf_free(ptr);
}
GF_Err uuid_Read(GF_Box *s, GF_BitStream *bs)
{
u32 bytesToRead;
GF_UnknownUUIDBox *ptr = (GF_UnknownUUIDBox *)s;
if (ptr->size > 0xFFFFFFFF) return GF_ISOM_INVALID_FILE;
bytesToRead = (u32) (ptr->size);
if (bytesToRead) {
ptr->data = (char*)gf_malloc(bytesToRead);
if (ptr->data == NULL ) return GF_OUT_OF_MEM;
ptr->dataSize = bytesToRead;
gf_bs_read_data(bs, ptr->data, ptr->dataSize);
}
return GF_OK;
}
GF_Box *uuid_New()
{
ISOM_DECL_BOX_ALLOC(GF_UnknownUUIDBox, GF_ISOM_BOX_TYPE_UUID);
return (GF_Box *) tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err uuid_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_UnknownUUIDBox *ptr = (GF_UnknownUUIDBox*)s;
if (!s) return GF_BAD_PARAM;
e = gf_isom_box_write_header(s, bs);
if (e) return e;
if (ptr->data) {
gf_bs_write_data(bs, ptr->data, ptr->dataSize);
}
return GF_OK;
}
GF_Err uuid_Size(GF_Box *s)
{
GF_UnknownUUIDBox*ptr = (GF_UnknownUUIDBox*)s;
ptr->size += ptr->dataSize;
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
void dinf_del(GF_Box *s)
{
GF_DataInformationBox *ptr = (GF_DataInformationBox *)s;
if (ptr == NULL) return;
gf_isom_box_del((GF_Box *)ptr->dref);
gf_free(ptr);
}
GF_Err dinf_AddBox(GF_Box *s, GF_Box *a)
{
GF_DataInformationBox *ptr = (GF_DataInformationBox *)s;
switch(a->type) {
case GF_ISOM_BOX_TYPE_DREF:
if (ptr->dref) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->dref = (GF_DataReferenceBox *)a;
return GF_OK;
default:
return gf_isom_box_add_default(s, a);
}
return GF_OK;
}
GF_Err dinf_Read(GF_Box *s, GF_BitStream *bs)
{
GF_Err e = gf_isom_box_array_read(s, bs, dinf_AddBox);
if (e) {
return e;
}
if (!((GF_DataInformationBox *)s)->dref) {
GF_Box* dref;
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Missing dref box in dinf\n"));
dref = gf_isom_box_new(GF_ISOM_BOX_TYPE_DREF);
((GF_DataInformationBox *)s)->dref = (GF_DataReferenceBox *)dref;
gf_isom_box_add_for_dump_mode(s, dref);
}
return GF_OK;
}
GF_Box *dinf_New()
{
ISOM_DECL_BOX_ALLOC(GF_DataInformationBox, GF_ISOM_BOX_TYPE_DINF);
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err dinf_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_DataInformationBox *ptr = (GF_DataInformationBox *)s;
e = gf_isom_box_write_header(s, bs);
if (e) return e;
if (ptr->dref) {
e = gf_isom_box_write((GF_Box *)ptr->dref, bs);
if (e) return e;
}
return GF_OK;
}
GF_Err dinf_Size(GF_Box *s)
{
GF_Err e;
GF_DataInformationBox *ptr = (GF_DataInformationBox *)s;
if (ptr->dref) {
e = gf_isom_box_size((GF_Box *) ptr->dref);
if (e) return e;
ptr->size += ptr->dref->size;
}
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
void dref_del(GF_Box *s)
{
GF_DataReferenceBox *ptr = (GF_DataReferenceBox *) s;
if (ptr == NULL) return;
gf_free(ptr);
}
GF_Err dref_AddDataEntry(GF_Box *ptr, GF_Box *entry)
{
return gf_isom_box_add_default(ptr, entry);
}
GF_Err dref_Read(GF_Box *s, GF_BitStream *bs)
{
GF_DataReferenceBox *ptr = (GF_DataReferenceBox *)s;
if (ptr == NULL) return GF_BAD_PARAM;
gf_bs_read_u32(bs); //entry count, rebuilt from the child box list on write
ISOM_DECREASE_SIZE(ptr, 4);
return gf_isom_box_array_read(s, bs, dref_AddDataEntry);
}
GF_Box *dref_New()
{
ISOM_DECL_BOX_ALLOC(GF_DataReferenceBox, GF_ISOM_BOX_TYPE_DREF);
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err dref_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
u32 count;
GF_DataReferenceBox *ptr = (GF_DataReferenceBox *)s;
if (!s) return GF_BAD_PARAM;
e = gf_isom_full_box_write(s, bs);
if (e) return e;
count = ptr->other_boxes ? gf_list_count(ptr->other_boxes) : 0;
gf_bs_write_u32(bs, count);
return GF_OK;
}
GF_Err dref_Size(GF_Box *s)
{
GF_DataReferenceBox *ptr = (GF_DataReferenceBox *)s;
if (!s) return GF_BAD_PARAM;
ptr->size += 4;
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
void edts_del(GF_Box *s)
{
GF_EditBox *ptr = (GF_EditBox *) s;
gf_isom_box_del((GF_Box *)ptr->editList);
gf_free(ptr);
}
GF_Err edts_AddBox(GF_Box *s, GF_Box *a)
{
GF_EditBox *ptr = (GF_EditBox *)s;
if (a->type == GF_ISOM_BOX_TYPE_ELST) {
if (ptr->editList) return GF_BAD_PARAM;
ptr->editList = (GF_EditListBox *)a;
return GF_OK;
} else {
return gf_isom_box_add_default(s, a);
}
return GF_OK;
}
GF_Err edts_Read(GF_Box *s, GF_BitStream *bs)
{
return gf_isom_box_array_read(s, bs, edts_AddBox);
}
GF_Box *edts_New()
{
ISOM_DECL_BOX_ALLOC(GF_EditBox, GF_ISOM_BOX_TYPE_EDTS);
return (GF_Box *) tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err edts_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_EditBox *ptr = (GF_EditBox *)s;
//here we have a trick: if editList is empty, skip the box
if (ptr->editList && gf_list_count(ptr->editList->entryList)) {
e = gf_isom_box_write_header(s, bs);
if (e) return e;
e = gf_isom_box_write((GF_Box *) ptr->editList, bs);
if (e) return e;
}
return GF_OK;
}
GF_Err edts_Size(GF_Box *s)
{
GF_Err e;
GF_EditBox *ptr = (GF_EditBox *)s;
//here we have a trick: if editList is empty, skip the box
if (!ptr->editList || ! gf_list_count(ptr->editList->entryList)) {
ptr->size = 0;
} else {
e = gf_isom_box_size((GF_Box *)ptr->editList);
if (e) return e;
ptr->size += ptr->editList->size;
}
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
void elst_del(GF_Box *s)
{
GF_EditListBox *ptr;
GF_EdtsEntry *p;
u32 nb_entries;
u32 i;
ptr = (GF_EditListBox *)s;
if (ptr == NULL) return;
nb_entries = gf_list_count(ptr->entryList);
for (i = 0; i < nb_entries; i++) {
p = (GF_EdtsEntry*)gf_list_get(ptr->entryList, i);
if (p) gf_free(p);
}
gf_list_del(ptr->entryList);
gf_free(ptr);
}
GF_Err elst_Read(GF_Box *s, GF_BitStream *bs)
{
u32 entries;
s32 tr;
u32 nb_entries;
GF_EdtsEntry *p;
GF_EditListBox *ptr = (GF_EditListBox *)s;
nb_entries = gf_bs_read_u32(bs);
ISOM_DECREASE_SIZE(ptr, 4);
if (ptr->version == 1) {
if (nb_entries > ptr->size / 20) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid number of entries %d in ctts\n", nb_entries));
return GF_ISOM_INVALID_FILE;
}
} else {
if (nb_entries > ptr->size / 12) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid number of entries %d in ctts\n", nb_entries));
return GF_ISOM_INVALID_FILE;
}
}
for (entries = 0; entries < nb_entries; entries++) {
p = (GF_EdtsEntry *) gf_malloc(sizeof(GF_EdtsEntry));
if (!p) return GF_OUT_OF_MEM;
if (ptr->version == 1) {
p->segmentDuration = gf_bs_read_u64(bs);
p->mediaTime = (s64) gf_bs_read_u64(bs);
} else {
p->segmentDuration = gf_bs_read_u32(bs);
tr = gf_bs_read_u32(bs);
p->mediaTime = (s64) tr;
}
p->mediaRate = gf_bs_read_u16(bs);
gf_bs_read_u16(bs);
gf_list_add(ptr->entryList, p);
}
return GF_OK;
}
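/* Entry size arithmetic behind the sanity checks above (added for clarity):

     version 1: u64 segmentDuration + u64 mediaTime + u16 rate + u16 reserved = 20 bytes
     version 0: u32 segmentDuration + u32 mediaTime + u16 rate + u16 reserved = 12 bytes

   hence the nb_entries > size/20 (resp. size/12) bounds checks. */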
GF_Box *elst_New()
{
ISOM_DECL_BOX_ALLOC(GF_EditListBox, GF_ISOM_BOX_TYPE_ELST);
tmp->entryList = gf_list_new();
if (!tmp->entryList) {
gf_free(tmp);
return NULL;
}
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err elst_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
u32 i;
u32 nb_entries;
GF_EdtsEntry *p;
GF_EditListBox *ptr = (GF_EditListBox *)s;
if (!ptr) return GF_BAD_PARAM;
nb_entries = gf_list_count(ptr->entryList);
e = gf_isom_full_box_write(s, bs);
if (e) return e;
gf_bs_write_u32(bs, nb_entries);
for (i = 0; i < nb_entries; i++ ) {
p = (GF_EdtsEntry*)gf_list_get(ptr->entryList, i);
if (ptr->version == 1) {
gf_bs_write_u64(bs, p->segmentDuration);
gf_bs_write_u64(bs, p->mediaTime);
} else {
gf_bs_write_u32(bs, (u32) p->segmentDuration);
gf_bs_write_u32(bs, (s32) p->mediaTime);
}
gf_bs_write_u16(bs, p->mediaRate);
gf_bs_write_u16(bs, 0);
}
return GF_OK;
}
GF_Err elst_Size(GF_Box *s)
{
u32 durtimebytes;
u32 i, nb_entries;
GF_EditListBox *ptr = (GF_EditListBox *)s;
//entry count
ptr->size += 4;
nb_entries = gf_list_count(ptr->entryList);
ptr->version = 0;
for (i=0; i<nb_entries; i++) {
GF_EdtsEntry *p = (GF_EdtsEntry*)gf_list_get(ptr->entryList, i);
if ((p->segmentDuration>0xFFFFFFFF) || (p->mediaTime>0xFFFFFFFF)) {
ptr->version = 1;
break;
}
}
durtimebytes = (ptr->version == 1 ? 16 : 8) + 4;
ptr->size += (nb_entries * durtimebytes);
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
void esds_del(GF_Box *s)
{
GF_ESDBox *ptr = (GF_ESDBox *)s;
if (ptr == NULL) return;
if (ptr->desc) gf_odf_desc_del((GF_Descriptor *)ptr->desc);
gf_free(ptr);
}
GF_Err esds_Read(GF_Box *s, GF_BitStream *bs)
{
GF_Err e=GF_OK;
u32 descSize;
char *enc_desc;
u32 SLIsPredefined(GF_SLConfig *sl);
GF_ESDBox *ptr = (GF_ESDBox *)s;
descSize = (u32) (ptr->size);
if (descSize) {
enc_desc = (char*)gf_malloc(sizeof(char) * descSize);
if (!enc_desc) return GF_OUT_OF_MEM;
//get the payload
gf_bs_read_data(bs, enc_desc, descSize);
//send it to the OD Codec
e = gf_odf_desc_read(enc_desc, descSize, (GF_Descriptor **) &ptr->desc);
//OK, free our desc
gf_free(enc_desc);
if (ptr->desc && (ptr->desc->tag!=GF_ODF_ESD_TAG) ) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid descriptor tag 0x%x in esds\n", ptr->desc->tag));
gf_odf_desc_del((GF_Descriptor*)ptr->desc);
ptr->desc=NULL;
return GF_ISOM_INVALID_FILE;
}
if (e) {
ptr->desc = NULL;
} else {
/*fix broken files*/
if (!ptr->desc->URLString) {
if (!ptr->desc->slConfig) {
ptr->desc->slConfig = (GF_SLConfig *) gf_odf_desc_new(GF_ODF_SLC_TAG);
ptr->desc->slConfig->predefined = SLPredef_MP4;
} else if (ptr->desc->slConfig->predefined != SLPredef_MP4) {
ptr->desc->slConfig->predefined = SLPredef_MP4;
gf_odf_slc_set_pref(ptr->desc->slConfig);
}
}
}
}
return e;
}
GF_Box *esds_New()
{
ISOM_DECL_BOX_ALLOC(GF_ESDBox, GF_ISOM_BOX_TYPE_ESDS);
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err esds_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
char *enc_desc;
u32 descSize = 0;
GF_ESDBox *ptr = (GF_ESDBox *)s;
e = gf_isom_full_box_write(s, bs);
if (e) return e;
e = gf_odf_desc_write((GF_Descriptor *)ptr->desc, &enc_desc, &descSize);
if (e) return e;
gf_bs_write_data(bs, enc_desc, descSize);
//free our buffer
gf_free(enc_desc);
return GF_OK;
}
GF_Err esds_Size(GF_Box *s)
{
u32 descSize = 0;
GF_ESDBox *ptr = (GF_ESDBox *)s;
descSize = gf_odf_desc_size((GF_Descriptor *)ptr->desc);
ptr->size += descSize;
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
void free_del(GF_Box *s)
{
GF_FreeSpaceBox *ptr = (GF_FreeSpaceBox *)s;
if (ptr->data) gf_free(ptr->data);
gf_free(ptr);
}
GF_Err free_Read(GF_Box *s, GF_BitStream *bs)
{
u32 bytesToRead;
GF_FreeSpaceBox *ptr = (GF_FreeSpaceBox *)s;
if (ptr->size > 0xFFFFFFFF) return GF_IO_ERR;
bytesToRead = (u32) (ptr->size);
if (bytesToRead) {
ptr->data = (char*)gf_malloc(bytesToRead * sizeof(char));
if (!ptr->data) return GF_OUT_OF_MEM;
gf_bs_read_data(bs, ptr->data, bytesToRead);
ptr->dataSize = bytesToRead;
}
return GF_OK;
}
GF_Box *free_New()
{
ISOM_DECL_BOX_ALLOC(GF_FreeSpaceBox, GF_ISOM_BOX_TYPE_FREE);
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err free_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_FreeSpaceBox *ptr = (GF_FreeSpaceBox *)s;
if (ptr->original_4cc) {
u32 t = s->type;
s->type=ptr->original_4cc;
e = gf_isom_box_write_header(s, bs);
s->type=t;
} else {
e = gf_isom_box_write_header(s, bs);
}
if (e) return e;
if (ptr->dataSize) {
if (ptr->data) {
gf_bs_write_data(bs, ptr->data, ptr->dataSize);
} else {
u32 i = 0;
while (i<ptr->dataSize) {
gf_bs_write_u8(bs, 0);
i++;
}
}
}
return GF_OK;
}
GF_Err free_Size(GF_Box *s)
{
GF_FreeSpaceBox *ptr = (GF_FreeSpaceBox *)s;
ptr->size += ptr->dataSize;
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
void ftyp_del(GF_Box *s)
{
GF_FileTypeBox *ptr = (GF_FileTypeBox *) s;
if (ptr->altBrand) gf_free(ptr->altBrand);
gf_free(ptr);
}
GF_Box *ftyp_New()
{
ISOM_DECL_BOX_ALLOC(GF_FileTypeBox, GF_ISOM_BOX_TYPE_FTYP);
return (GF_Box *)tmp;
}
GF_Err ftyp_Read(GF_Box *s,GF_BitStream *bs)
{
u32 i;
GF_FileTypeBox *ptr = (GF_FileTypeBox *)s;
if (ptr->size < 8) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] Found ftyp with size < 8, likely broken!\n"));
return GF_BAD_PARAM;
}
ptr->majorBrand = gf_bs_read_u32(bs);
ptr->minorVersion = gf_bs_read_u32(bs);
ISOM_DECREASE_SIZE(ptr, 8);
ptr->altCount = ( (u32) (ptr->size)) / 4;
if (!ptr->altCount) return GF_OK;
if (ptr->altCount * 4 != (u32) (ptr->size)) return GF_ISOM_INVALID_FILE;
ptr->altBrand = (u32*)gf_malloc(sizeof(u32)*ptr->altCount);
if (!ptr->altBrand) return GF_OUT_OF_MEM;
for (i = 0; i<ptr->altCount; i++) {
ptr->altBrand[i] = gf_bs_read_u32(bs);
}
return GF_OK;
}
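/* Box layout read above (informal sketch):

     u32 major_brand
     u32 minor_version
     u32 compatible_brands[]   (whatever remains, 4 bytes each)

   altCount is therefore size/4 after the 8-byte fixed fields, and a
   remaining size that is not a multiple of 4 is rejected as invalid. */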
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err ftyp_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
u32 i;
GF_FileTypeBox *ptr = (GF_FileTypeBox *) s;
e = gf_isom_box_write_header(s, bs);
if (e) return e;
gf_bs_write_u32(bs, ptr->majorBrand);
gf_bs_write_u32(bs, ptr->minorVersion);
for (i=0; i<ptr->altCount; i++) {
gf_bs_write_u32(bs, ptr->altBrand[i]);
}
return GF_OK;
}
GF_Err ftyp_Size(GF_Box *s)
{
GF_FileTypeBox *ptr = (GF_FileTypeBox *)s;
ptr->size += 8 + ptr->altCount * 4;
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
void gnrm_del(GF_Box *s)
{
GF_GenericSampleEntryBox *ptr = (GF_GenericSampleEntryBox *)s;
gf_isom_sample_entry_predestroy((GF_SampleEntryBox *)ptr);
if (ptr->data) gf_free(ptr->data);
gf_free(ptr);
}
GF_Box *gnrm_New()
{
ISOM_DECL_BOX_ALLOC(GF_GenericSampleEntryBox, GF_ISOM_BOX_TYPE_GNRM);
gf_isom_sample_entry_init((GF_SampleEntryBox*)tmp);
return (GF_Box *)tmp;
}
//dummy
GF_Err gnrm_Read(GF_Box *s, GF_BitStream *bs)
{
return GF_OK;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err gnrm_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_GenericSampleEntryBox *ptr = (GF_GenericSampleEntryBox *)s;
//careful: we are not writing the box type but the entry type, so switch for write
ptr->type = ptr->EntryType;
e = gf_isom_box_write_header(s, bs);
if (e) return e;
ptr->type = GF_ISOM_BOX_TYPE_GNRM;
gf_bs_write_data(bs, ptr->reserved, 6);
gf_bs_write_u16(bs, ptr->dataReferenceIndex);
gf_bs_write_data(bs, ptr->data, ptr->data_size);
return GF_OK;
}
GF_Err gnrm_Size(GF_Box *s)
{
GF_GenericSampleEntryBox *ptr = (GF_GenericSampleEntryBox *)s;
s->type = GF_ISOM_BOX_TYPE_GNRM;
ptr->size += 8+ptr->data_size;
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
void gnrv_del(GF_Box *s)
{
GF_GenericVisualSampleEntryBox *ptr = (GF_GenericVisualSampleEntryBox *)s;
gf_isom_sample_entry_predestroy((GF_SampleEntryBox *)ptr);
if (ptr->data) gf_free(ptr->data);
gf_free(ptr);
}
GF_Box *gnrv_New()
{
ISOM_DECL_BOX_ALLOC(GF_GenericVisualSampleEntryBox, GF_ISOM_BOX_TYPE_GNRV);
gf_isom_video_sample_entry_init((GF_VisualSampleEntryBox*) tmp);
return (GF_Box *)tmp;
}
//dummy
GF_Err gnrv_Read(GF_Box *s, GF_BitStream *bs)
{
return GF_OK;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err gnrv_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_GenericVisualSampleEntryBox *ptr = (GF_GenericVisualSampleEntryBox *)s;
//careful: we are not writing the box type but the entry type, so switch for write
ptr->type = ptr->EntryType;
e = gf_isom_box_write_header(s, bs);
if (e) return e;
ptr->type = GF_ISOM_BOX_TYPE_GNRV;
gf_isom_video_sample_entry_write((GF_VisualSampleEntryBox *)ptr, bs);
gf_bs_write_data(bs, ptr->data, ptr->data_size);
return GF_OK;
}
GF_Err gnrv_Size(GF_Box *s)
{
GF_GenericVisualSampleEntryBox *ptr = (GF_GenericVisualSampleEntryBox *)s;
s->type = GF_ISOM_BOX_TYPE_GNRV;
gf_isom_video_sample_entry_size((GF_VisualSampleEntryBox *)s);
ptr->size += ptr->data_size;
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
void gnra_del(GF_Box *s)
{
GF_GenericAudioSampleEntryBox *ptr = (GF_GenericAudioSampleEntryBox *)s;
gf_isom_sample_entry_predestroy((GF_SampleEntryBox *)ptr);
if (ptr->data) gf_free(ptr->data);
gf_free(ptr);
}
GF_Box *gnra_New()
{
ISOM_DECL_BOX_ALLOC(GF_GenericAudioSampleEntryBox, GF_ISOM_BOX_TYPE_GNRA);
gf_isom_audio_sample_entry_init((GF_AudioSampleEntryBox*) tmp);
return (GF_Box *)tmp;
}
//dummy
GF_Err gnra_Read(GF_Box *s, GF_BitStream *bs)
{
return GF_OK;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err gnra_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_GenericAudioSampleEntryBox *ptr = (GF_GenericAudioSampleEntryBox *)s;
//careful: we are not writing the box type but the entry type, so switch for write
ptr->type = ptr->EntryType;
e = gf_isom_box_write_header(s, bs);
if (e) return e;
ptr->type = GF_ISOM_BOX_TYPE_GNRA;
gf_isom_audio_sample_entry_write((GF_AudioSampleEntryBox *)ptr, bs);
if (ptr->data) {
gf_bs_write_data(bs, ptr->data, ptr->data_size);
}
return GF_OK;
}
GF_Err gnra_Size(GF_Box *s)
{
GF_GenericAudioSampleEntryBox *ptr = (GF_GenericAudioSampleEntryBox *)s;
s->type = GF_ISOM_BOX_TYPE_GNRA;
gf_isom_audio_sample_entry_size((GF_AudioSampleEntryBox *)s);
ptr->size += ptr->data_size;
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
void hdlr_del(GF_Box *s)
{
GF_HandlerBox *ptr = (GF_HandlerBox *)s;
if (ptr == NULL) return;
if (ptr->nameUTF8) gf_free(ptr->nameUTF8);
gf_free(ptr);
}
GF_Err hdlr_Read(GF_Box *s, GF_BitStream *bs)
{
GF_HandlerBox *ptr = (GF_HandlerBox *)s;
ptr->reserved1 = gf_bs_read_u32(bs);
ptr->handlerType = gf_bs_read_u32(bs);
gf_bs_read_data(bs, (char*)ptr->reserved2, 12);
ISOM_DECREASE_SIZE(ptr, 20);
if (ptr->size) {
ptr->nameUTF8 = (char*)gf_malloc((u32) ptr->size);
if (ptr->nameUTF8 == NULL) return GF_OUT_OF_MEM;
gf_bs_read_data(bs, ptr->nameUTF8, (u32) ptr->size);
//patch for old QT files - we cannot rely on checking whether str[0]==strlen(str+1), since the
//first character's value may legitimately equal the remaining string length (we hit this with
//the encryption_import test); we therefore only check whether the last char is NUL, and if not
//assume the old QT counted-string style
if (ptr->nameUTF8[ptr->size-1]) {
memmove(ptr->nameUTF8, ptr->nameUTF8+1, sizeof(char) * (u32) (ptr->size-1) );
ptr->nameUTF8[ptr->size-1] = 0;
ptr->store_counted_string = GF_TRUE;
}
}
return GF_OK;
}
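/* Example of the two name encodings handled above (values hypothetical):

     ISO style : 'V' 'i' 'd' 'e' 'o' 0x00     -> NUL-terminated string
     old QT    : 0x05 'V' 'i' 'd' 'e' 'o'     -> counted (Pascal) string

   Since a leading byte can legitimately equal the remaining length, only a
   missing trailing NUL is taken as evidence of the counted form, and the
   string is then shifted left by one byte to drop the count. */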
GF_Box *hdlr_New()
{
ISOM_DECL_BOX_ALLOC(GF_HandlerBox, GF_ISOM_BOX_TYPE_HDLR);
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err hdlr_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_HandlerBox *ptr = (GF_HandlerBox *)s;
e = gf_isom_full_box_write(s, bs);
if (e) return e;
gf_bs_write_u32(bs, ptr->reserved1);
gf_bs_write_u32(bs, ptr->handlerType);
gf_bs_write_data(bs, (char*)ptr->reserved2, 12);
if (ptr->nameUTF8) {
u32 len = (u32)strlen(ptr->nameUTF8);
if (ptr->store_counted_string) {
gf_bs_write_u8(bs, len);
gf_bs_write_data(bs, ptr->nameUTF8, len);
} else {
gf_bs_write_data(bs, ptr->nameUTF8, len);
gf_bs_write_u8(bs, 0);
}
} else {
gf_bs_write_u8(bs, 0);
}
return GF_OK;
}
GF_Err hdlr_Size(GF_Box *s)
{
GF_HandlerBox *ptr = (GF_HandlerBox *)s;
ptr->size += 20 + 1; //null term or counted string
if (ptr->nameUTF8) {
ptr->size += strlen(ptr->nameUTF8);
}
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
void hinf_del(GF_Box *s)
{
GF_HintInfoBox *hinf = (GF_HintInfoBox *)s;
gf_free(hinf);
}
GF_Box *hinf_New()
{
ISOM_DECL_BOX_ALLOC(GF_HintInfoBox, GF_ISOM_BOX_TYPE_HINF);
tmp->other_boxes = gf_list_new();
return (GF_Box *)tmp;
}
GF_Err hinf_AddBox(GF_Box *s, GF_Box *a)
{
GF_MAXRBox *maxR;
GF_HintInfoBox *hinf = (GF_HintInfoBox *)s;
u32 i;
switch (a->type) {
case GF_ISOM_BOX_TYPE_MAXR:
i=0;
while ((maxR = (GF_MAXRBox *)gf_list_enum(hinf->other_boxes, &i))) {
if ((maxR->type==GF_ISOM_BOX_TYPE_MAXR) && (maxR->granularity == ((GF_MAXRBox *)a)->granularity))
return GF_ISOM_INVALID_FILE;
}
break;
}
return gf_isom_box_add_default(s, a);
}
GF_Err hinf_Read(GF_Box *s, GF_BitStream *bs)
{
return gf_isom_box_array_read(s, bs, hinf_AddBox);
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err hinf_Write(GF_Box *s, GF_BitStream *bs)
{
// GF_HintInfoBox *ptr = (GF_HintInfoBox *)s;
if (!s) return GF_BAD_PARAM;
return gf_isom_box_write_header(s, bs);
}
GF_Err hinf_Size(GF_Box *s)
{
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
void hmhd_del(GF_Box *s)
{
GF_HintMediaHeaderBox *ptr = (GF_HintMediaHeaderBox *)s;
if (ptr == NULL) return;
gf_free(ptr);
}
GF_Err hmhd_Read(GF_Box *s,GF_BitStream *bs)
{
GF_HintMediaHeaderBox *ptr = (GF_HintMediaHeaderBox *)s;
ptr->maxPDUSize = gf_bs_read_u16(bs);
ptr->avgPDUSize = gf_bs_read_u16(bs);
ptr->maxBitrate = gf_bs_read_u32(bs);
ptr->avgBitrate = gf_bs_read_u32(bs);
ptr->slidingAverageBitrate = gf_bs_read_u32(bs);
return GF_OK;
}
GF_Box *hmhd_New()
{
ISOM_DECL_BOX_ALLOC(GF_HintMediaHeaderBox, GF_ISOM_BOX_TYPE_HMHD);
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err hmhd_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_HintMediaHeaderBox *ptr = (GF_HintMediaHeaderBox *)s;
e = gf_isom_full_box_write(s, bs);
if (e) return e;
gf_bs_write_u16(bs, ptr->maxPDUSize);
gf_bs_write_u16(bs, ptr->avgPDUSize);
gf_bs_write_u32(bs, ptr->maxBitrate);
gf_bs_write_u32(bs, ptr->avgBitrate);
gf_bs_write_u32(bs, ptr->slidingAverageBitrate);
return GF_OK;
}
GF_Err hmhd_Size(GF_Box *s)
{
GF_HintMediaHeaderBox *ptr = (GF_HintMediaHeaderBox *)s;
ptr->size += 16;
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
GF_Box *hnti_New()
{
ISOM_DECL_BOX_ALLOC(GF_HintTrackInfoBox, GF_ISOM_BOX_TYPE_HNTI);
return (GF_Box *)tmp;
}
void hnti_del(GF_Box *a)
{
gf_free(a);
}
GF_Err hnti_AddBox(GF_Box *s, GF_Box *a)
{
GF_HintTrackInfoBox *hnti = (GF_HintTrackInfoBox *)s;
if (!hnti || !a) return GF_BAD_PARAM;
switch (a->type) {
//this is the value for GF_RTPBox - same as HintSampleEntry for RTP !!!
case GF_ISOM_BOX_TYPE_RTP:
case GF_ISOM_BOX_TYPE_SDP:
if (hnti->SDP) return GF_BAD_PARAM;
hnti->SDP = a;
break;
default:
break;
}
return gf_isom_box_add_default(s, a);
}
GF_Err hnti_Read(GF_Box *s, GF_BitStream *bs)
{
return gf_isom_box_array_read_ex(s, bs, hnti_AddBox, s->type);
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err hnti_Write(GF_Box *s, GF_BitStream *bs)
{
return gf_isom_box_write_header(s, bs);
}
GF_Err hnti_Size(GF_Box *s)
{
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
/**********************************************************
GF_SDPBox
**********************************************************/
void sdp_del(GF_Box *s)
{
GF_SDPBox *ptr = (GF_SDPBox *)s;
if (ptr->sdpText) gf_free(ptr->sdpText);
gf_free(ptr);
}
GF_Err sdp_Read(GF_Box *s, GF_BitStream *bs)
{
u32 length;
GF_SDPBox *ptr = (GF_SDPBox *)s;
if (ptr == NULL) return GF_BAD_PARAM;
length = (u32) (ptr->size);
//sdp text has no delimiter !!!
ptr->sdpText = (char*)gf_malloc(sizeof(char) * (length+1));
if (!ptr->sdpText) return GF_OUT_OF_MEM;
gf_bs_read_data(bs, ptr->sdpText, length);
ptr->sdpText[length] = 0;
return GF_OK;
}
GF_Box *sdp_New()
{
ISOM_DECL_BOX_ALLOC(GF_SDPBox, GF_ISOM_BOX_TYPE_SDP);
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err sdp_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_SDPBox *ptr = (GF_SDPBox *)s;
if (ptr == NULL) return GF_BAD_PARAM;
e = gf_isom_box_write_header(s, bs);
if (e) return e;
//don't write the NULL char!!!
gf_bs_write_data(bs, ptr->sdpText, (u32) strlen(ptr->sdpText));
return GF_OK;
}
GF_Err sdp_Size(GF_Box *s)
{
GF_SDPBox *ptr = (GF_SDPBox *)s;
//don't count the NULL char!!!
ptr->size += strlen(ptr->sdpText);
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
void rtp_hnti_del(GF_Box *s)
{
GF_RTPBox *ptr = (GF_RTPBox *)s;
if (ptr->sdpText) gf_free(ptr->sdpText);
gf_free(ptr);
}
GF_Err rtp_hnti_Read(GF_Box *s, GF_BitStream *bs)
{
u32 length;
GF_RTPBox *ptr = (GF_RTPBox *)s;
if (ptr == NULL) return GF_BAD_PARAM;
ISOM_DECREASE_SIZE(ptr, 4)
ptr->subType = gf_bs_read_u32(bs);
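//subType identifies the payload format - 'sdp ' for SDP text (the default set in rtp_hnti_New)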
length = (u32) (ptr->size);
//sdp text has no delimiter !!!
ptr->sdpText = (char*)gf_malloc(sizeof(char) * (length+1));
if (!ptr->sdpText) return GF_OUT_OF_MEM;
gf_bs_read_data(bs, ptr->sdpText, length);
ptr->sdpText[length] = 0;
return GF_OK;
}
GF_Box *rtp_hnti_New()
{
ISOM_DECL_BOX_ALLOC(GF_RTPBox, GF_ISOM_BOX_TYPE_RTP);
tmp->subType = GF_ISOM_BOX_TYPE_SDP;
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err rtp_hnti_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_RTPBox *ptr = (GF_RTPBox *)s;
if (ptr == NULL) return GF_BAD_PARAM;
e = gf_isom_box_write_header(s, bs);
if (e) return e;
gf_bs_write_u32(bs, ptr->subType);
//don't write the NULL char!!!
gf_bs_write_data(bs, ptr->sdpText, (u32) strlen(ptr->sdpText));
return GF_OK;
}
GF_Err rtp_hnti_Size(GF_Box *s)
{
GF_RTPBox *ptr = (GF_RTPBox *)s;
ptr->size += 4 + strlen(ptr->sdpText);
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
/**********************************************************
TRPY GF_Box
**********************************************************/
void trpy_del(GF_Box *s)
{
gf_free((GF_TRPYBox *)s);
}
GF_Err trpy_Read(GF_Box *s, GF_BitStream *bs)
{
GF_TRPYBox *ptr = (GF_TRPYBox *)s;
ptr->nbBytes = gf_bs_read_u64(bs);
return GF_OK;
}
GF_Box *trpy_New()
{
ISOM_DECL_BOX_ALLOC(GF_TRPYBox, GF_ISOM_BOX_TYPE_TRPY);
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err trpy_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_TRPYBox *ptr = (GF_TRPYBox *)s;
if (ptr == NULL) return GF_BAD_PARAM;
e = gf_isom_box_write_header(s, bs);
if (e) return e;
gf_bs_write_u64(bs, ptr->nbBytes);
return GF_OK;
}
GF_Err trpy_Size(GF_Box *s)
{
s->size += 8;
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
/**********************************************************
TOTL GF_Box
**********************************************************/
void totl_del(GF_Box *s)
{
gf_free((GF_TOTLBox *)s);
}
GF_Err totl_Read(GF_Box *s, GF_BitStream *bs)
{
GF_TOTLBox *ptr = (GF_TOTLBox *)s;
ptr->nbBytes = gf_bs_read_u32(bs);
return GF_OK;
}
GF_Box *totl_New()
{
ISOM_DECL_BOX_ALLOC(GF_TOTLBox, GF_ISOM_BOX_TYPE_TOTL);
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err totl_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_TOTLBox *ptr = (GF_TOTLBox *)s;
if (ptr == NULL) return GF_BAD_PARAM;
e = gf_isom_box_write_header(s, bs);
if (e) return e;
gf_bs_write_u32(bs, ptr->nbBytes);
return GF_OK;
}
GF_Err totl_Size(GF_Box *s)
{
s->size += 4;
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
/**********************************************************
NUMP GF_Box
**********************************************************/
void nump_del(GF_Box *s)
{
gf_free((GF_NUMPBox *)s);
}
GF_Err nump_Read(GF_Box *s, GF_BitStream *bs)
{
GF_NUMPBox *ptr = (GF_NUMPBox *)s;
ptr->nbPackets = gf_bs_read_u64(bs);
return GF_OK;
}
GF_Box *nump_New()
{
ISOM_DECL_BOX_ALLOC(GF_NUMPBox, GF_ISOM_BOX_TYPE_NUMP);
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err nump_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_NUMPBox *ptr = (GF_NUMPBox *)s;
if (ptr == NULL) return GF_BAD_PARAM;
e = gf_isom_box_write_header(s, bs);
if (e) return e;
gf_bs_write_u64(bs, ptr->nbPackets);
return GF_OK;
}
GF_Err nump_Size(GF_Box *s)
{
s->size += 8;
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
/**********************************************************
NPCK GF_Box
**********************************************************/
void npck_del(GF_Box *s)
{
gf_free((GF_NPCKBox *)s);
}
GF_Err npck_Read(GF_Box *s, GF_BitStream *bs)
{
GF_NPCKBox *ptr = (GF_NPCKBox *)s;
ptr->nbPackets = gf_bs_read_u32(bs);
return GF_OK;
}
GF_Box *npck_New()
{
ISOM_DECL_BOX_ALLOC(GF_NPCKBox, GF_ISOM_BOX_TYPE_NPCK);
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err npck_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_NPCKBox *ptr = (GF_NPCKBox *)s;
if (ptr == NULL) return GF_BAD_PARAM;
e = gf_isom_box_write_header(s, bs);
if (e) return e;
gf_bs_write_u32(bs, ptr->nbPackets);
return GF_OK;
}
GF_Err npck_Size(GF_Box *s)
{
s->size += 4;
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
/**********************************************************
TPYL GF_Box
**********************************************************/
void tpyl_del(GF_Box *s)
{
gf_free((GF_NTYLBox *)s);
}
GF_Err tpyl_Read(GF_Box *s, GF_BitStream *bs)
{
GF_NTYLBox *ptr = (GF_NTYLBox *)s;
if (ptr == NULL) return GF_BAD_PARAM;
ptr->nbBytes = gf_bs_read_u64(bs);
return GF_OK;
}
GF_Box *tpyl_New()
{
ISOM_DECL_BOX_ALLOC(GF_NTYLBox, GF_ISOM_BOX_TYPE_TPYL);
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err tpyl_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_NTYLBox *ptr = (GF_NTYLBox *)s;
if (ptr == NULL) return GF_BAD_PARAM;
e = gf_isom_box_write_header(s, bs);
if (e) return e;
gf_bs_write_u64(bs, ptr->nbBytes);
return GF_OK;
}
GF_Err tpyl_Size(GF_Box *s)
{
s->size += 8;
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
/**********************************************************
TPAY GF_Box
**********************************************************/
void tpay_del(GF_Box *s)
{
gf_free((GF_TPAYBox *)s);
}
GF_Err tpay_Read(GF_Box *s, GF_BitStream *bs)
{
GF_TPAYBox *ptr = (GF_TPAYBox *)s;
ptr->nbBytes = gf_bs_read_u32(bs);
return GF_OK;
}
GF_Box *tpay_New()
{
ISOM_DECL_BOX_ALLOC(GF_TPAYBox, GF_ISOM_BOX_TYPE_TPAY);
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err tpay_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_TPAYBox *ptr = (GF_TPAYBox *)s;
if (ptr == NULL) return GF_BAD_PARAM;
e = gf_isom_box_write_header(s, bs);
if (e) return e;
gf_bs_write_u32(bs, ptr->nbBytes);
return GF_OK;
}
GF_Err tpay_Size(GF_Box *s)
{
s->size += 4;
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
/**********************************************************
MAXR GF_Box
**********************************************************/
void maxr_del(GF_Box *s)
{
gf_free((GF_MAXRBox *)s);
}
GF_Err maxr_Read(GF_Box *s, GF_BitStream *bs)
{
GF_MAXRBox *ptr = (GF_MAXRBox *)s;
if (ptr == NULL) return GF_BAD_PARAM;
ptr->granularity = gf_bs_read_u32(bs);
ptr->maxDataRate = gf_bs_read_u32(bs);
return GF_OK;
}
GF_Box *maxr_New()
{
ISOM_DECL_BOX_ALLOC(GF_MAXRBox, GF_ISOM_BOX_TYPE_MAXR);
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err maxr_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_MAXRBox *ptr = (GF_MAXRBox *)s;
if (ptr == NULL) return GF_BAD_PARAM;
e = gf_isom_box_write_header(s, bs);
if (e) return e;
gf_bs_write_u32(bs, ptr->granularity);
gf_bs_write_u32(bs, ptr->maxDataRate);
return GF_OK;
}
GF_Err maxr_Size(GF_Box *s)
{
s->size += 8;
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
/**********************************************************
DMED GF_Box
**********************************************************/
void dmed_del(GF_Box *s)
{
gf_free((GF_DMEDBox *)s);
}
GF_Err dmed_Read(GF_Box *s, GF_BitStream *bs)
{
GF_DMEDBox *ptr = (GF_DMEDBox *)s;
ptr->nbBytes = gf_bs_read_u64(bs);
return GF_OK;
}
GF_Box *dmed_New()
{
ISOM_DECL_BOX_ALLOC(GF_DMEDBox, GF_ISOM_BOX_TYPE_DMED);
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err dmed_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_DMEDBox *ptr = (GF_DMEDBox *)s;
if (ptr == NULL) return GF_BAD_PARAM;
e = gf_isom_box_write_header(s, bs);
if (e) return e;
gf_bs_write_u64(bs, ptr->nbBytes);
return GF_OK;
}
GF_Err dmed_Size(GF_Box *s)
{
s->size += 8;
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
/**********************************************************
DIMM GF_Box
**********************************************************/
void dimm_del(GF_Box *s)
{
gf_free((GF_DIMMBox *)s);
}
GF_Err dimm_Read(GF_Box *s, GF_BitStream *bs)
{
GF_DIMMBox *ptr = (GF_DIMMBox *)s;
ptr->nbBytes = gf_bs_read_u64(bs);
return GF_OK;
}
GF_Box *dimm_New()
{
ISOM_DECL_BOX_ALLOC(GF_DIMMBox, GF_ISOM_BOX_TYPE_DIMM);
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err dimm_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_DIMMBox *ptr = (GF_DIMMBox *)s;
if (ptr == NULL) return GF_BAD_PARAM;
e = gf_isom_box_write_header(s, bs);
if (e) return e;
gf_bs_write_u64(bs, ptr->nbBytes);
return GF_OK;
}
GF_Err dimm_Size(GF_Box *s)
{
s->size += 8;
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
/**********************************************************
DREP GF_Box
**********************************************************/
void drep_del(GF_Box *s)
{
gf_free((GF_DREPBox *)s);
}
GF_Err drep_Read(GF_Box *s, GF_BitStream *bs)
{
GF_DREPBox *ptr = (GF_DREPBox *)s;
ptr->nbBytes = gf_bs_read_u64(bs);
return GF_OK;
}
GF_Box *drep_New()
{
ISOM_DECL_BOX_ALLOC(GF_DREPBox, GF_ISOM_BOX_TYPE_DREP);
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err drep_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_DREPBox *ptr = (GF_DREPBox *)s;
if (ptr == NULL) return GF_BAD_PARAM;
e = gf_isom_box_write_header(s, bs);
if (e) return e;
gf_bs_write_u64(bs, ptr->nbBytes);
return GF_OK;
}
GF_Err drep_Size(GF_Box *s)
{
s->size += 8;
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
/**********************************************************
TMIN GF_Box
**********************************************************/
void tmin_del(GF_Box *s)
{
gf_free((GF_TMINBox *)s);
}
GF_Err tmin_Read(GF_Box *s, GF_BitStream *bs)
{
GF_TMINBox *ptr = (GF_TMINBox *)s;
ptr->minTime = gf_bs_read_u32(bs);
return GF_OK;
}
GF_Box *tmin_New()
{
ISOM_DECL_BOX_ALLOC(GF_TMINBox, GF_ISOM_BOX_TYPE_TMIN);
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err tmin_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_TMINBox *ptr = (GF_TMINBox *)s;
if (ptr == NULL) return GF_BAD_PARAM;
e = gf_isom_box_write_header(s, bs);
if (e) return e;
gf_bs_write_u32(bs, ptr->minTime);
return GF_OK;
}
GF_Err tmin_Size(GF_Box *s)
{
s->size += 4;
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
/**********************************************************
TMAX GF_Box
**********************************************************/
void tmax_del(GF_Box *s)
{
gf_free((GF_TMAXBox *)s);
}
GF_Err tmax_Read(GF_Box *s, GF_BitStream *bs)
{
GF_TMAXBox *ptr = (GF_TMAXBox *)s;
ptr->maxTime = gf_bs_read_u32(bs);
return GF_OK;
}
GF_Box *tmax_New()
{
ISOM_DECL_BOX_ALLOC(GF_TMAXBox, GF_ISOM_BOX_TYPE_TMAX);
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err tmax_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_TMAXBox *ptr = (GF_TMAXBox *)s;
if (ptr == NULL) return GF_BAD_PARAM;
e = gf_isom_box_write_header(s, bs);
if (e) return e;
gf_bs_write_u32(bs, ptr->maxTime);
return GF_OK;
}
GF_Err tmax_Size(GF_Box *s)
{
s->size += 4;
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
/**********************************************************
PMAX GF_Box
**********************************************************/
void pmax_del(GF_Box *s)
{
gf_free((GF_PMAXBox *)s);
}
GF_Err pmax_Read(GF_Box *s, GF_BitStream *bs)
{
GF_PMAXBox *ptr = (GF_PMAXBox *)s;
ptr->maxSize = gf_bs_read_u32(bs);
return GF_OK;
}
GF_Box *pmax_New()
{
ISOM_DECL_BOX_ALLOC(GF_PMAXBox, GF_ISOM_BOX_TYPE_PMAX);
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err pmax_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_PMAXBox *ptr = (GF_PMAXBox *)s;
if (ptr == NULL) return GF_BAD_PARAM;
e = gf_isom_box_write_header(s, bs);
if (e) return e;
gf_bs_write_u32(bs, ptr->maxSize);
return GF_OK;
}
GF_Err pmax_Size(GF_Box *s)
{
s->size += 4;
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
/**********************************************************
DMAX GF_Box
**********************************************************/
void dmax_del(GF_Box *s)
{
gf_free((GF_DMAXBox *)s);
}
GF_Err dmax_Read(GF_Box *s, GF_BitStream *bs)
{
GF_DMAXBox *ptr = (GF_DMAXBox *)s;
ptr->maxDur = gf_bs_read_u32(bs);
return GF_OK;
}
GF_Box *dmax_New()
{
ISOM_DECL_BOX_ALLOC(GF_DMAXBox, GF_ISOM_BOX_TYPE_DMAX);
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err dmax_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_DMAXBox *ptr = (GF_DMAXBox *)s;
if (ptr == NULL) return GF_BAD_PARAM;
e = gf_isom_box_write_header(s, bs);
if (e) return e;
gf_bs_write_u32(bs, ptr->maxDur);
return GF_OK;
}
GF_Err dmax_Size(GF_Box *s)
{
s->size += 4;
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
/**********************************************************
PAYT GF_Box
**********************************************************/
void payt_del(GF_Box *s)
{
GF_PAYTBox *payt = (GF_PAYTBox *)s;
if (payt->payloadString) gf_free(payt->payloadString);
gf_free(payt);
}
GF_Err payt_Read(GF_Box *s, GF_BitStream *bs)
{
u32 length;
GF_PAYTBox *ptr = (GF_PAYTBox *)s;
ptr->payloadCode = gf_bs_read_u32(bs);
length = gf_bs_read_u8(bs);
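//the payload name is stored as a counted string: a one-byte length followed by that many characters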
ptr->payloadString = (char*)gf_malloc(sizeof(char) * (length+1) );
if (! ptr->payloadString) return GF_OUT_OF_MEM;
gf_bs_read_data(bs, ptr->payloadString, length);
ptr->payloadString[length] = 0;
ISOM_DECREASE_SIZE(ptr, (4+length+1) );
return GF_OK;
}
GF_Box *payt_New()
{
ISOM_DECL_BOX_ALLOC(GF_PAYTBox, GF_ISOM_BOX_TYPE_PAYT);
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err payt_Write(GF_Box *s, GF_BitStream *bs)
{
u32 len;
GF_Err e;
GF_PAYTBox *ptr = (GF_PAYTBox *)s;
if (ptr == NULL) return GF_BAD_PARAM;
e = gf_isom_box_write_header(s, bs);
if (e) return e;
gf_bs_write_u32(bs, ptr->payloadCode);
len = (u32) strlen(ptr->payloadString);
gf_bs_write_u8(bs, len);
if (len) gf_bs_write_data(bs, ptr->payloadString, len);
return GF_OK;
}
GF_Err payt_Size(GF_Box *s)
{
GF_PAYTBox *ptr = (GF_PAYTBox *)s;
s->size += 4 + 1;
if (ptr->payloadString) ptr->size += strlen(ptr->payloadString);
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
/**********************************************************
NAME GF_Box
**********************************************************/
void name_del(GF_Box *s)
{
GF_NameBox *name = (GF_NameBox *)s;
if (name->string) gf_free(name->string);
gf_free(name);
}
GF_Err name_Read(GF_Box *s, GF_BitStream *bs)
{
u32 length;
GF_NameBox *ptr = (GF_NameBox *)s;
length = (u32) (ptr->size);
ptr->string = (char*)gf_malloc(sizeof(char) * (length+1));
if (! ptr->string) return GF_OUT_OF_MEM;
gf_bs_read_data(bs, ptr->string, length);
ptr->string[length] = 0;
return GF_OK;
}
GF_Box *name_New()
{
ISOM_DECL_BOX_ALLOC(GF_NameBox, GF_ISOM_BOX_TYPE_NAME);
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err name_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_NameBox *ptr = (GF_NameBox *)s;
if (ptr == NULL) return GF_BAD_PARAM;
e = gf_isom_box_write_header(s, bs);
if (e) return e;
if (ptr->string) {
gf_bs_write_data(bs, ptr->string, (u32) strlen(ptr->string) + 1);
}
return GF_OK;
}
GF_Err name_Size(GF_Box *s)
{
GF_NameBox *ptr = (GF_NameBox *)s;
if (ptr->string) ptr->size += strlen(ptr->string) + 1;
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
void tssy_del(GF_Box *s)
{
gf_free(s);
}
GF_Err tssy_Read(GF_Box *s, GF_BitStream *bs)
{
GF_TimeStampSynchronyBox *ptr = (GF_TimeStampSynchronyBox *)s;
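//payload is a single byte: 6 reserved bits followed by the 2-bit timestamp_sync code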
gf_bs_read_int(bs, 6);
ptr->timestamp_sync = gf_bs_read_int(bs, 2);
return GF_OK;
}
GF_Box *tssy_New()
{
ISOM_DECL_BOX_ALLOC(GF_TimeStampSynchronyBox, GF_ISOM_BOX_TYPE_TSSY);
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err tssy_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_TimeStampSynchronyBox *ptr = (GF_TimeStampSynchronyBox *)s;
if (ptr == NULL) return GF_BAD_PARAM;
e = gf_isom_box_write_header(s, bs);
if (e) return e;
gf_bs_write_int(bs, 0, 6);
gf_bs_write_int(bs, ptr->timestamp_sync, 2);
return GF_OK;
}
GF_Err tssy_Size(GF_Box *s)
{
s->size += 1;
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
void srpp_del(GF_Box *s)
{
GF_SRTPProcessBox *ptr = (GF_SRTPProcessBox *)s;
if (ptr->info) gf_isom_box_del((GF_Box*)ptr->info);
if (ptr->scheme_type) gf_isom_box_del((GF_Box*)ptr->scheme_type);
gf_free(s);
}
GF_Err srpp_AddBox(GF_Box *s, GF_Box *a)
{
GF_SRTPProcessBox *ptr = (GF_SRTPProcessBox *)s;
switch(a->type) {
case GF_ISOM_BOX_TYPE_SCHI:
if (ptr->info) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->info = (GF_SchemeInformationBox *)a;
return GF_OK;
case GF_ISOM_BOX_TYPE_SCHM:
if (ptr->scheme_type) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->scheme_type = (GF_SchemeTypeBox *)a;
return GF_OK;
}
return gf_isom_box_add_default(s, a);
}
GF_Err srpp_Read(GF_Box *s, GF_BitStream *bs)
{
GF_SRTPProcessBox *ptr = (GF_SRTPProcessBox *)s;
ISOM_DECREASE_SIZE(s, 16)
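//fixed part: four 32-bit algorithm identifiers; the child boxes (schm, schi) follow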
ptr->encryption_algorithm_rtp = gf_bs_read_u32(bs);
ptr->encryption_algorithm_rtcp = gf_bs_read_u32(bs);
ptr->integrity_algorithm_rtp = gf_bs_read_u32(bs);
ptr->integrity_algorithm_rtcp = gf_bs_read_u32(bs);
return gf_isom_box_array_read(s, bs, gf_isom_box_add_default);
}
GF_Box *srpp_New()
{
ISOM_DECL_BOX_ALLOC(GF_SRTPProcessBox, GF_ISOM_BOX_TYPE_SRPP);
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err srpp_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_SRTPProcessBox *ptr = (GF_SRTPProcessBox *)s;
if (ptr == NULL) return GF_BAD_PARAM;
e = gf_isom_full_box_write(s, bs);
if (e) return e;
gf_bs_write_u32(bs, ptr->encryption_algorithm_rtp);
gf_bs_write_u32(bs, ptr->encryption_algorithm_rtcp);
gf_bs_write_u32(bs, ptr->integrity_algorithm_rtp);
gf_bs_write_u32(bs, ptr->integrity_algorithm_rtcp);
if (ptr->info) {
e = gf_isom_box_write((GF_Box*)ptr->info, bs);
if (e) return e;
}
if (ptr->scheme_type) {
e = gf_isom_box_write((GF_Box*)ptr->scheme_type, bs);
if (e) return e;
}
return GF_OK;
}
GF_Err srpp_Size(GF_Box *s)
{
GF_Err e;
GF_SRTPProcessBox *ptr = (GF_SRTPProcessBox *)s;
s->size += 16;
if (ptr->info) {
e = gf_isom_box_size((GF_Box*)ptr->info);
if (e) return e;
ptr->size += ptr->info->size;
}
if (ptr->scheme_type) {
e = gf_isom_box_size((GF_Box*)ptr->scheme_type);
if (e) return e;
ptr->size += ptr->scheme_type->size;
}
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
void rssr_del(GF_Box *s)
{
gf_free(s);
}
GF_Err rssr_Read(GF_Box *s, GF_BitStream *bs)
{
GF_ReceivedSsrcBox *ptr = (GF_ReceivedSsrcBox *)s;
ptr->ssrc = gf_bs_read_u32(bs);
return GF_OK;
}
GF_Box *rssr_New()
{
ISOM_DECL_BOX_ALLOC(GF_ReceivedSsrcBox, GF_ISOM_BOX_TYPE_RSSR);
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err rssr_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_ReceivedSsrcBox *ptr = (GF_ReceivedSsrcBox *)s;
e = gf_isom_box_write_header(s, bs);
if (e) return e;
gf_bs_write_u32(bs, ptr->ssrc);
return GF_OK;
}
GF_Err rssr_Size(GF_Box *s)
{
s->size += 4;
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
void iods_del(GF_Box *s)
{
GF_ObjectDescriptorBox *ptr = (GF_ObjectDescriptorBox *)s;
if (ptr == NULL) return;
if (ptr->descriptor) gf_odf_desc_del(ptr->descriptor);
gf_free(ptr);
}
GF_Err iods_Read(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
u32 descSize;
char *desc;
GF_ObjectDescriptorBox *ptr = (GF_ObjectDescriptorBox *)s;
//use the OD codec...
descSize = (u32) (ptr->size);
desc = (char*)gf_malloc(sizeof(char) * descSize);
if (!desc) return GF_OUT_OF_MEM;
gf_bs_read_data(bs, desc, descSize);
e = gf_odf_desc_read(desc, descSize, &ptr->descriptor);
//OK, free our desc
gf_free(desc);
return e;
}
GF_Box *iods_New()
{
ISOM_DECL_BOX_ALLOC(GF_ObjectDescriptorBox, GF_ISOM_BOX_TYPE_IODS);
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err iods_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
u32 descSize;
char *desc;
GF_ObjectDescriptorBox *ptr = (GF_ObjectDescriptorBox *)s;
e = gf_isom_full_box_write(s, bs);
if (e) return e;
//call our OD codec
e = gf_odf_desc_write(ptr->descriptor, &desc, &descSize);
if (e) return e;
gf_bs_write_data(bs, desc, descSize);
//free the serialized descriptor
gf_free(desc);
return GF_OK;
}
GF_Err iods_Size(GF_Box *s)
{
GF_ObjectDescriptorBox *ptr = (GF_ObjectDescriptorBox *)s;
ptr->size += gf_odf_desc_size(ptr->descriptor);
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
void mdat_del(GF_Box *s)
{
GF_MediaDataBox *ptr = (GF_MediaDataBox *)s;
if (!s) return;
if (ptr->data) gf_free(ptr->data);
gf_free(ptr);
}
GF_Err mdat_Read(GF_Box *s, GF_BitStream *bs)
{
GF_MediaDataBox *ptr = (GF_MediaDataBox *)s;
if (ptr == NULL) return GF_BAD_PARAM;
ptr->dataSize = s->size;
ptr->bsOffset = gf_bs_get_position(bs);
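//the media payload is not loaded into memory - only its size and bitstream offset are kept, and the bytes are skipped below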
//then skip these bytes
gf_bs_skip_bytes(bs, ptr->dataSize);
return GF_OK;
}
GF_Box *mdat_New()
{
ISOM_DECL_BOX_ALLOC(GF_MediaDataBox, GF_ISOM_BOX_TYPE_MDAT);
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err mdat_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_MediaDataBox *ptr = (GF_MediaDataBox *)s;
e = gf_isom_box_write_header(s, bs);
if (e) return e;
//make sure we have some data ...
//if not, we handle that independently (edit files)
if (ptr->data) {
gf_bs_write_data(bs, ptr->data, (u32) ptr->dataSize);
}
return GF_OK;
}
GF_Err mdat_Size(GF_Box *s)
{
GF_MediaDataBox *ptr = (GF_MediaDataBox *)s;
ptr->size += ptr->dataSize;
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
void mdhd_del(GF_Box *s)
{
GF_MediaHeaderBox *ptr = (GF_MediaHeaderBox *)s;
if (ptr == NULL) return;
gf_free(ptr);
}
GF_Err mdhd_Read(GF_Box *s, GF_BitStream *bs)
{
GF_MediaHeaderBox *ptr = (GF_MediaHeaderBox *)s;
if (ptr->version == 1) {
ptr->creationTime = gf_bs_read_u64(bs);
ptr->modificationTime = gf_bs_read_u64(bs);
ptr->timeScale = gf_bs_read_u32(bs);
ptr->duration = gf_bs_read_u64(bs);
} else {
ptr->creationTime = gf_bs_read_u32(bs);
ptr->modificationTime = gf_bs_read_u32(bs);
ptr->timeScale = gf_bs_read_u32(bs);
ptr->duration = gf_bs_read_u32(bs);
}
if (!ptr->timeScale) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] Media header timescale is 0 - defaulting to 90000\n" ));
ptr->timeScale = 90000;
}
ptr->original_duration = ptr->duration;
//our padding bit
gf_bs_read_int(bs, 1);
//the spec is unclear here, just says "the value 0 is interpreted as undetermined"
ptr->packedLanguage[0] = gf_bs_read_int(bs, 5);
ptr->packedLanguage[1] = gf_bs_read_int(bs, 5);
ptr->packedLanguage[2] = gf_bs_read_int(bs, 5);
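//each ISO 639-2/T letter is stored on 5 bits as (character - 0x60); e.g. 'und' packs to 0x15, 0x0E, 0x04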
//the spec does not say whether this applies before or after compaction - we assume before
if (ptr->packedLanguage[0] || ptr->packedLanguage[1] || ptr->packedLanguage[2]) {
ptr->packedLanguage[0] += 0x60;
ptr->packedLanguage[1] += 0x60;
ptr->packedLanguage[2] += 0x60;
} else {
ptr->packedLanguage[0] = 'u';
ptr->packedLanguage[1] = 'n';
ptr->packedLanguage[2] = 'd';
}
ptr->reserved = gf_bs_read_u16(bs);
return GF_OK;
}
GF_Box *mdhd_New()
{
ISOM_DECL_BOX_ALLOC(GF_MediaHeaderBox, GF_ISOM_BOX_TYPE_MDHD);
tmp->packedLanguage[0] = 'u';
tmp->packedLanguage[1] = 'n';
tmp->packedLanguage[2] = 'd';
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err mdhd_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_MediaHeaderBox *ptr = (GF_MediaHeaderBox *)s;
e = gf_isom_full_box_write(s, bs);
if (e) return e;
if (ptr->version == 1) {
gf_bs_write_u64(bs, ptr->creationTime);
gf_bs_write_u64(bs, ptr->modificationTime);
gf_bs_write_u32(bs, ptr->timeScale);
gf_bs_write_u64(bs, ptr->duration);
} else {
gf_bs_write_u32(bs, (u32) ptr->creationTime);
gf_bs_write_u32(bs, (u32) ptr->modificationTime);
gf_bs_write_u32(bs, ptr->timeScale);
gf_bs_write_u32(bs, (u32) ptr->duration);
}
//SPECS: BIT(1) of padding
gf_bs_write_int(bs, 0, 1);
gf_bs_write_int(bs, ptr->packedLanguage[0] - 0x60, 5);
gf_bs_write_int(bs, ptr->packedLanguage[1] - 0x60, 5);
gf_bs_write_int(bs, ptr->packedLanguage[2] - 0x60, 5);
gf_bs_write_u16(bs, ptr->reserved);
return GF_OK;
}
GF_Err mdhd_Size(GF_Box *s)
{
GF_MediaHeaderBox *ptr = (GF_MediaHeaderBox *)s;
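//durations that do not fit on 32 bits force the version 1 (64-bit) layout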
ptr->version = (ptr->duration>0xFFFFFFFF) ? 1 : 0;
ptr->size += 4;
ptr->size += (ptr->version == 1) ? 28 : 16;
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
void mdia_del(GF_Box *s)
{
GF_MediaBox *ptr = (GF_MediaBox *)s;
if (ptr == NULL) return;
if (ptr->mediaHeader) gf_isom_box_del((GF_Box *)ptr->mediaHeader);
if (ptr->information) gf_isom_box_del((GF_Box *)ptr->information);
if (ptr->handler) gf_isom_box_del((GF_Box *)ptr->handler);
gf_free(ptr);
}
GF_Err mdia_AddBox(GF_Box *s, GF_Box *a)
{
GF_MediaBox *ptr = (GF_MediaBox *)s;
switch(a->type) {
case GF_ISOM_BOX_TYPE_MDHD:
if (ptr->mediaHeader) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->mediaHeader = (GF_MediaHeaderBox *)a;
return GF_OK;
case GF_ISOM_BOX_TYPE_HDLR:
if (ptr->handler) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->handler = (GF_HandlerBox *)a;
return GF_OK;
case GF_ISOM_BOX_TYPE_MINF:
if (ptr->information) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->information = (GF_MediaInformationBox *)a;
return GF_OK;
default:
return gf_isom_box_add_default(s, a);
}
return GF_OK;
}
GF_Err mdia_Read(GF_Box *s, GF_BitStream *bs)
{
GF_Err e = gf_isom_box_array_read(s, bs, mdia_AddBox);
if (e) return e;
if (!((GF_MediaBox *)s)->information) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Missing MediaInformationBox\n"));
return GF_ISOM_INVALID_FILE;
}
if (!((GF_MediaBox *)s)->handler) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Missing HandlerBox\n"));
return GF_ISOM_INVALID_FILE;
}
if (!((GF_MediaBox *)s)->mediaHeader) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Missing MediaHeaderBox\n"));
return GF_ISOM_INVALID_FILE;
}
return GF_OK;
}
GF_Box *mdia_New()
{
ISOM_DECL_BOX_ALLOC(GF_MediaBox, GF_ISOM_BOX_TYPE_MDIA);
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err mdia_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_MediaBox *ptr = (GF_MediaBox *)s;
e = gf_isom_box_write_header(s, bs);
if (e) return e;
//Header first
if (ptr->mediaHeader) {
e = gf_isom_box_write((GF_Box *) ptr->mediaHeader, bs);
if (e) return e;
}
//then handler
if (ptr->handler) {
e = gf_isom_box_write((GF_Box *) ptr->handler, bs);
if (e) return e;
}
if (ptr->information) {
e = gf_isom_box_write((GF_Box *) ptr->information, bs);
if (e) return e;
}
return GF_OK;
}
GF_Err mdia_Size(GF_Box *s)
{
GF_Err e;
GF_MediaBox *ptr = (GF_MediaBox *)s;
if (ptr->mediaHeader) {
e = gf_isom_box_size((GF_Box *) ptr->mediaHeader);
if (e) return e;
ptr->size += ptr->mediaHeader->size;
}
if (ptr->handler) {
e = gf_isom_box_size((GF_Box *) ptr->handler);
if (e) return e;
ptr->size += ptr->handler->size;
}
if (ptr->information) {
e = gf_isom_box_size((GF_Box *) ptr->information);
if (e) return e;
ptr->size += ptr->information->size;
}
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
void mfra_del(GF_Box *s)
{
GF_MovieFragmentRandomAccessBox *ptr = (GF_MovieFragmentRandomAccessBox *)s;
if (ptr == NULL) return;
if (ptr->mfro) gf_isom_box_del((GF_Box*)ptr->mfro);
gf_isom_box_array_del(ptr->tfra_list);
gf_free(ptr);
}
GF_Box *mfra_New()
{
ISOM_DECL_BOX_ALLOC(GF_MovieFragmentRandomAccessBox, GF_ISOM_BOX_TYPE_MFRA);
tmp->tfra_list = gf_list_new();
return (GF_Box *)tmp;
}
GF_Err mfra_AddBox(GF_Box *s, GF_Box *a)
{
GF_MovieFragmentRandomAccessBox *ptr = (GF_MovieFragmentRandomAccessBox *)s;
switch(a->type) {
case GF_ISOM_BOX_TYPE_TFRA:
return gf_list_add(ptr->tfra_list, a);
case GF_ISOM_BOX_TYPE_MFRO:
if (ptr->mfro) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->mfro = (GF_MovieFragmentRandomAccessOffsetBox *)a;
return GF_OK;
default:
return gf_isom_box_add_default(s, a);
}
return GF_OK;
}
GF_Err mfra_Read(GF_Box *s, GF_BitStream *bs)
{
return gf_isom_box_array_read(s, bs, mfra_AddBox);
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err mfra_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_MovieFragmentRandomAccessBox *ptr = (GF_MovieFragmentRandomAccessBox *)s;
e = gf_isom_box_write_header(s, bs);
if (e) return e;
e = gf_isom_box_array_write(s, ptr->tfra_list, bs);
if (e) return e;
if (ptr->mfro) {
e = gf_isom_box_write((GF_Box *) ptr->mfro, bs);
if (e) return e;
}
return GF_OK;
}
GF_Err mfra_Size(GF_Box *s)
{
GF_Err e;
GF_MovieFragmentRandomAccessBox *ptr = (GF_MovieFragmentRandomAccessBox *)s;
if (ptr->mfro) {
e = gf_isom_box_size((GF_Box *)ptr->mfro);
if (e) return e;
ptr->size += ptr->mfro->size;
}
return gf_isom_box_array_size(s, ptr->tfra_list);
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
void tfra_del(GF_Box *s)
{
GF_TrackFragmentRandomAccessBox *ptr = (GF_TrackFragmentRandomAccessBox *)s;
if (ptr == NULL) return;
if (ptr->entries) gf_free(ptr->entries);
gf_free(ptr);
}
GF_Box *tfra_New()
{
ISOM_DECL_BOX_ALLOC(GF_TrackFragmentRandomAccessBox, GF_ISOM_BOX_TYPE_TFRA);
return (GF_Box *)tmp;
}
GF_Err tfra_Read(GF_Box *s, GF_BitStream *bs)
{
u32 i;
GF_RandomAccessEntry *p = NULL;
GF_TrackFragmentRandomAccessBox *ptr = (GF_TrackFragmentRandomAccessBox *)s;
if (ptr->size < 12)
return GF_ISOM_INVALID_FILE;
ptr->track_id = gf_bs_read_u32(bs);
ISOM_DECREASE_SIZE(ptr, 4);
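//next 32 bits: 26 reserved bits, then the traf, trun and sample number field lengths, each coded as (size_in_bytes - 1) on 2 bits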
if (gf_bs_read_int(bs, 26) != 0)
return GF_ISOM_INVALID_FILE;
ptr->traf_bits = (gf_bs_read_int(bs, 2) + 1) * 8;
ptr->trun_bits = (gf_bs_read_int(bs, 2) + 1) * 8;
ptr->sample_bits = (gf_bs_read_int(bs, 2) + 1) * 8;
ISOM_DECREASE_SIZE(ptr, 4);
ptr->nb_entries = gf_bs_read_u32(bs);
ISOM_DECREASE_SIZE(ptr, 4);
if (ptr->version == 1) {
if (ptr->nb_entries > ptr->size / (16+(ptr->traf_bits+ptr->trun_bits+ptr->sample_bits)/8)) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid number of entries %d in traf\n", ptr->nb_entries));
return GF_ISOM_INVALID_FILE;
}
} else {
if (ptr->nb_entries > ptr->size / (8+(ptr->traf_bits+ptr->trun_bits+ptr->sample_bits)/8)) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid number of entries %d in traf\n", ptr->nb_entries));
return GF_ISOM_INVALID_FILE;
}
}
if (ptr->nb_entries) {
p = (GF_RandomAccessEntry *) gf_malloc(sizeof(GF_RandomAccessEntry) * ptr->nb_entries);
if (!p) return GF_OUT_OF_MEM;
}
ptr->entries = p;
for (i=0; i<ptr->nb_entries; i++) {
memset(p, 0, sizeof(GF_RandomAccessEntry));
if (ptr->version == 1) {
p->time = gf_bs_read_u64(bs);
p->moof_offset = gf_bs_read_u64(bs);
} else {
p->time = gf_bs_read_u32(bs);
p->moof_offset = gf_bs_read_u32(bs);
}
p->traf_number = gf_bs_read_int(bs, ptr->traf_bits);
p->trun_number = gf_bs_read_int(bs, ptr->trun_bits);
p->sample_number = gf_bs_read_int(bs, ptr->sample_bits);
++p;
}
return GF_OK;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err tfra_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
u32 i;
GF_TrackFragmentRandomAccessBox *ptr = (GF_TrackFragmentRandomAccessBox *)s;
e = gf_isom_full_box_write(s, bs);
if (e) return e;
gf_bs_write_u32(bs, ptr->track_id);
gf_bs_write_int(bs, 0, 26);
gf_bs_write_int(bs, ptr->traf_bits/8 - 1, 2);
gf_bs_write_int(bs, ptr->trun_bits/8 - 1, 2);
gf_bs_write_int(bs, ptr->sample_bits/8 - 1, 2);
gf_bs_write_u32(bs, ptr->nb_entries);
for (i=0; i<ptr->nb_entries; i++) {
GF_RandomAccessEntry *p = &ptr->entries[i];
if (ptr->version==1) {
gf_bs_write_u64(bs, p->time);
gf_bs_write_u64(bs, p->moof_offset);
} else {
gf_bs_write_u32(bs, (u32) p->time);
gf_bs_write_u32(bs, (u32) p->moof_offset);
}
gf_bs_write_int(bs, p->traf_number, ptr->traf_bits);
gf_bs_write_int(bs, p->trun_number, ptr->trun_bits);
gf_bs_write_int(bs, p->sample_number, ptr->sample_bits);
}
return GF_OK;
}
GF_Err tfra_Size(GF_Box *s)
{
GF_TrackFragmentRandomAccessBox *ptr = (GF_TrackFragmentRandomAccessBox *)s;
ptr->size += 12;
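//each entry: 8 (version 0) or 16 (version 1) bytes of time + moof offset, plus the three variable-length index fields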
ptr->size += ptr->nb_entries * ( ((ptr->version==1) ? 16 : 8 ) + ptr->traf_bits/8 + ptr->trun_bits/8 + ptr->sample_bits/8);
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
void mfro_del(GF_Box *s)
{
GF_MovieFragmentRandomAccessOffsetBox *ptr = (GF_MovieFragmentRandomAccessOffsetBox *)s;
if (ptr == NULL) return;
gf_free(ptr);
}
GF_Box *mfro_New()
{
ISOM_DECL_BOX_ALLOC(GF_MovieFragmentRandomAccessOffsetBox, GF_ISOM_BOX_TYPE_MFRO);
return (GF_Box *)tmp;
}
GF_Err mfro_Read(GF_Box *s, GF_BitStream *bs)
{
GF_MovieFragmentRandomAccessOffsetBox *ptr = (GF_MovieFragmentRandomAccessOffsetBox *)s;
ptr->container_size = gf_bs_read_u32(bs);
ISOM_DECREASE_SIZE(ptr, 4);
return GF_OK;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err mfro_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_MovieFragmentRandomAccessOffsetBox *ptr = (GF_MovieFragmentRandomAccessOffsetBox *)s;
e = gf_isom_full_box_write(s, bs);
if (e) return e;
gf_bs_write_u32(bs, ptr->container_size);
return GF_OK;
}
GF_Err mfro_Size(GF_Box *s)
{
s->size += 4;
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
void elng_del(GF_Box *s)
{
GF_ExtendedLanguageBox *ptr = (GF_ExtendedLanguageBox *)s;
if (ptr == NULL) return;
if (ptr->extended_language) gf_free(ptr->extended_language);
gf_free(ptr);
}
GF_Err elng_Read(GF_Box *s, GF_BitStream *bs)
{
GF_ExtendedLanguageBox *ptr = (GF_ExtendedLanguageBox *)s;
if (ptr->size) {
ptr->extended_language = (char*)gf_malloc((u32) ptr->size);
if (ptr->extended_language == NULL) return GF_OUT_OF_MEM;
gf_bs_read_data(bs, ptr->extended_language, (u32) ptr->size);
/*safety check in case the string is not null-terminated*/
if (ptr->extended_language[ptr->size-1]) {
char *str = (char*)gf_malloc((u32) ptr->size + 1);
if (!str) return GF_OUT_OF_MEM;
memcpy(str, ptr->extended_language, (u32) ptr->size);
str[ptr->size] = 0;
gf_free(ptr->extended_language);
ptr->extended_language = str;
}
}
return GF_OK;
}
GF_Box *elng_New()
{
ISOM_DECL_BOX_ALLOC(GF_ExtendedLanguageBox, GF_ISOM_BOX_TYPE_ELNG);
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err elng_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_ExtendedLanguageBox *ptr = (GF_ExtendedLanguageBox *)s;
e = gf_isom_full_box_write(s, bs);
if (e) return e;
if (ptr->extended_language) {
gf_bs_write_data(bs, ptr->extended_language, (u32)(strlen(ptr->extended_language)+1));
}
return GF_OK;
}
GF_Err elng_Size(GF_Box *s)
{
GF_ExtendedLanguageBox *ptr = (GF_ExtendedLanguageBox *)s;
if (ptr->extended_language) {
ptr->size += strlen(ptr->extended_language)+1;
}
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
#ifndef GPAC_DISABLE_ISOM_FRAGMENTS
void mfhd_del(GF_Box *s)
{
GF_MovieFragmentHeaderBox *ptr = (GF_MovieFragmentHeaderBox *)s;
if (ptr == NULL) return;
gf_free(ptr);
}
GF_Err mfhd_Read(GF_Box *s, GF_BitStream *bs)
{
GF_MovieFragmentHeaderBox *ptr = (GF_MovieFragmentHeaderBox *)s;
ptr->sequence_number = gf_bs_read_u32(bs);
return GF_OK;
}
GF_Box *mfhd_New()
{
ISOM_DECL_BOX_ALLOC(GF_MovieFragmentHeaderBox, GF_ISOM_BOX_TYPE_MFHD);
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err mfhd_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_MovieFragmentHeaderBox *ptr = (GF_MovieFragmentHeaderBox *) s;
if (!s) return GF_BAD_PARAM;
e = gf_isom_full_box_write(s, bs);
if (e) return e;
gf_bs_write_u32(bs, ptr->sequence_number);
return GF_OK;
}
GF_Err mfhd_Size(GF_Box *s)
{
s->size += 4;
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
#endif /*GPAC_DISABLE_ISOM_FRAGMENTS*/
void minf_del(GF_Box *s)
{
GF_MediaInformationBox *ptr = (GF_MediaInformationBox *)s;
if (ptr == NULL) return;
//close the data handler if it is not self-contained (a self-contained handler belongs to the movie)
if (ptr->dataHandler) {
gf_isom_datamap_close(ptr);
}
if (ptr->InfoHeader) gf_isom_box_del((GF_Box *)ptr->InfoHeader);
if (ptr->dataInformation) gf_isom_box_del((GF_Box *)ptr->dataInformation);
if (ptr->sampleTable) gf_isom_box_del((GF_Box *)ptr->sampleTable);
gf_free(ptr);
}
GF_Err minf_AddBox(GF_Box *s, GF_Box *a)
{
GF_MediaInformationBox *ptr = (GF_MediaInformationBox *)s;
switch (a->type) {
case GF_ISOM_BOX_TYPE_NMHD:
case GF_ISOM_BOX_TYPE_STHD:
case GF_ISOM_BOX_TYPE_VMHD:
case GF_ISOM_BOX_TYPE_SMHD:
case GF_ISOM_BOX_TYPE_HMHD:
case GF_ISOM_BOX_TYPE_GMHD:
if (ptr->InfoHeader) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->InfoHeader = a;
return GF_OK;
case GF_ISOM_BOX_TYPE_DINF:
if (ptr->dataInformation) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->dataInformation = (GF_DataInformationBox *)a;
return GF_OK;
case GF_ISOM_BOX_TYPE_STBL:
if (ptr->sampleTable ) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->sampleTable = (GF_SampleTableBox *)a;
return GF_OK;
default:
return gf_isom_box_add_default(s, a);
}
return GF_OK;
}
GF_Err minf_Read(GF_Box *s, GF_BitStream *bs)
{
GF_MediaInformationBox *ptr = (GF_MediaInformationBox *)s;
GF_Err e;
e = gf_isom_box_array_read(s, bs, minf_AddBox);
if (! ptr->dataInformation) {
GF_Box *dinf, *dref, *url;
extern Bool use_dump_mode;
Bool dump_mode = use_dump_mode;
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Missing DataInformationBox\n"));
//commented on purpose, we are still able to handle the file, we only throw an error but keep processing
// e = GF_ISOM_INVALID_FILE;
//add a dinf box to avoid any access to a null dinf
dinf = gf_isom_box_new(GF_ISOM_BOX_TYPE_DINF);
if (!dinf) return GF_OUT_OF_MEM;
if (ptr->InfoHeader && gf_list_find(ptr->other_boxes, ptr->InfoHeader)>=0) dump_mode = GF_TRUE;
if (ptr->sampleTable && gf_list_find(ptr->other_boxes, ptr->sampleTable)>=0) dump_mode = GF_TRUE;
ptr->dataInformation = (GF_DataInformationBox *)dinf;
dref = gf_isom_box_new(GF_ISOM_BOX_TYPE_DREF);
if (!dref) return GF_OUT_OF_MEM;
e = dinf_AddBox(dinf, dref);
url = gf_isom_box_new(GF_ISOM_BOX_TYPE_URL);
if (!url) return GF_OUT_OF_MEM;
((GF_FullBox*)url)->flags = 1;
e = gf_isom_box_add_default(dref, url);
if (dump_mode) {
gf_isom_box_add_for_dump_mode((GF_Box*)ptr, (GF_Box*)ptr->dataInformation);
gf_isom_box_add_for_dump_mode((GF_Box*)dinf, (GF_Box*)dref);
}
}
return e;
}
GF_Box *minf_New()
{
ISOM_DECL_BOX_ALLOC(GF_MediaInformationBox, GF_ISOM_BOX_TYPE_MINF);
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err minf_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_MediaInformationBox *ptr = (GF_MediaInformationBox *)s;
if (!s) return GF_BAD_PARAM;
e = gf_isom_box_write_header(s, bs);
if (e) return e;
//Header first
if (ptr->InfoHeader) {
e = gf_isom_box_write((GF_Box *) ptr->InfoHeader, bs);
if (e) return e;
}
//then dataInfo
if (ptr->dataInformation) {
e = gf_isom_box_write((GF_Box *) ptr->dataInformation, bs);
if (e) return e;
}
//then sampleTable
if (ptr->sampleTable) {
e = gf_isom_box_write((GF_Box *) ptr->sampleTable, bs);
if (e) return e;
}
return GF_OK;
}
GF_Err minf_Size(GF_Box *s)
{
GF_Err e;
GF_MediaInformationBox *ptr = (GF_MediaInformationBox *)s;
if (ptr->InfoHeader) {
e = gf_isom_box_size((GF_Box *) ptr->InfoHeader);
if (e) return e;
ptr->size += ptr->InfoHeader->size;
}
if (ptr->dataInformation) {
e = gf_isom_box_size((GF_Box *) ptr->dataInformation);
if (e) return e;
ptr->size += ptr->dataInformation->size;
}
if (ptr->sampleTable) {
e = gf_isom_box_size((GF_Box *) ptr->sampleTable);
if (e) return e;
ptr->size += ptr->sampleTable->size;
}
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
#ifndef GPAC_DISABLE_ISOM_FRAGMENTS
void moof_del(GF_Box *s)
{
GF_MovieFragmentBox *ptr = (GF_MovieFragmentBox *)s;
if (ptr == NULL) return;
if (ptr->mfhd) gf_isom_box_del((GF_Box *) ptr->mfhd);
gf_isom_box_array_del(ptr->TrackList);
if (ptr->mdat) gf_free(ptr->mdat);
gf_free(ptr);
}
GF_Err moof_AddBox(GF_Box *s, GF_Box *a)
{
GF_MovieFragmentBox *ptr = (GF_MovieFragmentBox *)s;
switch (a->type) {
case GF_ISOM_BOX_TYPE_MFHD:
if (ptr->mfhd) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->mfhd = (GF_MovieFragmentHeaderBox *) a;
return GF_OK;
case GF_ISOM_BOX_TYPE_TRAF:
return gf_list_add(ptr->TrackList, a);
case GF_ISOM_BOX_TYPE_PSSH:
default:
return gf_isom_box_add_default(s, a);
}
}
GF_Err moof_Read(GF_Box *s, GF_BitStream *bs)
{
return gf_isom_box_array_read(s, bs, moof_AddBox);
}
GF_Box *moof_New()
{
ISOM_DECL_BOX_ALLOC(GF_MovieFragmentBox, GF_ISOM_BOX_TYPE_MOOF);
tmp->TrackList = gf_list_new();
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err moof_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_MovieFragmentBox *ptr = (GF_MovieFragmentBox *) s;
if (!s) return GF_BAD_PARAM;
e = gf_isom_box_write_header(s, bs);
if (e) return e;
//Header First
if (ptr->mfhd) {
e = gf_isom_box_write((GF_Box *) ptr->mfhd, bs);
if (e) return e;
}
//then the track list
return gf_isom_box_array_write(s, ptr->TrackList, bs);
}
GF_Err moof_Size(GF_Box *s)
{
GF_Err e;
GF_MovieFragmentBox *ptr = (GF_MovieFragmentBox *)s;
if (ptr->mfhd) {
e = gf_isom_box_size((GF_Box *)ptr->mfhd);
if (e) return e;
ptr->size += ptr->mfhd->size;
}
return gf_isom_box_array_size(s, ptr->TrackList);
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
#endif /*GPAC_DISABLE_ISOM_FRAGMENTS*/
void moov_del(GF_Box *s)
{
GF_MovieBox *ptr = (GF_MovieBox *)s;
if (ptr == NULL) return;
if (ptr->mvhd) gf_isom_box_del((GF_Box *)ptr->mvhd);
if (ptr->meta) gf_isom_box_del((GF_Box *)ptr->meta);
if (ptr->iods) gf_isom_box_del((GF_Box *)ptr->iods);
if (ptr->udta) gf_isom_box_del((GF_Box *)ptr->udta);
#ifndef GPAC_DISABLE_ISOM_FRAGMENTS
if (ptr->mvex) gf_isom_box_del((GF_Box *)ptr->mvex);
#endif
gf_isom_box_array_del(ptr->trackList);
gf_free(ptr);
}
GF_Err moov_AddBox(GF_Box *s, GF_Box *a)
{
GF_MovieBox *ptr = (GF_MovieBox *)s;
switch (a->type) {
case GF_ISOM_BOX_TYPE_IODS:
if (ptr->iods) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->iods = (GF_ObjectDescriptorBox *)a;
//if no IOD, delete the box
if (!ptr->iods->descriptor) {
extern Bool use_dump_mode;
ptr->iods = NULL;
// don't actually delete in dump mode, it will be done in other_boxes
if (!use_dump_mode)
gf_isom_box_del(a);
}
return GF_OK;
case GF_ISOM_BOX_TYPE_MVHD:
if (ptr->mvhd) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->mvhd = (GF_MovieHeaderBox *)a;
return GF_OK;
case GF_ISOM_BOX_TYPE_UDTA:
if (ptr->udta) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->udta = (GF_UserDataBox *)a;
return GF_OK;
#ifndef GPAC_DISABLE_ISOM_FRAGMENTS
case GF_ISOM_BOX_TYPE_MVEX:
if (ptr->mvex) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->mvex = (GF_MovieExtendsBox *)a;
ptr->mvex->mov = ptr->mov;
return GF_OK;
#endif
case GF_ISOM_BOX_TYPE_META:
if (ptr->meta) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->meta = (GF_MetaBox *)a;
return GF_OK;
case GF_ISOM_BOX_TYPE_TRAK:
//set our pointer to this obj
((GF_TrackBox *)a)->moov = ptr;
return gf_list_add(ptr->trackList, a);
case GF_ISOM_BOX_TYPE_PSSH:
default:
return gf_isom_box_add_default(s, a);
}
}
GF_Err moov_Read(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
e = gf_isom_box_array_read(s, bs, moov_AddBox);
if (e) return e;
if (!((GF_MovieBox *)s)->mvhd) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Missing MovieHeaderBox\n"));
return GF_ISOM_INVALID_FILE;
}
return GF_OK;
}
GF_Box *moov_New()
{
ISOM_DECL_BOX_ALLOC(GF_MovieBox, GF_ISOM_BOX_TYPE_MOOV);
tmp->trackList = gf_list_new();
if (!tmp->trackList) {
gf_free(tmp);
return NULL;
}
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err moov_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_MovieBox *ptr = (GF_MovieBox *)s;
if (!s) return GF_BAD_PARAM;
e = gf_isom_box_write_header(s, bs);
if (e) return e;
if (ptr->mvhd) {
e = gf_isom_box_write((GF_Box *) ptr->mvhd, bs);
if (e) return e;
}
if (ptr->iods) {
e = gf_isom_box_write((GF_Box *) ptr->iods, bs);
if (e) return e;
}
if (ptr->meta) {
e = gf_isom_box_write((GF_Box *) ptr->meta, bs);
if (e) return e;
}
#ifndef GPAC_DISABLE_ISOM_FRAGMENTS
if (ptr->mvex && !ptr->mvex_after_traks) {
e = gf_isom_box_write((GF_Box *) ptr->mvex, bs);
if (e) return e;
}
#endif
e = gf_isom_box_array_write(s, ptr->trackList, bs);
if (e) return e;
#ifndef GPAC_DISABLE_ISOM_FRAGMENTS
if (ptr->mvex && ptr->mvex_after_traks) {
e = gf_isom_box_write((GF_Box *) ptr->mvex, bs);
if (e) return e;
}
#endif
if (ptr->udta) {
e = gf_isom_box_write((GF_Box *) ptr->udta, bs);
if (e) return e;
}
return GF_OK;
}
GF_Err moov_Size(GF_Box *s)
{
GF_Err e;
GF_MovieBox *ptr = (GF_MovieBox *)s;
if (ptr->mvhd) {
e = gf_isom_box_size((GF_Box *) ptr->mvhd);
if (e) return e;
ptr->size += ptr->mvhd->size;
}
if (ptr->iods) {
e = gf_isom_box_size((GF_Box *) ptr->iods);
if (e) return e;
ptr->size += ptr->iods->size;
}
if (ptr->udta) {
e = gf_isom_box_size((GF_Box *) ptr->udta);
if (e) return e;
ptr->size += ptr->udta->size;
}
if (ptr->meta) {
e = gf_isom_box_size((GF_Box *) ptr->meta);
if (e) return e;
ptr->size += ptr->meta->size;
}
#ifndef GPAC_DISABLE_ISOM_FRAGMENTS
if (ptr->mvex) {
e = gf_isom_box_size((GF_Box *) ptr->mvex);
if (e) return e;
ptr->size += ptr->mvex->size;
}
#endif
return gf_isom_box_array_size(s, ptr->trackList);
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
void audio_sample_entry_del(GF_Box *s)
{
GF_MPEGAudioSampleEntryBox *ptr = (GF_MPEGAudioSampleEntryBox *)s;
if (ptr == NULL) return;
gf_isom_sample_entry_predestroy((GF_SampleEntryBox *)s);
if (ptr->is_qtff!=2) {
if (ptr->esd) gf_isom_box_del((GF_Box *)ptr->esd);
if (ptr->cfg_opus) gf_isom_box_del((GF_Box *)ptr->cfg_opus);
if (ptr->cfg_ac3) gf_isom_box_del((GF_Box *)ptr->cfg_ac3);
if (ptr->cfg_mha) gf_isom_box_del((GF_Box *)ptr->cfg_mha);
}
if (ptr->slc) gf_odf_desc_del((GF_Descriptor *)ptr->slc);
if (ptr->cfg_3gpp) gf_isom_box_del((GF_Box *)ptr->cfg_3gpp);
gf_free(ptr);
}
GF_Err audio_sample_entry_AddBox(GF_Box *s, GF_Box *a)
{
GF_UnknownBox *wave = NULL;
Bool drop_wave=GF_FALSE;
GF_MPEGAudioSampleEntryBox *ptr = (GF_MPEGAudioSampleEntryBox *)s;
switch (a->type) {
case GF_ISOM_BOX_TYPE_ESDS:
if (ptr->esd) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->esd = (GF_ESDBox *)a;
ptr->is_qtff = 0;
break;
case GF_ISOM_BOX_TYPE_SINF:
gf_list_add(ptr->protections, a);
break;
case GF_ISOM_BOX_TYPE_DAMR:
case GF_ISOM_BOX_TYPE_DEVC:
case GF_ISOM_BOX_TYPE_DQCP:
case GF_ISOM_BOX_TYPE_DSMV:
ptr->cfg_3gpp = (GF_3GPPConfigBox *) a;
/*for 3GP config, remember sample entry type in config*/
ptr->cfg_3gpp->cfg.type = ptr->type;
ptr->is_qtff = 0;
break;
case GF_ISOM_BOX_TYPE_DOPS:
ptr->cfg_opus = (GF_OpusSpecificBox *)a;
ptr->is_qtff = 0;
break;
case GF_ISOM_BOX_TYPE_DAC3:
ptr->cfg_ac3 = (GF_AC3ConfigBox *) a;
ptr->is_qtff = 0;
break;
case GF_ISOM_BOX_TYPE_DEC3:
ptr->cfg_ac3 = (GF_AC3ConfigBox *) a;
ptr->is_qtff = 0;
break;
case GF_ISOM_BOX_TYPE_MHA1:
case GF_ISOM_BOX_TYPE_MHA2:
case GF_ISOM_BOX_TYPE_MHM1:
case GF_ISOM_BOX_TYPE_MHM2:
ptr->cfg_mha = (GF_MHAConfigBox *) a;
ptr->is_qtff = 0;
break;
case GF_ISOM_BOX_TYPE_UNKNOWN:
wave = (GF_UnknownBox *)a;
/*HACK for QT files: get the esds box from the track*/
if (s->type == GF_ISOM_BOX_TYPE_MP4A) {
if (ptr->esd) ERROR_ON_DUPLICATED_BOX(a, ptr)
//wave subboxes may have been properly parsed
if ((wave->original_4cc == GF_QT_BOX_TYPE_WAVE) && gf_list_count(wave->other_boxes)) {
u32 i;
for (i =0; i<gf_list_count(wave->other_boxes); i++) {
GF_Box *inner_box = (GF_Box *)gf_list_get(wave->other_boxes, i);
if (inner_box->type == GF_ISOM_BOX_TYPE_ESDS) {
ptr->esd = (GF_ESDBox *)inner_box;
if (ptr->is_qtff & 1<<16) {
gf_list_rem(a->other_boxes, i);
drop_wave=GF_TRUE;
}
}
}
if (drop_wave) {
gf_isom_box_del(a);
ptr->is_qtff = 0;
return GF_OK;
}
ptr->is_qtff = 2;
return gf_isom_box_add_default(s, a);
}
gf_isom_box_del(a);
return GF_ISOM_INVALID_MEDIA;
}
ptr->is_qtff &= ~(1<<16);
if ((wave->original_4cc == GF_QT_BOX_TYPE_WAVE) && gf_list_count(wave->other_boxes)) {
ptr->is_qtff = 2;
}
return gf_isom_box_add_default(s, a);
case GF_QT_BOX_TYPE_WAVE:
{
u32 subtype = 0;
GF_Box **cfg_ptr = NULL;
if (s->type == GF_ISOM_BOX_TYPE_MP4A) {
cfg_ptr = (GF_Box **) &ptr->esd;
subtype = GF_ISOM_BOX_TYPE_ESDS;
}
else if (s->type == GF_ISOM_BOX_TYPE_AC3) {
cfg_ptr = (GF_Box **) &ptr->cfg_ac3;
subtype = GF_ISOM_BOX_TYPE_DAC3;
}
else if (s->type == GF_ISOM_BOX_TYPE_EC3) {
cfg_ptr = (GF_Box **) &ptr->cfg_ac3;
subtype = GF_ISOM_BOX_TYPE_DEC3;
}
else if (s->type == GF_ISOM_BOX_TYPE_OPUS) {
cfg_ptr = (GF_Box **) &ptr->cfg_opus;
subtype = GF_ISOM_BOX_TYPE_DOPS;
}
else if ((s->type == GF_ISOM_BOX_TYPE_MHA1) || (s->type == GF_ISOM_BOX_TYPE_MHA2)) {
cfg_ptr = (GF_Box **) &ptr->cfg_mha;
subtype = GF_ISOM_BOX_TYPE_MHAC;
}
if (cfg_ptr) {
if (*cfg_ptr) ERROR_ON_DUPLICATED_BOX(a, ptr)
//wave subboxes may have been properly parsed
if (gf_list_count(a->other_boxes)) {
u32 i;
for (i =0; i<gf_list_count(a->other_boxes); i++) {
GF_Box *inner_box = (GF_Box *)gf_list_get(a->other_boxes, i);
if (inner_box->type == subtype) {
*cfg_ptr = inner_box;
if (ptr->is_qtff & 1<<16) {
gf_list_rem(a->other_boxes, i);
drop_wave=GF_TRUE;
}
break;
}
}
if (drop_wave) {
gf_isom_box_del(a);
ptr->is_qtff = 0;
return GF_OK;
}
ptr->is_qtff = 2;
return gf_isom_box_add_default(s, a);
}
}
}
ptr->is_qtff = 2;
return gf_isom_box_add_default(s, a);
default:
return gf_isom_box_add_default(s, a);
}
return GF_OK;
}
GF_Err audio_sample_entry_Read(GF_Box *s, GF_BitStream *bs)
{
GF_MPEGAudioSampleEntryBox *ptr;
char *data;
u8 a, b, c, d;
u32 i, size, v, nb_alnum;
GF_Err e;
u64 pos, start;
ptr = (GF_MPEGAudioSampleEntryBox *)s;
start = gf_bs_get_position(bs);
gf_bs_seek(bs, start + 8);
v = gf_bs_read_u16(bs);
if (v)
ptr->is_qtff = 1;
//try to disambiguate QTFF v1 and MP4 v1 audio sample entries ...
if (v==1) {
//go to the end of the ISOM audio sample entry, skip 4 bytes (box size field), read 4 bytes (box type) and check whether this looks like a box
gf_bs_seek(bs, start + 8 + 20 + 4);
a = gf_bs_read_u8(bs);
b = gf_bs_read_u8(bs);
c = gf_bs_read_u8(bs);
d = gf_bs_read_u8(bs);
nb_alnum = 0;
if (isalnum(a)) nb_alnum++;
if (isalnum(b)) nb_alnum++;
if (isalnum(c)) nb_alnum++;
if (isalnum(d)) nb_alnum++;
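//three or more printable characters at the first possible child-box position suggest an ISOBMFF child box, hence an MP4 (not QTFF v1) entry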
if (nb_alnum>2) ptr->is_qtff = 0;
}
gf_bs_seek(bs, start);
e = gf_isom_audio_sample_entry_read((GF_AudioSampleEntryBox*)s, bs);
if (e) return e;
pos = gf_bs_get_position(bs);
size = (u32) s->size;
//when cookie is set on bs, always convert qtff-style mp4a to isobmff-style
//since the conversion is done in addBox and we don't have the bitstream there (arg...), flag the box
if (gf_bs_get_cookie(bs)) {
ptr->is_qtff |= 1<<16;
}
e = gf_isom_box_array_read(s, bs, audio_sample_entry_AddBox);
if (!e) return GF_OK;
if (size<8) return GF_ISOM_INVALID_FILE;
/*hack for some weird files (possibly recorded with live.com tools, needs further investigation)*/
gf_bs_seek(bs, pos);
data = (char*)gf_malloc(sizeof(char) * size);
if (!data) return GF_OUT_OF_MEM;
gf_bs_read_data(bs, data, size);
for (i=0; i<size-8; i++) {
if (GF_4CC((u32)data[i+4], (u8)data[i+5], (u8)data[i+6], (u8)data[i+7]) == GF_ISOM_BOX_TYPE_ESDS) {
extern Bool use_dump_mode;
GF_BitStream *mybs = gf_bs_new(data + i, size - i, GF_BITSTREAM_READ);
if (ptr->esd) {
if (!use_dump_mode) gf_isom_box_del((GF_Box *)ptr->esd);
ptr->esd=NULL;
}
e = gf_isom_box_parse((GF_Box **)&ptr->esd, mybs);
if (e==GF_OK) {
gf_isom_box_add_for_dump_mode((GF_Box*)ptr, (GF_Box*)ptr->esd);
} else if (ptr->esd) {
gf_isom_box_del((GF_Box *)ptr->esd);
ptr->esd=NULL;
}
gf_bs_del(mybs);
break;
}
}
gf_free(data);
return e;
}
GF_Box *audio_sample_entry_New()
{
ISOM_DECL_BOX_ALLOC(GF_MPEGAudioSampleEntryBox, GF_ISOM_BOX_TYPE_MP4A);
gf_isom_audio_sample_entry_init((GF_AudioSampleEntryBox*)tmp);
return (GF_Box *)tmp;
}
GF_Box *enca_New()
{
ISOM_DECL_BOX_ALLOC(GF_MPEGAudioSampleEntryBox, GF_ISOM_BOX_TYPE_ENCA);
gf_isom_audio_sample_entry_init((GF_AudioSampleEntryBox*)tmp);
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err audio_sample_entry_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_MPEGAudioSampleEntryBox *ptr = (GF_MPEGAudioSampleEntryBox *)s;
e = gf_isom_box_write_header(s, bs);
if (e) return e;
gf_isom_audio_sample_entry_write((GF_AudioSampleEntryBox*)s, bs);
if (ptr->is_qtff)
return gf_isom_box_array_write(s, ptr->protections, bs);
if (ptr->esd) {
e = gf_isom_box_write((GF_Box *)ptr->esd, bs);
if (e) return e;
}
if (ptr->cfg_3gpp) {
e = gf_isom_box_write((GF_Box *)ptr->cfg_3gpp, bs);
if (e) return e;
}
if (ptr->cfg_opus) {
e = gf_isom_box_write((GF_Box *)ptr->cfg_opus, bs);
if (e) return e;
}
if (ptr->cfg_ac3) {
e = gf_isom_box_write((GF_Box *)ptr->cfg_ac3, bs);
if (e) return e;
}
return gf_isom_box_array_write(s, ptr->protections, bs);
}
GF_Err audio_sample_entry_Size(GF_Box *s)
{
GF_Err e;
GF_MPEGAudioSampleEntryBox *ptr = (GF_MPEGAudioSampleEntryBox *)s;
gf_isom_audio_sample_entry_size((GF_AudioSampleEntryBox*)s);
if (ptr->is_qtff)
return gf_isom_box_array_size(s, ptr->protections);
if (ptr->esd) {
e = gf_isom_box_size((GF_Box *)ptr->esd);
if (e) return e;
ptr->size += ptr->esd->size;
}
if (ptr->cfg_3gpp) {
e = gf_isom_box_size((GF_Box *)ptr->cfg_3gpp);
if (e) return e;
ptr->size += ptr->cfg_3gpp->size;
}
if (ptr->cfg_opus) {
e = gf_isom_box_size((GF_Box *)ptr->cfg_opus);
if (e) return e;
ptr->size += ptr->cfg_opus->size;
}
if (ptr->cfg_ac3) {
e = gf_isom_box_size((GF_Box *)ptr->cfg_ac3);
if (e) return e;
ptr->size += ptr->cfg_ac3->size;
}
return gf_isom_box_array_size(s, ptr->protections);
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
void gen_sample_entry_del(GF_Box *s)
{
GF_SampleEntryBox *ptr = (GF_SampleEntryBox *)s;
if (ptr == NULL) return;
gf_isom_sample_entry_predestroy((GF_SampleEntryBox *)s);
gf_free(ptr);
}
GF_Err gen_sample_entry_Read(GF_Box *s, GF_BitStream *bs)
{
GF_Err e = gf_isom_base_sample_entry_read((GF_SampleEntryBox *)s, bs);
if (e) return e;
ISOM_DECREASE_SIZE(s, 8);
return gf_isom_box_array_read(s, bs, gf_isom_box_add_default);
}
GF_Box *gen_sample_entry_New()
{
ISOM_DECL_BOX_ALLOC(GF_SampleEntryBox, GF_QT_BOX_TYPE_C608);//type will be overwritten
gf_isom_sample_entry_init((GF_SampleEntryBox*)tmp);
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err gen_sample_entry_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_SampleEntryBox *ptr = (GF_SampleEntryBox *)s;
e = gf_isom_box_write_header(s, bs);
if (e) return e;
gf_bs_write_data(bs, ptr->reserved, 6);
gf_bs_write_u16(bs, ptr->dataReferenceIndex);
return gf_isom_box_array_write(s, ptr->protections, bs);
}
GF_Err gen_sample_entry_Size(GF_Box *s)
{
GF_SampleEntryBox *ptr = (GF_SampleEntryBox *)s;
ptr->size += 8;
return gf_isom_box_array_size(s, ptr->protections);
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
void mp4s_del(GF_Box *s)
{
GF_MPEGSampleEntryBox *ptr = (GF_MPEGSampleEntryBox *)s;
if (ptr == NULL) return;
gf_isom_sample_entry_predestroy((GF_SampleEntryBox *)s);
if (ptr->esd) gf_isom_box_del((GF_Box *)ptr->esd);
if (ptr->slc) gf_odf_desc_del((GF_Descriptor *)ptr->slc);
gf_free(ptr);
}
GF_Err mp4s_AddBox(GF_Box *s, GF_Box *a)
{
GF_MPEGSampleEntryBox *ptr = (GF_MPEGSampleEntryBox *)s;
switch (a->type) {
case GF_ISOM_BOX_TYPE_ESDS:
if (ptr->esd) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->esd = (GF_ESDBox *)a;
break;
case GF_ISOM_BOX_TYPE_SINF:
gf_list_add(ptr->protections, a);
break;
default:
return gf_isom_box_add_default(s, a);
}
return GF_OK;
}
GF_Err mp4s_Read(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_MPEGSampleEntryBox *ptr = (GF_MPEGSampleEntryBox *)s;
e = gf_isom_base_sample_entry_read((GF_SampleEntryBox *)ptr, bs);
if (e) return e;
ISOM_DECREASE_SIZE(ptr, 8);
return gf_isom_box_array_read(s, bs, mp4s_AddBox);
}
GF_Box *mp4s_New()
{
ISOM_DECL_BOX_ALLOC(GF_MPEGSampleEntryBox, GF_ISOM_BOX_TYPE_MP4S);
gf_isom_sample_entry_init((GF_SampleEntryBox*)tmp);
return (GF_Box *)tmp;
}
GF_Box *encs_New()
{
ISOM_DECL_BOX_ALLOC(GF_MPEGSampleEntryBox, GF_ISOM_BOX_TYPE_ENCS);
gf_isom_sample_entry_init((GF_SampleEntryBox*)tmp);
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err mp4s_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_MPEGSampleEntryBox *ptr = (GF_MPEGSampleEntryBox *)s;
e = gf_isom_box_write_header(s, bs);
if (e) return e;
gf_bs_write_data(bs, ptr->reserved, 6);
gf_bs_write_u16(bs, ptr->dataReferenceIndex);
e = gf_isom_box_write((GF_Box *)ptr->esd, bs);
if (e) return e;
return gf_isom_box_array_write(s, ptr->protections, bs);
}
GF_Err mp4s_Size(GF_Box *s)
{
GF_Err e;
GF_MPEGSampleEntryBox *ptr = (GF_MPEGSampleEntryBox *)s;
ptr->size += 8;
e = gf_isom_box_size((GF_Box *)ptr->esd);
if (e) return e;
ptr->size += ptr->esd->size;
return gf_isom_box_array_size(s, ptr->protections);
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
void video_sample_entry_del(GF_Box *s)
{
GF_MPEGVisualSampleEntryBox *ptr = (GF_MPEGVisualSampleEntryBox *)s;
if (ptr == NULL) return;
gf_isom_sample_entry_predestroy((GF_SampleEntryBox *)s);
if (ptr->esd) gf_isom_box_del((GF_Box *)ptr->esd);
if (ptr->slc) gf_odf_desc_del((GF_Descriptor *)ptr->slc);
/*for publishing*/
if (ptr->emul_esd) gf_odf_desc_del((GF_Descriptor *)ptr->emul_esd);
if (ptr->avc_config) gf_isom_box_del((GF_Box *) ptr->avc_config);
if (ptr->svc_config) gf_isom_box_del((GF_Box *) ptr->svc_config);
if (ptr->mvc_config) gf_isom_box_del((GF_Box *) ptr->mvc_config);
if (ptr->hevc_config) gf_isom_box_del((GF_Box *) ptr->hevc_config);
if (ptr->lhvc_config) gf_isom_box_del((GF_Box *) ptr->lhvc_config);
if (ptr->av1_config) gf_isom_box_del((GF_Box *)ptr->av1_config);
if (ptr->vp_config) gf_isom_box_del((GF_Box *)ptr->vp_config);
if (ptr->cfg_3gpp) gf_isom_box_del((GF_Box *)ptr->cfg_3gpp);
if (ptr->descr) gf_isom_box_del((GF_Box *) ptr->descr);
if (ptr->ipod_ext) gf_isom_box_del((GF_Box *)ptr->ipod_ext);
if (ptr->pasp) gf_isom_box_del((GF_Box *)ptr->pasp);
if (ptr->clap) gf_isom_box_del((GF_Box *)ptr->clap);
if (ptr->colr) gf_isom_box_del((GF_Box*)ptr->colr);
if (ptr->mdcv) gf_isom_box_del((GF_Box*)ptr->mdcv);
if (ptr->clli) gf_isom_box_del((GF_Box*)ptr->clli);
if (ptr->rinf) gf_isom_box_del((GF_Box *)ptr->rinf);
if (ptr->ccst) gf_isom_box_del((GF_Box *)ptr->ccst);
if (ptr->rvcc) gf_isom_box_del((GF_Box *)ptr->rvcc);
if (ptr->auxi) gf_isom_box_del((GF_Box *)ptr->auxi);
gf_free(ptr);
}
GF_Err video_sample_entry_AddBox(GF_Box *s, GF_Box *a)
{
GF_MPEGVisualSampleEntryBox *ptr = (GF_MPEGVisualSampleEntryBox *)s;
switch (a->type) {
case GF_ISOM_BOX_TYPE_ESDS:
if (ptr->esd) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->esd = (GF_ESDBox *)a;
break;
case GF_ISOM_BOX_TYPE_SINF:
gf_list_add(ptr->protections, a);
break;
case GF_ISOM_BOX_TYPE_RINF:
if (ptr->rinf) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->rinf = (GF_RestrictedSchemeInfoBox *) a;
break;
case GF_ISOM_BOX_TYPE_AVCC:
if (ptr->avc_config) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->avc_config = (GF_AVCConfigurationBox *)a;
break;
case GF_ISOM_BOX_TYPE_HVCC:
if (ptr->hevc_config) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->hevc_config = (GF_HEVCConfigurationBox *)a;
break;
case GF_ISOM_BOX_TYPE_SVCC:
if (ptr->svc_config) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->svc_config = (GF_AVCConfigurationBox *)a;
break;
case GF_ISOM_BOX_TYPE_MVCC:
if (ptr->mvc_config) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->mvc_config = (GF_AVCConfigurationBox *)a;
break;
case GF_ISOM_BOX_TYPE_LHVC:
if (ptr->lhvc_config) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->lhvc_config = (GF_HEVCConfigurationBox *)a;
break;
case GF_ISOM_BOX_TYPE_AV1C:
if (ptr->av1_config) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->av1_config = (GF_AV1ConfigurationBox *)a;
break;
case GF_ISOM_BOX_TYPE_VPCC:
if (ptr->vp_config) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->vp_config = (GF_VPConfigurationBox *)a;
break;
case GF_ISOM_BOX_TYPE_M4DS:
if (ptr->descr) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->descr = (GF_MPEG4ExtensionDescriptorsBox *)a;
break;
case GF_ISOM_BOX_TYPE_UUID:
if (! memcmp(((GF_UnknownUUIDBox*)a)->uuid, GF_ISOM_IPOD_EXT, 16)) {
if (ptr->ipod_ext) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->ipod_ext = (GF_UnknownUUIDBox *)a;
} else {
return gf_isom_box_add_default(s, a);
}
break;
case GF_ISOM_BOX_TYPE_D263:
ptr->cfg_3gpp = (GF_3GPPConfigBox *)a;
/*for 3GP config, remember sample entry type in config*/
ptr->cfg_3gpp->cfg.type = ptr->type;
break;
case GF_ISOM_BOX_TYPE_PASP:
if (ptr->pasp) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->pasp = (GF_PixelAspectRatioBox *)a;
break;
case GF_ISOM_BOX_TYPE_CLAP:
if (ptr->clap) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->clap = (GF_CleanApertureBox *)a;
break;
case GF_ISOM_BOX_TYPE_COLR:
if (ptr->colr) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->colr = (GF_ColourInformationBox*)a;
break;
case GF_ISOM_BOX_TYPE_MDCV:
if (ptr->mdcv) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->mdcv = (GF_MasteringDisplayColourVolumeBox*)a;
break;
case GF_ISOM_BOX_TYPE_CLLI:
if (ptr->clli) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->clli = (GF_ContentLightLevelBox*)a;
break;
case GF_ISOM_BOX_TYPE_CCST:
if (ptr->ccst) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->ccst = (GF_CodingConstraintsBox *)a;
break;
case GF_ISOM_BOX_TYPE_AUXI:
if (ptr->auxi) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->auxi = (GF_AuxiliaryTypeInfoBox *)a;
break;
case GF_ISOM_BOX_TYPE_RVCC:
if (ptr->rvcc) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->rvcc = (GF_RVCConfigurationBox *)a;
break;
default:
return gf_isom_box_add_default(s, a);
}
return GF_OK;
}
GF_Err video_sample_entry_Read(GF_Box *s, GF_BitStream *bs)
{
GF_MPEGVisualSampleEntryBox *mp4v = (GF_MPEGVisualSampleEntryBox*)s;
GF_Err e;
e = gf_isom_video_sample_entry_read((GF_VisualSampleEntryBox *)s, bs);
if (e) return e;
e = gf_isom_box_array_read(s, bs, video_sample_entry_AddBox);
if (e) return e;
/*this is an AVC sample desc*/
if (mp4v->avc_config || mp4v->svc_config || mp4v->mvc_config) AVC_RewriteESDescriptor(mp4v);
/*this is an HEVC sample desc*/
if (mp4v->hevc_config || mp4v->lhvc_config || (mp4v->type==GF_ISOM_BOX_TYPE_HVT1))
HEVC_RewriteESDescriptor(mp4v);
/*this is an AV1 sample desc*/
if (mp4v->av1_config)
AV1_RewriteESDescriptor(mp4v);
return GF_OK;
}
GF_Box *video_sample_entry_New()
{
GF_MPEGVisualSampleEntryBox *tmp;
GF_SAFEALLOC(tmp, GF_MPEGVisualSampleEntryBox);
if (tmp == NULL) return NULL;
gf_isom_video_sample_entry_init((GF_VisualSampleEntryBox *)tmp);
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err video_sample_entry_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_MPEGVisualSampleEntryBox *ptr = (GF_MPEGVisualSampleEntryBox *)s;
e = gf_isom_box_write_header(s, bs);
if (e) return e;
gf_isom_video_sample_entry_write((GF_VisualSampleEntryBox *)s, bs);
/*mp4v*/
if (ptr->esd) {
e = gf_isom_box_write((GF_Box *)ptr->esd, bs);
if (e) return e;
}
	/*3GPP config (h263)*/
else if (ptr->cfg_3gpp) {
e = gf_isom_box_write((GF_Box *)ptr->cfg_3gpp, bs);
if (e) return e;
}
/*avc or hevc or av1*/
else {
if (ptr->avc_config && ptr->avc_config->config) {
e = gf_isom_box_write((GF_Box *) ptr->avc_config, bs);
if (e) return e;
}
if (ptr->hevc_config && ptr->hevc_config->config) {
e = gf_isom_box_write((GF_Box *) ptr->hevc_config, bs);
if (e) return e;
}
if (ptr->ipod_ext) {
e = gf_isom_box_write((GF_Box *) ptr->ipod_ext, bs);
if (e) return e;
}
if (ptr->descr) {
e = gf_isom_box_write((GF_Box *) ptr->descr, bs);
if (e) return e;
}
if (ptr->svc_config && ptr->svc_config->config) {
e = gf_isom_box_write((GF_Box *) ptr->svc_config, bs);
if (e) return e;
}
if (ptr->mvc_config && ptr->mvc_config->config) {
e = gf_isom_box_write((GF_Box *) ptr->mvc_config, bs);
if (e) return e;
}
if (ptr->lhvc_config && ptr->lhvc_config->config) {
e = gf_isom_box_write((GF_Box *) ptr->lhvc_config, bs);
if (e) return e;
}
if (ptr->av1_config && ptr->av1_config->config) {
e = gf_isom_box_write((GF_Box *)ptr->av1_config, bs);
if (e) return e;
}
if (ptr->vp_config && ptr->vp_config->config) {
e = gf_isom_box_write((GF_Box *)ptr->vp_config, bs);
if (e) return e;
}
}
if (ptr->clap) {
e = gf_isom_box_write((GF_Box*)ptr->clap, bs);
if (e) return e;
}
if (ptr->pasp) {
e = gf_isom_box_write((GF_Box *)ptr->pasp, bs);
if (e) return e;
}
if (ptr->colr) {
e = gf_isom_box_write((GF_Box*)ptr->colr, bs);
if (e) return e;
}
if (ptr->mdcv) {
e = gf_isom_box_write((GF_Box*)ptr->mdcv, bs);
if (e) return e;
}
if (ptr->clli) {
e = gf_isom_box_write((GF_Box*)ptr->clli, bs);
if (e) return e;
}
if (ptr->ccst) {
e = gf_isom_box_write((GF_Box *)ptr->ccst, bs);
if (e) return e;
}
if (ptr->auxi) {
e = gf_isom_box_write((GF_Box *)ptr->auxi, bs);
if (e) return e;
}
if (ptr->rvcc) {
e = gf_isom_box_write((GF_Box *)ptr->rvcc, bs);
if (e) return e;
}
if (ptr->rinf) {
e = gf_isom_box_write((GF_Box *)ptr->rinf, bs);
if (e) return e;
}
return gf_isom_box_array_write(s, ptr->protections, bs);
}
GF_Err video_sample_entry_Size(GF_Box *s)
{
GF_Err e;
GF_MPEGVisualSampleEntryBox *ptr = (GF_MPEGVisualSampleEntryBox *)s;
gf_isom_video_sample_entry_size((GF_VisualSampleEntryBox *)s);
if (ptr->esd) {
e = gf_isom_box_size((GF_Box *)ptr->esd);
if (e) return e;
ptr->size += ptr->esd->size;
} else if (ptr->cfg_3gpp) {
e = gf_isom_box_size((GF_Box *)ptr->cfg_3gpp);
if (e) return e;
ptr->size += ptr->cfg_3gpp->size;
} else {
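		/*the sample entry types below require their decoder configuration box;
		refuse to write an invalid file when it is missing*/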
switch (ptr->type) {
case GF_ISOM_BOX_TYPE_AVC1:
case GF_ISOM_BOX_TYPE_AVC2:
case GF_ISOM_BOX_TYPE_AVC3:
case GF_ISOM_BOX_TYPE_AVC4:
case GF_ISOM_BOX_TYPE_SVC1:
case GF_ISOM_BOX_TYPE_SVC2:
case GF_ISOM_BOX_TYPE_MVC1:
case GF_ISOM_BOX_TYPE_MVC2:
if (!ptr->avc_config && !ptr->svc_config && !ptr->mvc_config)
return GF_ISOM_INVALID_FILE;
break;
case GF_ISOM_BOX_TYPE_VP08:
case GF_ISOM_BOX_TYPE_VP09:
if (!ptr->vp_config) {
return GF_ISOM_INVALID_FILE;
}
break;
case GF_ISOM_BOX_TYPE_AV01:
if (!ptr->av1_config) {
return GF_ISOM_INVALID_FILE;
}
break;
case GF_ISOM_BOX_TYPE_HVC1:
case GF_ISOM_BOX_TYPE_HEV1:
case GF_ISOM_BOX_TYPE_HVC2:
case GF_ISOM_BOX_TYPE_HEV2:
case GF_ISOM_BOX_TYPE_LHV1:
case GF_ISOM_BOX_TYPE_LHE1:
//commented on purpose, HVT1 tracks have no config associated
// case GF_ISOM_BOX_TYPE_HVT1:
// case GF_ISOM_BOX_TYPE_HVT2:
if (!ptr->hevc_config && !ptr->lhvc_config) {
return GF_ISOM_INVALID_FILE;
}
break;
default:
break;
}
if (ptr->hevc_config && ptr->hevc_config->config) {
e = gf_isom_box_size((GF_Box *)ptr->hevc_config);
if (e) return e;
ptr->size += ptr->hevc_config->size;
}
if (ptr->avc_config && ptr->avc_config->config) {
e = gf_isom_box_size((GF_Box *) ptr->avc_config);
if (e) return e;
ptr->size += ptr->avc_config->size;
}
if (ptr->svc_config && ptr->svc_config->config) {
e = gf_isom_box_size((GF_Box *) ptr->svc_config);
if (e) return e;
ptr->size += ptr->svc_config->size;
}
if (ptr->mvc_config && ptr->mvc_config->config) {
e = gf_isom_box_size((GF_Box *) ptr->mvc_config);
if (e) return e;
ptr->size += ptr->mvc_config->size;
}
if (ptr->lhvc_config && ptr->lhvc_config->config) {
e = gf_isom_box_size((GF_Box *) ptr->lhvc_config);
if (e) return e;
ptr->size += ptr->lhvc_config->size;
}
if (ptr->av1_config && ptr->av1_config->config) {
e = gf_isom_box_size((GF_Box *)ptr->av1_config);
if (e) return e;
ptr->size += ptr->av1_config->size;
}
if (ptr->vp_config && ptr->vp_config->config) {
e = gf_isom_box_size((GF_Box *)ptr->vp_config);
if (e) return e;
ptr->size += ptr->vp_config->size;
}
if (ptr->ipod_ext) {
e = gf_isom_box_size((GF_Box *) ptr->ipod_ext);
if (e) return e;
ptr->size += ptr->ipod_ext->size;
}
if (ptr->descr) {
e = gf_isom_box_size((GF_Box *) ptr->descr);
if (e) return e;
ptr->size += ptr->descr->size;
}
}
if (ptr->pasp) {
e = gf_isom_box_size((GF_Box *)ptr->pasp);
if (e) return e;
ptr->size += ptr->pasp->size;
}
if (ptr->colr) {
e = gf_isom_box_size((GF_Box*)ptr->colr);
if (e) return e;
ptr->size += ptr->colr->size;
}
if (ptr->mdcv) {
e = gf_isom_box_size((GF_Box*)ptr->mdcv);
if (e) return e;
ptr->size += ptr->mdcv->size;
}
if (ptr->clli) {
e = gf_isom_box_size((GF_Box*)ptr->clli);
if (e) return e;
ptr->size += ptr->clli->size;
}
if (ptr->clap) {
e = gf_isom_box_size((GF_Box *)ptr->clap);
if (e) return e;
ptr->size += ptr->clap->size;
}
if (ptr->ccst) {
e = gf_isom_box_size((GF_Box *)ptr->ccst);
if (e) return e;
ptr->size += ptr->ccst->size;
}
if (ptr->auxi) {
e = gf_isom_box_size((GF_Box *)ptr->auxi);
if (e) return e;
ptr->size += ptr->auxi->size;
}
if (ptr->rvcc) {
e = gf_isom_box_size((GF_Box *)ptr->rvcc);
if (e) return e;
ptr->size += ptr->rvcc->size;
}
if (ptr->rinf) {
e = gf_isom_box_size((GF_Box *)ptr->rinf);
if (e) return e;
ptr->size += ptr->rinf->size;
}
return gf_isom_box_array_size(s, ptr->protections);
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
#ifndef GPAC_DISABLE_ISOM_FRAGMENTS
void mvex_del(GF_Box *s)
{
GF_MovieExtendsBox *ptr = (GF_MovieExtendsBox *)s;
if (ptr == NULL) return;
if (ptr->mehd) gf_isom_box_del((GF_Box*)ptr->mehd);
gf_isom_box_array_del(ptr->TrackExList);
gf_isom_box_array_del(ptr->TrackExPropList);
ptr->mehd = NULL;
ptr->TrackExList = NULL;
ptr->TrackExPropList = NULL;
gf_free(ptr);
}
GF_Err mvex_AddBox(GF_Box *s, GF_Box *a)
{
GF_MovieExtendsBox *ptr = (GF_MovieExtendsBox *)s;
switch (a->type) {
case GF_ISOM_BOX_TYPE_TREX:
return gf_list_add(ptr->TrackExList, a);
case GF_ISOM_BOX_TYPE_TREP:
return gf_list_add(ptr->TrackExPropList, a);
case GF_ISOM_BOX_TYPE_MEHD:
if (ptr->mehd) break;
ptr->mehd = (GF_MovieExtendsHeaderBox*)a;
return GF_OK;
default:
return gf_isom_box_add_default(s, a);
}
return GF_OK;
}
GF_Err mvex_Read(GF_Box *s, GF_BitStream *bs)
{
return gf_isom_box_array_read(s, bs, mvex_AddBox);
}
GF_Box *mvex_New()
{
ISOM_DECL_BOX_ALLOC(GF_MovieExtendsBox, GF_ISOM_BOX_TYPE_MVEX);
tmp->TrackExList = gf_list_new();
if (!tmp->TrackExList) {
gf_free(tmp);
return NULL;
}
tmp->TrackExPropList = gf_list_new();
if (!tmp->TrackExPropList) {
gf_list_del(tmp->TrackExList);
gf_free(tmp);
return NULL;
}
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err mvex_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_MovieExtendsBox *ptr = (GF_MovieExtendsBox *) s;
if (!s) return GF_BAD_PARAM;
e = gf_isom_box_write_header(s, bs);
if (e) return e;
if (ptr->mehd) {
e = gf_isom_box_write((GF_Box *)ptr->mehd, bs);
if (e) return e;
}
e = gf_isom_box_array_write(s, ptr->TrackExList, bs);
if (e) return e;
return gf_isom_box_array_write(s, ptr->TrackExPropList, bs);
}
GF_Err mvex_Size(GF_Box *s)
{
GF_Err e;
GF_MovieExtendsBox *ptr = (GF_MovieExtendsBox *)s;
if (ptr->mehd) {
e = gf_isom_box_size((GF_Box *)ptr->mehd);
if (e) return e;
ptr->size += ptr->mehd->size;
}
e = gf_isom_box_array_size(s, ptr->TrackExList);
if (e) return e;
return gf_isom_box_array_size(s, ptr->TrackExPropList);
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
GF_Box *mehd_New()
{
ISOM_DECL_BOX_ALLOC(GF_MovieExtendsHeaderBox, GF_ISOM_BOX_TYPE_MEHD);
return (GF_Box *)tmp;
}
void mehd_del(GF_Box *s)
{
gf_free(s);
}
GF_Err mehd_Read(GF_Box *s, GF_BitStream *bs)
{
GF_MovieExtendsHeaderBox *ptr = (GF_MovieExtendsHeaderBox *)s;
if (ptr->version==1) {
ptr->fragment_duration = gf_bs_read_u64(bs);
} else {
ptr->fragment_duration = (u64) gf_bs_read_u32(bs);
}
return GF_OK;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err mehd_Write(GF_Box *s, GF_BitStream *bs)
{
GF_MovieExtendsHeaderBox *ptr = (GF_MovieExtendsHeaderBox *)s;
GF_Err e = gf_isom_full_box_write(s, bs);
if (e) return e;
if (ptr->version == 1) {
gf_bs_write_u64(bs, ptr->fragment_duration);
} else {
gf_bs_write_u32(bs, (u32) ptr->fragment_duration);
}
return GF_OK;
}
GF_Err mehd_Size(GF_Box *s)
{
GF_MovieExtendsHeaderBox *ptr = (GF_MovieExtendsHeaderBox *)s;
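	/*switch to the 64-bit (version 1) layout only when the duration overflows 32 bits*/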
ptr->version = (ptr->fragment_duration>0xFFFFFFFF) ? 1 : 0;
s->size += (ptr->version == 1) ? 8 : 4;
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
#endif /*GPAC_DISABLE_ISOM_FRAGMENTS*/
void mvhd_del(GF_Box *s)
{
GF_MovieHeaderBox *ptr = (GF_MovieHeaderBox *)s;
if (ptr == NULL) return;
gf_free(ptr);
}
GF_Err mvhd_Read(GF_Box *s, GF_BitStream *bs)
{
GF_MovieHeaderBox *ptr = (GF_MovieHeaderBox *)s;
if (ptr == NULL) return GF_BAD_PARAM;
if (ptr->version == 1) {
ptr->creationTime = gf_bs_read_u64(bs);
ptr->modificationTime = gf_bs_read_u64(bs);
ptr->timeScale = gf_bs_read_u32(bs);
ptr->duration = gf_bs_read_u64(bs);
} else {
ptr->creationTime = gf_bs_read_u32(bs);
ptr->modificationTime = gf_bs_read_u32(bs);
ptr->timeScale = gf_bs_read_u32(bs);
ptr->duration = gf_bs_read_u32(bs);
}
if (!ptr->timeScale) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] Movie header timescale is invalid (0) - defaulting to 600\n" ));
ptr->timeScale = 600;
}
ptr->preferredRate = gf_bs_read_u32(bs);
ptr->preferredVolume = gf_bs_read_u16(bs);
gf_bs_read_data(bs, ptr->reserved, 10);
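	/*3x3 transform stored row-major as a,b,u / c,d,v / x,y,w;
	a,b,c,d,x,y are 16.16 fixed-point, u,v,w are 2.30 (see the identity defaults in mvhd_New)*/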
ptr->matrixA = gf_bs_read_u32(bs);
ptr->matrixB = gf_bs_read_u32(bs);
ptr->matrixU = gf_bs_read_u32(bs);
ptr->matrixC = gf_bs_read_u32(bs);
ptr->matrixD = gf_bs_read_u32(bs);
ptr->matrixV = gf_bs_read_u32(bs);
ptr->matrixX = gf_bs_read_u32(bs);
ptr->matrixY = gf_bs_read_u32(bs);
ptr->matrixW = gf_bs_read_u32(bs);
ptr->previewTime = gf_bs_read_u32(bs);
ptr->previewDuration = gf_bs_read_u32(bs);
ptr->posterTime = gf_bs_read_u32(bs);
ptr->selectionTime = gf_bs_read_u32(bs);
ptr->selectionDuration = gf_bs_read_u32(bs);
ptr->currentTime = gf_bs_read_u32(bs);
ptr->nextTrackID = gf_bs_read_u32(bs);
ptr->original_duration = ptr->duration;
return GF_OK;
}
GF_Box *mvhd_New()
{
ISOM_DECL_BOX_ALLOC(GF_MovieHeaderBox, GF_ISOM_BOX_TYPE_MVHD);
tmp->preferredRate = (1<<16);
tmp->preferredVolume = (1<<8);
tmp->matrixA = (1<<16);
tmp->matrixD = (1<<16);
tmp->matrixW = (1<<30);
tmp->nextTrackID = 1;
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err mvhd_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_MovieHeaderBox *ptr = (GF_MovieHeaderBox *)s;
e = gf_isom_full_box_write(s, bs);
if (e) return e;
if (ptr->version == 1) {
gf_bs_write_u64(bs, ptr->creationTime);
gf_bs_write_u64(bs, ptr->modificationTime);
gf_bs_write_u32(bs, ptr->timeScale);
gf_bs_write_u64(bs, ptr->duration);
} else {
gf_bs_write_u32(bs, (u32) ptr->creationTime);
gf_bs_write_u32(bs, (u32) ptr->modificationTime);
gf_bs_write_u32(bs, ptr->timeScale);
gf_bs_write_u32(bs, (u32) ptr->duration);
}
gf_bs_write_u32(bs, ptr->preferredRate);
gf_bs_write_u16(bs, ptr->preferredVolume);
gf_bs_write_data(bs, ptr->reserved, 10);
gf_bs_write_u32(bs, ptr->matrixA);
gf_bs_write_u32(bs, ptr->matrixB);
gf_bs_write_u32(bs, ptr->matrixU);
gf_bs_write_u32(bs, ptr->matrixC);
gf_bs_write_u32(bs, ptr->matrixD);
gf_bs_write_u32(bs, ptr->matrixV);
gf_bs_write_u32(bs, ptr->matrixX);
gf_bs_write_u32(bs, ptr->matrixY);
gf_bs_write_u32(bs, ptr->matrixW);
gf_bs_write_u32(bs, ptr->previewTime);
gf_bs_write_u32(bs, ptr->previewDuration);
gf_bs_write_u32(bs, ptr->posterTime);
gf_bs_write_u32(bs, ptr->selectionTime);
gf_bs_write_u32(bs, ptr->selectionDuration);
gf_bs_write_u32(bs, ptr->currentTime);
gf_bs_write_u32(bs, ptr->nextTrackID);
return GF_OK;
}
GF_Err mvhd_Size(GF_Box *s)
{
GF_MovieHeaderBox *ptr = (GF_MovieHeaderBox *)s;
if (ptr->duration==(u64) -1) ptr->version = 0;
else ptr->version = (ptr->duration>0xFFFFFFFF) ? 1 : 0;
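	/*version 1: two 64-bit times + 64-bit duration + 32-bit timescale = 28 bytes,
	version 0: four 32-bit fields = 16 bytes; the fixed 80 bytes cover rate, volume,
	reserved, the matrix and the preview/poster/selection/currentTime/nextTrackID fields*/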
ptr->size += (ptr->version == 1) ? 28 : 16;
ptr->size += 80;
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
void nmhd_del(GF_Box *s)
{
GF_MPEGMediaHeaderBox *ptr = (GF_MPEGMediaHeaderBox *)s;
if (ptr == NULL) return;
gf_free(ptr);
}
GF_Err nmhd_Read(GF_Box *s, GF_BitStream *bs)
{
return GF_OK;
}
GF_Box *nmhd_New()
{
ISOM_DECL_BOX_ALLOC(GF_MPEGMediaHeaderBox, GF_ISOM_BOX_TYPE_NMHD);
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err nmhd_Write(GF_Box *s, GF_BitStream *bs)
{
return gf_isom_full_box_write(s, bs);
}
GF_Err nmhd_Size(GF_Box *s)
{
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
void padb_del(GF_Box *s)
{
GF_PaddingBitsBox *ptr = (GF_PaddingBitsBox *) s;
if (ptr == NULL) return;
if (ptr->padbits) gf_free(ptr->padbits);
gf_free(ptr);
}
GF_Err padb_Read(GF_Box *s,GF_BitStream *bs)
{
u32 i;
GF_PaddingBitsBox *ptr = (GF_PaddingBitsBox *)s;
	ptr->SampleCount = gf_bs_read_u32(bs);
	ISOM_DECREASE_SIZE(ptr, 4);
	/*two samples are packed per payload byte, so validate the announced count
	against the remaining box size before allocating*/
	if (ptr->SampleCount > 2 * ptr->size) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid number of entries %d in padb\n", ptr->SampleCount));
		return GF_ISOM_INVALID_FILE;
	}
	ptr->padbits = (u8 *)gf_malloc(sizeof(u8)*ptr->SampleCount);
	if (!ptr->padbits) return GF_OUT_OF_MEM;
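	/*each payload byte packs two samples as (reserved:1, pad:3)(reserved:1, pad:3);
	the first pad field maps to padbits[i+1] and the second to padbits[i], mirroring padb_Write below*/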
for (i=0; i<ptr->SampleCount; i += 2) {
gf_bs_read_int(bs, 1);
if (i+1 < ptr->SampleCount) {
ptr->padbits[i+1] = gf_bs_read_int(bs, 3);
} else {
gf_bs_read_int(bs, 3);
}
gf_bs_read_int(bs, 1);
ptr->padbits[i] = gf_bs_read_int(bs, 3);
}
return GF_OK;
}
GF_Box *padb_New()
{
ISOM_DECL_BOX_ALLOC(GF_PaddingBitsBox, GF_ISOM_BOX_TYPE_PADB);
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err padb_Write(GF_Box *s, GF_BitStream *bs)
{
u32 i;
GF_Err e;
GF_PaddingBitsBox *ptr = (GF_PaddingBitsBox *) s;
e = gf_isom_full_box_write(s, bs);
if (e) return e;
gf_bs_write_int(bs, ptr->SampleCount, 32);
for (i=0 ; i<ptr->SampleCount; i += 2) {
gf_bs_write_int(bs, 0, 1);
if (i+1 < ptr->SampleCount) {
gf_bs_write_int(bs, ptr->padbits[i+1], 3);
} else {
gf_bs_write_int(bs, 0, 3);
}
gf_bs_write_int(bs, 0, 1);
gf_bs_write_int(bs, ptr->padbits[i], 3);
}
return GF_OK;
}
GF_Err padb_Size(GF_Box *s)
{
GF_PaddingBitsBox *ptr = (GF_PaddingBitsBox *)s;
ptr->size += 4;
if (ptr->SampleCount) ptr->size += (ptr->SampleCount + 1) / 2;
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
void rely_del(GF_Box *s)
{
GF_RelyHintBox *rely = (GF_RelyHintBox *)s;
gf_free(rely);
}
GF_Err rely_Read(GF_Box *s, GF_BitStream *bs)
{
GF_RelyHintBox *ptr = (GF_RelyHintBox *)s;
ptr->reserved = gf_bs_read_int(bs, 6);
ptr->prefered = gf_bs_read_int(bs, 1);
ptr->required = gf_bs_read_int(bs, 1);
return GF_OK;
}
GF_Box *rely_New()
{
ISOM_DECL_BOX_ALLOC(GF_RelyHintBox, GF_ISOM_BOX_TYPE_RELY);
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err rely_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_RelyHintBox *ptr = (GF_RelyHintBox *)s;
if (ptr == NULL) return GF_BAD_PARAM;
e = gf_isom_box_write_header(s, bs);
if (e) return e;
gf_bs_write_int(bs, ptr->reserved, 6);
gf_bs_write_int(bs, ptr->prefered, 1);
gf_bs_write_int(bs, ptr->required, 1);
return GF_OK;
}
GF_Err rely_Size(GF_Box *s)
{
s->size += 1;
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
void rtpo_del(GF_Box *s)
{
GF_RTPOBox *rtpo = (GF_RTPOBox *)s;
gf_free(rtpo);
}
GF_Err rtpo_Read(GF_Box *s, GF_BitStream *bs)
{
GF_RTPOBox *ptr = (GF_RTPOBox *)s;
ptr->timeOffset = gf_bs_read_u32(bs);
return GF_OK;
}
GF_Box *rtpo_New()
{
ISOM_DECL_BOX_ALLOC(GF_RTPOBox, GF_ISOM_BOX_TYPE_RTPO);
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err rtpo_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_RTPOBox *ptr = (GF_RTPOBox *)s;
if (ptr == NULL) return GF_BAD_PARAM;
	//here we have no problem, just remember that some entries will have to
	//be 4-byte aligned ...
e = gf_isom_box_write_header(s, bs);
if (e) return e;
gf_bs_write_u32(bs, ptr->timeOffset);
return GF_OK;
}
GF_Err rtpo_Size(GF_Box *s)
{
s->size += 4;
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
void smhd_del(GF_Box *s)
{
GF_SoundMediaHeaderBox *ptr = (GF_SoundMediaHeaderBox *)s;
if (ptr == NULL ) return;
gf_free(ptr);
}
GF_Err smhd_Read(GF_Box *s, GF_BitStream *bs)
{
GF_SoundMediaHeaderBox *ptr = (GF_SoundMediaHeaderBox *)s;
ptr->balance = gf_bs_read_u16(bs);
ptr->reserved = gf_bs_read_u16(bs);
return GF_OK;
}
GF_Box *smhd_New()
{
ISOM_DECL_BOX_ALLOC(GF_SoundMediaHeaderBox, GF_ISOM_BOX_TYPE_SMHD);
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err smhd_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_SoundMediaHeaderBox *ptr = (GF_SoundMediaHeaderBox *)s;
e = gf_isom_full_box_write(s, bs);
if (e) return e;
gf_bs_write_u16(bs, ptr->balance);
gf_bs_write_u16(bs, ptr->reserved);
return GF_OK;
}
GF_Err smhd_Size(GF_Box *s)
{
GF_SoundMediaHeaderBox *ptr = (GF_SoundMediaHeaderBox *)s;
ptr->reserved = 0;
ptr->size += 4;
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
void snro_del(GF_Box *s)
{
GF_SeqOffHintEntryBox *snro = (GF_SeqOffHintEntryBox *)s;
gf_free(snro);
}
GF_Err snro_Read(GF_Box *s, GF_BitStream *bs)
{
GF_SeqOffHintEntryBox *ptr = (GF_SeqOffHintEntryBox *)s;
ptr->SeqOffset = gf_bs_read_u32(bs);
return GF_OK;
}
GF_Box *snro_New()
{
ISOM_DECL_BOX_ALLOC(GF_SeqOffHintEntryBox, GF_ISOM_BOX_TYPE_SNRO);
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err snro_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_SeqOffHintEntryBox *ptr = (GF_SeqOffHintEntryBox *)s;
if (ptr == NULL) return GF_BAD_PARAM;
e = gf_isom_box_write_header(s, bs);
if (e) return e;
gf_bs_write_u32(bs, ptr->SeqOffset);
return GF_OK;
}
GF_Err snro_Size(GF_Box *s)
{
s->size += 4;
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
void stbl_del(GF_Box *s)
{
GF_SampleTableBox *ptr = (GF_SampleTableBox *)s;
if (ptr == NULL) return;
if (ptr->ChunkOffset) gf_isom_box_del(ptr->ChunkOffset);
if (ptr->CompositionOffset) gf_isom_box_del((GF_Box *) ptr->CompositionOffset);
if (ptr->CompositionToDecode) gf_isom_box_del((GF_Box *) ptr->CompositionToDecode);
if (ptr->DegradationPriority) gf_isom_box_del((GF_Box *) ptr->DegradationPriority);
if (ptr->SampleDescription) gf_isom_box_del((GF_Box *) ptr->SampleDescription);
if (ptr->SampleSize) gf_isom_box_del((GF_Box *) ptr->SampleSize);
if (ptr->SampleToChunk) gf_isom_box_del((GF_Box *) ptr->SampleToChunk);
if (ptr->ShadowSync) gf_isom_box_del((GF_Box *) ptr->ShadowSync);
if (ptr->SyncSample) gf_isom_box_del((GF_Box *) ptr->SyncSample);
if (ptr->TimeToSample) gf_isom_box_del((GF_Box *) ptr->TimeToSample);
if (ptr->SampleDep) gf_isom_box_del((GF_Box *) ptr->SampleDep);
if (ptr->PaddingBits) gf_isom_box_del((GF_Box *) ptr->PaddingBits);
if (ptr->sub_samples) gf_isom_box_array_del(ptr->sub_samples);
if (ptr->sampleGroups) gf_isom_box_array_del(ptr->sampleGroups);
if (ptr->sampleGroupsDescription) gf_isom_box_array_del(ptr->sampleGroupsDescription);
if (ptr->sai_sizes) gf_isom_box_array_del(ptr->sai_sizes);
if (ptr->sai_offsets) gf_isom_box_array_del(ptr->sai_offsets);
if (ptr->traf_map) {
if (ptr->traf_map->sample_num) gf_free(ptr->traf_map->sample_num);
gf_free(ptr->traf_map);
}
gf_free(ptr);
}
GF_Err stbl_AddBox(GF_Box *s, GF_Box *a)
{
GF_SampleTableBox *ptr = (GF_SampleTableBox *)s;
if (!a) return GF_OK;
switch (a->type) {
case GF_ISOM_BOX_TYPE_STTS:
if (ptr->TimeToSample) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->TimeToSample = (GF_TimeToSampleBox *)a;
break;
case GF_ISOM_BOX_TYPE_CTTS:
if (ptr->CompositionOffset) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->CompositionOffset = (GF_CompositionOffsetBox *)a;
break;
case GF_ISOM_BOX_TYPE_CSLG:
if (ptr->CompositionToDecode) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->CompositionToDecode = (GF_CompositionToDecodeBox *)a;
break;
case GF_ISOM_BOX_TYPE_STSS:
if (ptr->SyncSample) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->SyncSample = (GF_SyncSampleBox *)a;
break;
case GF_ISOM_BOX_TYPE_STSD:
if (ptr->SampleDescription) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->SampleDescription =(GF_SampleDescriptionBox *)a;
break;
case GF_ISOM_BOX_TYPE_STZ2:
case GF_ISOM_BOX_TYPE_STSZ:
if (ptr->SampleSize) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->SampleSize = (GF_SampleSizeBox *)a;
break;
case GF_ISOM_BOX_TYPE_STSC:
if (ptr->SampleToChunk) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->SampleToChunk = (GF_SampleToChunkBox *)a;
break;
case GF_ISOM_BOX_TYPE_PADB:
if (ptr->PaddingBits) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->PaddingBits = (GF_PaddingBitsBox *) a;
break;
	//WARNING: the chunk offset table may change dynamically during edit (stco vs co64),
	//so any previously parsed table is replaced; in dump mode the old box is not
	//deleted since it remains referenced for dumping
case GF_ISOM_BOX_TYPE_CO64:
case GF_ISOM_BOX_TYPE_STCO:
if (ptr->ChunkOffset) {
extern Bool use_dump_mode;
if (!use_dump_mode)
gf_isom_box_del(ptr->ChunkOffset);
}
ptr->ChunkOffset = a;
return GF_OK;
case GF_ISOM_BOX_TYPE_STSH:
if (ptr->ShadowSync) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->ShadowSync = (GF_ShadowSyncBox *)a;
break;
case GF_ISOM_BOX_TYPE_STDP:
if (ptr->DegradationPriority) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->DegradationPriority = (GF_DegradationPriorityBox *)a;
break;
case GF_ISOM_BOX_TYPE_SDTP:
if (ptr->SampleDep) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->SampleDep= (GF_SampleDependencyTypeBox *)a;
break;
case GF_ISOM_BOX_TYPE_SUBS:
if (!ptr->sub_samples) ptr->sub_samples = gf_list_new();
gf_list_add(ptr->sub_samples, a);
//check subsample box
{
GF_SubSampleInformationBox *subs = (GF_SubSampleInformationBox *)a;
GF_SubSampleInfoEntry *ent = gf_list_get(subs->Samples, 0);
if (!ent) {
gf_list_rem(subs->Samples, 0);
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] first entry in SubSample in track SampleTable is invalid\n"));
}
else if (ent->sample_delta==0) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] first entry in SubSample in track SampleTable has sample_delta of 0, should be one. Fixing\n"));
ent->sample_delta = 1;
}
}
break;
case GF_ISOM_BOX_TYPE_SBGP:
if (!ptr->sampleGroups) ptr->sampleGroups = gf_list_new();
gf_list_add(ptr->sampleGroups, a);
break;
case GF_ISOM_BOX_TYPE_SGPD:
if (!ptr->sampleGroupsDescription) ptr->sampleGroupsDescription = gf_list_new();
gf_list_add(ptr->sampleGroupsDescription, a);
break;
case GF_ISOM_BOX_TYPE_SAIZ:
if (!ptr->sai_sizes) ptr->sai_sizes = gf_list_new();
gf_list_add(ptr->sai_sizes, a);
break;
case GF_ISOM_BOX_TYPE_SAIO:
if (!ptr->sai_offsets) ptr->sai_offsets = gf_list_new();
gf_list_add(ptr->sai_offsets, a);
break;
default:
return gf_isom_box_add_default((GF_Box *)ptr, a);
}
return GF_OK;
}
GF_Err stbl_Read(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
	//DegradationPriority needs special parsing: its entry count is not stored in the
	//box itself, see stdp_Read
GF_SampleTableBox *ptr = (GF_SampleTableBox *)s;
e = gf_isom_box_array_read(s, bs, stbl_AddBox);
if (e) return e;
if (!ptr->SyncSample)
ptr->no_sync_found = 1;
ptr->nb_sgpd_in_stbl = gf_list_count(ptr->sampleGroupsDescription);
ptr->nb_other_boxes_in_stbl = gf_list_count(ptr->other_boxes);
return GF_OK;
}
GF_Box *stbl_New()
{
ISOM_DECL_BOX_ALLOC(GF_SampleTableBox, GF_ISOM_BOX_TYPE_STBL);
	//MaxSamplePerChunk is 10 by default
tmp->MaxSamplePerChunk = 10;
tmp->groupID = 1;
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err stbl_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_SampleTableBox *ptr = (GF_SampleTableBox *)s;
if (!s) return GF_BAD_PARAM;
e = gf_isom_box_write_header(s, bs);
if (e) return e;
if (ptr->SampleDescription) {
e = gf_isom_box_write((GF_Box *) ptr->SampleDescription, bs);
if (e) return e;
}
if (ptr->TimeToSample) {
e = gf_isom_box_write((GF_Box *) ptr->TimeToSample, bs);
if (e) return e;
}
if (ptr->CompositionOffset) {
e = gf_isom_box_write((GF_Box *) ptr->CompositionOffset, bs);
if (e) return e;
}
if (ptr->CompositionToDecode) {
e = gf_isom_box_write((GF_Box *) ptr->CompositionToDecode, bs);
if (e) return e;
}
if (ptr->SyncSample) {
e = gf_isom_box_write((GF_Box *) ptr->SyncSample, bs);
if (e) return e;
}
if (ptr->ShadowSync) {
e = gf_isom_box_write((GF_Box *) ptr->ShadowSync, bs);
if (e) return e;
}
if (ptr->SampleToChunk) {
e = gf_isom_box_write((GF_Box *) ptr->SampleToChunk, bs);
if (e) return e;
}
if (ptr->SampleSize) {
e = gf_isom_box_write((GF_Box *) ptr->SampleSize, bs);
if (e) return e;
}
if (ptr->ChunkOffset) {
e = gf_isom_box_write(ptr->ChunkOffset, bs);
if (e) return e;
}
if (ptr->DegradationPriority) {
e = gf_isom_box_write((GF_Box *) ptr->DegradationPriority, bs);
if (e) return e;
}
if (ptr->SampleDep && ptr->SampleDep->sampleCount) {
e = gf_isom_box_write((GF_Box *) ptr->SampleDep, bs);
if (e) return e;
}
if (ptr->PaddingBits) {
e = gf_isom_box_write((GF_Box *) ptr->PaddingBits, bs);
if (e) return e;
}
if (ptr->sub_samples) {
e = gf_isom_box_array_write(s, ptr->sub_samples, bs);
if (e) return e;
}
if (ptr->sampleGroupsDescription) {
e = gf_isom_box_array_write(s, ptr->sampleGroupsDescription, bs);
if (e) return e;
}
if (ptr->sampleGroups) {
e = gf_isom_box_array_write(s, ptr->sampleGroups, bs);
if (e) return e;
}
if (ptr->sai_sizes) {
e = gf_isom_box_array_write(s, ptr->sai_sizes, bs);
if (e) return e;
}
if (ptr->sai_offsets) {
e = gf_isom_box_array_write(s, ptr->sai_offsets, bs);
if (e) return e;
}
return GF_OK;
}
GF_Err stbl_Size(GF_Box *s)
{
GF_Err e;
GF_SampleTableBox *ptr = (GF_SampleTableBox *)s;
	//Mandatory boxes (but not internally :)
if (ptr->SampleDescription) {
e = gf_isom_box_size((GF_Box *) ptr->SampleDescription);
if (e) return e;
ptr->size += ptr->SampleDescription->size;
}
if (ptr->SampleSize) {
e = gf_isom_box_size((GF_Box *) ptr->SampleSize);
if (e) return e;
ptr->size += ptr->SampleSize->size;
}
if (ptr->SampleToChunk) {
e = gf_isom_box_size((GF_Box *) ptr->SampleToChunk);
if (e) return e;
ptr->size += ptr->SampleToChunk->size;
}
if (ptr->TimeToSample) {
e = gf_isom_box_size((GF_Box *) ptr->TimeToSample);
if (e) return e;
ptr->size += ptr->TimeToSample->size;
}
if (ptr->ChunkOffset) {
e = gf_isom_box_size(ptr->ChunkOffset);
if (e) return e;
ptr->size += ptr->ChunkOffset->size;
}
	//optional boxes
if (ptr->CompositionOffset) {
e = gf_isom_box_size((GF_Box *) ptr->CompositionOffset);
if (e) return e;
ptr->size += ptr->CompositionOffset->size;
}
if (ptr->CompositionToDecode) {
e = gf_isom_box_size((GF_Box *) ptr->CompositionToDecode);
if (e) return e;
ptr->size += ptr->CompositionToDecode->size;
}
if (ptr->DegradationPriority) {
e = gf_isom_box_size((GF_Box *) ptr->DegradationPriority);
if (e) return e;
ptr->size += ptr->DegradationPriority->size;
}
if (ptr->ShadowSync) {
e = gf_isom_box_size((GF_Box *) ptr->ShadowSync);
if (e) return e;
ptr->size += ptr->ShadowSync->size;
}
if (ptr->SyncSample) {
e = gf_isom_box_size((GF_Box *) ptr->SyncSample);
if (e) return e;
ptr->size += ptr->SyncSample->size;
}
if (ptr->SampleDep && ptr->SampleDep->sampleCount) {
e = gf_isom_box_size((GF_Box *) ptr->SampleDep);
if (e) return e;
ptr->size += ptr->SampleDep->size;
}
//padb
if (ptr->PaddingBits) {
e = gf_isom_box_size((GF_Box *) ptr->PaddingBits);
if (e) return e;
ptr->size += ptr->PaddingBits->size;
}
if (ptr->sub_samples) {
e = gf_isom_box_array_size(s, ptr->sub_samples);
if (e) return e;
}
if (ptr->sampleGroups) {
e = gf_isom_box_array_size(s, ptr->sampleGroups);
if (e) return e;
}
if (ptr->sampleGroupsDescription) {
e = gf_isom_box_array_size(s, ptr->sampleGroupsDescription);
if (e) return e;
}
if (ptr->sai_sizes) {
e = gf_isom_box_array_size(s, ptr->sai_sizes);
if (e) return e;
}
if (ptr->sai_offsets) {
e = gf_isom_box_array_size(s, ptr->sai_offsets);
if (e) return e;
}
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
void stco_del(GF_Box *s)
{
GF_ChunkOffsetBox *ptr = (GF_ChunkOffsetBox *)s;
if (ptr == NULL) return;
if (ptr->offsets) gf_free(ptr->offsets);
gf_free(ptr);
}
GF_Err stco_Read(GF_Box *s, GF_BitStream *bs)
{
u32 entries;
GF_ChunkOffsetBox *ptr = (GF_ChunkOffsetBox *)s;
ptr->nb_entries = gf_bs_read_u32(bs);
ISOM_DECREASE_SIZE(ptr, 4);
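	/*each entry is a 32-bit chunk offset, so the remaining payload must hold
	at least 4 bytes per announced entry*/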
if (ptr->nb_entries > ptr->size / 4) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid number of entries %d in stco\n", ptr->nb_entries));
return GF_ISOM_INVALID_FILE;
}
if (ptr->nb_entries) {
ptr->offsets = (u32 *) gf_malloc(ptr->nb_entries * sizeof(u32) );
if (ptr->offsets == NULL) return GF_OUT_OF_MEM;
ptr->alloc_size = ptr->nb_entries;
for (entries = 0; entries < ptr->nb_entries; entries++) {
ptr->offsets[entries] = gf_bs_read_u32(bs);
}
}
return GF_OK;
}
GF_Box *stco_New()
{
ISOM_DECL_BOX_ALLOC(GF_ChunkOffsetBox, GF_ISOM_BOX_TYPE_STCO);
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err stco_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
u32 i;
GF_ChunkOffsetBox *ptr = (GF_ChunkOffsetBox *)s;
e = gf_isom_full_box_write(s, bs);
if (e) return e;
gf_bs_write_u32(bs, ptr->nb_entries);
for (i = 0; i < ptr->nb_entries; i++) {
gf_bs_write_u32(bs, ptr->offsets[i]);
}
return GF_OK;
}
GF_Err stco_Size(GF_Box *s)
{
GF_ChunkOffsetBox *ptr = (GF_ChunkOffsetBox *)s;
ptr->size += 4 + (4 * ptr->nb_entries);
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
void stdp_del(GF_Box *s)
{
GF_DegradationPriorityBox *ptr = (GF_DegradationPriorityBox *)s;
if (ptr == NULL ) return;
if (ptr->priorities) gf_free(ptr->priorities);
gf_free(ptr);
}
//this is called through stbl_read...
GF_Err stdp_Read(GF_Box *s, GF_BitStream *bs)
{
u32 entry;
GF_DegradationPriorityBox *ptr = (GF_DegradationPriorityBox *)s;
/*out-of-order stdp, assume no padding at the end and take the entire remaining data for entries*/
if (!ptr->nb_entries) ptr->nb_entries = (u32) ptr->size / 2;
else if (ptr->nb_entries > ptr->size / 2) return GF_ISOM_INVALID_FILE;
ptr->priorities = (u16 *) gf_malloc(ptr->nb_entries * sizeof(u16));
if (ptr->priorities == NULL) return GF_OUT_OF_MEM;
for (entry = 0; entry < ptr->nb_entries; entry++) {
ptr->priorities[entry] = gf_bs_read_u16(bs);
}
ISOM_DECREASE_SIZE(ptr, (2*ptr->nb_entries) );
return GF_OK;
}
GF_Box *stdp_New()
{
ISOM_DECL_BOX_ALLOC(GF_DegradationPriorityBox, GF_ISOM_BOX_TYPE_STDP);
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err stdp_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
u32 i;
GF_DegradationPriorityBox *ptr = (GF_DegradationPriorityBox *)s;
e = gf_isom_full_box_write(s, bs);
if (e) return e;
for (i = 0; i < ptr->nb_entries; i++) {
gf_bs_write_u16(bs, ptr->priorities[i]);
}
return GF_OK;
}
GF_Err stdp_Size(GF_Box *s)
{
GF_DegradationPriorityBox *ptr = (GF_DegradationPriorityBox *)s;
ptr->size += (2 * ptr->nb_entries);
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
void stsc_del(GF_Box *s)
{
GF_SampleToChunkBox *ptr = (GF_SampleToChunkBox *)s;
if (ptr == NULL) return;
if (ptr->entries) gf_free(ptr->entries);
gf_free(ptr);
}
GF_Err stsc_Read(GF_Box *s, GF_BitStream *bs)
{
u32 i;
GF_SampleToChunkBox *ptr = (GF_SampleToChunkBox *)s;
ptr->nb_entries = gf_bs_read_u32(bs);
ISOM_DECREASE_SIZE(ptr, 4);
if (ptr->nb_entries > ptr->size / 12) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid number of entries %d in stsc\n", ptr->nb_entries));
return GF_ISOM_INVALID_FILE;
}
ptr->alloc_size = ptr->nb_entries;
ptr->entries = NULL;
if (ptr->nb_entries) {
ptr->entries = gf_malloc(sizeof(GF_StscEntry)*ptr->alloc_size);
if (!ptr->entries) return GF_OUT_OF_MEM;
}
for (i = 0; i < ptr->nb_entries; i++) {
ptr->entries[i].firstChunk = gf_bs_read_u32(bs);
ptr->entries[i].samplesPerChunk = gf_bs_read_u32(bs);
ptr->entries[i].sampleDescriptionIndex = gf_bs_read_u32(bs);
ptr->entries[i].isEdited = 0;
ptr->entries[i].nextChunk = 0;
if (!ptr->entries[i].firstChunk) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] invalid first chunk 0 in stsc entry\n", ptr->nb_entries));
return GF_ISOM_INVALID_FILE;
}
//update the next chunk in the previous entry
if (i) ptr->entries[i-1].nextChunk = ptr->entries[i].firstChunk;
}
ptr->currentIndex = 0;
ptr->firstSampleInCurrentChunk = 0;
ptr->currentChunk = 0;
ptr->ghostNumber = 0;
return GF_OK;
}
GF_Box *stsc_New()
{
ISOM_DECL_BOX_ALLOC(GF_SampleToChunkBox, GF_ISOM_BOX_TYPE_STSC);
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err stsc_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
u32 i;
GF_SampleToChunkBox *ptr = (GF_SampleToChunkBox *)s;
e = gf_isom_full_box_write(s, bs);
if (e) return e;
gf_bs_write_u32(bs, ptr->nb_entries);
for (i=0; i<ptr->nb_entries; i++) {
gf_bs_write_u32(bs, ptr->entries[i].firstChunk);
gf_bs_write_u32(bs, ptr->entries[i].samplesPerChunk);
gf_bs_write_u32(bs, ptr->entries[i].sampleDescriptionIndex);
}
return GF_OK;
}
GF_Err stsc_Size(GF_Box *s)
{
GF_SampleToChunkBox *ptr = (GF_SampleToChunkBox *)s;
ptr->size += 4 + (12 * ptr->nb_entries);
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
void stsd_del(GF_Box *s)
{
GF_SampleDescriptionBox *ptr = (GF_SampleDescriptionBox *)s;
if (ptr == NULL) return;
gf_free(ptr);
}
GF_Err stsd_AddBox(GF_Box *s, GF_Box *a)
{
GF_UnknownBox *def;
GF_SampleDescriptionBox *ptr = (GF_SampleDescriptionBox *)s;
if (!a) return GF_OK;
if (gf_box_valid_in_parent(a, "stsd")) {
return gf_isom_box_add_default((GF_Box*)ptr, a);
}
switch (a->type) {
//unknown sample description: we need a specific box to handle the data ref index
//rather than a default box ...
case GF_ISOM_BOX_TYPE_UNKNOWN:
def = (GF_UnknownBox *)a;
/*we need at least 8 bytes for unknown sample entries*/
if (def->dataSize < 8) {
gf_isom_box_del(a);
return GF_ISOM_INVALID_MEDIA;
}
return gf_isom_box_add_default((GF_Box*)ptr, a);
default:
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Cannot process box of type %s\n", gf_4cc_to_str(a->type)));
return GF_ISOM_INVALID_FILE;
}
}
GF_Err stsd_Read(GF_Box *s, GF_BitStream *bs)
{
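	/*the 32-bit entry_count is ignored on read: child sample entries are discovered
	while parsing the box array below, and stsd_Write recomputes the count*/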
gf_bs_read_u32(bs);
ISOM_DECREASE_SIZE(s, 4)
return gf_isom_box_array_read_ex(s, bs, stsd_AddBox, GF_ISOM_BOX_TYPE_STSD);
}
GF_Box *stsd_New()
{
ISOM_DECL_BOX_ALLOC(GF_SampleDescriptionBox, GF_ISOM_BOX_TYPE_STSD);
tmp->other_boxes = gf_list_new();
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err stsd_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
u32 nb_entries;
GF_SampleDescriptionBox *ptr = (GF_SampleDescriptionBox *)s;
e = gf_isom_full_box_write(s, bs);
if (e) return e;
nb_entries = gf_list_count(ptr->other_boxes);
gf_bs_write_u32(bs, nb_entries);
return GF_OK;
}
GF_Err stsd_Size(GF_Box *s)
{
GF_SampleDescriptionBox *ptr = (GF_SampleDescriptionBox *)s;
ptr->size += 4;
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
void stsh_del(GF_Box *s)
{
u32 i = 0;
GF_StshEntry *ent;
GF_ShadowSyncBox *ptr = (GF_ShadowSyncBox *)s;
if (ptr == NULL) return;
while ( (ent = (GF_StshEntry *)gf_list_enum(ptr->entries, &i)) ) {
gf_free(ent);
}
gf_list_del(ptr->entries);
gf_free(ptr);
}
GF_Err stsh_Read(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
u32 count, i;
GF_StshEntry *ent;
GF_ShadowSyncBox *ptr = (GF_ShadowSyncBox *)s;
	count = gf_bs_read_u32(bs);
	ISOM_DECREASE_SIZE(ptr, 4);
	/*same sanity check as the other sample tables: each entry needs 8 bytes*/
	if (count > ptr->size / 8) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid number of entries %d in stsh\n", count));
		return GF_ISOM_INVALID_FILE;
	}
for (i = 0; i < count; i++) {
ent = (GF_StshEntry *) gf_malloc(sizeof(GF_StshEntry));
if (!ent) return GF_OUT_OF_MEM;
ent->shadowedSampleNumber = gf_bs_read_u32(bs);
ent->syncSampleNumber = gf_bs_read_u32(bs);
e = gf_list_add(ptr->entries, ent);
if (e) return e;
}
return GF_OK;
}
GF_Box *stsh_New()
{
ISOM_DECL_BOX_ALLOC(GF_ShadowSyncBox, GF_ISOM_BOX_TYPE_STSH);
tmp->entries = gf_list_new();
if (!tmp->entries) {
gf_free(tmp);
return NULL;
}
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err stsh_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
u32 i;
GF_StshEntry *ent;
GF_ShadowSyncBox *ptr = (GF_ShadowSyncBox *)s;
e = gf_isom_full_box_write(s, bs);
if (e) return e;
gf_bs_write_u32(bs, gf_list_count(ptr->entries));
i=0;
while ((ent = (GF_StshEntry *)gf_list_enum(ptr->entries, &i))) {
gf_bs_write_u32(bs, ent->shadowedSampleNumber);
gf_bs_write_u32(bs, ent->syncSampleNumber);
}
return GF_OK;
}
GF_Err stsh_Size(GF_Box *s)
{
GF_ShadowSyncBox *ptr = (GF_ShadowSyncBox *)s;
ptr->size += 4 + (8 * gf_list_count(ptr->entries));
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
void stss_del(GF_Box *s)
{
GF_SyncSampleBox *ptr = (GF_SyncSampleBox *)s;
if (ptr == NULL) return;
if (ptr->sampleNumbers) gf_free(ptr->sampleNumbers);
gf_free(ptr);
}
GF_Err stss_Read(GF_Box *s, GF_BitStream *bs)
{
u32 i;
GF_SyncSampleBox *ptr = (GF_SyncSampleBox *)s;
ptr->nb_entries = gf_bs_read_u32(bs);
ISOM_DECREASE_SIZE(ptr, 4);
if (ptr->nb_entries > ptr->size / 4) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid number of entries %d in stss\n", ptr->nb_entries));
return GF_ISOM_INVALID_FILE;
}
ptr->alloc_size = ptr->nb_entries;
ptr->sampleNumbers = (u32 *) gf_malloc( ptr->alloc_size * sizeof(u32));
if (ptr->sampleNumbers == NULL) return GF_OUT_OF_MEM;
for (i = 0; i < ptr->nb_entries; i++) {
ptr->sampleNumbers[i] = gf_bs_read_u32(bs);
}
return GF_OK;
}
GF_Box *stss_New()
{
ISOM_DECL_BOX_ALLOC(GF_SyncSampleBox, GF_ISOM_BOX_TYPE_STSS);
return (GF_Box*)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err stss_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
u32 i;
GF_SyncSampleBox *ptr = (GF_SyncSampleBox *)s;
e = gf_isom_full_box_write(s, bs);
if (e) return e;
gf_bs_write_u32(bs, ptr->nb_entries);
for (i = 0; i < ptr->nb_entries; i++) {
gf_bs_write_u32(bs, ptr->sampleNumbers[i]);
}
return GF_OK;
}
GF_Err stss_Size(GF_Box *s)
{
GF_SyncSampleBox *ptr = (GF_SyncSampleBox *)s;
ptr->size += 4 + (4 * ptr->nb_entries);
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
void stsz_del(GF_Box *s)
{
GF_SampleSizeBox *ptr = (GF_SampleSizeBox *)s;
if (ptr == NULL) return;
if (ptr->sizes) gf_free(ptr->sizes);
gf_free(ptr);
}
GF_Err stsz_Read(GF_Box *s, GF_BitStream *bs)
{
u32 i, estSize;
GF_SampleSizeBox *ptr = (GF_SampleSizeBox *)s;
if (ptr == NULL) return GF_BAD_PARAM;
//support for CompactSizes
if (s->type == GF_ISOM_BOX_TYPE_STSZ) {
ptr->sampleSize = gf_bs_read_u32(bs);
ptr->sampleCount = gf_bs_read_u32(bs);
ISOM_DECREASE_SIZE(ptr, 8);
} else {
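		/*compact stz2 header: 24 reserved bits, an 8-bit field size (4, 8 or 16)
		and a 32-bit sample count*/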
//24-reserved
gf_bs_read_int(bs, 24);
i = gf_bs_read_u8(bs);
ptr->sampleCount = gf_bs_read_u32(bs);
ISOM_DECREASE_SIZE(ptr, 8);
switch (i) {
case 4:
case 8:
case 16:
ptr->sampleSize = i;
break;
default:
			//try to fix the file
			//no samples, no parsing problem
if (!ptr->sampleCount) {
ptr->sampleSize = 16;
return GF_OK;
}
estSize = (u32) (ptr->size) / ptr->sampleCount;
if (!estSize && ((ptr->sampleCount+1)/2 == (ptr->size)) ) {
ptr->sampleSize = 4;
break;
} else if (estSize == 1 || estSize == 2) {
ptr->sampleSize = 8 * estSize;
} else {
return GF_ISOM_INVALID_FILE;
}
}
}
if (s->type == GF_ISOM_BOX_TYPE_STSZ) {
if (! ptr->sampleSize && ptr->sampleCount) {
if (ptr->sampleCount > ptr->size / 4) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid number of entries %d in stsz\n", ptr->sampleCount));
return GF_ISOM_INVALID_FILE;
}
ptr->sizes = (u32 *) gf_malloc(ptr->sampleCount * sizeof(u32));
ptr->alloc_size = ptr->sampleCount;
if (! ptr->sizes) return GF_OUT_OF_MEM;
for (i = 0; i < ptr->sampleCount; i++) {
ptr->sizes[i] = gf_bs_read_u32(bs);
}
}
} else {
if (ptr->sampleSize==4) {
if (ptr->sampleCount / 2 > ptr->size) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid number of entries %d in stsz\n", ptr->sampleCount));
return GF_ISOM_INVALID_FILE;
}
} else {
if (ptr->sampleCount > ptr->size / (ptr->sampleSize/8)) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid number of entries %d in stsz\n", ptr->sampleCount));
return GF_ISOM_INVALID_FILE;
}
}
//note we could optimize the mem usage by keeping the table compact
//in memory. But that would complicate both caching and editing
//we therefore keep all sizes as u32 and uncompress the table
ptr->sizes = (u32 *) gf_malloc(ptr->sampleCount * sizeof(u32));
if (! ptr->sizes) return GF_OUT_OF_MEM;
ptr->alloc_size = ptr->sampleCount;
for (i = 0; i < ptr->sampleCount; ) {
switch (ptr->sampleSize) {
case 4:
ptr->sizes[i] = gf_bs_read_int(bs, 4);
if (i+1 < ptr->sampleCount) {
ptr->sizes[i+1] = gf_bs_read_int(bs, 4);
} else {
//0 padding in odd sample count
gf_bs_read_int(bs, 4);
}
i += 2;
break;
default:
ptr->sizes[i] = gf_bs_read_int(bs, ptr->sampleSize);
i += 1;
break;
}
}
}
return GF_OK;
}
GF_Box *stsz_New()
{
ISOM_DECL_BOX_ALLOC(GF_SampleSizeBox, 0);
//type is unknown here, can be regular or compact table
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err stsz_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
u32 i;
GF_SampleSizeBox *ptr = (GF_SampleSizeBox *)s;
e = gf_isom_full_box_write(s, bs);
if (e) return e;
//in both versions this is still valid
if (ptr->type == GF_ISOM_BOX_TYPE_STSZ) {
gf_bs_write_u32(bs, ptr->sampleSize);
} else {
gf_bs_write_u24(bs, 0);
gf_bs_write_u8(bs, ptr->sampleSize);
}
gf_bs_write_u32(bs, ptr->sampleCount);
if (ptr->type == GF_ISOM_BOX_TYPE_STSZ) {
if (! ptr->sampleSize) {
for (i = 0; i < ptr->sampleCount; i++) {
gf_bs_write_u32(bs, ptr->sizes ? ptr->sizes[i] : 0);
}
}
} else {
for (i = 0; i < ptr->sampleCount; ) {
switch (ptr->sampleSize) {
case 4:
gf_bs_write_int(bs, ptr->sizes[i], 4);
if (i+1 < ptr->sampleCount) {
gf_bs_write_int(bs, ptr->sizes[i+1], 4);
} else {
//0 padding in odd sample count
gf_bs_write_int(bs, 0, 4);
}
i += 2;
break;
default:
gf_bs_write_int(bs, ptr->sizes[i], ptr->sampleSize);
i += 1;
break;
}
}
}
return GF_OK;
}
GF_Err stsz_Size(GF_Box *s)
{
u32 i, fieldSize, size;
GF_SampleSizeBox *ptr = (GF_SampleSizeBox *)s;
ptr->size += 8;
if (!ptr->sampleCount) return GF_OK;
//regular table
if (ptr->type == GF_ISOM_BOX_TYPE_STSZ) {
if (ptr->sampleSize) return GF_OK;
ptr->size += (4 * ptr->sampleCount);
return GF_OK;
}
fieldSize = 4;
size = ptr->sizes[0];
for (i=0; i < ptr->sampleCount; i++) {
		if (ptr->sizes[i] <= 0xF) {}
		//switch to 8-bit table, never shrinking the field size once a larger sample was seen
		else if (ptr->sizes[i] <= 0xFF) {
			if (fieldSize < 8) fieldSize = 8;
		}
		//switch to 16-bit table
		else if (ptr->sizes[i] <= 0xFFFF) {
			if (fieldSize < 16) fieldSize = 16;
		}
		//switch to 32-bit table
		else {
			fieldSize = 32;
		}
		//the uniform-size check must run for every entry, small ones included,
		//otherwise differing 4-bit sizes could wrongly collapse into a constant-size table
		if (size != ptr->sizes[i]) size = 0;
	}
	//if all samples are of the same size, switch to regular (more compact):
	//a constant-size stsz carries no per-sample table, so we are done
	if (size) {
		ptr->type = GF_ISOM_BOX_TYPE_STSZ;
		ptr->sampleSize = size;
		gf_free(ptr->sizes);
		ptr->sizes = NULL;
		return GF_OK;
	}
if (fieldSize == 32) {
//oops, doesn't fit in a compact table
ptr->type = GF_ISOM_BOX_TYPE_STSZ;
ptr->size += (4 * ptr->sampleCount);
return GF_OK;
}
//make sure we are a compact table (no need to change the mem representation)
ptr->type = GF_ISOM_BOX_TYPE_STZ2;
ptr->sampleSize = fieldSize;
if (fieldSize == 4) {
//do not forget the 0 padding field for odd count
ptr->size += (ptr->sampleCount + 1) / 2;
} else {
ptr->size += (ptr->sampleCount) * (fieldSize/8);
}
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
void stts_del(GF_Box *s)
{
GF_TimeToSampleBox *ptr = (GF_TimeToSampleBox *)s;
if (ptr->entries) gf_free(ptr->entries);
gf_free(ptr);
}
GF_Err stts_Read(GF_Box *s, GF_BitStream *bs)
{
u32 i;
GF_TimeToSampleBox *ptr = (GF_TimeToSampleBox *)s;
#ifndef GPAC_DISABLE_ISOM_WRITE
ptr->w_LastDTS = 0;
#endif
ptr->nb_entries = gf_bs_read_u32(bs);
ISOM_DECREASE_SIZE(ptr, 4);
if (ptr->nb_entries > ptr->size / 8) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid number of entries %d in stts\n", ptr->nb_entries));
return GF_ISOM_INVALID_FILE;
}
ptr->alloc_size = ptr->nb_entries;
ptr->entries = gf_malloc(sizeof(GF_SttsEntry)*ptr->alloc_size);
if (!ptr->entries) return GF_OUT_OF_MEM;
for (i=0; i<ptr->nb_entries; i++) {
ptr->entries[i].sampleCount = gf_bs_read_u32(bs);
ptr->entries[i].sampleDelta = gf_bs_read_u32(bs);
#ifndef GPAC_DISABLE_ISOM_WRITE
ptr->w_currentSampleNum += ptr->entries[i].sampleCount;
ptr->w_LastDTS += (u64)ptr->entries[i].sampleCount * ptr->entries[i].sampleDelta;
#endif
if (!ptr->entries[i].sampleDelta) {
if ((i+1<ptr->nb_entries) ) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] Found stts entry with sample_delta=0 - forbidden ! Fixing to 1\n" ));
ptr->entries[i].sampleDelta = 1;
} else if (ptr->entries[i].sampleCount>1) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] more than one stts entry at the end of the track with sample_delta=0 - forbidden ! Fixing to 1\n" ));
ptr->entries[i].sampleDelta = 1;
}
} else if ((s32) ptr->entries[i].sampleDelta < 0) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] stts entry %d has negative duration %d - forbidden ! Fixing to 1, sync may get lost (consider reimport raw media)\n", i, (s32) ptr->entries[i].sampleDelta ));
ptr->entries[i].sampleDelta = 1;
}
}
if (ptr->size<(ptr->nb_entries*8)) return GF_ISOM_INVALID_FILE;
ISOM_DECREASE_SIZE(ptr, ptr->nb_entries*8);
	//w_LastDTS accumulated the full track duration; the DTS of the last sample excludes its own delta
#ifndef GPAC_DISABLE_ISOM_WRITE
if (ptr->nb_entries) ptr->w_LastDTS -= ptr->entries[ptr->nb_entries-1].sampleDelta;
#endif
return GF_OK;
}
GF_Box *stts_New()
{
ISOM_DECL_BOX_ALLOC(GF_TimeToSampleBox, GF_ISOM_BOX_TYPE_STTS);
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err stts_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
u32 i;
GF_TimeToSampleBox *ptr = (GF_TimeToSampleBox *)s;
e = gf_isom_full_box_write(s, bs);
if (e) return e;
gf_bs_write_u32(bs, ptr->nb_entries);
for (i=0; i<ptr->nb_entries; i++) {
gf_bs_write_u32(bs, ptr->entries[i].sampleCount);
gf_bs_write_u32(bs, ptr->entries[i].sampleDelta);
}
return GF_OK;
}
GF_Err stts_Size(GF_Box *s)
{
GF_TimeToSampleBox *ptr = (GF_TimeToSampleBox *)s;
ptr->size += 4 + (8 * ptr->nb_entries);
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
#ifndef GPAC_DISABLE_ISOM_FRAGMENTS
void tfhd_del(GF_Box *s)
{
GF_TrackFragmentHeaderBox *ptr = (GF_TrackFragmentHeaderBox *)s;
if (ptr == NULL) return;
gf_free(ptr);
}
GF_Err tfhd_Read(GF_Box *s, GF_BitStream *bs)
{
GF_TrackFragmentHeaderBox *ptr = (GF_TrackFragmentHeaderBox *)s;
ptr->trackID = gf_bs_read_u32(bs);
//The rest depends on the flags
if (ptr->flags & GF_ISOM_TRAF_BASE_OFFSET) {
ptr->base_data_offset = gf_bs_read_u64(bs);
}
if (ptr->flags & GF_ISOM_TRAF_SAMPLE_DESC) {
ptr->sample_desc_index = gf_bs_read_u32(bs);
}
if (ptr->flags & GF_ISOM_TRAF_SAMPLE_DUR) {
ptr->def_sample_duration = gf_bs_read_u32(bs);
}
if (ptr->flags & GF_ISOM_TRAF_SAMPLE_SIZE) {
ptr->def_sample_size = gf_bs_read_u32(bs);
}
if (ptr->flags & GF_ISOM_TRAF_SAMPLE_FLAGS) {
ptr->def_sample_flags = gf_bs_read_u32(bs);
}
return GF_OK;
}
GF_Box *tfhd_New()
{
ISOM_DECL_BOX_ALLOC(GF_TrackFragmentHeaderBox, GF_ISOM_BOX_TYPE_TFHD);
//NO FLAGS SET BY DEFAULT
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err tfhd_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_TrackFragmentHeaderBox *ptr = (GF_TrackFragmentHeaderBox *) s;
if (!s) return GF_BAD_PARAM;
e = gf_isom_full_box_write(s, bs);
if (e) return e;
gf_bs_write_u32(bs, ptr->trackID);
//The rest depends on the flags
if (ptr->flags & GF_ISOM_TRAF_BASE_OFFSET) {
gf_bs_write_u64(bs, ptr->base_data_offset);
}
if (ptr->flags & GF_ISOM_TRAF_SAMPLE_DESC) {
gf_bs_write_u32(bs, ptr->sample_desc_index);
}
if (ptr->flags & GF_ISOM_TRAF_SAMPLE_DUR) {
gf_bs_write_u32(bs, ptr->def_sample_duration);
}
if (ptr->flags & GF_ISOM_TRAF_SAMPLE_SIZE) {
gf_bs_write_u32(bs, ptr->def_sample_size);
}
if (ptr->flags & GF_ISOM_TRAF_SAMPLE_FLAGS) {
gf_bs_write_u32(bs, ptr->def_sample_flags);
}
return GF_OK;
}
GF_Err tfhd_Size(GF_Box *s)
{
GF_TrackFragmentHeaderBox *ptr = (GF_TrackFragmentHeaderBox *)s;
ptr->size += 4;
//The rest depends on the flags
if (ptr->flags & GF_ISOM_TRAF_BASE_OFFSET) ptr->size += 8;
if (ptr->flags & GF_ISOM_TRAF_SAMPLE_DESC) ptr->size += 4;
if (ptr->flags & GF_ISOM_TRAF_SAMPLE_DUR) ptr->size += 4;
if (ptr->flags & GF_ISOM_TRAF_SAMPLE_SIZE) ptr->size += 4;
if (ptr->flags & GF_ISOM_TRAF_SAMPLE_FLAGS) ptr->size += 4;
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
#endif /*GPAC_DISABLE_ISOM_FRAGMENTS*/
void tims_del(GF_Box *s)
{
GF_TSHintEntryBox *tims = (GF_TSHintEntryBox *)s;
gf_free(tims);
}
GF_Err tims_Read(GF_Box *s, GF_BitStream *bs)
{
GF_TSHintEntryBox *ptr = (GF_TSHintEntryBox *)s;
ptr->timeScale = gf_bs_read_u32(bs);
return GF_OK;
}
GF_Box *tims_New()
{
ISOM_DECL_BOX_ALLOC(GF_TSHintEntryBox, GF_ISOM_BOX_TYPE_TIMS);
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err tims_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_TSHintEntryBox *ptr = (GF_TSHintEntryBox *)s;
if (ptr == NULL) return GF_BAD_PARAM;
e = gf_isom_box_write_header(s, bs);
if (e) return e;
gf_bs_write_u32(bs, ptr->timeScale);
return GF_OK;
}
GF_Err tims_Size(GF_Box *s)
{
s->size += 4;
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
void tkhd_del(GF_Box *s)
{
GF_TrackHeaderBox *ptr = (GF_TrackHeaderBox *)s;
if (ptr == NULL) return;
gf_free(ptr);
return;
}
GF_Err tkhd_Read(GF_Box *s, GF_BitStream *bs)
{
GF_TrackHeaderBox *ptr = (GF_TrackHeaderBox *)s;
if (ptr->version == 1) {
ptr->creationTime = gf_bs_read_u64(bs);
ptr->modificationTime = gf_bs_read_u64(bs);
ptr->trackID = gf_bs_read_u32(bs);
ptr->reserved1 = gf_bs_read_u32(bs);
ptr->duration = gf_bs_read_u64(bs);
} else {
ptr->creationTime = gf_bs_read_u32(bs);
ptr->modificationTime = gf_bs_read_u32(bs);
ptr->trackID = gf_bs_read_u32(bs);
ptr->reserved1 = gf_bs_read_u32(bs);
ptr->duration = gf_bs_read_u32(bs);
}
ptr->reserved2[0] = gf_bs_read_u32(bs);
ptr->reserved2[1] = gf_bs_read_u32(bs);
ptr->layer = gf_bs_read_u16(bs);
ptr->alternate_group = gf_bs_read_u16(bs);
ptr->volume = gf_bs_read_u16(bs);
ptr->reserved3 = gf_bs_read_u16(bs);
ptr->matrix[0] = gf_bs_read_u32(bs);
ptr->matrix[1] = gf_bs_read_u32(bs);
ptr->matrix[2] = gf_bs_read_u32(bs);
ptr->matrix[3] = gf_bs_read_u32(bs);
ptr->matrix[4] = gf_bs_read_u32(bs);
ptr->matrix[5] = gf_bs_read_u32(bs);
ptr->matrix[6] = gf_bs_read_u32(bs);
ptr->matrix[7] = gf_bs_read_u32(bs);
ptr->matrix[8] = gf_bs_read_u32(bs);
ptr->width = gf_bs_read_u32(bs);
ptr->height = gf_bs_read_u32(bs);
return GF_OK;
}
GF_Box *tkhd_New()
{
ISOM_DECL_BOX_ALLOC(GF_TrackHeaderBox, GF_ISOM_BOX_TYPE_TKHD);
tmp->matrix[0] = 0x00010000;
tmp->matrix[4] = 0x00010000;
tmp->matrix[8] = 0x40000000;
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err tkhd_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_TrackHeaderBox *ptr = (GF_TrackHeaderBox *)s;
e = gf_isom_full_box_write(s, bs);
if (e) return e;
if (ptr->version == 1) {
gf_bs_write_u64(bs, ptr->creationTime);
gf_bs_write_u64(bs, ptr->modificationTime);
gf_bs_write_u32(bs, ptr->trackID);
gf_bs_write_u32(bs, ptr->reserved1);
gf_bs_write_u64(bs, ptr->duration);
} else {
gf_bs_write_u32(bs, (u32) ptr->creationTime);
gf_bs_write_u32(bs, (u32) ptr->modificationTime);
gf_bs_write_u32(bs, ptr->trackID);
gf_bs_write_u32(bs, ptr->reserved1);
gf_bs_write_u32(bs, (u32) ptr->duration);
}
gf_bs_write_u32(bs, ptr->reserved2[0]);
gf_bs_write_u32(bs, ptr->reserved2[1]);
gf_bs_write_u16(bs, ptr->layer);
gf_bs_write_u16(bs, ptr->alternate_group);
gf_bs_write_u16(bs, ptr->volume);
gf_bs_write_u16(bs, ptr->reserved3);
gf_bs_write_u32(bs, ptr->matrix[0]);
gf_bs_write_u32(bs, ptr->matrix[1]);
gf_bs_write_u32(bs, ptr->matrix[2]);
gf_bs_write_u32(bs, ptr->matrix[3]);
gf_bs_write_u32(bs, ptr->matrix[4]);
gf_bs_write_u32(bs, ptr->matrix[5]);
gf_bs_write_u32(bs, ptr->matrix[6]);
gf_bs_write_u32(bs, ptr->matrix[7]);
gf_bs_write_u32(bs, ptr->matrix[8]);
gf_bs_write_u32(bs, ptr->width);
gf_bs_write_u32(bs, ptr->height);
return GF_OK;
}
GF_Err tkhd_Size(GF_Box *s)
{
GF_TrackHeaderBox *ptr = (GF_TrackHeaderBox *)s;
if (ptr->duration==(u64) -1) ptr->version = 0;
else ptr->version = (ptr->duration>0xFFFFFFFF) ? 1 : 0;
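	/*version 1: 64-bit times and duration plus trackID/reserved = 32 bytes, version 0 = 20;
	the fixed 60 bytes cover the reserved words, layer, alternate_group, volume, matrix, width and height*/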
ptr->size += (ptr->version == 1) ? 32 : 20;
ptr->size += 60;
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
#ifndef GPAC_DISABLE_ISOM_FRAGMENTS
void traf_del(GF_Box *s)
{
GF_TrackFragmentBox *ptr = (GF_TrackFragmentBox *)s;
if (ptr == NULL) return;
if (ptr->tfhd) gf_isom_box_del((GF_Box *) ptr->tfhd);
if (ptr->sdtp) gf_isom_box_del((GF_Box *) ptr->sdtp);
if (ptr->sub_samples) gf_isom_box_array_del(ptr->sub_samples);
if (ptr->tfdt) gf_isom_box_del((GF_Box *) ptr->tfdt);
if (ptr->sample_encryption) gf_isom_box_del((GF_Box *) ptr->sample_encryption);
gf_isom_box_array_del(ptr->TrackRuns);
if (ptr->sampleGroups) gf_isom_box_array_del(ptr->sampleGroups);
if (ptr->sampleGroupsDescription) gf_isom_box_array_del(ptr->sampleGroupsDescription);
if (ptr->sai_sizes) gf_isom_box_array_del(ptr->sai_sizes);
if (ptr->sai_offsets) gf_isom_box_array_del(ptr->sai_offsets);
gf_free(ptr);
}
GF_Err traf_AddBox(GF_Box *s, GF_Box *a)
{
GF_TrackFragmentBox *ptr = (GF_TrackFragmentBox *)s;
switch (a->type) {
case GF_ISOM_BOX_TYPE_TFHD:
if (ptr->tfhd) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->tfhd = (GF_TrackFragmentHeaderBox *) a;
return GF_OK;
case GF_ISOM_BOX_TYPE_TRUN:
return gf_list_add(ptr->TrackRuns, a);
case GF_ISOM_BOX_TYPE_SDTP:
if (ptr->sdtp) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->sdtp = (GF_SampleDependencyTypeBox *)a;
return GF_OK;
case GF_ISOM_BOX_TYPE_TFDT:
if (ptr->tfdt) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->tfdt = (GF_TFBaseMediaDecodeTimeBox*) a;
return GF_OK;
case GF_ISOM_BOX_TYPE_SUBS:
if (!ptr->sub_samples) ptr->sub_samples = gf_list_new();
return gf_list_add(ptr->sub_samples, a);
case GF_ISOM_BOX_TYPE_SBGP:
if (!ptr->sampleGroups) ptr->sampleGroups = gf_list_new();
gf_list_add(ptr->sampleGroups, a);
return GF_OK;
case GF_ISOM_BOX_TYPE_SGPD:
if (!ptr->sampleGroupsDescription) ptr->sampleGroupsDescription = gf_list_new();
gf_list_add(ptr->sampleGroupsDescription, a);
return GF_OK;
case GF_ISOM_BOX_TYPE_SAIZ:
if (!ptr->sai_sizes) ptr->sai_sizes = gf_list_new();
gf_list_add(ptr->sai_sizes, a);
return GF_OK;
case GF_ISOM_BOX_TYPE_SAIO:
if (!ptr->sai_offsets) ptr->sai_offsets = gf_list_new();
gf_list_add(ptr->sai_offsets, a);
return GF_OK;
	//we will throw an error if both PIFF_PSEC and SENC are found. No such files seen yet
case GF_ISOM_BOX_TYPE_UUID:
if ( ((GF_UUIDBox *)a)->internal_4cc==GF_ISOM_BOX_UUID_PSEC) {
if (ptr->sample_encryption) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->sample_encryption = (GF_SampleEncryptionBox *)a;
ptr->sample_encryption->traf = ptr;
return GF_OK;
} else {
return gf_isom_box_add_default(s, a);
}
case GF_ISOM_BOX_TYPE_SENC:
if (ptr->sample_encryption) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->sample_encryption = (GF_SampleEncryptionBox *)a;
ptr->sample_encryption->traf = ptr;
return GF_OK;
default:
return gf_isom_box_add_default(s, a);
}
return GF_OK;
}
GF_Err traf_Read(GF_Box *s, GF_BitStream *bs)
{
GF_TrackFragmentBox *ptr = (GF_TrackFragmentBox *)s;
GF_Err e = gf_isom_box_array_read(s, bs, traf_AddBox);
if (e) return e;
if (!ptr->tfhd) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Missing TrackFragmentHeaderBox \n"));
return GF_ISOM_INVALID_FILE;
}
return GF_OK;
}
GF_Box *traf_New()
{
ISOM_DECL_BOX_ALLOC(GF_TrackFragmentBox, GF_ISOM_BOX_TYPE_TRAF);
tmp->TrackRuns = gf_list_new();
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
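/*
 tfxd is the Smooth Streaming time-extension box, carried as a 'uuid' box with
 internal_4cc GF_ISOM_BOX_UUID_TFXD: it holds the fragment absolute time and
 duration in the track timescale, 64-bit in version 1 and 32-bit otherwise.
*/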
GF_Box *tfxd_New()
{
ISOM_DECL_BOX_ALLOC(GF_MSSTimeExtBox, GF_ISOM_BOX_TYPE_UUID);
tmp->internal_4cc = GF_ISOM_BOX_UUID_TFXD;
return (GF_Box *)tmp;
}
void tfxd_del(GF_Box *s)
{
gf_free(s);
}
GF_Err tfxd_Read(GF_Box *s, GF_BitStream *bs)
{
GF_MSSTimeExtBox *ptr = (GF_MSSTimeExtBox *)s;
if (ptr->size<4) return GF_ISOM_INVALID_FILE;
ptr->version = gf_bs_read_u8(bs);
ptr->flags = gf_bs_read_u24(bs);
ISOM_DECREASE_SIZE(ptr, 4);
if (ptr->version == 0x01) {
ISOM_DECREASE_SIZE(ptr, 16);
ptr->absolute_time_in_track_timescale = gf_bs_read_u64(bs);
ptr->fragment_duration_in_track_timescale = gf_bs_read_u64(bs);
} else {
ISOM_DECREASE_SIZE(ptr, 8);
ptr->absolute_time_in_track_timescale = gf_bs_read_u32(bs);
ptr->fragment_duration_in_track_timescale = gf_bs_read_u32(bs);
}
return GF_OK;
}
GF_Err tfxd_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e = GF_OK;
GF_MSSTimeExtBox *uuid = (GF_MSSTimeExtBox*)s;
e = gf_isom_box_write_header(s, bs);
if (e) return e;
gf_bs_write_u8(bs, 1);
gf_bs_write_u24(bs, 0);
gf_bs_write_u64(bs, uuid->absolute_time_in_track_timescale);
gf_bs_write_u64(bs, uuid->fragment_duration_in_track_timescale);
return GF_OK;
}
GF_Err tfxd_Size(GF_Box *s)
{
s->size += 20;
return GF_OK;
}
GF_Err traf_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_TrackFragmentBox *ptr = (GF_TrackFragmentBox *) s;
if (!s) return GF_BAD_PARAM;
e = gf_isom_box_write_header(s, bs);
if (e) return e;
//Header first
if (ptr->tfhd) {
e = gf_isom_box_write((GF_Box *) ptr->tfhd, bs);
if (e) return e;
}
if (ptr->sub_samples) {
e = gf_isom_box_array_write(s, ptr->sub_samples, bs);
if (e) return e;
}
if (ptr->tfdt) {
e = gf_isom_box_write((GF_Box *) ptr->tfdt, bs);
if (e) return e;
}
if (ptr->sampleGroupsDescription) {
e = gf_isom_box_array_write(s, ptr->sampleGroupsDescription, bs);
if (e) return e;
}
if (ptr->sampleGroups) {
e = gf_isom_box_array_write(s, ptr->sampleGroups, bs);
if (e) return e;
}
if (ptr->sai_sizes) {
e = gf_isom_box_array_write(s, ptr->sai_sizes, bs);
if (e) return e;
}
if (ptr->sai_offsets) {
e = gf_isom_box_array_write(s, ptr->sai_offsets, bs);
if (e) return e;
}
if (ptr->sample_encryption) {
e = gf_isom_box_write((GF_Box *) ptr->sample_encryption, bs);
if (e) return e;
}
e = gf_isom_box_array_write(s, ptr->TrackRuns, bs);
if (e) return e;
//when sdtp is present (smooth-like) write it after the trun box
if (ptr->sdtp) {
e = gf_isom_box_write((GF_Box *) ptr->sdtp, bs);
if (e) return e;
}
//tfxd should be last ...
if (ptr->tfxd) {
e = gf_isom_box_write((GF_Box *) ptr->tfxd, bs);
if (e) return e;
}
return GF_OK;
}
GF_Err traf_Size(GF_Box *s)
{
GF_Err e;
GF_TrackFragmentBox *ptr = (GF_TrackFragmentBox *)s;
if (ptr->tfhd) {
e = gf_isom_box_size((GF_Box *) ptr->tfhd);
if (e) return e;
ptr->size += ptr->tfhd->size;
}
if (ptr->sub_samples) {
e = gf_isom_box_array_size(s, ptr->sub_samples);
if (e) return e;
}
if (ptr->sdtp) {
e = gf_isom_box_size((GF_Box *) ptr->sdtp);
if (e) return e;
ptr->size += ptr->sdtp->size;
}
if (ptr->tfdt) {
e = gf_isom_box_size((GF_Box *) ptr->tfdt);
if (e) return e;
ptr->size += ptr->tfdt->size;
}
if (ptr->sampleGroups) {
e = gf_isom_box_array_size(s, ptr->sampleGroups);
if (e) return e;
}
if (ptr->sampleGroupsDescription) {
e = gf_isom_box_array_size(s, ptr->sampleGroupsDescription);
if (e) return e;
}
if (ptr->sai_sizes) {
e = gf_isom_box_array_size(s, ptr->sai_sizes);
if (e) return e;
}
if (ptr->sai_offsets) {
e = gf_isom_box_array_size(s, ptr->sai_offsets);
if (e) return e;
}
if (ptr->sample_encryption) {
e = gf_isom_box_size((GF_Box *) ptr->sample_encryption);
if (e) return e;
ptr->size += ptr->sample_encryption->size;
}
if (ptr->tfxd) {
e = gf_isom_box_size((GF_Box *)ptr->tfxd);
if (e) return e;
s->size += ptr->tfxd->size;
}
return gf_isom_box_array_size(s, ptr->TrackRuns);
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
#endif /*GPAC_DISABLE_ISOM_FRAGMENTS*/
void trak_del(GF_Box *s)
{
GF_TrackBox *ptr = (GF_TrackBox *) s;
if (ptr == NULL) return;
if (ptr->Header) gf_isom_box_del((GF_Box *)ptr->Header);
if (ptr->udta) gf_isom_box_del((GF_Box *)ptr->udta);
if (ptr->Media) gf_isom_box_del((GF_Box *)ptr->Media);
if (ptr->References) gf_isom_box_del((GF_Box *)ptr->References);
if (ptr->editBox) gf_isom_box_del((GF_Box *)ptr->editBox);
if (ptr->meta) gf_isom_box_del((GF_Box *)ptr->meta);
if (ptr->name) gf_free(ptr->name);
if (ptr->groups) gf_isom_box_del((GF_Box *)ptr->groups);
if (ptr->Aperture) gf_isom_box_del((GF_Box *)ptr->Aperture);
gf_free(ptr);
}
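/*
 Rewrites unrecognized stsd entries of a freshly parsed track as generic sample
 entries (GNRV for visual, GNRA for audio, GNRM otherwise), re-reading their
 child boxes when possible so later code can handle unknown codecs uniformly.
*/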
static void gf_isom_check_sample_desc(GF_TrackBox *trak)
{
GF_BitStream *bs;
GF_UnknownBox *a;
u32 i;
if (!trak->Media || !trak->Media->information) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] Track with no media box !\n" ));
return;
}
if (!trak->Media->information->sampleTable) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] Track with no sample table !\n" ));
trak->Media->information->sampleTable = (GF_SampleTableBox *) gf_isom_box_new(GF_ISOM_BOX_TYPE_STBL);
gf_isom_box_add_for_dump_mode((GF_Box *)trak->Media->information, (GF_Box *)trak->Media->information->sampleTable);
}
if (!trak->Media->information->sampleTable->SampleDescription) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] Track with no sample description box !\n" ));
trak->Media->information->sampleTable->SampleDescription = (GF_SampleDescriptionBox *) gf_isom_box_new(GF_ISOM_BOX_TYPE_STSD);
gf_isom_box_add_for_dump_mode((GF_Box *)trak->Media->information->sampleTable, (GF_Box *)trak->Media->information->sampleTable->SampleDescription);
return;
}
if (!trak->Media->handler) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] Track with no handler box, skipping sample description check\n"));
return;
}
i=0;
while ((a = (GF_UnknownBox*)gf_list_enum(trak->Media->information->sampleTable->SampleDescription->other_boxes, &i))) {
switch (a->type) {
case GF_ISOM_BOX_TYPE_MP4S:
case GF_ISOM_BOX_TYPE_ENCS:
case GF_ISOM_BOX_TYPE_MP4A:
case GF_ISOM_BOX_TYPE_ENCA:
case GF_ISOM_BOX_TYPE_MP4V:
case GF_ISOM_BOX_TYPE_ENCV:
case GF_ISOM_BOX_TYPE_RESV:
case GF_ISOM_SUBTYPE_3GP_AMR:
case GF_ISOM_SUBTYPE_3GP_AMR_WB:
case GF_ISOM_SUBTYPE_3GP_EVRC:
case GF_ISOM_SUBTYPE_3GP_QCELP:
case GF_ISOM_SUBTYPE_3GP_SMV:
case GF_ISOM_SUBTYPE_3GP_H263:
case GF_ISOM_BOX_TYPE_GHNT:
case GF_ISOM_BOX_TYPE_RTP_STSD:
case GF_ISOM_BOX_TYPE_SRTP_STSD:
case GF_ISOM_BOX_TYPE_FDP_STSD:
case GF_ISOM_BOX_TYPE_RRTP_STSD:
case GF_ISOM_BOX_TYPE_RTCP_STSD:
case GF_ISOM_BOX_TYPE_METX:
case GF_ISOM_BOX_TYPE_METT:
case GF_ISOM_BOX_TYPE_STXT:
case GF_ISOM_BOX_TYPE_AVC1:
case GF_ISOM_BOX_TYPE_AVC2:
case GF_ISOM_BOX_TYPE_AVC3:
case GF_ISOM_BOX_TYPE_AVC4:
case GF_ISOM_BOX_TYPE_SVC1:
case GF_ISOM_BOX_TYPE_MVC1:
case GF_ISOM_BOX_TYPE_HVC1:
case GF_ISOM_BOX_TYPE_HEV1:
case GF_ISOM_BOX_TYPE_HVC2:
case GF_ISOM_BOX_TYPE_HEV2:
case GF_ISOM_BOX_TYPE_HVT1:
case GF_ISOM_BOX_TYPE_LHV1:
case GF_ISOM_BOX_TYPE_LHE1:
case GF_ISOM_BOX_TYPE_AV01:
case GF_ISOM_BOX_TYPE_VP08:
case GF_ISOM_BOX_TYPE_VP09:
case GF_ISOM_BOX_TYPE_AV1C:
case GF_ISOM_BOX_TYPE_TX3G:
case GF_ISOM_BOX_TYPE_TEXT:
case GF_ISOM_BOX_TYPE_ENCT:
case GF_ISOM_BOX_TYPE_DIMS:
case GF_ISOM_BOX_TYPE_OPUS:
case GF_ISOM_BOX_TYPE_AC3:
case GF_ISOM_BOX_TYPE_EC3:
case GF_ISOM_BOX_TYPE_LSR1:
case GF_ISOM_BOX_TYPE_WVTT:
case GF_ISOM_BOX_TYPE_STPP:
case GF_ISOM_BOX_TYPE_SBTT:
case GF_ISOM_BOX_TYPE_MP3:
case GF_ISOM_BOX_TYPE_JPEG:
case GF_ISOM_BOX_TYPE_PNG:
case GF_ISOM_BOX_TYPE_JP2K:
case GF_ISOM_BOX_TYPE_MHA1:
case GF_ISOM_BOX_TYPE_MHA2:
case GF_ISOM_BOX_TYPE_MHM1:
case GF_ISOM_BOX_TYPE_MHM2:
case GF_QT_BOX_TYPE_AUDIO_RAW:
case GF_QT_BOX_TYPE_AUDIO_TWOS:
case GF_QT_BOX_TYPE_AUDIO_SOWT:
case GF_QT_BOX_TYPE_AUDIO_FL32:
case GF_QT_BOX_TYPE_AUDIO_FL64:
case GF_QT_BOX_TYPE_AUDIO_IN24:
case GF_QT_BOX_TYPE_AUDIO_IN32:
case GF_QT_BOX_TYPE_AUDIO_ULAW:
case GF_QT_BOX_TYPE_AUDIO_ALAW:
case GF_QT_BOX_TYPE_AUDIO_ADPCM:
case GF_QT_BOX_TYPE_AUDIO_IMA_ADPCM:
case GF_QT_BOX_TYPE_AUDIO_DVCA:
case GF_QT_BOX_TYPE_AUDIO_QDMC:
case GF_QT_BOX_TYPE_AUDIO_QDMC2:
case GF_QT_BOX_TYPE_AUDIO_QCELP:
case GF_QT_BOX_TYPE_AUDIO_kMP3:
continue;
case GF_ISOM_BOX_TYPE_UNKNOWN:
break;
default:
if (gf_box_valid_in_parent((GF_Box *) a, "stsd")) {
continue;
}
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] Unexpected box %s in stsd!\n", gf_4cc_to_str(a->type)));
continue;
}
//we are sure to have an unknown box here
assert(a->type==GF_ISOM_BOX_TYPE_UNKNOWN);
if (!a->data || (a->dataSize<8) ) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] Sample description %s does not have at least 8 bytes!\n", gf_4cc_to_str(a->original_4cc) ));
continue;
}
else if (a->dataSize > a->size) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Sample description %s has wrong data size %d!\n", gf_4cc_to_str(a->original_4cc), a->dataSize));
continue;
}
#define STSD_SWITCH_BOX(_box) \
if (gf_bs_available(bs)) { \
u64 pos = gf_bs_get_position(bs); \
u32 count_subb = 0; \
GF_Err e;\
gf_bs_set_cookie(bs, 1);\
e = gf_isom_box_array_read((GF_Box *) _box, bs, gf_isom_box_add_default); \
count_subb = _box->other_boxes ? gf_list_count(_box->other_boxes) : 0; \
if (!count_subb || e) { \
gf_bs_seek(bs, pos); \
_box->data_size = (u32) gf_bs_available(bs); \
if (_box->data_size) { \
_box->data = a->data; \
a->data = NULL; \
memmove(_box->data, _box->data + pos, _box->data_size); \
} \
} else { \
_box->data_size = 0; \
} \
} \
gf_bs_del(bs); \
if (!_box->data_size && _box->data) { \
gf_free(_box->data); \
_box->data = NULL; \
} \
_box->size = 0; \
_box->EntryType = a->original_4cc; \
gf_list_rem(trak->Media->information->sampleTable->SampleDescription->other_boxes, i-1); \
gf_isom_box_del((GF_Box *)a); \
gf_list_insert(trak->Media->information->sampleTable->SampleDescription->other_boxes, _box, i-1);
/*only process visual or audio*/
switch (trak->Media->handler->handlerType) {
case GF_ISOM_MEDIA_VISUAL:
case GF_ISOM_MEDIA_AUXV:
case GF_ISOM_MEDIA_PICT:
{
GF_GenericVisualSampleEntryBox *genv = (GF_GenericVisualSampleEntryBox *) gf_isom_box_new(GF_ISOM_BOX_TYPE_GNRV);
bs = gf_bs_new(a->data, a->dataSize, GF_BITSTREAM_READ);
genv->size = a->size-8;
gf_isom_video_sample_entry_read((GF_VisualSampleEntryBox *) genv, bs);
STSD_SWITCH_BOX(genv)
}
break;
case GF_ISOM_MEDIA_AUDIO:
{
GF_GenericAudioSampleEntryBox *gena = (GF_GenericAudioSampleEntryBox *) gf_isom_box_new(GF_ISOM_BOX_TYPE_GNRA);
gena->size = a->size-8;
bs = gf_bs_new(a->data, a->dataSize, GF_BITSTREAM_READ);
gf_isom_audio_sample_entry_read((GF_AudioSampleEntryBox *) gena, bs);
STSD_SWITCH_BOX(gena)
}
break;
default:
{
GF_Err e;
GF_GenericSampleEntryBox *genm = (GF_GenericSampleEntryBox *) gf_isom_box_new(GF_ISOM_BOX_TYPE_GNRM);
genm->size = a->size-8;
bs = gf_bs_new(a->data, a->dataSize, GF_BITSTREAM_READ);
e = gf_isom_base_sample_entry_read((GF_SampleEntryBox *)genm, bs);
if (e) {
gf_bs_del(bs);
gf_isom_box_del((GF_Box *)genm);
return;
}
STSD_SWITCH_BOX(genm)
}
break;
}
}
}
GF_Err trak_AddBox(GF_Box *s, GF_Box *a)
{
GF_TrackBox *ptr = (GF_TrackBox *)s;
if (!a) return GF_OK;
switch(a->type) {
case GF_ISOM_BOX_TYPE_TKHD:
if (ptr->Header) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->Header = (GF_TrackHeaderBox *)a;
return GF_OK;
case GF_ISOM_BOX_TYPE_EDTS:
if (ptr->editBox) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->editBox = (GF_EditBox *)a;
return GF_OK;
case GF_ISOM_BOX_TYPE_UDTA:
if (ptr->udta) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->udta = (GF_UserDataBox *)a;
return GF_OK;
case GF_ISOM_BOX_TYPE_META:
if (ptr->meta) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->meta = (GF_MetaBox *)a;
return GF_OK;
case GF_ISOM_BOX_TYPE_TREF:
if (ptr->References) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->References = (GF_TrackReferenceBox *)a;
return GF_OK;
case GF_ISOM_BOX_TYPE_MDIA:
if (ptr->Media) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->Media = (GF_MediaBox *)a;
((GF_MediaBox *)a)->mediaTrack = ptr;
return GF_OK;
case GF_ISOM_BOX_TYPE_TRGR:
if (ptr->groups) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->groups = (GF_TrackGroupBox *)a;
return GF_OK;
case GF_QT_BOX_TYPE_TAPT:
if (ptr->Aperture) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->Aperture = (GF_Box *)a;
return GF_OK;
case GF_ISOM_BOX_TYPE_SENC:
ptr->sample_encryption = (GF_SampleEncryptionBox*)a;
return gf_isom_box_add_default((GF_Box *)ptr, a);
case GF_ISOM_BOX_TYPE_UUID:
if (((GF_UnknownUUIDBox *)a)->internal_4cc == GF_ISOM_BOX_UUID_PSEC) {
ptr->sample_encryption = (GF_SampleEncryptionBox*) a;
return gf_isom_box_add_default((GF_Box *)ptr, a);
}
//fallthrough: non-PSEC UUID boxes get the default handling
default:
return gf_isom_box_add_default(s, a);
}
return GF_OK;
}
GF_Err trak_Read(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
u32 i;
GF_TrackBox *ptr = (GF_TrackBox *)s;
e = gf_isom_box_array_read(s, bs, trak_AddBox);
if (e) return e;
gf_isom_check_sample_desc(ptr);
if (!ptr->Header) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Missing TrackHeaderBox\n"));
return GF_ISOM_INVALID_FILE;
}
if (!ptr->Media) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Missing MediaBox\n"));
return GF_ISOM_INVALID_FILE;
}
if (!ptr->Media->information || !ptr->Media->information->sampleTable) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid MediaBox\n"));
return GF_ISOM_INVALID_FILE;
}
for (i=0; i<gf_list_count(ptr->Media->information->sampleTable->other_boxes); i++) {
GF_Box *a = gf_list_get(ptr->Media->information->sampleTable->other_boxes, i);
if ((a->type ==GF_ISOM_BOX_TYPE_UUID) && (((GF_UUIDBox *)a)->internal_4cc == GF_ISOM_BOX_UUID_PSEC)) {
ptr->sample_encryption = (struct __sample_encryption_box *) a;
break;
}
else if (a->type == GF_ISOM_BOX_TYPE_SENC) {
ptr->sample_encryption = (struct __sample_encryption_box *)a;
break;
}
}
return e;
}
GF_Box *trak_New()
{
ISOM_DECL_BOX_ALLOC(GF_TrackBox, GF_ISOM_BOX_TYPE_TRAK);
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err trak_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_TrackBox *ptr = (GF_TrackBox *)s;
e = gf_isom_box_write_header(s, bs);
if (e) return e;
if (ptr->Header) {
e = gf_isom_box_write((GF_Box *) ptr->Header, bs);
if (e) return e;
}
if (ptr->Aperture) {
e = gf_isom_box_write((GF_Box *) ptr->Aperture, bs);
if (e) return e;
}
if (ptr->References) {
e = gf_isom_box_write((GF_Box *) ptr->References, bs);
if (e) return e;
}
if (ptr->editBox) {
e = gf_isom_box_write((GF_Box *) ptr->editBox, bs);
if (e) return e;
}
if (ptr->Media) {
e = gf_isom_box_write((GF_Box *) ptr->Media, bs);
if (e) return e;
}
if (ptr->meta) {
e = gf_isom_box_write((GF_Box *) ptr->meta, bs);
if (e) return e;
}
if (ptr->groups) {
e = gf_isom_box_write((GF_Box *) ptr->groups, bs);
if (e) return e;
}
if (ptr->udta) {
e = gf_isom_box_write((GF_Box *) ptr->udta, bs);
if (e) return e;
}
return GF_OK;
}
GF_Err trak_Size(GF_Box *s)
{
GF_Err e;
GF_TrackBox *ptr = (GF_TrackBox *)s;
if (ptr->Header) {
e = gf_isom_box_size((GF_Box *) ptr->Header);
if (e) return e;
ptr->size += ptr->Header->size;
}
if (ptr->Aperture) {
e = gf_isom_box_size((GF_Box *) ptr->Aperture);
if (e) return e;
ptr->size += ptr->Aperture->size;
}
if (ptr->udta) {
e = gf_isom_box_size((GF_Box *) ptr->udta);
if (e) return e;
ptr->size += ptr->udta->size;
}
if (ptr->References) {
e = gf_isom_box_size((GF_Box *) ptr->References);
if (e) return e;
ptr->size += ptr->References->size;
}
if (ptr->editBox) {
e = gf_isom_box_size((GF_Box *) ptr->editBox);
if (e) return e;
ptr->size += ptr->editBox->size;
}
if (ptr->Media) {
e = gf_isom_box_size((GF_Box *) ptr->Media);
if (e) return e;
ptr->size += ptr->Media->size;
}
if (ptr->meta) {
e = gf_isom_box_size((GF_Box *) ptr->meta);
if (e) return e;
ptr->size += ptr->meta->size;
}
if (ptr->groups) {
e = gf_isom_box_size((GF_Box *) ptr->groups);
if (e) return e;
ptr->size += ptr->groups->size;
}
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
void stri_del(GF_Box *s)
{
GF_SubTrackInformationBox *ptr = (GF_SubTrackInformationBox *)s;
if (ptr == NULL) return;
if (ptr->attribute_list) gf_free(ptr->attribute_list);
gf_free(ptr);
}
GF_Err stri_Read(GF_Box *s, GF_BitStream *bs)
{
size_t i;
GF_SubTrackInformationBox *ptr = (GF_SubTrackInformationBox *)s;
ptr->switch_group = gf_bs_read_u16(bs);
ptr->alternate_group = gf_bs_read_u16(bs);
ptr->sub_track_id = gf_bs_read_u32(bs);
ISOM_DECREASE_SIZE(ptr, 8);
ptr->attribute_count = ptr->size / 4;
GF_SAFE_ALLOC_N(ptr->attribute_list, (size_t)ptr->attribute_count, u32);
if (!ptr->attribute_list) return GF_OUT_OF_MEM;
for (i = 0; i < ptr->attribute_count; i++) {
ptr->attribute_list[i] = gf_bs_read_u32(bs);
}
return GF_OK;
}
GF_Box *stri_New()
{
ISOM_DECL_BOX_ALLOC(GF_SubTrackInformationBox, GF_ISOM_BOX_TYPE_STRI);
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err stri_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
u32 i;
GF_SubTrackInformationBox *ptr = (GF_SubTrackInformationBox *)s;
e = gf_isom_full_box_write(s, bs);
if (e) return e;
gf_bs_write_u16(bs, ptr->switch_group);
gf_bs_write_u16(bs, ptr->alternate_group);
gf_bs_write_u32(bs, ptr->sub_track_id);
for (i = 0; i < ptr->attribute_count; i++) {
gf_bs_write_u32(bs, ptr->attribute_list[i]);
}
return GF_OK;
}
GF_Err stri_Size(GF_Box *s)
{
GF_SubTrackInformationBox *ptr = (GF_SubTrackInformationBox *)s;
ptr->size += 8 + 4 * ptr->attribute_count;
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
void stsg_del(GF_Box *s)
{
GF_SubTrackSampleGroupBox *ptr = (GF_SubTrackSampleGroupBox *)s;
if (ptr == NULL) return;
if (ptr->group_description_index) gf_free(ptr->group_description_index);
gf_free(ptr);
}
GF_Err stsg_Read(GF_Box *s, GF_BitStream *bs)
{
u32 i;
GF_SubTrackSampleGroupBox *ptr = (GF_SubTrackSampleGroupBox *)s;
ISOM_DECREASE_SIZE(s, 6);
ptr->grouping_type = gf_bs_read_u32(bs);
ptr->nb_groups = gf_bs_read_u16(bs);
ISOM_DECREASE_SIZE(s, ptr->nb_groups*4);
GF_SAFE_ALLOC_N(ptr->group_description_index, ptr->nb_groups, u32);
if (!ptr->group_description_index) return GF_OUT_OF_MEM;
for (i = 0; i < ptr->nb_groups; i++) {
ptr->group_description_index[i] = gf_bs_read_u32(bs);
}
return GF_OK;
}
GF_Box *stsg_New()
{
ISOM_DECL_BOX_ALLOC(GF_SubTrackSampleGroupBox, GF_ISOM_BOX_TYPE_STSG);
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err stsg_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
u32 i;
GF_SubTrackSampleGroupBox *ptr = (GF_SubTrackSampleGroupBox *)s;
e = gf_isom_full_box_write(s, bs);
if (e) return e;
gf_bs_write_u32(bs, ptr->grouping_type);
gf_bs_write_u16(bs, ptr->nb_groups);
for (i = 0; i < ptr->nb_groups; i++) {
gf_bs_write_u32(bs, ptr->group_description_index[i]);
}
return GF_OK;
}
GF_Err stsg_Size(GF_Box *s)
{
GF_SubTrackSampleGroupBox *ptr = (GF_SubTrackSampleGroupBox *)s;
ptr->size += 6 + 4 * ptr->nb_groups;
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
void strk_del(GF_Box *s)
{
GF_SubTrackBox *ptr = (GF_SubTrackBox *)s;
if (ptr == NULL) return;
if (ptr->info) gf_isom_box_del((GF_Box *)ptr->info);
gf_free(ptr);
}
GF_Err strk_AddBox(GF_Box *s, GF_Box *a)
{
GF_SubTrackBox *ptr = (GF_SubTrackBox *)s;
if (!a) return GF_OK;
switch (a->type) {
case GF_ISOM_BOX_TYPE_STRI:
if (ptr->info) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->info = (GF_SubTrackInformationBox *)a;
return GF_OK;
case GF_ISOM_BOX_TYPE_STRD:
if (ptr->strd) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->strd = a;
return GF_OK;
default:
return gf_isom_box_add_default(s, a);
}
return GF_OK;
}
GF_Err strk_Read(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_SubTrackBox *ptr = (GF_SubTrackBox *)s;
e = gf_isom_box_array_read(s, bs, strk_AddBox);
if (e) return e;
if (!ptr->info) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Missing SubTrackInformationBox\n"));
return GF_ISOM_INVALID_FILE;
}
return e;
}
GF_Box *strk_New()
{
ISOM_DECL_BOX_ALLOC(GF_SubTrackBox, GF_ISOM_BOX_TYPE_STRK);
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err strk_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_SubTrackBox *ptr = (GF_SubTrackBox *)s;
e = gf_isom_box_write_header(s, bs);
if (e) return e;
if (ptr->info) {
e = gf_isom_box_write((GF_Box *)ptr->info, bs);
if (e) return e;
}
return GF_OK;
}
GF_Err strk_Size(GF_Box *s)
{
GF_Err e;
GF_SubTrackBox *ptr = (GF_SubTrackBox *)s;
if (ptr->info) {
e = gf_isom_box_size((GF_Box *)ptr->info);
if (e) return e;
ptr->size += ptr->info->size;
}
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
GF_Err tref_AddBox(GF_Box *ptr, GF_Box *a)
{
return gf_isom_box_add_default(ptr, a);
}
void tref_del(GF_Box *s)
{
GF_TrackReferenceBox *ptr = (GF_TrackReferenceBox *)s;
if (ptr == NULL) return;
gf_free(ptr);
}
GF_Err tref_Read(GF_Box *s, GF_BitStream *bs)
{
return gf_isom_box_array_read_ex(s, bs, gf_isom_box_add_default, s->type);
}
GF_Box *tref_New()
{
ISOM_DECL_BOX_ALLOC(GF_TrackReferenceBox, GF_ISOM_BOX_TYPE_TREF);
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err tref_Write(GF_Box *s, GF_BitStream *bs)
{
// GF_TrackReferenceBox *ptr = (GF_TrackReferenceBox *)s;
return gf_isom_box_write_header(s, bs);
}
GF_Err tref_Size(GF_Box *s)
{
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
void reftype_del(GF_Box *s)
{
GF_TrackReferenceTypeBox *ptr = (GF_TrackReferenceTypeBox *)s;
if (!ptr) return;
if (ptr->trackIDs) gf_free(ptr->trackIDs);
gf_free(ptr);
}
GF_Err reftype_Read(GF_Box *s, GF_BitStream *bs)
{
u32 bytesToRead;
u32 i;
GF_TrackReferenceTypeBox *ptr = (GF_TrackReferenceTypeBox *)s;
bytesToRead = (u32) (ptr->size);
if (!bytesToRead) return GF_OK;
ptr->trackIDCount = (u32) (bytesToRead) / sizeof(u32);
ptr->trackIDs = (u32 *) gf_malloc(ptr->trackIDCount * sizeof(u32));
if (!ptr->trackIDs) return GF_OUT_OF_MEM;
for (i = 0; i < ptr->trackIDCount; i++) {
ptr->trackIDs[i] = gf_bs_read_u32(bs);
}
return GF_OK;
}
GF_Box *reftype_New()
{
ISOM_DECL_BOX_ALLOC(GF_TrackReferenceTypeBox, GF_ISOM_BOX_TYPE_REFT);
return (GF_Box *)tmp;
}
GF_Err reftype_AddRefTrack(GF_TrackReferenceTypeBox *ref, u32 trackID, u16 *outRefIndex)
{
u32 i;
if (!ref || !trackID) return GF_BAD_PARAM;
if (outRefIndex) *outRefIndex = 0;
//don't add the dependency if it is already present
for (i = 0; i < ref->trackIDCount; i++) {
if (ref->trackIDs[i] == trackID) {
if (outRefIndex) *outRefIndex = i+1;
return GF_OK;
}
}
{
u32 *new_ids = (u32 *) gf_realloc(ref->trackIDs, (ref->trackIDCount + 1) * sizeof(u32) );
if (!new_ids) return GF_OUT_OF_MEM;
ref->trackIDs = new_ids;
}
ref->trackIDs[ref->trackIDCount] = trackID;
ref->trackIDCount++;
if (outRefIndex) *outRefIndex = ref->trackIDCount;
return GF_OK;
}
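/*
 Usage sketch for reftype_AddRefTrack (ref_box and track_id are placeholders):
 the returned index is 1-based, and adding a trackID already present is a no-op
 that still reports its index.

	u16 ref_index = 0;
	GF_Err e = reftype_AddRefTrack(ref_box, track_id, &ref_index);
	if (e) return e;
	//ref_box->trackIDs[ref_index-1] == track_id
*/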
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err reftype_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
u32 i;
GF_TrackReferenceTypeBox *ptr = (GF_TrackReferenceTypeBox *)s;
ptr->type = ptr->reference_type;
if (!ptr->trackIDCount) return GF_OK;
e = gf_isom_box_write_header(s, bs);
ptr->type = GF_ISOM_BOX_TYPE_REFT;
if (e) return e;
for (i = 0; i < ptr->trackIDCount; i++) {
gf_bs_write_u32(bs, ptr->trackIDs[i]);
}
return GF_OK;
}
GF_Err reftype_Size(GF_Box *s)
{
GF_TrackReferenceTypeBox *ptr = (GF_TrackReferenceTypeBox *)s;
if (!ptr->trackIDCount)
ptr->size=0;
else
ptr->size += (ptr->trackIDCount * sizeof(u32));
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
#ifndef GPAC_DISABLE_ISOM_FRAGMENTS
void trex_del(GF_Box *s)
{
GF_TrackExtendsBox *ptr = (GF_TrackExtendsBox *)s;
if (ptr == NULL) return;
gf_free(ptr);
}
GF_Err trex_Read(GF_Box *s, GF_BitStream *bs)
{
GF_TrackExtendsBox *ptr = (GF_TrackExtendsBox *)s;
ptr->trackID = gf_bs_read_u32(bs);
ptr->def_sample_desc_index = gf_bs_read_u32(bs);
ptr->def_sample_duration = gf_bs_read_u32(bs);
ptr->def_sample_size = gf_bs_read_u32(bs);
ptr->def_sample_flags = gf_bs_read_u32(bs);
return GF_OK;
}
GF_Box *trex_New()
{
ISOM_DECL_BOX_ALLOC(GF_TrackExtendsBox, GF_ISOM_BOX_TYPE_TREX);
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err trex_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_TrackExtendsBox *ptr = (GF_TrackExtendsBox *) s;
if (!s) return GF_BAD_PARAM;
e = gf_isom_full_box_write(s, bs);
if (e) return e;
gf_bs_write_u32(bs, ptr->trackID);
//we always write 1 in trex default sample desc as using 0 breaks chrome/opera/...
gf_bs_write_u32(bs, ptr->def_sample_desc_index ? ptr->def_sample_desc_index : 1);
gf_bs_write_u32(bs, ptr->def_sample_duration);
gf_bs_write_u32(bs, ptr->def_sample_size);
gf_bs_write_u32(bs, ptr->def_sample_flags);
return GF_OK;
}
GF_Err trex_Size(GF_Box *s)
{
GF_TrackExtendsBox *ptr = (GF_TrackExtendsBox *)s;
ptr->size += 20;
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
void trep_del(GF_Box *s)
{
GF_TrackExtensionPropertiesBox *ptr = (GF_TrackExtensionPropertiesBox *)s;
if (ptr == NULL) return;
gf_free(ptr);
}
GF_Err trep_Read(GF_Box *s, GF_BitStream *bs)
{
GF_TrackExtensionPropertiesBox *ptr = (GF_TrackExtensionPropertiesBox *)s;
ptr->trackID = gf_bs_read_u32(bs);
ISOM_DECREASE_SIZE(ptr, 4);
return gf_isom_box_array_read(s, bs, gf_isom_box_add_default);
}
GF_Box *trep_New()
{
ISOM_DECL_BOX_ALLOC(GF_TrackExtensionPropertiesBox, GF_ISOM_BOX_TYPE_TREP);
tmp->other_boxes = gf_list_new();
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err trep_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_TrackExtensionPropertiesBox *ptr = (GF_TrackExtensionPropertiesBox *) s;
if (!s) return GF_BAD_PARAM;
e = gf_isom_full_box_write(s, bs);
if (e) return e;
gf_bs_write_u32(bs, ptr->trackID);
return GF_OK;
}
GF_Err trep_Size(GF_Box *s)
{
GF_TrackExtensionPropertiesBox *ptr = (GF_TrackExtensionPropertiesBox *)s;
ptr->size += 4;
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
#endif /*GPAC_DISABLE_ISOM_FRAGMENTS*/
#ifndef GPAC_DISABLE_ISOM_FRAGMENTS
void trun_del(GF_Box *s)
{
GF_TrunEntry *p;
GF_TrackFragmentRunBox *ptr = (GF_TrackFragmentRunBox *)s;
if (ptr == NULL) return;
while (gf_list_count(ptr->entries)) {
p = (GF_TrunEntry*)gf_list_get(ptr->entries, 0);
gf_list_rem(ptr->entries, 0);
gf_free(p);
}
gf_list_del(ptr->entries);
if (ptr->cache) gf_bs_del(ptr->cache);
gf_free(ptr);
}
GF_Err trun_Read(GF_Box *s, GF_BitStream *bs)
{
u32 i;
GF_TrunEntry *p;
GF_TrackFragmentRunBox *ptr = (GF_TrackFragmentRunBox *)s;
//check this is a good file
if ((ptr->flags & GF_ISOM_TRUN_FIRST_FLAG) && (ptr->flags & GF_ISOM_TRUN_FLAGS))
return GF_ISOM_INVALID_FILE;
ptr->sample_count = gf_bs_read_u32(bs);
ISOM_DECREASE_SIZE(ptr, 4);
//The rest depends on the flags
if (ptr->flags & GF_ISOM_TRUN_DATA_OFFSET) {
ptr->data_offset = gf_bs_read_u32(bs);
ISOM_DECREASE_SIZE(ptr, 4);
}
if (ptr->flags & GF_ISOM_TRUN_FIRST_FLAG) {
ptr->first_sample_flags = gf_bs_read_u32(bs);
ISOM_DECREASE_SIZE(ptr, 4);
}
if (! (ptr->flags & (GF_ISOM_TRUN_DURATION | GF_ISOM_TRUN_SIZE | GF_ISOM_TRUN_FLAGS | GF_ISOM_TRUN_CTS_OFFSET) ) ) {
GF_SAFEALLOC(p, GF_TrunEntry);
if (!p) return GF_OUT_OF_MEM;
p->nb_pack = ptr->sample_count;
gf_list_add(ptr->entries, p);
return GF_OK;
}
//read each entry (even though nothing may be written)
for (i=0; i<ptr->sample_count; i++) {
u32 trun_size = 0;
p = (GF_TrunEntry *) gf_malloc(sizeof(GF_TrunEntry));
if (!p) return GF_OUT_OF_MEM;
memset(p, 0, sizeof(GF_TrunEntry));
if (ptr->flags & GF_ISOM_TRUN_DURATION) {
p->Duration = gf_bs_read_u32(bs);
trun_size += 4;
}
if (ptr->flags & GF_ISOM_TRUN_SIZE) {
p->size = gf_bs_read_u32(bs);
trun_size += 4;
}
//SHOULDN'T BE USED IF GF_ISOM_TRUN_FIRST_FLAG IS DEFINED
if (ptr->flags & GF_ISOM_TRUN_FLAGS) {
p->flags = gf_bs_read_u32(bs);
trun_size += 4;
}
if (ptr->flags & GF_ISOM_TRUN_CTS_OFFSET) {
if (ptr->version==0) {
p->CTS_Offset = (u32) gf_bs_read_u32(bs);
} else {
p->CTS_Offset = (s32) gf_bs_read_u32(bs);
}
trun_size += 4;
}
gf_list_add(ptr->entries, p);
ISOM_DECREASE_SIZE(ptr, trun_size);
}
return GF_OK;
}
GF_Box *trun_New()
{
ISOM_DECL_BOX_ALLOC(GF_TrackFragmentRunBox, GF_ISOM_BOX_TYPE_TRUN);
tmp->entries = gf_list_new();
//NO FLAGS SET BY DEFAULT
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err trun_Write(GF_Box *s, GF_BitStream *bs)
{
GF_TrunEntry *p;
GF_Err e;
u32 i, count;
GF_TrackFragmentRunBox *ptr = (GF_TrackFragmentRunBox *) s;
if (!s) return GF_BAD_PARAM;
e = gf_isom_full_box_write(s, bs);
if (e) return e;
gf_bs_write_u32(bs, ptr->sample_count);
//The rest depends on the flags
if (ptr->flags & GF_ISOM_TRUN_DATA_OFFSET) {
gf_bs_write_u32(bs, ptr->data_offset);
}
if (ptr->flags & GF_ISOM_TRUN_FIRST_FLAG) {
gf_bs_write_u32(bs, ptr->first_sample_flags);
}
if (! (ptr->flags & (GF_ISOM_TRUN_DURATION | GF_ISOM_TRUN_SIZE | GF_ISOM_TRUN_FLAGS | GF_ISOM_TRUN_CTS_OFFSET) ) ) {
return GF_OK;
}
count = gf_list_count(ptr->entries);
for (i=0; i<count; i++) {
p = (GF_TrunEntry*)gf_list_get(ptr->entries, i);
if (ptr->flags & GF_ISOM_TRUN_DURATION) {
gf_bs_write_u32(bs, p->Duration);
}
if (ptr->flags & GF_ISOM_TRUN_SIZE) {
gf_bs_write_u32(bs, p->size);
}
//SHOULDN'T BE USED IF GF_ISOM_TRUN_FIRST_FLAG IS DEFINED
if (ptr->flags & GF_ISOM_TRUN_FLAGS) {
gf_bs_write_u32(bs, p->flags);
}
if (ptr->flags & GF_ISOM_TRUN_CTS_OFFSET) {
if (ptr->version==0) {
gf_bs_write_u32(bs, p->CTS_Offset);
} else {
gf_bs_write_u32(bs, (u32) p->CTS_Offset);
}
}
}
return GF_OK;
}
GF_Err trun_Size(GF_Box *s)
{
u32 i, count;
GF_TrackFragmentRunBox *ptr = (GF_TrackFragmentRunBox *)s;
ptr->size += 4;
//The rest depends on the flags
if (ptr->flags & GF_ISOM_TRUN_DATA_OFFSET) ptr->size += 4;
if (ptr->flags & GF_ISOM_TRUN_FIRST_FLAG) ptr->size += 4;
if (! (ptr->flags & (GF_ISOM_TRUN_DURATION | GF_ISOM_TRUN_SIZE | GF_ISOM_TRUN_FLAGS | GF_ISOM_TRUN_CTS_OFFSET) ) ) {
return GF_OK;
}
//if nothing to do, this will be skipped automatically
count = gf_list_count(ptr->entries);
for (i=0; i<count; i++) {
if (ptr->flags & GF_ISOM_TRUN_DURATION) ptr->size += 4;
if (ptr->flags & GF_ISOM_TRUN_SIZE) ptr->size += 4;
//SHOULDN'T BE USED IF GF_ISOM_TRUN_FIRST_FLAG IS DEFINED
if (ptr->flags & GF_ISOM_TRUN_FLAGS) ptr->size += 4;
if (ptr->flags & GF_ISOM_TRUN_CTS_OFFSET) ptr->size += 4;
}
return GF_OK;
}
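/*
 Worked example for trun_Size: with flags = GF_ISOM_TRUN_DATA_OFFSET |
 GF_ISOM_TRUN_SIZE | GF_ISOM_TRUN_CTS_OFFSET and 100 entries, the body is
 4 (sample_count) + 4 (data_offset) + 100 * (4 + 4) = 808 bytes, before the
 full-box header added by the generic size code.
*/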
#endif /*GPAC_DISABLE_ISOM_WRITE*/
#endif /*GPAC_DISABLE_ISOM_FRAGMENTS*/
void tsro_del(GF_Box *s)
{
GF_TimeOffHintEntryBox *tsro = (GF_TimeOffHintEntryBox *)s;
gf_free(tsro);
}
GF_Err tsro_Read(GF_Box *s, GF_BitStream *bs)
{
GF_TimeOffHintEntryBox *ptr = (GF_TimeOffHintEntryBox *)s;
ptr->TimeOffset = gf_bs_read_u32(bs);
return GF_OK;
}
GF_Box *tsro_New()
{
ISOM_DECL_BOX_ALLOC(GF_TimeOffHintEntryBox, GF_ISOM_BOX_TYPE_TSRO);
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err tsro_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_TimeOffHintEntryBox *ptr = (GF_TimeOffHintEntryBox *)s;
if (ptr == NULL) return GF_BAD_PARAM;
e = gf_isom_box_write_header(s, bs);
if (e) return e;
gf_bs_write_u32(bs, ptr->TimeOffset);
return GF_OK;
}
GF_Err tsro_Size(GF_Box *s)
{
s->size += 4;
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
void udta_del(GF_Box *s)
{
u32 i;
GF_UserDataMap *map;
GF_UserDataBox *ptr = (GF_UserDataBox *)s;
if (ptr == NULL) return;
i=0;
while ((map = (GF_UserDataMap *)gf_list_enum(ptr->recordList, &i))) {
gf_isom_box_array_del(map->other_boxes);
gf_free(map);
}
gf_list_del(ptr->recordList);
gf_free(ptr);
}
GF_UserDataMap *udta_getEntry(GF_UserDataBox *ptr, u32 box_type, bin128 *uuid)
{
u32 i;
GF_UserDataMap *map;
if (ptr == NULL) return NULL;
i=0;
while ((map = (GF_UserDataMap *)gf_list_enum(ptr->recordList, &i))) {
if (map->boxType == box_type) {
if ((box_type != GF_ISOM_BOX_TYPE_UUID) || !uuid) return map;
if (!memcmp(map->uuid, *uuid, 16)) return map;
}
}
return NULL;
}
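/*
 udta_getEntry maps a box 4CC (plus a UUID for 'uuid' boxes) to its GF_UserDataMap.
 Minimal lookup sketch, with udta and some_type placeholders:

	GF_UserDataMap *map = udta_getEntry(udta, some_type, NULL);
	u32 nb = map ? gf_list_count(map->other_boxes) : 0;
*/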
GF_Err udta_AddBox(GF_Box *s, GF_Box *a)
{
GF_Err e;
u32 box_type;
GF_UserDataMap *map;
GF_UserDataBox *ptr = (GF_UserDataBox *)s;
if (!ptr) return GF_BAD_PARAM;
if (!a) return GF_OK;
/* for unknown udta boxes, we reference them by their original box type */
box_type = a->type;
if (box_type == GF_ISOM_BOX_TYPE_UNKNOWN) {
GF_UnknownBox* unkn = (GF_UnknownBox *)a;
if (unkn)
box_type = unkn->original_4cc;
}
map = udta_getEntry(ptr, box_type, (a->type==GF_ISOM_BOX_TYPE_UUID) ? & ((GF_UUIDBox *)a)->uuid : NULL);
if (map == NULL) {
map = (GF_UserDataMap *) gf_malloc(sizeof(GF_UserDataMap));
if (map == NULL) return GF_OUT_OF_MEM;
memset(map, 0, sizeof(GF_UserDataMap));
map->boxType = box_type;
if (a->type == GF_ISOM_BOX_TYPE_UUID)
memcpy(map->uuid, ((GF_UUIDBox *)a)->uuid, 16);
map->other_boxes = gf_list_new();
if (!map->other_boxes) {
gf_free(map);
return GF_OUT_OF_MEM;
}
e = gf_list_add(ptr->recordList, map);
if (e) return e;
}
return gf_list_add(map->other_boxes, a);
}
GF_Err udta_Read(GF_Box *s, GF_BitStream *bs)
{
GF_Err e = gf_isom_box_array_read(s, bs, udta_AddBox);
if (e) return e;
if (s->size==4) {
u32 val = gf_bs_read_u32(bs);
s->size = 0;
if (val) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] udta has 4 remaining bytes set to %08X but they should be 0\n", val));
}
}
return GF_OK;
}
GF_Box *udta_New()
{
ISOM_DECL_BOX_ALLOC(GF_UserDataBox, GF_ISOM_BOX_TYPE_UDTA);
tmp->recordList = gf_list_new();
if (!tmp->recordList) {
gf_free(tmp);
return NULL;
}
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err udta_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
u32 i;
GF_UserDataMap *map;
GF_UserDataBox *ptr = (GF_UserDataBox *)s;
e = gf_isom_box_write_header(s, bs);
if (e) return e;
i=0;
while ((map = (GF_UserDataMap *)gf_list_enum(ptr->recordList, &i))) {
//warning: here we are not passing the actual "parent" of the list
//but the UDTA box. The parent itself is not a box, so we don't care about it
e = gf_isom_box_array_write(s, map->other_boxes, bs);
if (e) return e;
}
return GF_OK;
}
GF_Err udta_Size(GF_Box *s)
{
GF_Err e;
u32 i;
GF_UserDataMap *map;
GF_UserDataBox *ptr = (GF_UserDataBox *)s;
i=0;
while ((map = (GF_UserDataMap *)gf_list_enum(ptr->recordList, &i))) {
//warning: here we are not passing the actual "parent" of the list
//but the UDTA box. The parent itself is not a box, so we don't care about it
e = gf_isom_box_array_size(s, map->other_boxes);
if (e) return e;
}
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
void vmhd_del(GF_Box *s)
{
GF_VideoMediaHeaderBox *ptr = (GF_VideoMediaHeaderBox *)s;
if (ptr == NULL) return;
gf_free(ptr);
}
GF_Err vmhd_Read(GF_Box *s, GF_BitStream *bs)
{
GF_VideoMediaHeaderBox *ptr = (GF_VideoMediaHeaderBox *)s;
ptr->reserved = gf_bs_read_u64(bs);
return GF_OK;
}
GF_Box *vmhd_New()
{
ISOM_DECL_BOX_ALLOC(GF_VideoMediaHeaderBox, GF_ISOM_BOX_TYPE_VMHD);
tmp->flags = 1;
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err vmhd_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_VideoMediaHeaderBox *ptr = (GF_VideoMediaHeaderBox *)s;
e = gf_isom_full_box_write(s, bs);
if (e) return e;
gf_bs_write_u64(bs, ptr->reserved);
return GF_OK;
}
GF_Err vmhd_Size(GF_Box *s)
{
GF_VideoMediaHeaderBox *ptr = (GF_VideoMediaHeaderBox *)s;
ptr->size += 8;
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
void void_del(GF_Box *s)
{
gf_free(s);
}
GF_Err void_Read(GF_Box *s, GF_BitStream *bs)
{
if (s->size) return GF_ISOM_INVALID_FILE;
return GF_OK;
}
GF_Box *void_New()
{
ISOM_DECL_BOX_ALLOC(GF_Box, GF_ISOM_BOX_TYPE_VOID);
return tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err void_Write(GF_Box *s, GF_BitStream *bs)
{
gf_bs_write_u32(bs, 0);
return GF_OK;
}
GF_Err void_Size(GF_Box *s)
{
s->size = 4;
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
GF_Box *pdin_New()
{
ISOM_DECL_BOX_ALLOC(GF_ProgressiveDownloadBox, GF_ISOM_BOX_TYPE_PDIN);
return (GF_Box *)tmp;
}
void pdin_del(GF_Box *s)
{
GF_ProgressiveDownloadBox *ptr = (GF_ProgressiveDownloadBox*)s;
if (ptr == NULL) return;
if (ptr->rates) gf_free(ptr->rates);
if (ptr->times) gf_free(ptr->times);
gf_free(ptr);
}
GF_Err pdin_Read(GF_Box *s, GF_BitStream *bs)
{
u32 i;
GF_ProgressiveDownloadBox *ptr = (GF_ProgressiveDownloadBox*)s;
ptr->count = (u32) (ptr->size) / 8;
ptr->rates = (u32*)gf_malloc(sizeof(u32)*ptr->count);
ptr->times = (u32*)gf_malloc(sizeof(u32)*ptr->count);
if (!ptr->rates || !ptr->times) return GF_OUT_OF_MEM;
for (i=0; i<ptr->count; i++) {
ptr->rates[i] = gf_bs_read_u32(bs);
ptr->times[i] = gf_bs_read_u32(bs);
}
return GF_OK;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err pdin_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
u32 i;
GF_ProgressiveDownloadBox *ptr = (GF_ProgressiveDownloadBox *)s;
e = gf_isom_full_box_write(s, bs);
if (e) return e;
for (i=0; i<ptr->count; i++) {
gf_bs_write_u32(bs, ptr->rates[i]);
gf_bs_write_u32(bs, ptr->times[i]);
}
return GF_OK;
}
GF_Err pdin_Size(GF_Box *s)
{
GF_ProgressiveDownloadBox *ptr = (GF_ProgressiveDownloadBox *)s;
ptr->size += 8*ptr->count;
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
GF_Box *sdtp_New()
{
ISOM_DECL_BOX_ALLOC(GF_SampleDependencyTypeBox, GF_ISOM_BOX_TYPE_SDTP);
return (GF_Box *)tmp;
}
void sdtp_del(GF_Box *s)
{
GF_SampleDependencyTypeBox *ptr = (GF_SampleDependencyTypeBox*)s;
if (ptr == NULL) return;
if (ptr->sample_info) gf_free(ptr->sample_info);
gf_free(ptr);
}
GF_Err sdtp_Read(GF_Box *s, GF_BitStream *bs)
{
GF_SampleDependencyTypeBox *ptr = (GF_SampleDependencyTypeBox*)s;
/*out-of-order sdtp, assume no padding at the end*/
if (!ptr->sampleCount) ptr->sampleCount = (u32) ptr->size;
else if (ptr->sampleCount > (u32) ptr->size) return GF_ISOM_INVALID_FILE;
ptr->sample_info = (u8 *) gf_malloc(sizeof(u8)*ptr->sampleCount);
if (!ptr->sample_info) return GF_OUT_OF_MEM;
gf_bs_read_data(bs, (char*)ptr->sample_info, ptr->sampleCount);
ISOM_DECREASE_SIZE(ptr, ptr->sampleCount);
return GF_OK;
}
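/*
 Each sample_info byte packs four 2-bit fields defined by ISO/IEC 14496-12
 (is_leading, sample_depends_on, sample_is_depended_on, sample_has_redundancy);
 the parser keeps the byte opaque and leaves unpacking to callers.
*/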
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err sdtp_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_SampleDependencyTypeBox *ptr = (GF_SampleDependencyTypeBox *)s;
e = gf_isom_full_box_write(s, bs);
if (e) return e;
gf_bs_write_data(bs, (char*)ptr->sample_info, ptr->sampleCount);
return GF_OK;
}
GF_Err sdtp_Size(GF_Box *s)
{
GF_SampleDependencyTypeBox *ptr = (GF_SampleDependencyTypeBox *)s;
ptr->size += ptr->sampleCount;
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
GF_Box *pasp_New()
{
ISOM_DECL_BOX_ALLOC(GF_PixelAspectRatioBox, GF_ISOM_BOX_TYPE_PASP);
return (GF_Box *)tmp;
}
void pasp_del(GF_Box *s)
{
GF_PixelAspectRatioBox *ptr = (GF_PixelAspectRatioBox*)s;
if (ptr == NULL) return;
gf_free(ptr);
}
GF_Err pasp_Read(GF_Box *s, GF_BitStream *bs)
{
GF_PixelAspectRatioBox *ptr = (GF_PixelAspectRatioBox*)s;
ptr->hSpacing = gf_bs_read_u32(bs);
ptr->vSpacing = gf_bs_read_u32(bs);
ISOM_DECREASE_SIZE(ptr, 8);
return GF_OK;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err pasp_Write(GF_Box *s, GF_BitStream *bs)
{
GF_PixelAspectRatioBox *ptr = (GF_PixelAspectRatioBox *)s;
GF_Err e = gf_isom_box_write_header(s, bs);
if (e) return e;
gf_bs_write_u32(bs, ptr->hSpacing);
gf_bs_write_u32(bs, ptr->vSpacing);
return GF_OK;
}
GF_Err pasp_Size(GF_Box *s)
{
s->size += 8;
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
GF_Box *clap_New()
{
ISOM_DECL_BOX_ALLOC(GF_CleanApertureBox, GF_ISOM_BOX_TYPE_CLAP);
return (GF_Box *)tmp;
}
void clap_del(GF_Box *s)
{
GF_CleanApertureBox *ptr = (GF_CleanApertureBox*)s;
if (ptr == NULL) return;
gf_free(ptr);
}
GF_Err clap_Read(GF_Box *s, GF_BitStream *bs)
{
GF_CleanApertureBox *ptr = (GF_CleanApertureBox*)s;
ISOM_DECREASE_SIZE(ptr, 32);
ptr->cleanApertureWidthN = gf_bs_read_u32(bs);
ptr->cleanApertureWidthD = gf_bs_read_u32(bs);
ptr->cleanApertureHeightN = gf_bs_read_u32(bs);
ptr->cleanApertureHeightD = gf_bs_read_u32(bs);
ptr->horizOffN = gf_bs_read_u32(bs);
ptr->horizOffD = gf_bs_read_u32(bs);
ptr->vertOffN = gf_bs_read_u32(bs);
ptr->vertOffD = gf_bs_read_u32(bs);
return GF_OK;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err clap_Write(GF_Box *s, GF_BitStream *bs)
{
GF_CleanApertureBox *ptr = (GF_CleanApertureBox *)s;
GF_Err e = gf_isom_box_write_header(s, bs);
if (e) return e;
gf_bs_write_u32(bs, ptr->cleanApertureWidthN);
gf_bs_write_u32(bs, ptr->cleanApertureWidthD);
gf_bs_write_u32(bs, ptr->cleanApertureHeightN);
gf_bs_write_u32(bs, ptr->cleanApertureHeightD);
gf_bs_write_u32(bs, ptr->horizOffN);
gf_bs_write_u32(bs, ptr->horizOffD);
gf_bs_write_u32(bs, ptr->vertOffN);
gf_bs_write_u32(bs, ptr->vertOffD);
return GF_OK;
}
GF_Err clap_Size(GF_Box *s)
{
s->size += 32;
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
GF_Box *metx_New()
{
//type is overridden by the box constructor
ISOM_DECL_BOX_ALLOC(GF_MetaDataSampleEntryBox, GF_ISOM_BOX_TYPE_METX);
gf_isom_sample_entry_init((GF_SampleEntryBox*)tmp);
return (GF_Box *)tmp;
}
void metx_del(GF_Box *s)
{
GF_MetaDataSampleEntryBox *ptr = (GF_MetaDataSampleEntryBox*)s;
if (ptr == NULL) return;
gf_isom_sample_entry_predestroy((GF_SampleEntryBox *)s);
if (ptr->content_encoding) gf_free(ptr->content_encoding);
if (ptr->xml_namespace) gf_free(ptr->xml_namespace);
if (ptr->xml_schema_loc) gf_free(ptr->xml_schema_loc);
if (ptr->mime_type) gf_free(ptr->mime_type);
if (ptr->config) gf_isom_box_del((GF_Box *)ptr->config);
gf_free(ptr);
}
GF_Err metx_AddBox(GF_Box *s, GF_Box *a)
{
GF_MetaDataSampleEntryBox *ptr = (GF_MetaDataSampleEntryBox *)s;
switch (a->type) {
case GF_ISOM_BOX_TYPE_SINF:
gf_list_add(ptr->protections, a);
break;
case GF_ISOM_BOX_TYPE_TXTC:
//we allow the config box on metx
if (ptr->config) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->config = (GF_TextConfigBox *)a;
break;
default:
return gf_isom_box_add_default(s, a);
}
return GF_OK;
}
GF_Err metx_Read(GF_Box *s, GF_BitStream *bs)
{
u32 size, i;
GF_Err e;
char *str;
GF_MetaDataSampleEntryBox *ptr = (GF_MetaDataSampleEntryBox*)s;
e = gf_isom_base_sample_entry_read((GF_SampleEntryBox *)ptr, bs);
if (e) return e;
if (ptr->size < 8) return GF_ISOM_INVALID_FILE;
size = (u32) ptr->size - 8;
str = gf_malloc(sizeof(char)*size);
if (!str) return GF_OUT_OF_MEM;
i=0;
while (size) {
str[i] = gf_bs_read_u8(bs);
size--;
if (!str[i]) {
i++;
break;
}
i++;
}
if (!size && i>1 && str[i-1]) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] metx read invalid string\n"));
gf_free(str);
return GF_ISOM_INVALID_FILE;
}
if (i>1) {
if (ptr->type==GF_ISOM_BOX_TYPE_STPP) {
ptr->xml_namespace = gf_strdup(str);
} else {
ptr->content_encoding = gf_strdup(str);
}
}
i=0;
while (size) {
str[i] = gf_bs_read_u8(bs);
size--;
if (!str[i]) {
i++;
break;
}
i++;
}
if (!size && i>1 && str[i-1]) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] metx read invalid string\n"));
gf_free(str);
return GF_ISOM_INVALID_FILE;
}
if ((ptr->type==GF_ISOM_BOX_TYPE_METX) || (ptr->type==GF_ISOM_BOX_TYPE_STPP)) {
if (i>1) {
if (ptr->type==GF_ISOM_BOX_TYPE_STPP) {
ptr->xml_schema_loc = gf_strdup(str);
} else {
ptr->xml_namespace = gf_strdup(str);
}
}
i=0;
while (size) {
str[i] = gf_bs_read_u8(bs);
size--;
if (!str[i]) {
i++;
break;
}
i++;
}
if (!size && i>1 && str[i-1]) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] metx read invalid string\n"));
gf_free(str);
return GF_ISOM_INVALID_FILE;
}
if (i>1) {
if (ptr->type==GF_ISOM_BOX_TYPE_STPP) {
ptr->mime_type = gf_strdup(str);
} else {
ptr->xml_schema_loc = gf_strdup(str);
}
}
}
//mett, sbtt, stxt, stpp
else {
if (i>1) ptr->mime_type = gf_strdup(str);
}
ptr->size = size;
gf_free(str);
return gf_isom_box_array_read(s, bs, metx_AddBox);
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err metx_Write(GF_Box *s, GF_BitStream *bs)
{
GF_MetaDataSampleEntryBox *ptr = (GF_MetaDataSampleEntryBox *)s;
GF_Err e = gf_isom_box_write_header(s, bs);
if (e) return e;
gf_bs_write_data(bs, ptr->reserved, 6);
gf_bs_write_u16(bs, ptr->dataReferenceIndex);
if (ptr->type!=GF_ISOM_BOX_TYPE_STPP) {
if (ptr->content_encoding)
gf_bs_write_data(bs, ptr->content_encoding, (u32) strlen(ptr->content_encoding));
gf_bs_write_u8(bs, 0);
}
if ((ptr->type==GF_ISOM_BOX_TYPE_METX) || (ptr->type==GF_ISOM_BOX_TYPE_STPP)) {
if (ptr->xml_namespace)
gf_bs_write_data(bs, ptr->xml_namespace, (u32) strlen(ptr->xml_namespace));
gf_bs_write_u8(bs, 0);
if (ptr->xml_schema_loc)
gf_bs_write_data(bs, ptr->xml_schema_loc, (u32) strlen(ptr->xml_schema_loc));
gf_bs_write_u8(bs, 0);
if (ptr->type==GF_ISOM_BOX_TYPE_STPP) {
if (ptr->mime_type)
gf_bs_write_data(bs, ptr->mime_type, (u32) strlen(ptr->mime_type));
gf_bs_write_u8(bs, 0);
}
}
//mett, sbtt, stxt
else {
if (ptr->mime_type)
gf_bs_write_data(bs, ptr->mime_type, (u32) strlen(ptr->mime_type));
gf_bs_write_u8(bs, 0);
if (ptr->config) {
gf_isom_box_write((GF_Box *)ptr->config, bs);
}
}
return gf_isom_box_array_write(s, ptr->protections, bs);
}
GF_Err metx_Size(GF_Box *s)
{
GF_Err e;
GF_MetaDataSampleEntryBox *ptr = (GF_MetaDataSampleEntryBox *)s;
ptr->size += 8;
if (ptr->type!=GF_ISOM_BOX_TYPE_STPP) {
if (ptr->content_encoding)
ptr->size += strlen(ptr->content_encoding);
ptr->size++;
}
if ((ptr->type==GF_ISOM_BOX_TYPE_METX) || (ptr->type==GF_ISOM_BOX_TYPE_STPP)) {
if (ptr->xml_namespace)
ptr->size += strlen(ptr->xml_namespace);
ptr->size++;
if (ptr->xml_schema_loc)
ptr->size += strlen(ptr->xml_schema_loc);
ptr->size++;
if (ptr->type==GF_ISOM_BOX_TYPE_STPP) {
if (ptr->mime_type)
ptr->size += strlen(ptr->mime_type);
ptr->size++;
}
}
//mett, sbtt, stxt
else {
if (ptr->mime_type)
ptr->size += strlen(ptr->mime_type);
ptr->size++;
if (ptr->config) {
e = gf_isom_box_size((GF_Box *)ptr->config);
if (e) return e;
ptr->size += ptr->config->size;
}
}
return gf_isom_box_array_size(s, ptr->protections);
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
/* TextConfig box, used by simple text sample entries */
GF_Box *txtc_New()
{
ISOM_DECL_BOX_ALLOC(GF_TextConfigBox, GF_ISOM_BOX_TYPE_TXTC);
return (GF_Box *)tmp;
}
void txtc_del(GF_Box *s)
{
GF_TextConfigBox *ptr = (GF_TextConfigBox*)s;
if (ptr == NULL) return;
if (ptr->config) gf_free(ptr->config);
gf_free(ptr);
}
GF_Err txtc_Read(GF_Box *s, GF_BitStream *bs)
{
GF_TextConfigBox *ptr = (GF_TextConfigBox*)s;
ptr->config = (char *)gf_malloc(sizeof(char)*((u32) ptr->size+1));
if (!ptr->config) return GF_OUT_OF_MEM;
gf_bs_read_data(bs, ptr->config, (u32) ptr->size);
ptr->config[ptr->size] = 0;
return GF_OK;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err txtc_Write(GF_Box *s, GF_BitStream *bs)
{
GF_TextConfigBox *ptr = (GF_TextConfigBox *)s;
GF_Err e = gf_isom_full_box_write(s, bs);
if (e) return e;
if (ptr->config)
gf_bs_write_data(bs, ptr->config, (u32) strlen(ptr->config));
gf_bs_write_u8(bs, 0);
return GF_OK;
}
GF_Err txtc_Size(GF_Box *s)
{
GF_TextConfigBox *ptr = (GF_TextConfigBox *)s;
if (ptr->config)
ptr->size += strlen(ptr->config);
ptr->size++;
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
GF_Box *dac3_New()
{
ISOM_DECL_BOX_ALLOC(GF_AC3ConfigBox, GF_ISOM_BOX_TYPE_DAC3);
return (GF_Box *)tmp;
}
GF_Box *dec3_New()
{
//allocate with the DAC3 type; dac3_Write temporarily switches the type to DEC3 when cfg.is_ec3 is set
ISOM_DECL_BOX_ALLOC(GF_AC3ConfigBox, GF_ISOM_BOX_TYPE_DAC3);
tmp->cfg.is_ec3 = 1;
return (GF_Box *)tmp;
}
void dac3_del(GF_Box *s)
{
GF_AC3ConfigBox *ptr = (GF_AC3ConfigBox *)s;
gf_free(ptr);
}
GF_Err dac3_Read(GF_Box *s, GF_BitStream *bs)
{
GF_AC3ConfigBox *ptr = (GF_AC3ConfigBox *)s;
if (ptr == NULL) return GF_BAD_PARAM;
if (ptr->cfg.is_ec3) {
u32 i;
ptr->cfg.brcode = gf_bs_read_int(bs, 13);
ptr->cfg.nb_streams = gf_bs_read_int(bs, 3) + 1;
for (i=0; i<ptr->cfg.nb_streams; i++) {
ptr->cfg.streams[i].fscod = gf_bs_read_int(bs, 2);
ptr->cfg.streams[i].bsid = gf_bs_read_int(bs, 5);
ptr->cfg.streams[i].bsmod = gf_bs_read_int(bs, 5);
ptr->cfg.streams[i].acmod = gf_bs_read_int(bs, 3);
ptr->cfg.streams[i].lfon = gf_bs_read_int(bs, 1);
gf_bs_read_int(bs, 3);
ptr->cfg.streams[i].nb_dep_sub = gf_bs_read_int(bs, 4);
if (ptr->cfg.streams[i].nb_dep_sub) {
ptr->cfg.streams[i].chan_loc = gf_bs_read_int(bs, 9);
} else {
gf_bs_read_int(bs, 1);
}
}
} else {
ptr->cfg.nb_streams = 1;
ptr->cfg.streams[0].fscod = gf_bs_read_int(bs, 2);
ptr->cfg.streams[0].bsid = gf_bs_read_int(bs, 5);
ptr->cfg.streams[0].bsmod = gf_bs_read_int(bs, 3);
ptr->cfg.streams[0].acmod = gf_bs_read_int(bs, 3);
ptr->cfg.streams[0].lfon = gf_bs_read_int(bs, 1);
ptr->cfg.brcode = gf_bs_read_int(bs, 5);
gf_bs_read_int(bs, 5);
}
return GF_OK;
}
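/*
 Bit layout as parsed above - dac3: fscod(2) bsid(5) bsmod(3) acmod(3) lfon(1)
 bit_rate_code(5) + 5 reserved bits; dec3: data_rate(13) num_ind_sub-1(3), then
 per independent substream fscod(2) bsid(5) bsmod(5) acmod(3) lfon(1), 3 reserved
 bits, num_dep_sub(4), and chan_loc(9) when num_dep_sub is non-zero, else 1
 reserved bit.
*/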
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err dac3_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_AC3ConfigBox *ptr = (GF_AC3ConfigBox *)s;
if (ptr->cfg.is_ec3) s->type = GF_ISOM_BOX_TYPE_DEC3;
e = gf_isom_box_write_header(s, bs);
if (ptr->cfg.is_ec3) s->type = GF_ISOM_BOX_TYPE_DAC3;
if (e) return e;
if (ptr->cfg.is_ec3) {
u32 i;
gf_bs_write_int(bs, ptr->cfg.brcode, 13);
gf_bs_write_int(bs, ptr->cfg.nb_streams - 1, 3);
for (i=0; i<ptr->cfg.nb_streams; i++) {
gf_bs_write_int(bs, ptr->cfg.streams[i].fscod, 2);
gf_bs_write_int(bs, ptr->cfg.streams[i].bsid, 5);
gf_bs_write_int(bs, ptr->cfg.streams[i].bsmod, 5);
gf_bs_write_int(bs, ptr->cfg.streams[i].acmod, 3);
gf_bs_write_int(bs, ptr->cfg.streams[i].lfon, 1);
gf_bs_write_int(bs, 0, 3);
gf_bs_write_int(bs, ptr->cfg.streams[i].nb_dep_sub, 4);
if (ptr->cfg.streams[i].nb_dep_sub) {
gf_bs_write_int(bs, ptr->cfg.streams[i].chan_loc, 9);
} else {
gf_bs_write_int(bs, 0, 1);
}
}
} else {
gf_bs_write_int(bs, ptr->cfg.streams[0].fscod, 2);
gf_bs_write_int(bs, ptr->cfg.streams[0].bsid, 5);
gf_bs_write_int(bs, ptr->cfg.streams[0].bsmod, 3);
gf_bs_write_int(bs, ptr->cfg.streams[0].acmod, 3);
gf_bs_write_int(bs, ptr->cfg.streams[0].lfon, 1);
gf_bs_write_int(bs, ptr->cfg.brcode, 5);
gf_bs_write_int(bs, 0, 5);
}
return GF_OK;
}
GF_Err dac3_Size(GF_Box *s)
{
GF_AC3ConfigBox *ptr = (GF_AC3ConfigBox *)s;
if (ptr->cfg.is_ec3) {
u32 i;
s->size += 2;
for (i=0; i<ptr->cfg.nb_streams; i++) {
s->size += 3;
if (ptr->cfg.streams[i].nb_dep_sub)
s->size += 1;
}
} else {
s->size += 3;
}
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
void lsrc_del(GF_Box *s)
{
GF_LASERConfigurationBox *ptr = (GF_LASERConfigurationBox *)s;
if (ptr == NULL) return;
if (ptr->hdr) gf_free(ptr->hdr);
gf_free(ptr);
}
GF_Err lsrc_Read(GF_Box *s, GF_BitStream *bs)
{
GF_LASERConfigurationBox *ptr = (GF_LASERConfigurationBox *)s;
ptr->hdr_size = (u32) ptr->size;
ptr->hdr = gf_malloc(sizeof(char)*ptr->hdr_size);
if (!ptr->hdr)
return GF_OUT_OF_MEM;
gf_bs_read_data(bs, ptr->hdr, ptr->hdr_size);
return GF_OK;
}
GF_Box *lsrc_New()
{
ISOM_DECL_BOX_ALLOC(GF_LASERConfigurationBox, GF_ISOM_BOX_TYPE_LSRC);
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err lsrc_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_LASERConfigurationBox *ptr = (GF_LASERConfigurationBox *)s;
e = gf_isom_box_write_header(s, bs);
if (e) return e;
gf_bs_write_data(bs, ptr->hdr, ptr->hdr_size);
return GF_OK;
}
GF_Err lsrc_Size(GF_Box *s)
{
GF_LASERConfigurationBox *ptr = (GF_LASERConfigurationBox *)s;
ptr->size += ptr->hdr_size;
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
void lsr1_del(GF_Box *s)
{
GF_LASeRSampleEntryBox *ptr = (GF_LASeRSampleEntryBox *)s;
if (ptr == NULL) return;
gf_isom_sample_entry_predestroy((GF_SampleEntryBox *)s);
if (ptr->slc) gf_odf_desc_del((GF_Descriptor *)ptr->slc);
if (ptr->lsr_config) gf_isom_box_del((GF_Box *) ptr->lsr_config);
if (ptr->descr) gf_isom_box_del((GF_Box *) ptr->descr);
gf_free(ptr);
}
GF_Err lsr1_AddBox(GF_Box *s, GF_Box *a)
{
GF_LASeRSampleEntryBox *ptr = (GF_LASeRSampleEntryBox *)s;
switch (a->type) {
case GF_ISOM_BOX_TYPE_LSRC:
if (ptr->lsr_config) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->lsr_config = (GF_LASERConfigurationBox *)a;
break;
case GF_ISOM_BOX_TYPE_M4DS:
if (ptr->descr) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->descr = (GF_MPEG4ExtensionDescriptorsBox *)a;
break;
default:
return gf_isom_box_add_default(s, a);
}
return GF_OK;
}
GF_Err lsr1_Read(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_LASeRSampleEntryBox *ptr = (GF_LASeRSampleEntryBox*)s;
e = gf_isom_base_sample_entry_read((GF_SampleEntryBox *)ptr, bs);
if (e) return e;
ISOM_DECREASE_SIZE(ptr, 8);
return gf_isom_box_array_read(s, bs, lsr1_AddBox);
}
GF_Box *lsr1_New()
{
ISOM_DECL_BOX_ALLOC(GF_LASeRSampleEntryBox, GF_ISOM_BOX_TYPE_LSR1);
gf_isom_sample_entry_init((GF_SampleEntryBox*)tmp);
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err lsr1_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_LASeRSampleEntryBox *ptr = (GF_LASeRSampleEntryBox *)s;
e = gf_isom_box_write_header(s, bs);
if (e) return e;
gf_bs_write_data(bs, ptr->reserved, 6);
gf_bs_write_u16(bs, ptr->dataReferenceIndex);
if (ptr->lsr_config) {
e = gf_isom_box_write((GF_Box *)ptr->lsr_config, bs);
if (e) return e;
}
if (ptr->descr) {
e = gf_isom_box_write((GF_Box *)ptr->descr, bs);
if (e) return e;
}
return e;
}
GF_Err lsr1_Size(GF_Box *s)
{
GF_Err e;
GF_LASeRSampleEntryBox *ptr = (GF_LASeRSampleEntryBox *)s;
s->size += 8;
if (ptr->lsr_config) {
e = gf_isom_box_size((GF_Box *)ptr->lsr_config);
if (e) return e;
ptr->size += ptr->lsr_config->size;
}
if (ptr->descr) {
e = gf_isom_box_size((GF_Box *)ptr->descr);
if (e) return e;
ptr->size += ptr->descr->size;
}
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
void sidx_del(GF_Box *s)
{
GF_SegmentIndexBox *ptr = (GF_SegmentIndexBox *) s;
if (ptr == NULL) return;
if (ptr->refs) gf_free(ptr->refs);
gf_free(ptr);
}
GF_Err sidx_Read(GF_Box *s,GF_BitStream *bs)
{
u32 i;
GF_SegmentIndexBox *ptr = (GF_SegmentIndexBox*) s;
ptr->reference_ID = gf_bs_read_u32(bs);
ptr->timescale = gf_bs_read_u32(bs);
ISOM_DECREASE_SIZE(ptr, 8);
if (ptr->version==0) {
ptr->earliest_presentation_time = gf_bs_read_u32(bs);
ptr->first_offset = gf_bs_read_u32(bs);
ISOM_DECREASE_SIZE(ptr, 8);
} else {
ptr->earliest_presentation_time = gf_bs_read_u64(bs);
ptr->first_offset = gf_bs_read_u64(bs);
ISOM_DECREASE_SIZE(ptr, 16);
}
gf_bs_read_u16(bs); /* reserved */
ptr->nb_refs = gf_bs_read_u16(bs);
ISOM_DECREASE_SIZE(ptr, 4);
ptr->refs = gf_malloc(sizeof(GF_SIDXReference)*ptr->nb_refs);
if (!ptr->refs)
return GF_OUT_OF_MEM;
for (i=0; i<ptr->nb_refs; i++) {
ptr->refs[i].reference_type = gf_bs_read_int(bs, 1);
ptr->refs[i].reference_size = gf_bs_read_int(bs, 31);
ptr->refs[i].subsegment_duration = gf_bs_read_u32(bs);
ptr->refs[i].starts_with_SAP = gf_bs_read_int(bs, 1);
ptr->refs[i].SAP_type = gf_bs_read_int(bs, 3);
ptr->refs[i].SAP_delta_time = gf_bs_read_int(bs, 28);
ISOM_DECREASE_SIZE(ptr, 12);
}
return GF_OK;
}
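/*
 Each 12-byte sidx reference entry, as read above:
 reference_type(1) reference_size(31) | subsegment_duration(32) |
 starts_with_SAP(1) SAP_type(3) SAP_delta_time(28)
*/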
GF_Box *sidx_New()
{
ISOM_DECL_BOX_ALLOC(GF_SegmentIndexBox, GF_ISOM_BOX_TYPE_SIDX);
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err sidx_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
u32 i;
GF_SegmentIndexBox *ptr = (GF_SegmentIndexBox*) s;
e = gf_isom_full_box_write(s, bs);
if (e) return e;
gf_bs_write_u32(bs, ptr->reference_ID);
gf_bs_write_u32(bs, ptr->timescale);
if (ptr->version==0) {
gf_bs_write_u32(bs, (u32) ptr->earliest_presentation_time);
gf_bs_write_u32(bs, (u32) ptr->first_offset);
} else {
gf_bs_write_u64(bs, ptr->earliest_presentation_time);
gf_bs_write_u64(bs, ptr->first_offset);
}
gf_bs_write_u16(bs, 0);
gf_bs_write_u16(bs, ptr->nb_refs);
for (i=0; i<ptr->nb_refs; i++ ) {
gf_bs_write_int(bs, ptr->refs[i].reference_type, 1);
gf_bs_write_int(bs, ptr->refs[i].reference_size, 31);
gf_bs_write_u32(bs, ptr->refs[i].subsegment_duration);
gf_bs_write_int(bs, ptr->refs[i].starts_with_SAP, 1);
gf_bs_write_int(bs, ptr->refs[i].SAP_type, 3);
gf_bs_write_int(bs, ptr->refs[i].SAP_delta_time, 28);
}
return GF_OK;
}
GF_Err sidx_Size(GF_Box *s)
{
GF_SegmentIndexBox *ptr = (GF_SegmentIndexBox*) s;
ptr->size += 12;
if (ptr->version==0) {
ptr->size += 8;
} else {
ptr->size += 16;
}
ptr->size += ptr->nb_refs * 12;
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
void ssix_del(GF_Box *s)
{
u32 i;
GF_SubsegmentIndexBox *ptr = (GF_SubsegmentIndexBox *)s;
if (ptr == NULL) return;
if (ptr->subsegments) {
for (i = 0; i < ptr->subsegment_count; i++) {
GF_SubsegmentInfo *subsegment = &ptr->subsegments[i];
if (subsegment->ranges) gf_free(subsegment->ranges);
}
gf_free(ptr->subsegments);
}
gf_free(ptr);
}
GF_Err ssix_Read(GF_Box *s, GF_BitStream *bs)
{
u32 i,j;
GF_SubsegmentIndexBox *ptr = (GF_SubsegmentIndexBox*)s;
if (ptr->size < 4) return GF_BAD_PARAM;
ptr->subsegment_count = gf_bs_read_u32(bs);
ptr->size -= 4;
if (ptr->subsegment_count > UINT_MAX / sizeof(GF_SubsegmentInfo))
return GF_ISOM_INVALID_FILE;
GF_SAFE_ALLOC_N(ptr->subsegments, ptr->subsegment_count, GF_SubsegmentInfo);
if (!ptr->subsegments)
return GF_OUT_OF_MEM;
for (i = 0; i < ptr->subsegment_count; i++) {
GF_SubsegmentInfo *subseg = &ptr->subsegments[i];
if (ptr->size < 4) return GF_BAD_PARAM;
subseg->range_count = gf_bs_read_u32(bs);
ptr->size -= 4;
if (ptr->size < 4ULL * subseg->range_count) return GF_BAD_PARAM;
subseg->ranges = (GF_SubsegmentRangeInfo*) gf_malloc(sizeof(GF_SubsegmentRangeInfo) * subseg->range_count);
if (!subseg->ranges) return GF_OUT_OF_MEM;
for (j = 0; j < subseg->range_count; j++) {
subseg->ranges[j].level = gf_bs_read_u8(bs);
subseg->ranges[j].range_size = gf_bs_read_u24(bs);
ptr->size -= 4;
}
}
return GF_OK;
}
GF_Box *ssix_New()
{
ISOM_DECL_BOX_ALLOC(GF_SubsegmentIndexBox, GF_ISOM_BOX_TYPE_SSIX);
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err ssix_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
u32 i, j;
GF_SubsegmentIndexBox *ptr = (GF_SubsegmentIndexBox*)s;
e = gf_isom_full_box_write(s, bs);
if (e) return e;
gf_bs_write_u32(bs, ptr->subsegment_count);
for (i = 0; i<ptr->subsegment_count; i++) {
gf_bs_write_u32(bs, ptr->subsegments[i].range_count);
for (j = 0; j < ptr->subsegments[i].range_count; j++) {
gf_bs_write_u8(bs, ptr->subsegments[i].ranges[j].level);
gf_bs_write_u24(bs, ptr->subsegments[i].ranges[j].range_size);
}
}
return GF_OK;
}
GF_Err ssix_Size(GF_Box *s)
{
u32 i;
GF_SubsegmentIndexBox *ptr = (GF_SubsegmentIndexBox*)s;
ptr->size += 4;
for (i = 0; i < ptr->subsegment_count; i++) {
ptr->size += 4 + 4 * ptr->subsegments[i].range_count;
}
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
void leva_del(GF_Box *s)
{
GF_LevelAssignmentBox *ptr = (GF_LevelAssignmentBox *)s;
if (ptr == NULL) return;
if (ptr->levels) gf_free(ptr->levels);
gf_free(ptr);
}
GF_Err leva_Read(GF_Box *s, GF_BitStream *bs)
{
u32 i;
GF_LevelAssignmentBox *ptr = (GF_LevelAssignmentBox*)s;
if (ptr->size < 1) return GF_BAD_PARAM;
ptr->level_count = gf_bs_read_u8(bs);
ptr->size -= 1;
GF_SAFE_ALLOC_N(ptr->levels, ptr->level_count, GF_LevelAssignment);
if (!ptr->levels) return GF_OUT_OF_MEM;
for (i = 0; i < ptr->level_count; i++) {
GF_LevelAssignment *level = &ptr->levels[i];
u8 tmp;
if (ptr->size < 5) return GF_BAD_PARAM;
level->track_id = gf_bs_read_u32(bs);
tmp = gf_bs_read_u8(bs);
ptr->size -= 5;
level->padding_flag = tmp >> 7;
level->type = tmp & 0x7F;
if (level->type == 0) {
if (ptr->size < 4) return GF_BAD_PARAM;
level->grouping_type = gf_bs_read_u32(bs);
ptr->size -= 4;
}
else if (level->type == 1) {
if (ptr->size < 8) return GF_BAD_PARAM;
level->grouping_type = gf_bs_read_u32(bs);
level->grouping_type_parameter = gf_bs_read_u32(bs);
ptr->size -= 8;
}
else if (level->type == 4) {
if (ptr->size < 4) return GF_BAD_PARAM;
level->sub_track_id = gf_bs_read_u32(bs);
ptr->size -= 4;
}
}
return GF_OK;
}
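/* Note: leva assignment types 0 and 1 bind a level to a sample group (type 1
 * additionally carries grouping_type_parameter), type 4 binds it to a
 * sub-track, and types 2/3 carry no extra payload, which is why the parser
 * reads nothing for them. Illustrative helper (compiled out, name hypothetical): */
#if 0
static u32 leva_type_payload_bytes(u8 assignment_type)
{
	switch (assignment_type) {
	case 0: return 4; /*grouping_type*/
	case 1: return 8; /*grouping_type + grouping_type_parameter*/
	case 4: return 4; /*sub_track_id*/
	default: return 0; /*types 2 and 3 have no extra fields*/
	}
}
#endif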
GF_Box *leva_New()
{
ISOM_DECL_BOX_ALLOC(GF_LevelAssignmentBox, GF_ISOM_BOX_TYPE_LEVA);
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err leva_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
u32 i;
GF_LevelAssignmentBox *ptr = (GF_LevelAssignmentBox*)s;
e = gf_isom_full_box_write(s, bs);
if (e) return e;
gf_bs_write_u8(bs, ptr->level_count);
for (i = 0; i<ptr->level_count; i++) {
gf_bs_write_u32(bs, ptr->levels[i].track_id);
gf_bs_write_u8(bs, ptr->levels[i].padding_flag << 7 | (ptr->levels[i].type & 0x7F));
if (ptr->levels[i].type == 0) {
gf_bs_write_u32(bs, ptr->levels[i].grouping_type);
}
else if (ptr->levels[i].type == 1) {
gf_bs_write_u32(bs, ptr->levels[i].grouping_type);
gf_bs_write_u32(bs, ptr->levels[i].grouping_type_parameter);
}
else if (ptr->levels[i].type == 4) {
gf_bs_write_u32(bs, ptr->levels[i].sub_track_id);
}
}
return GF_OK;
}
GF_Err leva_Size(GF_Box *s)
{
u32 i;
GF_LevelAssignmentBox *ptr = (GF_LevelAssignmentBox*)s;
ptr->size += 1;
for (i = 0; i < ptr->level_count; i++) {
ptr->size += 5;
if (ptr->levels[i].type == 0 || ptr->levels[i].type == 4) {
ptr->size += 4;
}
else if (ptr->levels[i].type == 1) {
ptr->size += 8;
}
}
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
GF_Box *pcrb_New()
{
ISOM_DECL_BOX_ALLOC(GF_PcrInfoBox, GF_ISOM_BOX_TYPE_PCRB);
return (GF_Box *)tmp;
}
void pcrb_del(GF_Box *s)
{
GF_PcrInfoBox *ptr = (GF_PcrInfoBox *) s;
if (ptr == NULL) return;
if (ptr->pcr_values) gf_free(ptr->pcr_values);
gf_free(ptr);
}
GF_Err pcrb_Read(GF_Box *s,GF_BitStream *bs)
{
u32 i;
GF_PcrInfoBox *ptr = (GF_PcrInfoBox*) s;
ptr->subsegment_count = gf_bs_read_u32(bs);
ISOM_DECREASE_SIZE(ptr, 4);
	if (ptr->size / 6 < ptr->subsegment_count)
		return GF_ISOM_INVALID_FILE;
	ptr->pcr_values = gf_malloc(sizeof(u64)*ptr->subsegment_count);
if (!ptr->pcr_values)
return GF_OUT_OF_MEM;
for (i=0; i<ptr->subsegment_count; i++) {
u64 data1 = gf_bs_read_u32(bs);
u64 data2 = gf_bs_read_u16(bs);
ISOM_DECREASE_SIZE(ptr, 6);
ptr->pcr_values[i] = (data1 << 10) | (data2 >> 6);
}
return GF_OK;
}
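/* Note: an MPEG-2 PCR is 42 bits (33-bit base and 9-bit extension). Each pcrb
 * entry stores it left-aligned in 48 bits (u32 + u16), leaving the low 6 bits
 * of the u16 as padding, hence the (data1 << 10) | (data2 >> 6) reassembly
 * above and the mirrored split in pcrb_Write. Round-trip sketch (compiled out,
 * name hypothetical): */
#if 0
static Bool pcrb_pack_roundtrip_ok(u64 pcr) /*any 42-bit value*/
{
	u32 data1 = (u32) (pcr >> 10); /*top 32 bits*/
	u16 data2 = (u16) (pcr << 6); /*low 10 bits, left-aligned*/
	return ((((u64)data1 << 10) | (data2 >> 6)) == pcr) ? GF_TRUE : GF_FALSE;
}
#endif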
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err pcrb_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
u32 i;
GF_PcrInfoBox *ptr = (GF_PcrInfoBox*) s;
e = gf_isom_box_write_header(s, bs);
if (e) return e;
gf_bs_write_u32(bs, ptr->subsegment_count);
for (i=0; i<ptr->subsegment_count; i++ ) {
u32 data1 = (u32) (ptr->pcr_values[i] >> 10);
u16 data2 = (u16) (ptr->pcr_values[i] << 6);
gf_bs_write_u32(bs, data1);
gf_bs_write_u16(bs, data2);
}
return GF_OK;
}
GF_Err pcrb_Size(GF_Box *s)
{
GF_PcrInfoBox *ptr = (GF_PcrInfoBox*) s;
ptr->size += 4;
ptr->size += ptr->subsegment_count * 6;
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
GF_Box *subs_New()
{
ISOM_DECL_BOX_ALLOC(GF_SubSampleInformationBox, GF_ISOM_BOX_TYPE_SUBS);
tmp->Samples = gf_list_new();
return (GF_Box *)tmp;
}
void subs_del(GF_Box *s)
{
GF_SubSampleInformationBox *ptr = (GF_SubSampleInformationBox *)s;
if (ptr == NULL) return;
while (gf_list_count(ptr->Samples)) {
GF_SubSampleInfoEntry *pSamp;
pSamp = (GF_SubSampleInfoEntry*)gf_list_get(ptr->Samples, 0);
while (gf_list_count(pSamp->SubSamples)) {
GF_SubSampleEntry *pSubSamp;
pSubSamp = (GF_SubSampleEntry*) gf_list_get(pSamp->SubSamples, 0);
gf_free(pSubSamp);
gf_list_rem(pSamp->SubSamples, 0);
}
gf_list_del(pSamp->SubSamples);
gf_free(pSamp);
gf_list_rem(ptr->Samples, 0);
}
gf_list_del(ptr->Samples);
gf_free(ptr);
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err subs_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
u32 i, j, entry_count;
u16 subsample_count;
GF_SubSampleInfoEntry *pSamp;
GF_SubSampleEntry *pSubSamp;
GF_SubSampleInformationBox *ptr = (GF_SubSampleInformationBox *) s;
if (!s) return GF_BAD_PARAM;
e = gf_isom_full_box_write(s, bs);
if (e) return e;
entry_count = gf_list_count(ptr->Samples);
gf_bs_write_u32(bs, entry_count);
for (i=0; i<entry_count; i++) {
pSamp = (GF_SubSampleInfoEntry*) gf_list_get(ptr->Samples, i);
subsample_count = gf_list_count(pSamp->SubSamples);
gf_bs_write_u32(bs, pSamp->sample_delta);
gf_bs_write_u16(bs, subsample_count);
for (j=0; j<subsample_count; j++) {
pSubSamp = (GF_SubSampleEntry*) gf_list_get(pSamp->SubSamples, j);
if (ptr->version == 1) {
gf_bs_write_u32(bs, pSubSamp->subsample_size);
} else {
gf_bs_write_u16(bs, pSubSamp->subsample_size);
}
gf_bs_write_u8(bs, pSubSamp->subsample_priority);
gf_bs_write_u8(bs, pSubSamp->discardable);
gf_bs_write_u32(bs, pSubSamp->reserved);
}
}
return e;
}
GF_Err subs_Size(GF_Box *s)
{
GF_SubSampleInformationBox *ptr = (GF_SubSampleInformationBox *) s;
GF_SubSampleInfoEntry *pSamp;
u32 entry_count, i;
u16 subsample_count;
// add 4 byte for entry_count
ptr->size += 4;
entry_count = gf_list_count(ptr->Samples);
for (i=0; i<entry_count; i++) {
pSamp = (GF_SubSampleInfoEntry*) gf_list_get(ptr->Samples, i);
subsample_count = gf_list_count(pSamp->SubSamples);
// 4 byte for sample_delta, 2 byte for subsample_count
// and 6 + (4 or 2) bytes for each subsample
ptr->size += 4 + 2 + subsample_count * (6 + (ptr->version==1 ? 4 : 2));
}
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
GF_Err subs_Read(GF_Box *s, GF_BitStream *bs)
{
GF_SubSampleInformationBox *ptr = (GF_SubSampleInformationBox *)s;
u32 entry_count, i, j;
u16 subsample_count;
entry_count = gf_bs_read_u32(bs);
ISOM_DECREASE_SIZE(ptr, 4);
for (i=0; i<entry_count; i++) {
u32 subs_size=0;
GF_SubSampleInfoEntry *pSamp = (GF_SubSampleInfoEntry*) gf_malloc(sizeof(GF_SubSampleInfoEntry));
if (!pSamp) return GF_OUT_OF_MEM;
memset(pSamp, 0, sizeof(GF_SubSampleInfoEntry));
pSamp->SubSamples = gf_list_new();
pSamp->sample_delta = gf_bs_read_u32(bs);
subsample_count = gf_bs_read_u16(bs);
subs_size=6;
for (j=0; j<subsample_count; j++) {
GF_SubSampleEntry *pSubSamp = (GF_SubSampleEntry*) gf_malloc(sizeof(GF_SubSampleEntry));
if (!pSubSamp) return GF_OUT_OF_MEM;
memset(pSubSamp, 0, sizeof(GF_SubSampleEntry));
if (ptr->version==1) {
pSubSamp->subsample_size = gf_bs_read_u32(bs);
subs_size+=4;
} else {
pSubSamp->subsample_size = gf_bs_read_u16(bs);
subs_size+=2;
}
pSubSamp->subsample_priority = gf_bs_read_u8(bs);
pSubSamp->discardable = gf_bs_read_u8(bs);
pSubSamp->reserved = gf_bs_read_u32(bs);
subs_size+=6;
gf_list_add(pSamp->SubSamples, pSubSamp);
}
gf_list_add(ptr->Samples, pSamp);
ISOM_DECREASE_SIZE(ptr, subs_size);
}
return GF_OK;
}
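/* Note: the subs_size accumulator above mirrors subs_Size: each entry costs
 * sample_delta(4) + subsample_count(2) bytes, and each subsample costs 6 bytes
 * (priority, discardable, reserved) plus a 4-byte (version 1) or 2-byte
 * (version 0) subsample_size. Illustrative helper (compiled out, name
 * hypothetical): */
#if 0
static u64 subs_entry_bytes(u8 version, u32 subsample_count)
{
	return 4 + 2 + (u64)subsample_count * (6 + ((version==1) ? 4 : 2));
}
#endif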
#ifndef GPAC_DISABLE_ISOM_FRAGMENTS
GF_Box *tfdt_New()
{
ISOM_DECL_BOX_ALLOC(GF_TFBaseMediaDecodeTimeBox, GF_ISOM_BOX_TYPE_TFDT);
return (GF_Box *)tmp;
}
void tfdt_del(GF_Box *s)
{
gf_free(s);
}
/*TrackFragmentBaseMediaDecodeTimeBox*/
GF_Err tfdt_Read(GF_Box *s,GF_BitStream *bs)
{
GF_TFBaseMediaDecodeTimeBox *ptr = (GF_TFBaseMediaDecodeTimeBox *)s;
if (ptr->version==1) {
ptr->baseMediaDecodeTime = gf_bs_read_u64(bs);
ISOM_DECREASE_SIZE(ptr, 8);
} else {
ptr->baseMediaDecodeTime = (u32) gf_bs_read_u32(bs);
ISOM_DECREASE_SIZE(ptr, 4);
}
return GF_OK;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err tfdt_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_TFBaseMediaDecodeTimeBox *ptr = (GF_TFBaseMediaDecodeTimeBox *) s;
e = gf_isom_full_box_write(s, bs);
if (e) return e;
if (ptr->version==1) {
gf_bs_write_u64(bs, ptr->baseMediaDecodeTime);
} else {
gf_bs_write_u32(bs, (u32) ptr->baseMediaDecodeTime);
}
return GF_OK;
}
GF_Err tfdt_Size(GF_Box *s)
{
GF_TFBaseMediaDecodeTimeBox *ptr = (GF_TFBaseMediaDecodeTimeBox *)s;
if (ptr->baseMediaDecodeTime<=0xFFFFFFFF) {
ptr->version = 0;
ptr->size += 4;
} else {
ptr->version = 1;
ptr->size += 8;
}
return GF_OK;
}
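/* Note: tfdt_Size derives the box version from the value range, so a decode
 * time that fits in 32 bits always serializes as version 0. Illustrative
 * restatement (compiled out, macro name hypothetical): */
#if 0
#define TFDT_VERSION_FOR(__t) (((__t) <= 0xFFFFFFFFULL) ? 0 : 1)
#endif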
#endif /*GPAC_DISABLE_ISOM_WRITE*/
#endif /*GPAC_DISABLE_ISOM_FRAGMENTS*/
GF_Box *rvcc_New()
{
ISOM_DECL_BOX_ALLOC(GF_RVCConfigurationBox, GF_ISOM_BOX_TYPE_RVCC);
return (GF_Box *)tmp;
}
void rvcc_del(GF_Box *s)
{
gf_free(s);
}
GF_Err rvcc_Read(GF_Box *s,GF_BitStream *bs)
{
GF_RVCConfigurationBox *ptr = (GF_RVCConfigurationBox*)s;
ptr->predefined_rvc_config = gf_bs_read_u16(bs);
ISOM_DECREASE_SIZE(ptr, 2);
if (!ptr->predefined_rvc_config) {
ptr->rvc_meta_idx = gf_bs_read_u16(bs);
ISOM_DECREASE_SIZE(ptr, 2);
}
return GF_OK;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err rvcc_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_RVCConfigurationBox *ptr = (GF_RVCConfigurationBox*) s;
e = gf_isom_box_write_header(s, bs);
if (e) return e;
gf_bs_write_u16(bs, ptr->predefined_rvc_config);
if (!ptr->predefined_rvc_config) {
gf_bs_write_u16(bs, ptr->rvc_meta_idx);
}
return GF_OK;
}
GF_Err rvcc_Size(GF_Box *s)
{
GF_RVCConfigurationBox *ptr = (GF_RVCConfigurationBox *)s;
ptr->size += 2;
if (! ptr->predefined_rvc_config) ptr->size += 2;
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
GF_Box *sbgp_New()
{
ISOM_DECL_BOX_ALLOC(GF_SampleGroupBox, GF_ISOM_BOX_TYPE_SBGP);
return (GF_Box *)tmp;
}
void sbgp_del(GF_Box *a)
{
GF_SampleGroupBox *p = (GF_SampleGroupBox *)a;
if (p->sample_entries) gf_free(p->sample_entries);
gf_free(p);
}
GF_Err sbgp_Read(GF_Box *s, GF_BitStream *bs)
{
u32 i;
GF_SampleGroupBox *ptr = (GF_SampleGroupBox *)s;
ptr->grouping_type = gf_bs_read_u32(bs);
ISOM_DECREASE_SIZE(ptr, 4);
if (ptr->version==1) {
ptr->grouping_type_parameter = gf_bs_read_u32(bs);
ISOM_DECREASE_SIZE(ptr, 4);
}
ptr->entry_count = gf_bs_read_u32(bs);
ISOM_DECREASE_SIZE(ptr, 4);
if (ptr->size < sizeof(GF_SampleGroupEntry)*ptr->entry_count)
return GF_ISOM_INVALID_FILE;
ptr->sample_entries = gf_malloc(sizeof(GF_SampleGroupEntry)*ptr->entry_count);
if (!ptr->sample_entries)
return GF_OUT_OF_MEM;
for (i=0; i<ptr->entry_count; i++) {
ptr->sample_entries[i].sample_count = gf_bs_read_u32(bs);
ptr->sample_entries[i].group_description_index = gf_bs_read_u32(bs);
ISOM_DECREASE_SIZE(ptr, 8);
}
return GF_OK;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err sbgp_Write(GF_Box *s, GF_BitStream *bs)
{
u32 i;
GF_Err e;
GF_SampleGroupBox *p = (GF_SampleGroupBox*)s;
e = gf_isom_full_box_write(s, bs);
if (e) return e;
gf_bs_write_u32(bs, p->grouping_type);
if (p->version==1)
gf_bs_write_u32(bs, p->grouping_type_parameter);
gf_bs_write_u32(bs, p->entry_count);
for (i = 0; i<p->entry_count; i++ ) {
gf_bs_write_u32(bs, p->sample_entries[i].sample_count);
gf_bs_write_u32(bs, p->sample_entries[i].group_description_index);
}
return GF_OK;
}
GF_Err sbgp_Size(GF_Box *s)
{
GF_SampleGroupBox *p = (GF_SampleGroupBox*)s;
p->size += 8;
if (p->grouping_type_parameter) p->version=1;
if (p->version==1) p->size += 4;
p->size += 8*p->entry_count;
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
static void *sgpd_parse_entry(u32 grouping_type, GF_BitStream *bs, u32 entry_size, u32 *total_bytes)
{
Bool null_size_ok = GF_FALSE;
GF_DefaultSampleGroupDescriptionEntry *ptr;
switch (grouping_type) {
case GF_ISOM_SAMPLE_GROUP_ROLL:
case GF_ISOM_SAMPLE_GROUP_PROL:
{
GF_RollRecoveryEntry *ptr;
GF_SAFEALLOC(ptr, GF_RollRecoveryEntry);
if (!ptr) return NULL;
ptr->roll_distance = gf_bs_read_int(bs, 16);
*total_bytes = 2;
return ptr;
}
case GF_ISOM_SAMPLE_GROUP_RAP:
{
GF_VisualRandomAccessEntry *ptr;
GF_SAFEALLOC(ptr, GF_VisualRandomAccessEntry);
if (!ptr) return NULL;
ptr->num_leading_samples_known = gf_bs_read_int(bs, 1);
ptr->num_leading_samples = gf_bs_read_int(bs, 7);
*total_bytes = 1;
return ptr;
}
case GF_ISOM_SAMPLE_GROUP_SAP:
{
GF_SAPEntry *ptr;
GF_SAFEALLOC(ptr, GF_SAPEntry);
if (!ptr) return NULL;
ptr->dependent_flag = gf_bs_read_int(bs, 1);
gf_bs_read_int(bs, 3);
ptr->SAP_type = gf_bs_read_int(bs, 4);
*total_bytes = 1;
return ptr;
}
case GF_ISOM_SAMPLE_GROUP_SYNC:
{
GF_SYNCEntry *ptr;
GF_SAFEALLOC(ptr, GF_SYNCEntry);
if (!ptr) return NULL;
gf_bs_read_int(bs, 2);
ptr->NALU_type = gf_bs_read_int(bs, 6);
*total_bytes = 1;
return ptr;
}
case GF_ISOM_SAMPLE_GROUP_TELE:
{
GF_TemporalLevelEntry *ptr;
GF_SAFEALLOC(ptr, GF_TemporalLevelEntry);
if (!ptr) return NULL;
ptr->level_independently_decodable = gf_bs_read_int(bs, 1);
gf_bs_read_int(bs, 7);
*total_bytes = 1;
return ptr;
}
case GF_ISOM_SAMPLE_GROUP_SEIG:
{
GF_CENCSampleEncryptionGroupEntry *ptr;
GF_SAFEALLOC(ptr, GF_CENCSampleEncryptionGroupEntry);
if (!ptr) return NULL;
gf_bs_read_u8(bs); //reserved
ptr->crypt_byte_block = gf_bs_read_int(bs, 4);
ptr->skip_byte_block = gf_bs_read_int(bs, 4);
ptr->IsProtected = gf_bs_read_u8(bs);
ptr->Per_Sample_IV_size = gf_bs_read_u8(bs);
gf_bs_read_data(bs, (char *)ptr->KID, 16);
*total_bytes = 20;
if ((ptr->IsProtected == 1) && !ptr->Per_Sample_IV_size) {
ptr->constant_IV_size = gf_bs_read_u8(bs);
if ((ptr->constant_IV_size != 8) && (ptr->constant_IV_size != 16)) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] seig sample group have invalid constant_IV size\n"));
gf_free(ptr);
return NULL;
}
gf_bs_read_data(bs, (char *)ptr->constant_IV, ptr->constant_IV_size);
*total_bytes += 1 + ptr->constant_IV_size;
}
if (!entry_size) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] seig sample group does not indicate entry size, deprecated in spec\n"));
}
return ptr;
}
case GF_ISOM_SAMPLE_GROUP_OINF:
{
GF_OperatingPointsInformation *ptr = gf_isom_oinf_new_entry();
u32 s = (u32) gf_bs_get_position(bs);
gf_isom_oinf_read_entry(ptr, bs);
*total_bytes = (u32) gf_bs_get_position(bs) - s;
if (!entry_size) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] oinf sample group does not indicate entry size, deprecated in spec\n"));
}
return ptr;
}
case GF_ISOM_SAMPLE_GROUP_LINF:
{
GF_LHVCLayerInformation *ptr = gf_isom_linf_new_entry();
u32 s = (u32) gf_bs_get_position(bs);
gf_isom_linf_read_entry(ptr, bs);
*total_bytes = (u32) gf_bs_get_position(bs) - s;
if (!entry_size) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] linf sample group does not indicate entry size, deprecated in spec\n"));
}
return ptr;
}
case GF_ISOM_SAMPLE_GROUP_TRIF:
if (! entry_size) {
u32 flags = gf_bs_peek_bits(bs, 24, 0);
if (flags & 0x10000) entry_size=3;
else {
if (flags & 0x80000) entry_size=7;
else entry_size=11;
//have dependency list
if (flags & 0x200000) {
u32 nb_entries = gf_bs_peek_bits(bs, 16, entry_size);
entry_size += 2 + 2*nb_entries;
}
}
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] trif sample group does not indicate entry size, deprecated in spec\n"));
}
break;
case GF_ISOM_SAMPLE_GROUP_NALM:
if (! entry_size) {
u64 start = gf_bs_get_position(bs);
Bool rle, large_size;
u32 entry_count;
gf_bs_read_int(bs, 6);
large_size = gf_bs_read_int(bs, 1);
rle = gf_bs_read_int(bs, 1);
entry_count = gf_bs_read_int(bs, large_size ? 16 : 8);
gf_bs_seek(bs, start);
			entry_size = 1 + (large_size ? 2 : 1);
entry_size += entry_count * 2;
if (rle) entry_size += entry_count * (large_size ? 2 : 1);
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] nalm sample group does not indicate entry size, deprecated in spec\n"));
}
break;
case GF_ISOM_SAMPLE_GROUP_TSAS:
case GF_ISOM_SAMPLE_GROUP_STSA:
null_size_ok = GF_TRUE;
break;
//TODO, add support for these ones ?
case GF_ISOM_SAMPLE_GROUP_TSCL:
entry_size = 20;
break;
case GF_ISOM_SAMPLE_GROUP_LBLI:
entry_size = 2;
break;
default:
break;
}
if (!entry_size && !null_size_ok) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] %s sample group does not indicate entry size and is not implemented, cannot parse!\n", gf_4cc_to_str( grouping_type) ));
return NULL;
}
GF_SAFEALLOC(ptr, GF_DefaultSampleGroupDescriptionEntry);
if (!ptr) return NULL;
	if (entry_size) {
		ptr->length = entry_size;
		ptr->data = (u8 *) gf_malloc(sizeof(u8)*ptr->length);
		if (!ptr->data) {
			gf_free(ptr);
			return NULL;
		}
		gf_bs_read_data(bs, (char *) ptr->data, ptr->length);
		*total_bytes = entry_size;
	}
	return ptr;
}
return ptr;
}
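/* Note on the sgpd_parse_entry contract: on success it returns a type-specific
 * entry and sets *total_bytes to the number of bytes consumed, which sgpd_Read
 * subtracts from the box size; unknown grouping types fall back to an opaque
 * GF_DefaultSampleGroupDescriptionEntry blob, which is only possible when the
 * entry size is known (default_length or a per-entry length in version>=1
 * boxes). For seig the byte count matches sgpd_size_entry; illustrative helper
 * (compiled out, name hypothetical): */
#if 0
static u32 seig_entry_bytes(u8 IsProtected, u8 Per_Sample_IV_size, u8 constant_IV_size)
{
	/*reserved(1) + crypt/skip nibbles(1) + IsProtected(1) + Per_Sample_IV_size(1) + KID(16) = 20*/
	return ((IsProtected == 1) && !Per_Sample_IV_size) ? 21 + constant_IV_size : 20;
}
#endif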
static void sgpd_del_entry(u32 grouping_type, void *entry)
{
switch (grouping_type) {
case GF_ISOM_SAMPLE_GROUP_SYNC:
case GF_ISOM_SAMPLE_GROUP_ROLL:
case GF_ISOM_SAMPLE_GROUP_PROL:
case GF_ISOM_SAMPLE_GROUP_RAP:
case GF_ISOM_SAMPLE_GROUP_SEIG:
case GF_ISOM_SAMPLE_GROUP_TELE:
case GF_ISOM_SAMPLE_GROUP_SAP:
gf_free(entry);
return;
case GF_ISOM_SAMPLE_GROUP_OINF:
gf_isom_oinf_del_entry(entry);
return;
case GF_ISOM_SAMPLE_GROUP_LINF:
gf_isom_linf_del_entry(entry);
return;
default:
{
GF_DefaultSampleGroupDescriptionEntry *ptr = (GF_DefaultSampleGroupDescriptionEntry *)entry;
if (ptr->data) gf_free(ptr->data);
gf_free(ptr);
}
}
}
void sgpd_write_entry(u32 grouping_type, void *entry, GF_BitStream *bs)
{
switch (grouping_type) {
case GF_ISOM_SAMPLE_GROUP_ROLL:
case GF_ISOM_SAMPLE_GROUP_PROL:
gf_bs_write_int(bs, ((GF_RollRecoveryEntry*)entry)->roll_distance, 16);
return;
case GF_ISOM_SAMPLE_GROUP_RAP:
gf_bs_write_int(bs, ((GF_VisualRandomAccessEntry*)entry)->num_leading_samples_known, 1);
gf_bs_write_int(bs, ((GF_VisualRandomAccessEntry*)entry)->num_leading_samples, 7);
return;
case GF_ISOM_SAMPLE_GROUP_SAP:
gf_bs_write_int(bs, ((GF_SAPEntry*)entry)->dependent_flag, 1);
gf_bs_write_int(bs, 0, 3);
gf_bs_write_int(bs, ((GF_SAPEntry*)entry)->SAP_type, 4);
return;
case GF_ISOM_SAMPLE_GROUP_SYNC:
gf_bs_write_int(bs, 0, 2);
gf_bs_write_int(bs, ((GF_SYNCEntry*)entry)->NALU_type, 6);
return;
case GF_ISOM_SAMPLE_GROUP_TELE:
gf_bs_write_int(bs, ((GF_TemporalLevelEntry*)entry)->level_independently_decodable, 1);
gf_bs_write_int(bs, 0, 7);
return;
	case GF_ISOM_SAMPLE_GROUP_SEIG:
	{
		GF_CENCSampleEncryptionGroupEntry *seig = (GF_CENCSampleEncryptionGroupEntry *)entry;
		gf_bs_write_u8(bs, 0x0);
		gf_bs_write_int(bs, seig->crypt_byte_block, 4);
		gf_bs_write_int(bs, seig->skip_byte_block, 4);
		gf_bs_write_u8(bs, seig->IsProtected);
		gf_bs_write_u8(bs, seig->Per_Sample_IV_size);
		gf_bs_write_data(bs, (char *)seig->KID, 16);
		if ((seig->IsProtected == 1) && !seig->Per_Sample_IV_size) {
			gf_bs_write_u8(bs, seig->constant_IV_size);
			gf_bs_write_data(bs, (char *)seig->constant_IV, seig->constant_IV_size);
		}
	}
return;
case GF_ISOM_SAMPLE_GROUP_OINF:
gf_isom_oinf_write_entry(entry, bs);
return;
case GF_ISOM_SAMPLE_GROUP_LINF:
gf_isom_linf_write_entry(entry, bs);
return;
default:
{
GF_DefaultSampleGroupDescriptionEntry *ptr = (GF_DefaultSampleGroupDescriptionEntry *)entry;
if (ptr->length)
gf_bs_write_data(bs, (char *) ptr->data, ptr->length);
}
}
}
#ifndef GPAC_DISABLE_ISOM_WRITE
static u32 sgpd_size_entry(u32 grouping_type, void *entry)
{
switch (grouping_type) {
case GF_ISOM_SAMPLE_GROUP_ROLL:
case GF_ISOM_SAMPLE_GROUP_PROL:
return 2;
case GF_ISOM_SAMPLE_GROUP_TELE:
case GF_ISOM_SAMPLE_GROUP_RAP:
case GF_ISOM_SAMPLE_GROUP_SAP:
case GF_ISOM_SAMPLE_GROUP_SYNC:
return 1;
case GF_ISOM_SAMPLE_GROUP_TSCL:
return 20;
case GF_ISOM_SAMPLE_GROUP_LBLI:
return 2;
case GF_ISOM_SAMPLE_GROUP_TSAS:
case GF_ISOM_SAMPLE_GROUP_STSA:
return 0;
	case GF_ISOM_SAMPLE_GROUP_SEIG:
	{
		GF_CENCSampleEncryptionGroupEntry *seig = (GF_CENCSampleEncryptionGroupEntry *)entry;
		if ((seig->IsProtected == 1) && !seig->Per_Sample_IV_size)
			return 21 + seig->constant_IV_size;
		return 20;
	}
case GF_ISOM_SAMPLE_GROUP_OINF:
return gf_isom_oinf_size_entry(entry);
case GF_ISOM_SAMPLE_GROUP_LINF:
return gf_isom_linf_size_entry(entry);
default:
return ((GF_DefaultSampleGroupDescriptionEntry *)entry)->length;
}
}
#endif
GF_Box *sgpd_New()
{
ISOM_DECL_BOX_ALLOC(GF_SampleGroupDescriptionBox, GF_ISOM_BOX_TYPE_SGPD);
/*version 0 is deprecated, use v1 by default*/
tmp->version = 1;
tmp->group_descriptions = gf_list_new();
return (GF_Box *)tmp;
}
void sgpd_del(GF_Box *a)
{
GF_SampleGroupDescriptionBox *p = (GF_SampleGroupDescriptionBox *)a;
while (gf_list_count(p->group_descriptions)) {
void *ptr = gf_list_last(p->group_descriptions);
sgpd_del_entry(p->grouping_type, ptr);
gf_list_rem_last(p->group_descriptions);
}
gf_list_del(p->group_descriptions);
gf_free(p);
}
GF_Err sgpd_Read(GF_Box *s, GF_BitStream *bs)
{
u32 entry_count;
GF_SampleGroupDescriptionBox *p = (GF_SampleGroupDescriptionBox *)s;
p->grouping_type = gf_bs_read_u32(bs);
ISOM_DECREASE_SIZE(p, 4);
if (p->version>=1) {
p->default_length = gf_bs_read_u32(bs);
ISOM_DECREASE_SIZE(p, 4);
}
if (p->version>=2) {
p->default_description_index = gf_bs_read_u32(bs);
ISOM_DECREASE_SIZE(p, 4);
}
entry_count = gf_bs_read_u32(bs);
ISOM_DECREASE_SIZE(p, 4);
if (entry_count>p->size)
return GF_ISOM_INVALID_FILE;
while (entry_count) {
void *ptr;
u32 parsed_bytes=0;
u32 size = p->default_length;
if ((p->version>=1) && !size) {
size = gf_bs_read_u32(bs);
ISOM_DECREASE_SIZE(p, 4);
}
ptr = sgpd_parse_entry(p->grouping_type, bs, size, &parsed_bytes);
//don't return an error, just stop parsing so that we skip over the sgpd box
if (!ptr) return GF_OK;
ISOM_DECREASE_SIZE(p, parsed_bytes);
gf_list_add(p->group_descriptions, ptr);
entry_count--;
}
return GF_OK;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err sgpd_Write(GF_Box *s, GF_BitStream *bs)
{
u32 i;
GF_SampleGroupDescriptionBox *p = (GF_SampleGroupDescriptionBox *)s;
GF_Err e;
e = gf_isom_full_box_write(s, bs);
if (e) return e;
gf_bs_write_u32(bs, p->grouping_type);
if (p->version>=1) gf_bs_write_u32(bs, p->default_length);
if (p->version>=2) gf_bs_write_u32(bs, p->default_description_index);
gf_bs_write_u32(bs, gf_list_count(p->group_descriptions) );
for (i=0; i<gf_list_count(p->group_descriptions); i++) {
void *ptr = gf_list_get(p->group_descriptions, i);
if ((p->version >= 1) && !p->default_length) {
u32 size = sgpd_size_entry(p->grouping_type, ptr);
gf_bs_write_u32(bs, size);
}
sgpd_write_entry(p->grouping_type, ptr, bs);
}
return GF_OK;
}
GF_Err sgpd_Size(GF_Box *s)
{
u32 i;
GF_SampleGroupDescriptionBox *p = (GF_SampleGroupDescriptionBox *)s;
p->size += 8;
//we force all sample groups to version 1, v0 being deprecated
p->version=1;
p->size += 4;
if (p->version>=2) p->size += 4;
p->default_length = 0;
for (i=0; i<gf_list_count(p->group_descriptions); i++) {
void *ptr = gf_list_get(p->group_descriptions, i);
u32 size = sgpd_size_entry(p->grouping_type, ptr);
p->size += size;
if (!p->default_length) {
p->default_length = size;
} else if (p->default_length != size) {
p->default_length = 0;
}
}
if (p->version>=1) {
if (!p->default_length) p->size += gf_list_count(p->group_descriptions)*4;
}
return GF_OK;
}
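/* Note: this implements the version 1 default_length optimization - when all
 * entries serialize to the same size, that size is written once in the box
 * header and no per-entry length fields are emitted; as soon as two entries
 * differ, default_length collapses to 0 and sgpd_Write prefixes each entry
 * with its own 32-bit length, adding 4 bytes per description. */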
#endif /*GPAC_DISABLE_ISOM_WRITE*/
void saiz_del(GF_Box *s)
{
GF_SampleAuxiliaryInfoSizeBox*ptr = (GF_SampleAuxiliaryInfoSizeBox*)s;
if (ptr == NULL) return;
if (ptr->sample_info_size) gf_free(ptr->sample_info_size);
gf_free(ptr);
}
GF_Err saiz_Read(GF_Box *s, GF_BitStream *bs)
{
GF_SampleAuxiliaryInfoSizeBox*ptr = (GF_SampleAuxiliaryInfoSizeBox*)s;
if (ptr->flags & 1) {
ptr->aux_info_type = gf_bs_read_u32(bs);
ptr->aux_info_type_parameter = gf_bs_read_u32(bs);
ISOM_DECREASE_SIZE(ptr, 8);
}
ptr->default_sample_info_size = gf_bs_read_u8(bs);
ptr->sample_count = gf_bs_read_u32(bs);
ISOM_DECREASE_SIZE(ptr, 5);
if (ptr->default_sample_info_size == 0) {
if (ptr->size < sizeof(u8)*ptr->sample_count)
return GF_ISOM_INVALID_FILE;
ptr->sample_info_size = gf_malloc(sizeof(u8)*ptr->sample_count);
if (!ptr->sample_info_size)
return GF_OUT_OF_MEM;
gf_bs_read_data(bs, (char *) ptr->sample_info_size, ptr->sample_count);
ISOM_DECREASE_SIZE(ptr, ptr->sample_count);
}
return GF_OK;
}
GF_Box *saiz_New()
{
ISOM_DECL_BOX_ALLOC(GF_SampleAuxiliaryInfoSizeBox, GF_ISOM_BOX_TYPE_SAIZ);
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err saiz_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_SampleAuxiliaryInfoSizeBox*ptr = (GF_SampleAuxiliaryInfoSizeBox*) s;
if (!s) return GF_BAD_PARAM;
e = gf_isom_full_box_write(s, bs);
if (e) return e;
if (ptr->flags & 1) {
gf_bs_write_u32(bs, ptr->aux_info_type);
gf_bs_write_u32(bs, ptr->aux_info_type_parameter);
}
gf_bs_write_u8(bs, ptr->default_sample_info_size);
gf_bs_write_u32(bs, ptr->sample_count);
if (!ptr->default_sample_info_size) {
gf_bs_write_data(bs, (char *) ptr->sample_info_size, ptr->sample_count);
}
return GF_OK;
}
GF_Err saiz_Size(GF_Box *s)
{
GF_SampleAuxiliaryInfoSizeBox *ptr = (GF_SampleAuxiliaryInfoSizeBox*)s;
if (ptr->aux_info_type || ptr->aux_info_type_parameter) {
ptr->flags |= 1;
}
if (ptr->flags & 1) ptr->size += 8;
ptr->size += 5;
if (ptr->default_sample_info_size==0) ptr->size += ptr->sample_count;
return GF_OK;
}
#endif //GPAC_DISABLE_ISOM_WRITE
void saio_del(GF_Box *s)
{
GF_SampleAuxiliaryInfoOffsetBox *ptr = (GF_SampleAuxiliaryInfoOffsetBox*)s;
if (ptr == NULL) return;
if (ptr->offsets) gf_free(ptr->offsets);
if (ptr->offsets_large) gf_free(ptr->offsets_large);
gf_free(ptr);
}
GF_Err saio_Read(GF_Box *s, GF_BitStream *bs)
{
GF_SampleAuxiliaryInfoOffsetBox *ptr = (GF_SampleAuxiliaryInfoOffsetBox *)s;
if (ptr->flags & 1) {
ptr->aux_info_type = gf_bs_read_u32(bs);
ptr->aux_info_type_parameter = gf_bs_read_u32(bs);
ISOM_DECREASE_SIZE(ptr, 8);
}
ptr->entry_count = gf_bs_read_u32(bs);
ISOM_DECREASE_SIZE(ptr, 4);
if (ptr->entry_count) {
u32 i;
if (ptr->version==0) {
if (ptr->size < sizeof(u32)*ptr->entry_count)
return GF_ISOM_INVALID_FILE;
ptr->offsets = gf_malloc(sizeof(u32)*ptr->entry_count);
if (!ptr->offsets)
return GF_OUT_OF_MEM;
for (i=0; i<ptr->entry_count; i++)
ptr->offsets[i] = gf_bs_read_u32(bs);
ISOM_DECREASE_SIZE(ptr, 4*ptr->entry_count);
} else {
if (ptr->size < sizeof(u64)*ptr->entry_count)
return GF_ISOM_INVALID_FILE;
ptr->offsets_large = gf_malloc(sizeof(u64)*ptr->entry_count);
if (!ptr->offsets_large)
return GF_OUT_OF_MEM;
for (i=0; i<ptr->entry_count; i++)
ptr->offsets_large[i] = gf_bs_read_u64(bs);
ISOM_DECREASE_SIZE(ptr, 8*ptr->entry_count);
}
}
return GF_OK;
}
GF_Box *saio_New()
{
ISOM_DECL_BOX_ALLOC(GF_SampleAuxiliaryInfoOffsetBox, GF_ISOM_BOX_TYPE_SAIO);
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err saio_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_SampleAuxiliaryInfoOffsetBox *ptr = (GF_SampleAuxiliaryInfoOffsetBox *) s;
if (!s) return GF_BAD_PARAM;
e = gf_isom_full_box_write(s, bs);
if (e) return e;
if (ptr->flags & 1) {
gf_bs_write_u32(bs, ptr->aux_info_type);
gf_bs_write_u32(bs, ptr->aux_info_type_parameter);
}
gf_bs_write_u32(bs, ptr->entry_count);
if (ptr->entry_count) {
u32 i;
//store position in bitstream before writing data - offsets can be NULL if a single offset is rewritten later on (cf senc_write)
ptr->offset_first_offset_field = gf_bs_get_position(bs);
if (ptr->version==0) {
if (!ptr->offsets) {
gf_bs_write_u32(bs, 0);
} else {
for (i=0; i<ptr->entry_count; i++)
gf_bs_write_u32(bs, ptr->offsets[i]);
}
} else {
if (!ptr->offsets_large) {
gf_bs_write_u64(bs, 0);
} else {
for (i=0; i<ptr->entry_count; i++)
gf_bs_write_u64(bs, ptr->offsets_large[i]);
}
}
}
return GF_OK;
}
GF_Err saio_Size(GF_Box *s)
{
GF_SampleAuxiliaryInfoOffsetBox *ptr = (GF_SampleAuxiliaryInfoOffsetBox*)s;
if (ptr->aux_info_type || ptr->aux_info_type_parameter) {
ptr->flags |= 1;
}
if (ptr->offsets_large) {
ptr->version = 1;
}
if (ptr->flags & 1) ptr->size += 8;
ptr->size += 4;
//a little optim here: in cenc, the saio always points to a single data block, only one entry is needed
switch (ptr->aux_info_type) {
case GF_ISOM_CENC_SCHEME:
case GF_ISOM_CBC_SCHEME:
case GF_ISOM_CENS_SCHEME:
case GF_ISOM_CBCS_SCHEME:
if (ptr->offsets_large) gf_free(ptr->offsets_large);
if (ptr->offsets) gf_free(ptr->offsets);
ptr->offsets_large = NULL;
ptr->offsets = NULL;
ptr->entry_count = 1;
break;
}
ptr->size += ((ptr->version==1) ? 8 : 4) * ptr->entry_count;
return GF_OK;
}
#endif //GPAC_DISABLE_ISOM_WRITE
void prft_del(GF_Box *s)
{
gf_free(s);
}
GF_Err prft_Read(GF_Box *s,GF_BitStream *bs)
{
GF_ProducerReferenceTimeBox *ptr = (GF_ProducerReferenceTimeBox *) s;
	ISOM_DECREASE_SIZE(ptr, 12);
	ptr->refTrackID = gf_bs_read_u32(bs);
	ptr->ntp = gf_bs_read_u64(bs);
	if (ptr->version==0) {
		ISOM_DECREASE_SIZE(ptr, 4);
		ptr->timestamp = gf_bs_read_u32(bs);
	} else {
		ISOM_DECREASE_SIZE(ptr, 8);
		ptr->timestamp = gf_bs_read_u64(bs);
	}
return GF_OK;
}
GF_Box *prft_New()
{
ISOM_DECL_BOX_ALLOC(GF_ProducerReferenceTimeBox, GF_ISOM_BOX_TYPE_PRFT);
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err prft_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_ProducerReferenceTimeBox *ptr = (GF_ProducerReferenceTimeBox *) s;
if (!s) return GF_BAD_PARAM;
e = gf_isom_full_box_write(s, bs);
if (e) return e;
gf_bs_write_u32(bs, ptr->refTrackID);
gf_bs_write_u64(bs, ptr->ntp);
if (ptr->version==0) {
gf_bs_write_u32(bs, (u32) ptr->timestamp);
} else {
gf_bs_write_u64(bs, ptr->timestamp);
}
return GF_OK;
}
GF_Err prft_Size(GF_Box *s)
{
GF_ProducerReferenceTimeBox *ptr = (GF_ProducerReferenceTimeBox*)s;
ptr->size += 4+8+ (ptr->version ? 8 : 4);
return GF_OK;
}
#endif //GPAC_DISABLE_ISOM_WRITE
GF_Box *trgr_New()
{
ISOM_DECL_BOX_ALLOC(GF_TrackGroupBox, GF_ISOM_BOX_TYPE_TRGR);
tmp->groups = gf_list_new();
if (!tmp->groups) {
gf_free(tmp);
return NULL;
}
return (GF_Box *)tmp;
}
void trgr_del(GF_Box *s)
{
GF_TrackGroupBox *ptr = (GF_TrackGroupBox *)s;
if (ptr == NULL) return;
gf_isom_box_array_del(ptr->groups);
gf_free(ptr);
}
GF_Err trgr_AddBox(GF_Box *s, GF_Box *a)
{
GF_TrackGroupBox *ptr = (GF_TrackGroupBox *)s;
return gf_list_add(ptr->groups, a);
}
GF_Err trgr_Read(GF_Box *s, GF_BitStream *bs)
{
return gf_isom_box_array_read_ex(s, bs, trgr_AddBox, s->type);
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err trgr_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_TrackGroupBox *ptr = (GF_TrackGroupBox *) s;
if (!s) return GF_BAD_PARAM;
e = gf_isom_box_write_header(s, bs);
if (e) return e;
return gf_isom_box_array_write(s, ptr->groups, bs);
}
GF_Err trgr_Size(GF_Box *s)
{
GF_TrackGroupBox *ptr = (GF_TrackGroupBox *)s;
return gf_isom_box_array_size(s, ptr->groups);
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
GF_Box *trgt_New()
{
ISOM_DECL_BOX_ALLOC(GF_TrackGroupTypeBox, GF_ISOM_BOX_TYPE_TRGT);
return (GF_Box *)tmp;
}
void trgt_del(GF_Box *s)
{
GF_TrackGroupTypeBox *ptr = (GF_TrackGroupTypeBox *)s;
if (ptr == NULL) return;
gf_free(ptr);
}
GF_Err trgt_Read(GF_Box *s, GF_BitStream *bs)
{
GF_TrackGroupTypeBox *ptr = (GF_TrackGroupTypeBox *)s;
ptr->track_group_id = gf_bs_read_u32(bs);
ISOM_DECREASE_SIZE(ptr, 4);
return GF_OK;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err trgt_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_TrackGroupTypeBox *ptr = (GF_TrackGroupTypeBox *) s;
if (!s) return GF_BAD_PARAM;
s->type = ptr->group_type;
e = gf_isom_full_box_write(s, bs);
s->type = GF_ISOM_BOX_TYPE_TRGT;
if (e) return e;
gf_bs_write_u32(bs, ptr->track_group_id);
return GF_OK;
}
GF_Err trgt_Size(GF_Box *s)
{
	GF_TrackGroupTypeBox *ptr = (GF_TrackGroupTypeBox *)s;
	ptr->size+= 4;
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
GF_Box *stvi_New()
{
ISOM_DECL_BOX_ALLOC(GF_StereoVideoBox, GF_ISOM_BOX_TYPE_STVI);
return (GF_Box *)tmp;
}
void stvi_del(GF_Box *s)
{
GF_StereoVideoBox *ptr = (GF_StereoVideoBox *)s;
if (ptr == NULL) return;
if (ptr->stereo_indication_type) gf_free(ptr->stereo_indication_type);
gf_free(ptr);
}
GF_Err stvi_Read(GF_Box *s, GF_BitStream *bs)
{
GF_StereoVideoBox *ptr = (GF_StereoVideoBox *)s;
ISOM_DECREASE_SIZE(ptr, 12);
gf_bs_read_int(bs, 30);
ptr->single_view_allowed = gf_bs_read_int(bs, 2);
ptr->stereo_scheme = gf_bs_read_u32(bs);
ptr->sit_len = gf_bs_read_u32(bs);
	//ISOM_DECREASE_SIZE already fails on boxes with fewer than sit_len remaining bytes, bounding the allocation below
	ISOM_DECREASE_SIZE(ptr, ptr->sit_len);
ptr->stereo_indication_type = gf_malloc(sizeof(char)*ptr->sit_len);
if (!ptr->stereo_indication_type)
return GF_OUT_OF_MEM;
gf_bs_read_data(bs, ptr->stereo_indication_type, ptr->sit_len);
return GF_OK;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err stvi_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_StereoVideoBox *ptr = (GF_StereoVideoBox *) s;
if (!s) return GF_BAD_PARAM;
e = gf_isom_full_box_write(s, bs);
if (e) return e;
gf_bs_write_int(bs, 0, 30);
gf_bs_write_int(bs, ptr->single_view_allowed, 2);
gf_bs_write_u32(bs, ptr->stereo_scheme);
gf_bs_write_u32(bs, ptr->sit_len);
gf_bs_write_data(bs, ptr->stereo_indication_type, ptr->sit_len);
return GF_OK;
}
GF_Err stvi_Size(GF_Box *s)
{
GF_StereoVideoBox *ptr = (GF_StereoVideoBox *)s;
ptr->size+= 12 + ptr->sit_len;
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
GF_Box *fiin_New()
{
ISOM_DECL_BOX_ALLOC(FDItemInformationBox, GF_ISOM_BOX_TYPE_FIIN);
return (GF_Box *)tmp;
}
void fiin_del(GF_Box *s)
{
FDItemInformationBox *ptr = (FDItemInformationBox *)s;
if (ptr == NULL) return;
if (ptr->partition_entries) gf_isom_box_array_del(ptr->partition_entries);
if (ptr->session_info) gf_isom_box_del((GF_Box*)ptr->session_info);
if (ptr->group_id_to_name) gf_isom_box_del((GF_Box*)ptr->group_id_to_name);
gf_free(ptr);
}
GF_Err fiin_AddBox(GF_Box *s, GF_Box *a)
{
FDItemInformationBox *ptr = (FDItemInformationBox *)s;
switch(a->type) {
case GF_ISOM_BOX_TYPE_PAEN:
if (!ptr->partition_entries) ptr->partition_entries = gf_list_new();
return gf_list_add(ptr->partition_entries, a);
case GF_ISOM_BOX_TYPE_SEGR:
if (ptr->session_info) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->session_info = (FDSessionGroupBox *)a;
return GF_OK;
case GF_ISOM_BOX_TYPE_GITN:
if (ptr->group_id_to_name) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->group_id_to_name = (GroupIdToNameBox *)a;
return GF_OK;
default:
return gf_isom_box_add_default(s, a);
}
return GF_OK;
}
GF_Err fiin_Read(GF_Box *s, GF_BitStream *bs)
{
FDItemInformationBox *ptr = (FDItemInformationBox *)s;
ISOM_DECREASE_SIZE(ptr, 2);
gf_bs_read_u16(bs);
return gf_isom_box_array_read(s, bs, fiin_AddBox);
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err fiin_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
FDItemInformationBox *ptr = (FDItemInformationBox *) s;
if (!s) return GF_BAD_PARAM;
e = gf_isom_full_box_write(s, bs);
if (e) return e;
gf_bs_write_u16(bs, gf_list_count(ptr->partition_entries) );
e = gf_isom_box_array_write(s, ptr->partition_entries, bs);
if (e) return e;
	if (ptr->session_info) {
		e = gf_isom_box_write((GF_Box*)ptr->session_info, bs);
		if (e) return e;
	}
	if (ptr->group_id_to_name) {
		e = gf_isom_box_write((GF_Box*)ptr->group_id_to_name, bs);
		if (e) return e;
	}
return GF_OK;
}
GF_Err fiin_Size(GF_Box *s)
{
GF_Err e;
FDItemInformationBox *ptr = (FDItemInformationBox *)s;
ptr->size+= 2;
if (ptr->partition_entries) {
e = gf_isom_box_array_size(s, ptr->partition_entries);
if (e) return e;
}
if (ptr->session_info) {
e = gf_isom_box_size((GF_Box *)ptr->session_info);
if (e) return e;
ptr->size += ptr->session_info->size;
}
if (ptr->group_id_to_name) {
e = gf_isom_box_size((GF_Box *) ptr->group_id_to_name);
if (e) return e;
ptr->size += ptr->group_id_to_name->size;
}
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
GF_Box *paen_New()
{
ISOM_DECL_BOX_ALLOC(FDPartitionEntryBox, GF_ISOM_BOX_TYPE_PAEN);
return (GF_Box *)tmp;
}
void paen_del(GF_Box *s)
{
FDPartitionEntryBox *ptr = (FDPartitionEntryBox *)s;
if (ptr == NULL) return;
if (ptr->blocks_and_symbols) gf_isom_box_del((GF_Box*)ptr->blocks_and_symbols);
if (ptr->FEC_symbol_locations) gf_isom_box_del((GF_Box*)ptr->FEC_symbol_locations);
if (ptr->File_symbol_locations) gf_isom_box_del((GF_Box*)ptr->File_symbol_locations);
gf_free(ptr);
}
GF_Err paen_AddBox(GF_Box *s, GF_Box *a)
{
FDPartitionEntryBox *ptr = (FDPartitionEntryBox *)s;
switch(a->type) {
case GF_ISOM_BOX_TYPE_FPAR:
if (ptr->blocks_and_symbols) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->blocks_and_symbols = (FilePartitionBox *)a;
return GF_OK;
case GF_ISOM_BOX_TYPE_FECR:
if (ptr->FEC_symbol_locations) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->FEC_symbol_locations = (FECReservoirBox *)a;
return GF_OK;
case GF_ISOM_BOX_TYPE_FIRE:
if (ptr->File_symbol_locations) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->File_symbol_locations = (FileReservoirBox *)a;
return GF_OK;
default:
return gf_isom_box_add_default(s, a);
}
return GF_OK;
}
GF_Err paen_Read(GF_Box *s, GF_BitStream *bs)
{
	return gf_isom_box_array_read(s, bs, paen_AddBox);
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err paen_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
FDPartitionEntryBox *ptr = (FDPartitionEntryBox *) s;
if (!s) return GF_BAD_PARAM;
e = gf_isom_box_write_header(s, bs);
if (e) return e;
if (ptr->blocks_and_symbols) {
e = gf_isom_box_write((GF_Box *)ptr->blocks_and_symbols, bs);
if (e) return e;
}
if (ptr->FEC_symbol_locations) {
e = gf_isom_box_write((GF_Box *)ptr->FEC_symbol_locations, bs);
if (e) return e;
}
if (ptr->File_symbol_locations) {
e = gf_isom_box_write((GF_Box *)ptr->File_symbol_locations, bs);
if (e) return e;
}
return GF_OK;
}
GF_Err paen_Size(GF_Box *s)
{
GF_Err e;
FDPartitionEntryBox *ptr = (FDPartitionEntryBox *)s;
if (ptr->blocks_and_symbols) {
e = gf_isom_box_size((GF_Box *)ptr->blocks_and_symbols);
if (e) return e;
ptr->size += ptr->blocks_and_symbols->size;
}
if (ptr->FEC_symbol_locations) {
e = gf_isom_box_size((GF_Box *) ptr->FEC_symbol_locations);
if (e) return e;
ptr->size += ptr->FEC_symbol_locations->size;
}
if (ptr->File_symbol_locations) {
e = gf_isom_box_size((GF_Box *) ptr->File_symbol_locations);
if (e) return e;
ptr->size += ptr->File_symbol_locations->size;
}
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
GF_Box *fpar_New()
{
ISOM_DECL_BOX_ALLOC(FilePartitionBox, GF_ISOM_BOX_TYPE_FPAR);
return (GF_Box *)tmp;
}
void fpar_del(GF_Box *s)
{
FilePartitionBox *ptr = (FilePartitionBox *)s;
if (ptr == NULL) return;
if (ptr->scheme_specific_info) gf_free(ptr->scheme_specific_info);
if (ptr->entries) gf_free(ptr->entries);
gf_free(ptr);
}
GF_Err gf_isom_read_null_terminated_string(GF_Box *s, GF_BitStream *bs, u64 size, char **out_str)
{
u32 len=10;
u32 i=0;
	*out_str = gf_malloc(sizeof(char)*len);
	if (! (*out_str)) return GF_OUT_OF_MEM;
while (1) {
ISOM_DECREASE_SIZE(s, 1 );
(*out_str)[i] = gf_bs_read_u8(bs);
if (!(*out_str)[i]) break;
i++;
		if (i==len) {
			char *new_str;
			len += 10;
			new_str = gf_realloc(*out_str, sizeof(char)*len);
			if (!new_str) {
				gf_free(*out_str);
				*out_str = NULL;
				return GF_OUT_OF_MEM;
			}
			*out_str = new_str;
		}
if (gf_bs_available(bs) == 0) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] missing null character in null terminated string\n"));
(*out_str)[i] = 0;
return GF_OK;
}
if (i >= size) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] string bigger than container, probably missing null character\n"));
(*out_str)[i] = 0;
return GF_OK;
}
}
return GF_OK;
}
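/* Usage sketch for the helper above (compiled out; the buffer, box variable
 * and names are hypothetical). gf_bs_new with GF_BITSTREAM_READ wraps an
 * in-memory buffer: */
#if 0
{
	const char payload[] = { 'a', 'b', 'c', 0 };
	GF_BitStream *mem_bs = gf_bs_new(payload, 4, GF_BITSTREAM_READ);
	char *str = NULL;
	GF_Err err = gf_isom_read_null_terminated_string(some_box, mem_bs, some_box->size, &str);
	if (err == GF_OK) {
		/*str now holds "abc" and is owned by the caller*/
		gf_free(str);
	}
	gf_bs_del(mem_bs);
}
#endif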
GF_Err fpar_Read(GF_Box *s, GF_BitStream *bs)
{
u32 i;
GF_Err e;
FilePartitionBox *ptr = (FilePartitionBox *)s;
ISOM_DECREASE_SIZE(ptr, ((ptr->version ? 4 : 2) + 12) );
ptr->itemID = gf_bs_read_int(bs, ptr->version ? 32 : 16);
ptr->packet_payload_size = gf_bs_read_u16(bs);
gf_bs_read_u8(bs);
ptr->FEC_encoding_ID = gf_bs_read_u8(bs);
ptr->FEC_instance_ID = gf_bs_read_u16(bs);
ptr->max_source_block_length = gf_bs_read_u16(bs);
ptr->encoding_symbol_length = gf_bs_read_u16(bs);
ptr->max_number_of_encoding_symbols = gf_bs_read_u16(bs);
e = gf_isom_read_null_terminated_string(s, bs, ptr->size, &ptr->scheme_specific_info);
if (e) return e;
ISOM_DECREASE_SIZE(ptr, (ptr->version ? 4 : 2) );
ptr->nb_entries = gf_bs_read_int(bs, ptr->version ? 32 : 16);
if (ptr->nb_entries > UINT_MAX / 6)
return GF_ISOM_INVALID_FILE;
ISOM_DECREASE_SIZE(ptr, ptr->nb_entries * 6 );
	GF_SAFE_ALLOC_N(ptr->entries, ptr->nb_entries, FilePartitionEntry);
	if (!ptr->entries) return GF_OUT_OF_MEM;
for (i=0;i < ptr->nb_entries; i++) {
ptr->entries[i].block_count = gf_bs_read_u16(bs);
ptr->entries[i].block_size = gf_bs_read_u32(bs);
}
return GF_OK;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err fpar_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
u32 i;
FilePartitionBox *ptr = (FilePartitionBox *) s;
if (!s) return GF_BAD_PARAM;
e = gf_isom_full_box_write(s, bs);
if (e) return e;
gf_bs_write_int(bs, ptr->itemID, ptr->version ? 32 : 16);
gf_bs_write_u16(bs, ptr->packet_payload_size);
gf_bs_write_u8(bs, 0);
gf_bs_write_u8(bs, ptr->FEC_encoding_ID);
gf_bs_write_u16(bs, ptr->FEC_instance_ID);
gf_bs_write_u16(bs, ptr->max_source_block_length);
gf_bs_write_u16(bs, ptr->encoding_symbol_length);
gf_bs_write_u16(bs, ptr->max_number_of_encoding_symbols);
if (ptr->scheme_specific_info) {
gf_bs_write_data(bs, ptr->scheme_specific_info, (u32)strlen(ptr->scheme_specific_info) );
}
//null terminated string
gf_bs_write_u8(bs, 0);
gf_bs_write_int(bs, ptr->nb_entries, ptr->version ? 32 : 16);
for (i=0;i < ptr->nb_entries; i++) {
gf_bs_write_u16(bs, ptr->entries[i].block_count);
gf_bs_write_u32(bs, ptr->entries[i].block_size);
}
return GF_OK;
}
GF_Err fpar_Size(GF_Box *s)
{
FilePartitionBox *ptr = (FilePartitionBox *)s;
ptr->size += 13 + (ptr->version ? 8 : 4);
if (ptr->scheme_specific_info)
ptr->size += strlen(ptr->scheme_specific_info);
ptr->size+= ptr->nb_entries * 6;
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
GF_Box *fecr_New()
{
ISOM_DECL_BOX_ALLOC(FECReservoirBox, GF_ISOM_BOX_TYPE_FECR);
return (GF_Box *)tmp;
}
void fecr_del(GF_Box *s)
{
FECReservoirBox *ptr = (FECReservoirBox *)s;
if (ptr == NULL) return;
if (ptr->entries) gf_free(ptr->entries);
gf_free(ptr);
}
GF_Err fecr_Read(GF_Box *s, GF_BitStream *bs)
{
u32 i;
FECReservoirBox *ptr = (FECReservoirBox *)s;
ISOM_DECREASE_SIZE(ptr, (ptr->version ? 4 : 2) );
ptr->nb_entries = gf_bs_read_int(bs, ptr->version ? 32 : 16);
	ISOM_DECREASE_SIZE(ptr, (u64)ptr->nb_entries * (ptr->version ? 8 : 6) );
	GF_SAFE_ALLOC_N(ptr->entries, ptr->nb_entries, FECReservoirEntry);
	if (!ptr->entries) return GF_OUT_OF_MEM;
for (i=0; i<ptr->nb_entries; i++) {
ptr->entries[i].item_id = gf_bs_read_int(bs, ptr->version ? 32 : 16);
ptr->entries[i].symbol_count = gf_bs_read_u32(bs);
}
return GF_OK;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err fecr_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
u32 i;
FECReservoirBox *ptr = (FECReservoirBox *) s;
if (!s) return GF_BAD_PARAM;
e = gf_isom_full_box_write(s, bs);
if (e) return e;
gf_bs_write_int(bs, ptr->nb_entries, ptr->version ? 32 : 16);
for (i=0; i<ptr->nb_entries; i++) {
gf_bs_write_int(bs, ptr->entries[i].item_id, ptr->version ? 32 : 16);
gf_bs_write_u32(bs, ptr->entries[i].symbol_count);
}
return GF_OK;
}
GF_Err fecr_Size(GF_Box *s)
{
FECReservoirBox *ptr = (FECReservoirBox *)s;
ptr->size += (ptr->version ? 4 : 2) + ptr->nb_entries * (ptr->version ? 8 : 6);
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
GF_Box *segr_New()
{
ISOM_DECL_BOX_ALLOC(FDSessionGroupBox, GF_ISOM_BOX_TYPE_SEGR);
return (GF_Box *)tmp;
}
void segr_del(GF_Box *s)
{
u32 i;
FDSessionGroupBox *ptr = (FDSessionGroupBox *)s;
if (ptr == NULL) return;
for (i=0; i<ptr->num_session_groups; i++) {
if (ptr->session_groups[i].group_ids) gf_free(ptr->session_groups[i].group_ids);
if (ptr->session_groups[i].channels) gf_free(ptr->session_groups[i].channels);
}
if (ptr->session_groups) gf_free(ptr->session_groups);
gf_free(ptr);
}
GF_Err segr_Read(GF_Box *s, GF_BitStream *bs)
{
u32 i, k;
FDSessionGroupBox *ptr = (FDSessionGroupBox *)s;
ISOM_DECREASE_SIZE(ptr, 2);
ptr->num_session_groups = gf_bs_read_u16(bs);
if (ptr->num_session_groups*3>ptr->size) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid number of entries %d in segr\n", ptr->num_session_groups));
ptr->num_session_groups = 0;
return GF_ISOM_INVALID_FILE;
}
	GF_SAFE_ALLOC_N(ptr->session_groups, ptr->num_session_groups, SessionGroupEntry);
	if (!ptr->session_groups) return GF_OUT_OF_MEM;
	for (i=0; i<ptr->num_session_groups; i++) {
		ISOM_DECREASE_SIZE(ptr, 1);
		ptr->session_groups[i].nb_groups = gf_bs_read_u8(bs);
		GF_SAFE_ALLOC_N(ptr->session_groups[i].group_ids, ptr->session_groups[i].nb_groups, u32);
		if (!ptr->session_groups[i].group_ids) return GF_OUT_OF_MEM;
		for (k=0; k<ptr->session_groups[i].nb_groups; k++) {
			ISOM_DECREASE_SIZE(ptr, 4);
			ptr->session_groups[i].group_ids[k] = gf_bs_read_u32(bs);
		}
		ISOM_DECREASE_SIZE(ptr, 2);
		ptr->session_groups[i].nb_channels = gf_bs_read_u16(bs);
		GF_SAFE_ALLOC_N(ptr->session_groups[i].channels, ptr->session_groups[i].nb_channels, u32);
		if (!ptr->session_groups[i].channels) return GF_OUT_OF_MEM;
		for (k=0; k<ptr->session_groups[i].nb_channels; k++) {
			ISOM_DECREASE_SIZE(ptr, 4);
			ptr->session_groups[i].channels[k] = gf_bs_read_u32(bs);
		}
	}
}
return GF_OK;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err segr_Write(GF_Box *s, GF_BitStream *bs)
{
u32 i, k;
GF_Err e;
FDSessionGroupBox *ptr = (FDSessionGroupBox *) s;
if (!s) return GF_BAD_PARAM;
e = gf_isom_box_write_header(s, bs);
if (e) return e;
gf_bs_write_u16(bs, ptr->num_session_groups);
for (i=0; i<ptr->num_session_groups; i++) {
gf_bs_write_u8(bs, ptr->session_groups[i].nb_groups);
for (k=0; k<ptr->session_groups[i].nb_groups; k++) {
gf_bs_write_u32(bs, ptr->session_groups[i].group_ids[k]);
}
gf_bs_write_u16(bs, ptr->session_groups[i].nb_channels);
for (k=0; k<ptr->session_groups[i].nb_channels; k++) {
gf_bs_write_u32(bs, ptr->session_groups[i].channels[k]);
}
}
return GF_OK;
}
GF_Err segr_Size(GF_Box *s)
{
u32 i;
FDSessionGroupBox *ptr = (FDSessionGroupBox *)s;
ptr->size += 2;
for (i=0; i<ptr->num_session_groups; i++) {
ptr->size += 1 + 4*ptr->session_groups[i].nb_groups;
ptr->size += 2 + 4*ptr->session_groups[i].nb_channels;
}
return GF_OK;
}
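/* Note: the segr wire format is u16 num_session_groups, then per group a u8
 * group count followed by that many u32 group IDs, and a u16 channel count
 * followed by that many u32 channel IDs - exactly what segr_Size sums.
 * Illustrative helper (compiled out, name hypothetical): */
#if 0
static u64 segr_group_bytes(u8 nb_groups, u16 nb_channels)
{
	return 1 + 4*(u64)nb_groups + 2 + 4*(u64)nb_channels;
}
#endif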
#endif /*GPAC_DISABLE_ISOM_WRITE*/
GF_Box *gitn_New()
{
ISOM_DECL_BOX_ALLOC(GroupIdToNameBox, GF_ISOM_BOX_TYPE_GITN);
return (GF_Box *)tmp;
}
void gitn_del(GF_Box *s)
{
u32 i;
GroupIdToNameBox *ptr = (GroupIdToNameBox *)s;
if (ptr == NULL) return;
for (i=0; i<ptr->nb_entries; i++) {
if (ptr->entries[i].name) gf_free(ptr->entries[i].name);
}
if (ptr->entries) gf_free(ptr->entries);
gf_free(ptr);
}
GF_Err gitn_Read(GF_Box *s, GF_BitStream *bs)
{
u32 i;
GF_Err e;
GroupIdToNameBox *ptr = (GroupIdToNameBox *)s;
ISOM_DECREASE_SIZE(ptr, 2);
ptr->nb_entries = gf_bs_read_u16(bs);
	GF_SAFE_ALLOC_N(ptr->entries, ptr->nb_entries, GroupIdNameEntry);
	if (!ptr->entries) return GF_OUT_OF_MEM;
for (i=0; i<ptr->nb_entries; i++) {
ISOM_DECREASE_SIZE(ptr, 4);
ptr->entries[i].group_id = gf_bs_read_u32(bs);
e = gf_isom_read_null_terminated_string(s, bs, ptr->size, &ptr->entries[i].name);
if (e) return e;
}
return GF_OK;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err gitn_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
u32 i;
GroupIdToNameBox *ptr = (GroupIdToNameBox *) s;
if (!s) return GF_BAD_PARAM;
e = gf_isom_full_box_write(s, bs);
if (e) return e;
gf_bs_write_u16(bs, ptr->nb_entries);
for (i=0; i<ptr->nb_entries; i++) {
gf_bs_write_u32(bs, ptr->entries[i].group_id);
if (ptr->entries[i].name) gf_bs_write_data(bs, ptr->entries[i].name, (u32)strlen(ptr->entries[i].name) );
gf_bs_write_u8(bs, 0);
}
return GF_OK;
}
GF_Err gitn_Size(GF_Box *s)
{
u32 i;
GroupIdToNameBox *ptr = (GroupIdToNameBox *)s;
ptr->size += 2;
for (i=0; i<ptr->nb_entries; i++) {
ptr->size += 5;
if (ptr->entries[i].name) ptr->size += strlen(ptr->entries[i].name);
}
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
#ifndef GPAC_DISABLE_ISOM_HINTING
GF_Box *fdpa_New()
{
ISOM_DECL_BOX_ALLOC(GF_FDpacketBox, GF_ISOM_BOX_TYPE_FDPA);
return (GF_Box *)tmp;
}
void fdpa_del(GF_Box *s)
{
u32 i;
GF_FDpacketBox *ptr = (GF_FDpacketBox *)s;
if (ptr == NULL) return;
if (ptr->headers) {
for (i=0; i<ptr->header_ext_count; i++) {
if (ptr->headers[i].data) gf_free(ptr->headers[i].data);
}
gf_free(ptr->headers);
}
gf_free(ptr);
}
GF_Err fdpa_Read(GF_Box *s, GF_BitStream *bs)
{
u32 i;
GF_FDpacketBox *ptr = (GF_FDpacketBox *)s;
ISOM_DECREASE_SIZE(ptr, 3);
ptr->info.sender_current_time_present = gf_bs_read_int(bs, 1);
ptr->info.expected_residual_time_present = gf_bs_read_int(bs, 1);
ptr->info.session_close_bit = gf_bs_read_int(bs, 1);
ptr->info.object_close_bit = gf_bs_read_int(bs, 1);
gf_bs_read_int(bs, 4);
ptr->info.transport_object_identifier = gf_bs_read_u16(bs);
ISOM_DECREASE_SIZE(ptr, 2);
ptr->header_ext_count = gf_bs_read_u16(bs);
if (ptr->header_ext_count*2>ptr->size) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid number of entries %d in fdpa\n", ptr->header_ext_count));
return GF_ISOM_INVALID_FILE;
}
	GF_SAFE_ALLOC_N(ptr->headers, ptr->header_ext_count, GF_LCTheaderExtension);
	if (!ptr->headers) return GF_OUT_OF_MEM;
	for (i=0; i<ptr->header_ext_count; i++) {
		ISOM_DECREASE_SIZE(ptr, 1);
		ptr->headers[i].header_extension_type = gf_bs_read_u8(bs);
		if (ptr->headers[i].header_extension_type > 127) {
			ISOM_DECREASE_SIZE(ptr, 3);
			gf_bs_read_data(bs, (char *) ptr->headers[i].content, 3);
		} else {
			ISOM_DECREASE_SIZE(ptr, 1);
			ptr->headers[i].data_length = gf_bs_read_u8(bs);
			if (ptr->headers[i].data_length) {
				ptr->headers[i].data_length = 4*ptr->headers[i].data_length - 2;
				ISOM_DECREASE_SIZE(ptr, ptr->headers[i].data_length);
				ptr->headers[i].data = gf_malloc(sizeof(char) * ptr->headers[i].data_length);
				if (!ptr->headers[i].data)
					return GF_OUT_OF_MEM;
				gf_bs_read_data(bs, ptr->headers[i].data, ptr->headers[i].data_length);
			}
		}
}
return GF_OK;
}
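/* Note: per the LCT header-extension encoding, a type above 127 denotes a
 * fixed 4-byte extension (1 type byte + 3 content bytes); otherwise the next
 * byte is a length HEL counted in 32-bit words, so the variable payload is
 * 4*HEL - 2 bytes once the type and HEL bytes are subtracted - the inverse of
 * the (data_length+2)/4 computation in fdpa_Write below. Illustrative helper
 * (compiled out, name hypothetical): */
#if 0
static u32 lct_ext_payload_bytes(u8 hel_words) /*hel_words >= 1*/
{
	return 4*(u32)hel_words - 2; /*e.g. HEL=2 -> 8-byte extension -> 6 payload bytes*/
}
#endif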
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err fdpa_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
u32 i;
GF_FDpacketBox *ptr = (GF_FDpacketBox *) s;
if (!s) return GF_BAD_PARAM;
e = gf_isom_box_write_header(s, bs);
if (e) return e;
gf_bs_write_int(bs, ptr->info.sender_current_time_present, 1);
gf_bs_write_int(bs, ptr->info.expected_residual_time_present, 1);
gf_bs_write_int(bs, ptr->info.session_close_bit, 1);
gf_bs_write_int(bs, ptr->info.object_close_bit, 1);
gf_bs_write_int(bs, 0, 4);
	gf_bs_write_u16(bs, ptr->info.transport_object_identifier);
gf_bs_write_u16(bs, ptr->header_ext_count);
for (i=0; i<ptr->header_ext_count; i++) {
gf_bs_write_u8(bs, ptr->headers[i].header_extension_type);
if (ptr->headers[i].header_extension_type > 127) {
gf_bs_write_data(bs, (const char *) ptr->headers[i].content, 3);
} else {
gf_bs_write_u8(bs, ptr->headers[i].data_length ? (ptr->headers[i].data_length+2)/4 : 0);
if (ptr->headers[i].data_length) {
gf_bs_write_data(bs, ptr->headers[i].data, ptr->headers[i].data_length);
}
}
}
return GF_OK;
}
GF_Err fdpa_Size(GF_Box *s)
{
u32 i;
GF_FDpacketBox *ptr = (GF_FDpacketBox *)s;
ptr->size += 5;
for (i=0; i<ptr->header_ext_count; i++) {
ptr->size += 1;
if (ptr->headers[i].header_extension_type > 127) {
ptr->size += 3;
} else {
ptr->size += 1 + ptr->headers[i].data_length;
}
}
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
GF_Box *extr_New()
{
ISOM_DECL_BOX_ALLOC(GF_ExtraDataBox, GF_ISOM_BOX_TYPE_EXTR);
return (GF_Box *)tmp;
}
void extr_del(GF_Box *s)
{
GF_ExtraDataBox *ptr = (GF_ExtraDataBox *)s;
if (ptr == NULL) return;
if (ptr->feci) gf_isom_box_del((GF_Box*)ptr->feci);
if (ptr->data) gf_free(ptr->data);
gf_free(ptr);
}
GF_Err extr_Read(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_ExtraDataBox *ptr = (GF_ExtraDataBox *)s;
e = gf_isom_box_parse((GF_Box**) &ptr->feci, bs);
if (e) return e;
if (!ptr->feci || ptr->feci->size > ptr->size) return GF_ISOM_INVALID_MEDIA;
ptr->data_length = (u32) (ptr->size - ptr->feci->size);
ptr->data = gf_malloc(sizeof(char)*ptr->data_length);
if (!ptr->data)
return GF_OUT_OF_MEM;
gf_bs_read_data(bs, ptr->data, ptr->data_length);
return GF_OK;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err extr_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_ExtraDataBox *ptr = (GF_ExtraDataBox *) s;
if (!s) return GF_BAD_PARAM;
e = gf_isom_box_write_header(s, bs);
if (e) return e;
if (ptr->feci) {
e = gf_isom_box_write((GF_Box *)ptr->feci, bs);
if (e) return e;
}
gf_bs_write_data(bs, ptr->data, ptr->data_length);
return GF_OK;
}
GF_Err extr_Size(GF_Box *s)
{
GF_Err e;
GF_ExtraDataBox *ptr = (GF_ExtraDataBox *) s;
if (ptr->feci) {
e = gf_isom_box_size((GF_Box *)ptr->feci);
if (e) return e;
ptr->size += ptr->feci->size;
}
ptr->size += ptr->data_length;
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
GF_Box *fdsa_New()
{
ISOM_DECL_BOX_ALLOC(GF_HintSample, GF_ISOM_BOX_TYPE_FDSA);
if (!tmp) return NULL;
tmp->packetTable = gf_list_new();
tmp->hint_subtype = GF_ISOM_BOX_TYPE_FDP_STSD;
return (GF_Box*)tmp;
}
void fdsa_del(GF_Box *s)
{
GF_HintSample *ptr = (GF_HintSample *)s;
gf_isom_box_array_del(ptr->packetTable);
if (ptr->extra_data) gf_isom_box_del((GF_Box*)ptr->extra_data);
gf_free(ptr);
}
GF_Err fdsa_AddBox(GF_Box *s, GF_Box *a)
{
GF_HintSample *ptr = (GF_HintSample *)s;
switch(a->type) {
case GF_ISOM_BOX_TYPE_FDPA:
gf_list_add(ptr->packetTable, a);
break;
case GF_ISOM_BOX_TYPE_EXTR:
if (ptr->extra_data) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->extra_data = (GF_ExtraDataBox*)a;
break;
default:
return gf_isom_box_add_default(s, a);
}
return GF_OK;
}
GF_Err fdsa_Read(GF_Box *s, GF_BitStream *bs)
{
return gf_isom_box_array_read(s, bs, fdsa_AddBox);
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err fdsa_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_HintSample *ptr = (GF_HintSample *) s;
if (!s) return GF_BAD_PARAM;
e = gf_isom_box_write_header(s, bs);
if (e) return e;
e = gf_isom_box_array_write(s, ptr->packetTable, bs);
if (e) return e;
if (ptr->extra_data) {
e = gf_isom_box_write((GF_Box *)ptr->extra_data, bs);
if (e) return e;
}
return GF_OK;
}
GF_Err fdsa_Size(GF_Box *s)
{
GF_HintSample *ptr = (GF_HintSample*)s;
GF_Err e;
if (ptr->extra_data) {
e = gf_isom_box_size((GF_Box *)ptr->extra_data);
if (e) return e;
ptr->size += ptr->extra_data->size;
}
return gf_isom_box_array_size(s, ptr->packetTable);
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
#endif /*GPAC_DISABLE_ISOM_HINTING*/
void trik_del(GF_Box *s)
{
GF_TrickPlayBox *ptr = (GF_TrickPlayBox *) s;
if (ptr == NULL) return;
if (ptr->entries) gf_free(ptr->entries);
gf_free(ptr);
}
GF_Err trik_Read(GF_Box *s,GF_BitStream *bs)
{
u32 i;
GF_TrickPlayBox *ptr = (GF_TrickPlayBox *) s;
ptr->entry_count = (u32) ptr->size;
ptr->entries = (GF_TrickPlayBoxEntry *) gf_malloc(ptr->entry_count * sizeof(GF_TrickPlayBoxEntry) );
if (ptr->entries == NULL) return GF_OUT_OF_MEM;
for (i=0; i< ptr->entry_count; i++) {
ptr->entries[i].pic_type = gf_bs_read_int(bs, 2);
ptr->entries[i].dependency_level = gf_bs_read_int(bs, 6);
}
return GF_OK;
}
GF_Box *trik_New()
{
ISOM_DECL_BOX_ALLOC(GF_TrickPlayBox, GF_ISOM_BOX_TYPE_TRIK);
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err trik_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
u32 i;
GF_TrickPlayBox *ptr = (GF_TrickPlayBox *) s;
e = gf_isom_full_box_write(s, bs);
if (e) return e;
for (i=0; i < ptr->entry_count; i++ ) {
gf_bs_write_int(bs, ptr->entries[i].pic_type, 2);
gf_bs_write_int(bs, ptr->entries[i].dependency_level, 6);
}
return GF_OK;
}
GF_Err trik_Size(GF_Box *s)
{
GF_TrickPlayBox *ptr = (GF_TrickPlayBox *) s;
	ptr->size += ptr->entry_count; //each entry is 2+6 bits, i.e. one byte
return GF_OK;
}
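/* Note: each trik entry is a single byte (2-bit pic_type + 6-bit
 * dependency_level), which is why trik_Read derives entry_count directly from
 * the remaining byte size and trik_Size adds one byte per entry. */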
#endif /*GPAC_DISABLE_ISOM_WRITE*/
void bloc_del(GF_Box *s)
{
gf_free(s);
}
GF_Err bloc_Read(GF_Box *s,GF_BitStream *bs)
{
GF_BaseLocationBox *ptr = (GF_BaseLocationBox *) s;
ISOM_DECREASE_SIZE(s, 256)
gf_bs_read_data(bs, (char *) ptr->baseLocation, 256);
ISOM_DECREASE_SIZE(s, 256)
gf_bs_read_data(bs, (char *) ptr->basePurlLocation, 256);
ISOM_DECREASE_SIZE(s, 512)
gf_bs_skip_bytes(bs, 512);
return GF_OK;
}
GF_Box *bloc_New()
{
	ISOM_DECL_BOX_ALLOC(GF_BaseLocationBox, GF_ISOM_BOX_TYPE_BLOC);
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err bloc_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
u32 i;
GF_BaseLocationBox *ptr = (GF_BaseLocationBox *) s;
e = gf_isom_full_box_write(s, bs);
if (e) return e;
gf_bs_write_data(bs, (const char *) ptr->baseLocation, 256);
gf_bs_write_data(bs, (const char *) ptr->basePurlLocation, 256);
for (i=0; i < 64; i++ ) {
gf_bs_write_u64(bs, 0);
}
return GF_OK;
}
GF_Err bloc_Size(GF_Box *s)
{
s->size += 1024;
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
void ainf_del(GF_Box *s)
{
GF_AssetInformationBox *ptr = (GF_AssetInformationBox *) s;
if (ptr->APID) gf_free(ptr->APID);
gf_free(s);
}
GF_Err ainf_Read(GF_Box *s,GF_BitStream *bs)
{
GF_AssetInformationBox *ptr = (GF_AssetInformationBox *) s;
ISOM_DECREASE_SIZE(s, 4)
ptr->profile_version = gf_bs_read_u32(bs);
return gf_isom_read_null_terminated_string(s, bs, s->size, &ptr->APID);
}
GF_Box *ainf_New()
{
ISOM_DECL_BOX_ALLOC(GF_AssetInformationBox, GF_ISOM_BOX_TYPE_AINF);
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err ainf_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_AssetInformationBox *ptr = (GF_AssetInformationBox *) s;
e = gf_isom_full_box_write(s, bs);
if (e) return e;
gf_bs_write_u32(bs, ptr->profile_version);
gf_bs_write_data(bs, ptr->APID, (u32) strlen(ptr->APID) + 1);
return GF_OK;
}
GF_Err ainf_Size(GF_Box *s)
{
GF_AssetInformationBox *ptr = (GF_AssetInformationBox *) s;
s->size += 4 + strlen(ptr->APID) + 1;
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
void mhac_del(GF_Box *s)
{
GF_MHAConfigBox *ptr = (GF_MHAConfigBox *) s;
if (ptr->mha_config) gf_free(ptr->mha_config);
gf_free(s);
}
GF_Err mhac_Read(GF_Box *s,GF_BitStream *bs)
{
GF_MHAConfigBox *ptr = (GF_MHAConfigBox *) s;
ISOM_DECREASE_SIZE(s, 5)
ptr->configuration_version = gf_bs_read_u8(bs);
ptr->mha_pl_indication = gf_bs_read_u8(bs);
ptr->reference_channel_layout = gf_bs_read_u8(bs);
ptr->mha_config_size = gf_bs_read_u16(bs);
if (ptr->mha_config_size) {
		//ISOM_DECREASE_SIZE already fails on boxes with fewer than mha_config_size remaining bytes, bounding the allocation below
		ISOM_DECREASE_SIZE(s, ptr->mha_config_size)
ptr->mha_config = gf_malloc(sizeof(char)*ptr->mha_config_size);
if (!ptr->mha_config)
return GF_OUT_OF_MEM;
gf_bs_read_data(bs, ptr->mha_config, ptr->mha_config_size);
}
return GF_OK;
}
GF_Box *mhac_New()
{
ISOM_DECL_BOX_ALLOC(GF_MHAConfigBox, GF_ISOM_BOX_TYPE_MHAC);
tmp->configuration_version = 1;
return (GF_Box *)tmp;
}
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err mhac_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_MHAConfigBox *ptr = (GF_MHAConfigBox *) s;
	e = gf_isom_box_write_header(s, bs);
if (e) return e;
gf_bs_write_u8(bs, ptr->configuration_version);
gf_bs_write_u8(bs, ptr->mha_pl_indication);
gf_bs_write_u8(bs, ptr->reference_channel_layout);
gf_bs_write_u16(bs, ptr->mha_config ? ptr->mha_config_size : 0);
if (ptr->mha_config && ptr->mha_config_size)
gf_bs_write_data(bs, ptr->mha_config, ptr->mha_config_size);
return GF_OK;
}
GF_Err mhac_Size(GF_Box *s)
{
GF_MHAConfigBox *ptr = (GF_MHAConfigBox *) s;
s->size += 5;
if (ptr->mha_config_size && ptr->mha_config) s->size += ptr->mha_config_size;
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
/* Dolby Vision */
GF_Box *dvcC_New()
{
GF_DOVIConfigurationBox *tmp = (GF_DOVIConfigurationBox *)gf_malloc(sizeof(GF_DOVIConfigurationBox));
if (tmp == NULL) return NULL;
memset(tmp, 0, sizeof(GF_DOVIConfigurationBox));
tmp->type = GF_ISOM_BOX_TYPE_DVCC;
return (GF_Box *)tmp;
}
void dvcC_del(GF_Box *s)
{
GF_DOVIConfigurationBox *ptr = (GF_DOVIConfigurationBox*)s;
gf_free(ptr);
}
GF_Err dvcC_Read(GF_Box *s, GF_BitStream *bs)
{
GF_DOVIConfigurationBox *ptr = (GF_DOVIConfigurationBox *)s;
//GF_DOVIDecoderConfigurationRecord
ISOM_DECREASE_SIZE(s, 24) /* fixed 24-byte record; validate like the other readers in this file */
ptr->DOVIConfig.dv_version_major = gf_bs_read_u8(bs);
ptr->DOVIConfig.dv_version_minor = gf_bs_read_u8(bs);
ptr->DOVIConfig.dv_profile = gf_bs_read_int(bs, 7);
ptr->DOVIConfig.dv_level = gf_bs_read_int(bs, 6);
ptr->DOVIConfig.rpu_present_flag = gf_bs_read_int(bs, 1);
ptr->DOVIConfig.el_present_flag = gf_bs_read_int(bs, 1);
ptr->DOVIConfig.bl_present_flag = gf_bs_read_int(bs, 1);
{
int i = 0;
u32 data[5];
memset(data, 0, sizeof(data));
gf_bs_read_data(bs, (char*)data, 20);
for (i = 0; i < 5; ++i) {
if (data[i] != 0) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] dvcC reserved bytes are not zero\n"));
//return GF_ISOM_INVALID_FILE;
}
}
}
return GF_OK;
}
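/*
 * For orientation (hedged, derived from the reads above rather than from a
 * spec citation): the fixed 24-byte DOVIDecoderConfigurationRecord parsed
 * here is laid out as
 *   u8  dv_version_major, dv_version_minor
 *   u16 bitfield: dv_profile(7) | dv_level(6) | rpu(1) | el(1) | bl(1)
 *   20  reserved bytes, expected to be zero
 * which matches the 24 bytes dvcC_Size accounts for below.
 */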
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err dvcC_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_DOVIConfigurationBox *ptr = (GF_DOVIConfigurationBox *) s;
if (!s) return GF_BAD_PARAM;
e = gf_isom_box_write_header(s, bs);
if (e) return e;
//GF_DOVIDecoderConfigurationRecord
gf_bs_write_u8(bs, ptr->DOVIConfig.dv_version_major);
gf_bs_write_u8(bs, ptr->DOVIConfig.dv_version_minor);
gf_bs_write_int(bs, ptr->DOVIConfig.dv_profile, 7);
gf_bs_write_int(bs, ptr->DOVIConfig.dv_level, 6);
gf_bs_write_int(bs, ptr->DOVIConfig.rpu_present_flag, 1);
gf_bs_write_int(bs, ptr->DOVIConfig.el_present_flag, 1);
gf_bs_write_int(bs, ptr->DOVIConfig.bl_present_flag, 1);
{
u32 data[5];
memset(data, 0, sizeof(data));
gf_bs_write_data(bs, (char*)data, sizeof(data));
}
return GF_OK;
}
GF_Err dvcC_Size(GF_Box *s)
{
GF_DOVIConfigurationBox *ptr = (GF_DOVIConfigurationBox *)s;
ptr->size += 24;
return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
#endif /*GPAC_DISABLE_ISOM*/
| ./CrossVul/dataset_final_sorted/CWE-416/c/good_2456_0 |
crossvul-cpp_data_good_4230_0 | /*
** $Id: ldo.c $
** Stack and Call structure of Lua
** See Copyright Notice in lua.h
*/
#define ldo_c
#define LUA_CORE
#include "lprefix.h"
#include <setjmp.h>
#include <stdlib.h>
#include <string.h>
#include "lua.h"
#include "lapi.h"
#include "ldebug.h"
#include "ldo.h"
#include "lfunc.h"
#include "lgc.h"
#include "lmem.h"
#include "lobject.h"
#include "lopcodes.h"
#include "lparser.h"
#include "lstate.h"
#include "lstring.h"
#include "ltable.h"
#include "ltm.h"
#include "lundump.h"
#include "lvm.h"
#include "lzio.h"
#define errorstatus(s) ((s) > LUA_YIELD)
/*
** {======================================================
** Error-recovery functions
** =======================================================
*/
/*
** LUAI_THROW/LUAI_TRY define how Lua does exception handling. By
** default, Lua handles errors with exceptions when compiling as
** C++ code, with _longjmp/_setjmp when asked to use them, and with
** longjmp/setjmp otherwise.
*/
#if !defined(LUAI_THROW) /* { */
#if defined(__cplusplus) && !defined(LUA_USE_LONGJMP) /* { */
/* C++ exceptions */
#define LUAI_THROW(L,c) throw(c)
#define LUAI_TRY(L,c,a) \
try { a } catch(...) { if ((c)->status == 0) (c)->status = -1; }
#define luai_jmpbuf int /* dummy variable */
#elif defined(LUA_USE_POSIX) /* }{ */
/* in POSIX, try _longjmp/_setjmp (more efficient) */
#define LUAI_THROW(L,c) _longjmp((c)->b, 1)
#define LUAI_TRY(L,c,a) if (_setjmp((c)->b) == 0) { a }
#define luai_jmpbuf jmp_buf
#else /* }{ */
/* ISO C handling with long jumps */
#define LUAI_THROW(L,c) longjmp((c)->b, 1)
#define LUAI_TRY(L,c,a) if (setjmp((c)->b) == 0) { a }
#define luai_jmpbuf jmp_buf
#endif /* } */
#endif /* } */
/* chain list of long jump buffers */
struct lua_longjmp {
struct lua_longjmp *previous;
luai_jmpbuf b;
volatile int status; /* error code */
};
void luaD_seterrorobj (lua_State *L, int errcode, StkId oldtop) {
switch (errcode) {
case LUA_ERRMEM: { /* memory error? */
setsvalue2s(L, oldtop, G(L)->memerrmsg); /* reuse preregistered msg. */
break;
}
case LUA_ERRERR: {
setsvalue2s(L, oldtop, luaS_newliteral(L, "error in error handling"));
break;
}
case CLOSEPROTECT: {
setnilvalue(s2v(oldtop)); /* no error message */
break;
}
default: {
setobjs2s(L, oldtop, L->top - 1); /* error message on current top */
break;
}
}
L->top = oldtop + 1;
}
l_noret luaD_throw (lua_State *L, int errcode) {
if (L->errorJmp) { /* thread has an error handler? */
L->errorJmp->status = errcode; /* set status */
LUAI_THROW(L, L->errorJmp); /* jump to it */
}
else { /* thread has no error handler */
global_State *g = G(L);
errcode = luaF_close(L, L->stack, errcode); /* close all upvalues */
L->status = cast_byte(errcode); /* mark it as dead */
if (g->mainthread->errorJmp) { /* main thread has a handler? */
setobjs2s(L, g->mainthread->top++, L->top - 1); /* copy error obj. */
luaD_throw(g->mainthread, errcode); /* re-throw in main thread */
}
else { /* no handler at all; abort */
if (g->panic) { /* panic function? */
luaD_seterrorobj(L, errcode, L->top); /* assume EXTRA_STACK */
if (L->ci->top < L->top)
L->ci->top = L->top; /* pushing msg. can break this invariant */
lua_unlock(L);
g->panic(L); /* call panic function (last chance to jump out) */
}
abort();
}
}
}
int luaD_rawrunprotected (lua_State *L, Pfunc f, void *ud) {
global_State *g = G(L);
l_uint32 oldnCcalls = g->Cstacklimit - (L->nCcalls + L->nci);
struct lua_longjmp lj;
lj.status = LUA_OK;
lj.previous = L->errorJmp; /* chain new error handler */
L->errorJmp = &lj;
LUAI_TRY(L, &lj,
(*f)(L, ud);
);
L->errorJmp = lj.previous; /* restore old error handler */
L->nCcalls = g->Cstacklimit - oldnCcalls - L->nci;
return lj.status;
}
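/*
** Minimal usage sketch (not part of the Lua core): any operation that can
** raise an error is wrapped in a Pfunc and run through
** luaD_rawrunprotected, which converts the longjmp/throw back into a
** status code; 'ud' carries the arguments. The helper names below are
** hypothetical, and the sketch assumes a free stack slot is available.
*/
static void f_push_literal (lua_State *L, void *ud) {
  const char *msg = (const char *)ud;
  setsvalue2s(L, L->top, luaS_new(L, msg));  /* may raise LUA_ERRMEM */
  api_incr_top(L);
}

static int push_literal_protected (lua_State *L, const char *msg) {
  return luaD_rawrunprotected(L, f_push_literal, (void *)msg);  /* LUA_OK or an error code */
}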
/* }====================================================== */
/*
** {==================================================================
** Stack reallocation
** ===================================================================
*/
static void correctstack (lua_State *L, StkId oldstack, StkId newstack) {
CallInfo *ci;
UpVal *up;
if (oldstack == newstack)
return; /* stack address did not change */
L->top = (L->top - oldstack) + newstack;
for (up = L->openupval; up != NULL; up = up->u.open.next)
up->v = s2v((uplevel(up) - oldstack) + newstack);
for (ci = L->ci; ci != NULL; ci = ci->previous) {
ci->top = (ci->top - oldstack) + newstack;
ci->func = (ci->func - oldstack) + newstack;
if (isLua(ci))
ci->u.l.trap = 1; /* signal to update 'trap' in 'luaV_execute' */
}
}
/* some space for error handling */
#define ERRORSTACKSIZE (LUAI_MAXSTACK + 200)
int luaD_reallocstack (lua_State *L, int newsize, int raiseerror) {
int lim = L->stacksize;
StkId newstack = luaM_reallocvector(L, L->stack, lim, newsize, StackValue);
lua_assert(newsize <= LUAI_MAXSTACK || newsize == ERRORSTACKSIZE);
lua_assert(L->stack_last - L->stack == L->stacksize - EXTRA_STACK);
if (unlikely(newstack == NULL)) { /* reallocation failed? */
if (raiseerror)
luaM_error(L);
else return 0; /* do not raise an error */
}
for (; lim < newsize; lim++)
setnilvalue(s2v(newstack + lim)); /* erase new segment */
correctstack(L, L->stack, newstack);
L->stack = newstack;
L->stacksize = newsize;
L->stack_last = L->stack + newsize - EXTRA_STACK;
return 1;
}
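/*
** Hedged note on the idiom this enables: because luaD_reallocstack may
** move the whole stack, core code never keeps a raw StkId across any call
** that can grow the stack; it converts the pointer to a byte offset with
** savestack and re-derives it with restorestack afterwards, e.g.:
**
**   ptrdiff_t saved = savestack(L, pos);   -- offset survives reallocation
**   luaD_growstack(L, n, 1);               -- may move L->stack
**   pos = restorestack(L, saved);          -- rebuild the pointer
**
** The same pattern appears below in 'rethook', 'moveresults' and
** 'luaD_pcall'.
*/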
/*
** Try to grow the stack by at least 'n' elements. When 'raiseerror'
** is true, raise any error; otherwise, return 0 in case of errors.
*/
int luaD_growstack (lua_State *L, int n, int raiseerror) {
int size = L->stacksize;
int newsize = 2 * size; /* tentative new size */
if (unlikely(size > LUAI_MAXSTACK)) { /* need more space after extra size? */
if (raiseerror)
luaD_throw(L, LUA_ERRERR); /* error inside message handler */
else return 0;
}
else {
int needed = cast_int(L->top - L->stack) + n + EXTRA_STACK;
if (newsize > LUAI_MAXSTACK) /* cannot cross the limit */
newsize = LUAI_MAXSTACK;
if (newsize < needed) /* but must respect what was asked for */
newsize = needed;
if (unlikely(newsize > LUAI_MAXSTACK)) { /* stack overflow? */
/* add extra size to be able to handle the error message */
luaD_reallocstack(L, ERRORSTACKSIZE, raiseerror);
if (raiseerror)
luaG_runerror(L, "stack overflow");
else return 0;
}
} /* else no errors */
return luaD_reallocstack(L, newsize, raiseerror);
}
static int stackinuse (lua_State *L) {
CallInfo *ci;
StkId lim = L->top;
for (ci = L->ci; ci != NULL; ci = ci->previous) {
if (lim < ci->top) lim = ci->top;
}
lua_assert(lim <= L->stack_last);
return cast_int(lim - L->stack) + 1; /* part of stack in use */
}
void luaD_shrinkstack (lua_State *L) {
int inuse = stackinuse(L);
int goodsize = inuse + BASIC_STACK_SIZE;
if (goodsize > LUAI_MAXSTACK)
goodsize = LUAI_MAXSTACK; /* respect stack limit */
/* if thread is currently not handling a stack overflow and its
good size is smaller than current size, shrink its stack */
if (inuse <= (LUAI_MAXSTACK - EXTRA_STACK) && goodsize < L->stacksize)
luaD_reallocstack(L, goodsize, 0); /* ok if that fails */
else /* don't change stack */
condmovestack(L,{},{}); /* (change only for debugging) */
luaE_shrinkCI(L); /* shrink CI list */
}
void luaD_inctop (lua_State *L) {
luaD_checkstack(L, 1);
L->top++;
}
/* }================================================================== */
/*
** Call a hook for the given event. Make sure there is a hook to be
** called. (Both 'L->hook' and 'L->hookmask', which trigger this
** function, can be changed asynchronously by signals.)
*/
void luaD_hook (lua_State *L, int event, int line,
int ftransfer, int ntransfer) {
lua_Hook hook = L->hook;
if (hook && L->allowhook) { /* make sure there is a hook */
int mask = CIST_HOOKED;
CallInfo *ci = L->ci;
ptrdiff_t top = savestack(L, L->top);
ptrdiff_t ci_top = savestack(L, ci->top);
lua_Debug ar;
ar.event = event;
ar.currentline = line;
ar.i_ci = ci;
if (ntransfer != 0) {
mask |= CIST_TRAN; /* 'ci' has transfer information */
ci->u2.transferinfo.ftransfer = ftransfer;
ci->u2.transferinfo.ntransfer = ntransfer;
}
luaD_checkstack(L, LUA_MINSTACK); /* ensure minimum stack size */
if (L->top + LUA_MINSTACK > ci->top)
ci->top = L->top + LUA_MINSTACK;
L->allowhook = 0; /* cannot call hooks inside a hook */
ci->callstatus |= mask;
lua_unlock(L);
(*hook)(L, &ar);
lua_lock(L);
lua_assert(!L->allowhook);
L->allowhook = 1;
ci->top = restorestack(L, ci_top);
L->top = restorestack(L, top);
ci->callstatus &= ~mask;
}
}
/*
** Executes a call hook for Lua functions. This function is called
** whenever 'hookmask' is not zero, so it checks whether call hooks are
** active.
*/
void luaD_hookcall (lua_State *L, CallInfo *ci) {
int hook = (ci->callstatus & CIST_TAIL) ? LUA_HOOKTAILCALL : LUA_HOOKCALL;
Proto *p;
if (!(L->hookmask & LUA_MASKCALL)) /* some other hook? */
return; /* don't call hook */
p = clLvalue(s2v(ci->func))->p;
L->top = ci->top; /* prepare top */
ci->u.l.savedpc++; /* hooks assume 'pc' is already incremented */
luaD_hook(L, hook, -1, 1, p->numparams);
ci->u.l.savedpc--; /* correct 'pc' */
}
static StkId rethook (lua_State *L, CallInfo *ci, StkId firstres, int nres) {
ptrdiff_t oldtop = savestack(L, L->top); /* hook may change top */
int delta = 0;
if (isLuacode(ci)) {
Proto *p = clLvalue(s2v(ci->func))->p;
if (p->is_vararg)
delta = ci->u.l.nextraargs + p->numparams + 1;
if (L->top < ci->top)
L->top = ci->top; /* correct top to run hook */
}
if (L->hookmask & LUA_MASKRET) { /* is return hook on? */
int ftransfer;
ci->func += delta; /* if vararg, back to virtual 'func' */
ftransfer = cast(unsigned short, firstres - ci->func);
luaD_hook(L, LUA_HOOKRET, -1, ftransfer, nres); /* call it */
ci->func -= delta;
}
if (isLua(ci->previous))
L->oldpc = ci->previous->u.l.savedpc; /* update 'oldpc' */
return restorestack(L, oldtop);
}
/*
** Check whether 'func' has a '__call' metafield. If so, put it in the
** stack, below original 'func', so that 'luaD_call' can call it. Raise
** an error if there is no '__call' metafield.
*/
void luaD_tryfuncTM (lua_State *L, StkId func) {
const TValue *tm = luaT_gettmbyobj(L, s2v(func), TM_CALL);
StkId p;
if (unlikely(ttisnil(tm)))
luaG_typeerror(L, s2v(func), "call"); /* nothing to call */
for (p = L->top; p > func; p--) /* open space for metamethod */
setobjs2s(L, p, p-1);
L->top++; /* stack space pre-allocated by the caller */
setobj2s(L, func, tm); /* metamethod is the new function to be called */
}
/*
** Given 'nres' results at 'firstResult', move 'wanted' of them to 'res'.
** Handle most typical cases (zero results for commands, one result for
** expressions, multiple results for tail calls/single parameters)
** separated.
*/
static void moveresults (lua_State *L, StkId res, int nres, int wanted) {
StkId firstresult;
int i;
switch (wanted) { /* handle typical cases separately */
case 0: /* no values needed */
L->top = res;
return;
case 1: /* one value needed */
if (nres == 0) /* no results? */
setnilvalue(s2v(res)); /* adjust with nil */
else
setobjs2s(L, res, L->top - nres); /* move it to proper place */
L->top = res + 1;
return;
case LUA_MULTRET:
wanted = nres; /* we want all results */
break;
default: /* multiple results (or to-be-closed variables) */
if (hastocloseCfunc(wanted)) { /* to-be-closed variables? */
ptrdiff_t savedres = savestack(L, res);
luaF_close(L, res, LUA_OK); /* may change the stack */
res = restorestack(L, savedres);
wanted = codeNresults(wanted); /* correct value */
if (wanted == LUA_MULTRET)
wanted = nres;
}
break;
}
firstresult = L->top - nres; /* index of first result */
/* move all results to correct place */
for (i = 0; i < nres && i < wanted; i++)
setobjs2s(L, res + i, firstresult + i);
for (; i < wanted; i++) /* complete wanted number of results */
setnilvalue(s2v(res + i));
L->top = res + wanted; /* top points after the last result */
}
/*
** Finishes a function call: calls hook if necessary, removes CallInfo,
** moves current number of results to proper place.
*/
void luaD_poscall (lua_State *L, CallInfo *ci, int nres) {
if (L->hookmask)
L->top = rethook(L, ci, L->top - nres, nres);
L->ci = ci->previous; /* back to caller */
/* move results to proper place */
moveresults(L, ci->func, nres, ci->nresults);
}
#define next_ci(L) (L->ci->next ? L->ci->next : luaE_extendCI(L))
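/*
** Note on the macro above: 'next_ci' either reuses the next CallInfo
** already linked into the list or grows the list with luaE_extendCI, so
** CallInfo records are recycled across calls and a hot call path performs
** no allocation once the list is warm.
*/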
/*
** Prepare a function for a tail call, building its call info on top
** of the current call info. 'narg1' is the number of arguments plus 1
** (so that it includes the function itself).
*/
void luaD_pretailcall (lua_State *L, CallInfo *ci, StkId func, int narg1) {
Proto *p = clLvalue(s2v(func))->p;
int fsize = p->maxstacksize; /* frame size */
int nfixparams = p->numparams;
int i;
for (i = 0; i < narg1; i++) /* move down function and arguments */
setobjs2s(L, ci->func + i, func + i);
checkstackGC(L, fsize);
func = ci->func; /* moved-down function */
for (; narg1 <= nfixparams; narg1++)
setnilvalue(s2v(func + narg1)); /* complete missing arguments */
ci->top = func + 1 + fsize; /* top for new function */
lua_assert(ci->top <= L->stack_last);
ci->u.l.savedpc = p->code; /* starting point */
ci->callstatus |= CIST_TAIL;
L->top = func + narg1; /* set top */
}
/*
** Call a function (C or Lua). The function to be called is at *func.
** The arguments are on the stack, right after the function.
** When returns, all the results are on the stack, starting at the original
** function position.
*/
void luaD_call (lua_State *L, StkId func, int nresults) {
lua_CFunction f;
retry:
switch (ttypetag(s2v(func))) {
case LUA_VCCL: /* C closure */
f = clCvalue(s2v(func))->f;
goto Cfunc;
case LUA_VLCF: /* light C function */
f = fvalue(s2v(func));
Cfunc: {
int n; /* number of returns */
CallInfo *ci = next_ci(L);
checkstackp(L, LUA_MINSTACK, func); /* ensure minimum stack size */
ci->nresults = nresults;
ci->callstatus = CIST_C;
ci->top = L->top + LUA_MINSTACK;
ci->func = func;
L->ci = ci;
lua_assert(ci->top <= L->stack_last);
if (L->hookmask & LUA_MASKCALL) {
int narg = cast_int(L->top - func) - 1;
luaD_hook(L, LUA_HOOKCALL, -1, 1, narg);
}
lua_unlock(L);
n = (*f)(L); /* do the actual call */
lua_lock(L);
api_checknelems(L, n);
luaD_poscall(L, ci, n);
break;
}
case LUA_VLCL: { /* Lua function */
CallInfo *ci = next_ci(L);
Proto *p = clLvalue(s2v(func))->p;
int narg = cast_int(L->top - func) - 1; /* number of real arguments */
int nfixparams = p->numparams;
int fsize = p->maxstacksize; /* frame size */
checkstackp(L, fsize, func);
ci->nresults = nresults;
ci->u.l.savedpc = p->code; /* starting point */
ci->callstatus = 0;
ci->top = func + 1 + fsize;
ci->func = func;
L->ci = ci;
for (; narg < nfixparams; narg++)
setnilvalue(s2v(L->top++)); /* complete missing arguments */
lua_assert(ci->top <= L->stack_last);
luaV_execute(L, ci); /* run the function */
break;
}
default: { /* not a function */
checkstackp(L, 1, func); /* space for metamethod */
luaD_tryfuncTM(L, func); /* try to get '__call' metamethod */
goto retry; /* try again with metamethod */
}
}
}
/*
** Similar to 'luaD_call', but does not allow yields during the call.
** If there is a stack overflow, freeing all CI structures will
** force the subsequent call to invoke 'luaE_extendCI', which then
** will raise any errors.
*/
void luaD_callnoyield (lua_State *L, StkId func, int nResults) {
incXCcalls(L);
if (getCcalls(L) <= CSTACKERR) /* possible stack overflow? */
luaE_freeCI(L);
luaD_call(L, func, nResults);
decXCcalls(L);
}
/*
** Completes the execution of an interrupted C function, calling its
** continuation function.
*/
static void finishCcall (lua_State *L, int status) {
CallInfo *ci = L->ci;
int n;
/* must have a continuation and must be able to call it */
lua_assert(ci->u.c.k != NULL && yieldable(L));
/* error status can only happen in a protected call */
lua_assert((ci->callstatus & CIST_YPCALL) || status == LUA_YIELD);
if (ci->callstatus & CIST_YPCALL) { /* was inside a pcall? */
ci->callstatus &= ~CIST_YPCALL; /* continuation is also inside it */
L->errfunc = ci->u.c.old_errfunc; /* with the same error function */
}
/* finish 'lua_callk'/'lua_pcall'; CIST_YPCALL and 'errfunc' already
handled */
adjustresults(L, ci->nresults);
lua_unlock(L);
n = (*ci->u.c.k)(L, status, ci->u.c.ctx); /* call continuation function */
lua_lock(L);
api_checknelems(L, n);
luaD_poscall(L, ci, n); /* finish 'luaD_call' */
}
/*
** Executes "full continuation" (everything in the stack) of a
** previously interrupted coroutine until the stack is empty (or another
** interruption long-jumps out of the loop). If the coroutine is
** recovering from an error, 'ud' points to the error status, which must
** be passed to the first continuation function (otherwise the default
** status is LUA_YIELD).
*/
static void unroll (lua_State *L, void *ud) {
CallInfo *ci;
if (ud != NULL) /* error status? */
finishCcall(L, *(int *)ud); /* finish 'lua_pcallk' callee */
while ((ci = L->ci) != &L->base_ci) { /* something in the stack */
if (!isLua(ci)) /* C function? */
finishCcall(L, LUA_YIELD); /* complete its execution */
else { /* Lua function */
luaV_finishOp(L); /* finish interrupted instruction */
luaV_execute(L, ci); /* execute down to higher C 'boundary' */
}
}
}
/*
** Try to find a suspended protected call (a "recover point") for the
** given thread.
*/
static CallInfo *findpcall (lua_State *L) {
CallInfo *ci;
for (ci = L->ci; ci != NULL; ci = ci->previous) { /* search for a pcall */
if (ci->callstatus & CIST_YPCALL)
return ci;
}
return NULL; /* no pending pcall */
}
/*
** Recovers from an error in a coroutine. Finds a recover point (if
** there is one) and completes the execution of the interrupted
** 'luaD_pcall'. If there is no recover point, returns zero.
*/
static int recover (lua_State *L, int status) {
StkId oldtop;
CallInfo *ci = findpcall(L);
if (ci == NULL) return 0; /* no recovery point */
/* "finish" luaD_pcall */
oldtop = restorestack(L, ci->u2.funcidx);
luaF_close(L, oldtop, status); /* may change the stack */
oldtop = restorestack(L, ci->u2.funcidx);
luaD_seterrorobj(L, status, oldtop);
L->ci = ci;
L->allowhook = getoah(ci->callstatus); /* restore original 'allowhook' */
luaD_shrinkstack(L);
L->errfunc = ci->u.c.old_errfunc;
return 1; /* continue running the coroutine */
}
/*
** Signal an error in the call to 'lua_resume', not in the execution
** of the coroutine itself. (Such errors should not be handled by any
** coroutine error handler and should not kill the coroutine.)
*/
static int resume_error (lua_State *L, const char *msg, int narg) {
L->top -= narg; /* remove args from the stack */
setsvalue2s(L, L->top, luaS_new(L, msg)); /* push error message */
api_incr_top(L);
lua_unlock(L);
return LUA_ERRRUN;
}
/*
** Do the work for 'lua_resume' in protected mode. Most of the work
** depends on the status of the coroutine: initial state, suspended
** inside a hook, or regularly suspended (optionally with a continuation
** function), plus erroneous cases: non-suspended coroutine or dead
** coroutine.
*/
static void resume (lua_State *L, void *ud) {
int n = *(cast(int*, ud)); /* number of arguments */
StkId firstArg = L->top - n; /* first argument */
CallInfo *ci = L->ci;
if (L->status == LUA_OK) { /* starting a coroutine? */
luaD_call(L, firstArg - 1, LUA_MULTRET);
}
else { /* resuming from previous yield */
lua_assert(L->status == LUA_YIELD);
L->status = LUA_OK; /* mark that it is running (again) */
if (isLua(ci)) /* yielded inside a hook? */
luaV_execute(L, ci); /* just continue running Lua code */
else { /* 'common' yield */
if (ci->u.c.k != NULL) { /* does it have a continuation function? */
lua_unlock(L);
n = (*ci->u.c.k)(L, LUA_YIELD, ci->u.c.ctx); /* call continuation */
lua_lock(L);
api_checknelems(L, n);
}
luaD_poscall(L, ci, n); /* finish 'luaD_call' */
}
unroll(L, NULL); /* run continuation */
}
}
LUA_API int lua_resume (lua_State *L, lua_State *from, int nargs,
int *nresults) {
int status;
lua_lock(L);
if (L->status == LUA_OK) { /* may be starting a coroutine */
if (L->ci != &L->base_ci) /* not in base level? */
return resume_error(L, "cannot resume non-suspended coroutine", nargs);
else if (L->top - (L->ci->func + 1) == nargs) /* no function? */
return resume_error(L, "cannot resume dead coroutine", nargs);
}
else if (L->status != LUA_YIELD) /* ended with errors? */
return resume_error(L, "cannot resume dead coroutine", nargs);
if (from == NULL)
L->nCcalls = CSTACKTHREAD;
else /* correct 'nCcalls' for this thread */
L->nCcalls = getCcalls(from) + from->nci - L->nci - CSTACKCF;
if (L->nCcalls <= CSTACKERR)
return resume_error(L, "C stack overflow", nargs);
luai_userstateresume(L, nargs);
api_checknelems(L, (L->status == LUA_OK) ? nargs + 1 : nargs);
status = luaD_rawrunprotected(L, resume, &nargs);
/* continue running after recoverable errors */
while (errorstatus(status) && recover(L, status)) {
/* unroll continuation */
status = luaD_rawrunprotected(L, unroll, &status);
}
if (likely(!errorstatus(status)))
lua_assert(status == L->status); /* normal end or yield */
else { /* unrecoverable error */
L->status = cast_byte(status); /* mark thread as 'dead' */
luaD_seterrorobj(L, status, L->top); /* push error message */
L->ci->top = L->top;
}
*nresults = (status == LUA_YIELD) ? L->ci->u2.nyield
: cast_int(L->top - (L->ci->func + 1));
lua_unlock(L);
return status;
}
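/*
** Public-API usage sketch (illustrative, not part of the core): driving a
** coroutine from C with the 5.4 signature implemented above. 'drive' is a
** hypothetical helper name.
*/
static int drive (lua_State *co, int nargs) {
  int nres;
  int status = lua_resume(co, NULL, nargs, &nres);
  if (status == LUA_OK || status == LUA_YIELD)
    lua_pop(co, nres);  /* discard results or yielded values */
  return status;  /* LUA_OK: finished; LUA_YIELD: call again; else: error */
}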
LUA_API int lua_isyieldable (lua_State *L) {
return yieldable(L);
}
LUA_API int lua_yieldk (lua_State *L, int nresults, lua_KContext ctx,
lua_KFunction k) {
CallInfo *ci = L->ci;
luai_userstateyield(L, nresults);
lua_lock(L);
api_checknelems(L, nresults);
if (unlikely(!yieldable(L))) {
if (L != G(L)->mainthread)
luaG_runerror(L, "attempt to yield across a C-call boundary");
else
luaG_runerror(L, "attempt to yield from outside a coroutine");
}
L->status = LUA_YIELD;
if (isLua(ci)) { /* inside a hook? */
lua_assert(!isLuacode(ci));
api_check(L, k == NULL, "hooks cannot continue after yielding");
ci->u2.nyield = 0; /* no results */
}
else {
if ((ci->u.c.k = k) != NULL) /* is there a continuation? */
ci->u.c.ctx = ctx; /* save context */
ci->u2.nyield = nresults; /* save number of results */
luaD_throw(L, LUA_YIELD);
}
lua_assert(ci->callstatus & CIST_HOOKED); /* must be inside a hook */
lua_unlock(L);
return 0; /* return to 'luaD_hook' */
}
/*
** Call the C function 'func' in protected mode, restoring basic
** thread information ('allowhook', etc.) and in particular
** its stack level in case of errors.
*/
int luaD_pcall (lua_State *L, Pfunc func, void *u,
ptrdiff_t old_top, ptrdiff_t ef) {
int status;
CallInfo *old_ci = L->ci;
lu_byte old_allowhooks = L->allowhook;
ptrdiff_t old_errfunc = L->errfunc;
L->errfunc = ef;
status = luaD_rawrunprotected(L, func, u);
if (unlikely(status != LUA_OK)) { /* an error occurred? */
StkId oldtop = restorestack(L, old_top);
L->ci = old_ci;
L->allowhook = old_allowhooks;
status = luaF_close(L, oldtop, status);
oldtop = restorestack(L, old_top); /* previous call may change stack */
luaD_seterrorobj(L, status, oldtop);
luaD_shrinkstack(L);
}
L->errfunc = old_errfunc;
return status;
}
/*
** Execute a protected parser.
*/
struct SParser { /* data to 'f_parser' */
ZIO *z;
Mbuffer buff; /* dynamic structure used by the scanner */
Dyndata dyd; /* dynamic structures used by the parser */
const char *mode;
const char *name;
};
static void checkmode (lua_State *L, const char *mode, const char *x) {
if (mode && strchr(mode, x[0]) == NULL) {
luaO_pushfstring(L,
"attempt to load a %s chunk (mode is '%s')", x, mode);
luaD_throw(L, LUA_ERRSYNTAX);
}
}
static void f_parser (lua_State *L, void *ud) {
LClosure *cl;
struct SParser *p = cast(struct SParser *, ud);
int c = zgetc(p->z); /* read first character */
if (c == LUA_SIGNATURE[0]) {
checkmode(L, p->mode, "binary");
cl = luaU_undump(L, p->z, p->name);
}
else {
checkmode(L, p->mode, "text");
cl = luaY_parser(L, p->z, &p->buff, &p->dyd, p->name, c);
}
lua_assert(cl->nupvalues == cl->p->sizeupvalues);
luaF_initupvals(L, cl);
}
int luaD_protectedparser (lua_State *L, ZIO *z, const char *name,
const char *mode) {
struct SParser p;
int status;
incnny(L); /* cannot yield during parsing */
p.z = z; p.name = name; p.mode = mode;
p.dyd.actvar.arr = NULL; p.dyd.actvar.size = 0;
p.dyd.gt.arr = NULL; p.dyd.gt.size = 0;
p.dyd.label.arr = NULL; p.dyd.label.size = 0;
luaZ_initbuffer(L, &p.buff);
status = luaD_pcall(L, f_parser, &p, savestack(L, L->top), L->errfunc);
luaZ_freebuffer(L, &p.buff);
luaM_freearray(L, p.dyd.actvar.arr, p.dyd.actvar.size);
luaM_freearray(L, p.dyd.gt.arr, p.dyd.gt.size);
luaM_freearray(L, p.dyd.label.arr, p.dyd.label.size);
decnny(L);
return status;
}
| ./CrossVul/dataset_final_sorted/CWE-416/c/good_4230_0 |
crossvul-cpp_data_good_5489_0 | /*
* Functions related to mapping data to requests
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>
#include "blk.h"
/*
* Append a bio to a passthrough request. Only works if the bio can be
* merged into the request based on the driver constraints.
*/
int blk_rq_append_bio(struct request *rq, struct bio *bio)
{
if (!rq->bio) {
blk_rq_bio_prep(rq->q, rq, bio);
} else {
if (!ll_back_merge_fn(rq->q, rq, bio))
return -EINVAL;
rq->biotail->bi_next = bio;
rq->biotail = bio;
rq->__data_len += bio->bi_iter.bi_size;
}
return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);
static int __blk_rq_unmap_user(struct bio *bio)
{
int ret = 0;
if (bio) {
if (bio_flagged(bio, BIO_USER_MAPPED))
bio_unmap_user(bio);
else
ret = bio_uncopy_user(bio);
}
return ret;
}
static int __blk_rq_map_user_iov(struct request *rq,
struct rq_map_data *map_data, struct iov_iter *iter,
gfp_t gfp_mask, bool copy)
{
struct request_queue *q = rq->q;
struct bio *bio, *orig_bio;
int ret;
if (copy)
bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);
else
bio = bio_map_user_iov(q, iter, gfp_mask);
if (IS_ERR(bio))
return PTR_ERR(bio);
if (map_data && map_data->null_mapped)
bio_set_flag(bio, BIO_NULL_MAPPED);
iov_iter_advance(iter, bio->bi_iter.bi_size);
if (map_data)
map_data->offset += bio->bi_iter.bi_size;
orig_bio = bio;
blk_queue_bounce(q, &bio);
/*
* We link the bounce buffer in and could have to traverse it
* later so we have to get a ref to prevent it from being freed
*/
bio_get(bio);
ret = blk_rq_append_bio(rq, bio);
if (ret) {
bio_endio(bio);
__blk_rq_unmap_user(orig_bio);
bio_put(bio);
return ret;
}
return 0;
}
/**
* blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
* @q: request queue where request should be inserted
* @rq: request to map data to
* @map_data: pointer to the rq_map_data holding pages (if necessary)
* @iter: iovec iterator
* @gfp_mask: memory allocation flags
*
* Description:
* Data will be mapped directly for zero copy I/O, if possible. Otherwise
* a kernel bounce buffer is used.
*
* A matching blk_rq_unmap_user() must be issued at the end of I/O, while
* still in process context.
*
* Note: The mapped bio may need to be bounced through blk_queue_bounce()
* before being submitted to the device, as pages mapped may be out of
* reach. It's the caller's responsibility to make sure this happens. The
* original bio must be passed back in to blk_rq_unmap_user() for proper
* unmapping.
*/
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
struct rq_map_data *map_data,
const struct iov_iter *iter, gfp_t gfp_mask)
{
bool copy = false;
unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
struct bio *bio = NULL;
struct iov_iter i;
int ret;
if (!iter_is_iovec(iter))
goto fail;
if (map_data)
copy = true;
else if (iov_iter_alignment(iter) & align)
copy = true;
else if (queue_virt_boundary(q))
copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);
i = *iter;
do {
ret =__blk_rq_map_user_iov(rq, map_data, &i, gfp_mask, copy);
if (ret)
goto unmap_rq;
if (!bio)
bio = rq->bio;
} while (iov_iter_count(&i));
if (!bio_flagged(bio, BIO_USER_MAPPED))
rq->cmd_flags |= REQ_COPY_USER;
return 0;
unmap_rq:
__blk_rq_unmap_user(bio);
fail:
rq->bio = NULL;
return -EINVAL;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
int blk_rq_map_user(struct request_queue *q, struct request *rq,
struct rq_map_data *map_data, void __user *ubuf,
unsigned long len, gfp_t gfp_mask)
{
struct iovec iov;
struct iov_iter i;
int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);
if (unlikely(ret < 0))
return ret;
return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
}
EXPORT_SYMBOL(blk_rq_map_user);
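/*
 * Hedged usage sketch (illustrative, not part of this file): a passthrough
 * (REQ_TYPE_BLOCK_PC) driver maps a user buffer into a request roughly as
 * below. The helper name is hypothetical and the allocation/execution call
 * signatures vary across kernel versions; treat this as a shape, not a
 * drop-in.
 */
static int example_map_and_execute(struct request_queue *q,
				   void __user *ubuf, unsigned long len)
{
	struct request *rq;
	struct bio *bio;
	int ret;

	rq = blk_get_request(q, READ, GFP_KERNEL);
	if (IS_ERR(rq))
		return PTR_ERR(rq);
	rq->cmd_type = REQ_TYPE_BLOCK_PC;

	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
	if (ret)
		goto out;

	bio = rq->bio;			/* save before execution; completion may change rq->bio */
	blk_execute_rq(q, NULL, rq, 0);
	ret = blk_rq_unmap_user(bio);	/* unmap with the saved head, per the kerneldoc below */
out:
	blk_put_request(rq);
	return ret;
}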
/**
* blk_rq_unmap_user - unmap a request with user data
* @bio: start of bio list
*
* Description:
* Unmap a rq previously mapped by blk_rq_map_user(). The caller must
* supply the original rq->bio from the blk_rq_map_user() return, since
* the I/O completion may have changed rq->bio.
*/
int blk_rq_unmap_user(struct bio *bio)
{
struct bio *mapped_bio;
int ret = 0, ret2;
while (bio) {
mapped_bio = bio;
if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
mapped_bio = bio->bi_private;
ret2 = __blk_rq_unmap_user(mapped_bio);
if (ret2 && !ret)
ret = ret2;
mapped_bio = bio;
bio = bio->bi_next;
bio_put(mapped_bio);
}
return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);
/**
* blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
* @q: request queue where request should be inserted
* @rq: request to fill
* @kbuf: the kernel buffer
* @len: length of user data
* @gfp_mask: memory allocation flags
*
* Description:
* Data will be mapped directly if possible. Otherwise a bounce
* buffer is used. Can be called multiple times to append multiple
* buffers.
*/
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
unsigned int len, gfp_t gfp_mask)
{
int reading = rq_data_dir(rq) == READ;
unsigned long addr = (unsigned long) kbuf;
int do_copy = 0;
struct bio *bio;
int ret;
if (len > (queue_max_hw_sectors(q) << 9))
return -EINVAL;
if (!len || !kbuf)
return -EINVAL;
do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
if (do_copy)
bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
else
bio = bio_map_kern(q, kbuf, len, gfp_mask);
if (IS_ERR(bio))
return PTR_ERR(bio);
if (!reading)
bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
if (do_copy)
rq->cmd_flags |= REQ_COPY_USER;
ret = blk_rq_append_bio(rq, bio);
if (unlikely(ret)) {
/* request is too big */
bio_put(bio);
return ret;
}
blk_queue_bounce(q, &rq->bio);
return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
| ./CrossVul/dataset_final_sorted/CWE-416/c/good_5489_0 |
crossvul-cpp_data_good_545_0 | /*
* Copyright (C) 1991, 1992 Linus Torvalds
* Copyright (C) 1994, Karl Keyte: Added support for disk statistics
* Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
* Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
* kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
* - July2000
* bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001
*/
/*
* This handles all read/write requests to block devices
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/fault-inject.h>
#include <linux/list_sort.h>
#include <linux/delay.h>
#include <linux/ratelimit.h>
#include <linux/pm_runtime.h>
#include <linux/blk-cgroup.h>
#include <linux/debugfs.h>
#include <linux/bpf.h>
#define CREATE_TRACE_POINTS
#include <trace/events/block.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"
#ifdef CONFIG_DEBUG_FS
struct dentry *blk_debugfs_root;
#endif
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_split);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);
DEFINE_IDA(blk_queue_ida);
/*
* For the allocated request tables
*/
struct kmem_cache *request_cachep;
/*
* For queue allocation
*/
struct kmem_cache *blk_requestq_cachep;
/*
* Controlling structure to kblockd
*/
static struct workqueue_struct *kblockd_workqueue;
/**
* blk_queue_flag_set - atomically set a queue flag
* @flag: flag to be set
* @q: request queue
*/
void blk_queue_flag_set(unsigned int flag, struct request_queue *q)
{
unsigned long flags;
spin_lock_irqsave(q->queue_lock, flags);
queue_flag_set(flag, q);
spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_queue_flag_set);
/**
* blk_queue_flag_clear - atomically clear a queue flag
* @flag: flag to be cleared
* @q: request queue
*/
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
{
unsigned long flags;
spin_lock_irqsave(q->queue_lock, flags);
queue_flag_clear(flag, q);
spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_queue_flag_clear);
/**
* blk_queue_flag_test_and_set - atomically test and set a queue flag
* @flag: flag to be set
* @q: request queue
*
* Returns the previous value of @flag - 0 if the flag was not set and 1 if
* the flag was already set.
*/
bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q)
{
unsigned long flags;
bool res;
spin_lock_irqsave(q->queue_lock, flags);
res = queue_flag_test_and_set(flag, q);
spin_unlock_irqrestore(q->queue_lock, flags);
return res;
}
EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_set);
/**
* blk_queue_flag_test_and_clear - atomically test and clear a queue flag
* @flag: flag to be cleared
* @q: request queue
*
* Returns the previous value of @flag - 0 if the flag was not set and 1 if
* the flag was set.
*/
bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q)
{
unsigned long flags;
bool res;
spin_lock_irqsave(q->queue_lock, flags);
res = queue_flag_test_and_clear(flag, q);
spin_unlock_irqrestore(q->queue_lock, flags);
return res;
}
EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_clear);
static void blk_clear_congested(struct request_list *rl, int sync)
{
#ifdef CONFIG_CGROUP_WRITEBACK
clear_wb_congested(rl->blkg->wb_congested, sync);
#else
/*
* If !CGROUP_WRITEBACK, all blkg's map to bdi->wb and we shouldn't
* flip its congestion state for events on other blkcgs.
*/
if (rl == &rl->q->root_rl)
clear_wb_congested(rl->q->backing_dev_info->wb.congested, sync);
#endif
}
static void blk_set_congested(struct request_list *rl, int sync)
{
#ifdef CONFIG_CGROUP_WRITEBACK
set_wb_congested(rl->blkg->wb_congested, sync);
#else
/* see blk_clear_congested() */
if (rl == &rl->q->root_rl)
set_wb_congested(rl->q->backing_dev_info->wb.congested, sync);
#endif
}
void blk_queue_congestion_threshold(struct request_queue *q)
{
int nr;
nr = q->nr_requests - (q->nr_requests / 8) + 1;
if (nr > q->nr_requests)
nr = q->nr_requests;
q->nr_congestion_on = nr;
nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1;
if (nr < 1)
nr = 1;
q->nr_congestion_off = nr;
}
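/*
 * Worked example of the thresholds above: with the common default of
 * q->nr_requests = 128, congestion turns on at 128 - 16 + 1 = 113
 * allocated requests and back off at 128 - 16 - 8 - 1 = 103, leaving a
 * hysteresis band so the queue does not flap between the two states.
 */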
void blk_rq_init(struct request_queue *q, struct request *rq)
{
memset(rq, 0, sizeof(*rq));
INIT_LIST_HEAD(&rq->queuelist);
INIT_LIST_HEAD(&rq->timeout_list);
rq->cpu = -1;
rq->q = q;
rq->__sector = (sector_t) -1;
INIT_HLIST_NODE(&rq->hash);
RB_CLEAR_NODE(&rq->rb_node);
rq->tag = -1;
rq->internal_tag = -1;
rq->start_time_ns = ktime_get_ns();
rq->part = NULL;
}
EXPORT_SYMBOL(blk_rq_init);
static const struct {
int errno;
const char *name;
} blk_errors[] = {
[BLK_STS_OK] = { 0, "" },
[BLK_STS_NOTSUPP] = { -EOPNOTSUPP, "operation not supported" },
[BLK_STS_TIMEOUT] = { -ETIMEDOUT, "timeout" },
[BLK_STS_NOSPC] = { -ENOSPC, "critical space allocation" },
[BLK_STS_TRANSPORT] = { -ENOLINK, "recoverable transport" },
[BLK_STS_TARGET] = { -EREMOTEIO, "critical target" },
[BLK_STS_NEXUS] = { -EBADE, "critical nexus" },
[BLK_STS_MEDIUM] = { -ENODATA, "critical medium" },
[BLK_STS_PROTECTION] = { -EILSEQ, "protection" },
[BLK_STS_RESOURCE] = { -ENOMEM, "kernel resource" },
[BLK_STS_DEV_RESOURCE] = { -EBUSY, "device resource" },
[BLK_STS_AGAIN] = { -EAGAIN, "nonblocking retry" },
/* device mapper special case, should not leak out: */
[BLK_STS_DM_REQUEUE] = { -EREMCHG, "dm internal retry" },
/* everything else not covered above: */
[BLK_STS_IOERR] = { -EIO, "I/O" },
};
blk_status_t errno_to_blk_status(int errno)
{
int i;
for (i = 0; i < ARRAY_SIZE(blk_errors); i++) {
if (blk_errors[i].errno == errno)
return (__force blk_status_t)i;
}
return BLK_STS_IOERR;
}
EXPORT_SYMBOL_GPL(errno_to_blk_status);
int blk_status_to_errno(blk_status_t status)
{
int idx = (__force int)status;
if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
return -EIO;
return blk_errors[idx].errno;
}
EXPORT_SYMBOL_GPL(blk_status_to_errno);
static void print_req_error(struct request *req, blk_status_t status)
{
int idx = (__force int)status;
if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
return;
printk_ratelimited(KERN_ERR "%s: %s error, dev %s, sector %llu\n",
__func__, blk_errors[idx].name, req->rq_disk ?
req->rq_disk->disk_name : "?",
(unsigned long long)blk_rq_pos(req));
}
static void req_bio_endio(struct request *rq, struct bio *bio,
unsigned int nbytes, blk_status_t error)
{
if (error)
bio->bi_status = error;
if (unlikely(rq->rq_flags & RQF_QUIET))
bio_set_flag(bio, BIO_QUIET);
bio_advance(bio, nbytes);
/* don't actually finish bio if it's part of flush sequence */
if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))
bio_endio(bio);
}
void blk_dump_rq_flags(struct request *rq, char *msg)
{
printk(KERN_INFO "%s: dev %s: flags=%llx\n", msg,
rq->rq_disk ? rq->rq_disk->disk_name : "?",
(unsigned long long) rq->cmd_flags);
printk(KERN_INFO " sector %llu, nr/cnr %u/%u\n",
(unsigned long long)blk_rq_pos(rq),
blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
printk(KERN_INFO " bio %p, biotail %p, len %u\n",
rq->bio, rq->biotail, blk_rq_bytes(rq));
}
EXPORT_SYMBOL(blk_dump_rq_flags);
static void blk_delay_work(struct work_struct *work)
{
struct request_queue *q;
q = container_of(work, struct request_queue, delay_work.work);
spin_lock_irq(q->queue_lock);
__blk_run_queue(q);
spin_unlock_irq(q->queue_lock);
}
/**
* blk_delay_queue - restart queueing after defined interval
* @q: The &struct request_queue in question
* @msecs: Delay in msecs
*
* Description:
* Sometimes queueing needs to be postponed for a little while, to allow
* resources to come back. This function will make sure that queueing is
* restarted around the specified time.
*/
void blk_delay_queue(struct request_queue *q, unsigned long msecs)
{
lockdep_assert_held(q->queue_lock);
WARN_ON_ONCE(q->mq_ops);
if (likely(!blk_queue_dead(q)))
queue_delayed_work(kblockd_workqueue, &q->delay_work,
msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_delay_queue);
/**
* blk_start_queue_async - asynchronously restart a previously stopped queue
* @q: The &struct request_queue in question
*
* Description:
* blk_start_queue_async() will clear the stop flag on the queue, and
* ensure that the request_fn for the queue is run from an async
* context.
**/
void blk_start_queue_async(struct request_queue *q)
{
lockdep_assert_held(q->queue_lock);
WARN_ON_ONCE(q->mq_ops);
queue_flag_clear(QUEUE_FLAG_STOPPED, q);
blk_run_queue_async(q);
}
EXPORT_SYMBOL(blk_start_queue_async);
/**
* blk_start_queue - restart a previously stopped queue
* @q: The &struct request_queue in question
*
* Description:
* blk_start_queue() will clear the stop flag on the queue, and call
* the request_fn for the queue if it was in a stopped state when
* entered. Also see blk_stop_queue().
**/
void blk_start_queue(struct request_queue *q)
{
lockdep_assert_held(q->queue_lock);
WARN_ON_ONCE(q->mq_ops);
queue_flag_clear(QUEUE_FLAG_STOPPED, q);
__blk_run_queue(q);
}
EXPORT_SYMBOL(blk_start_queue);
/**
* blk_stop_queue - stop a queue
* @q: The &struct request_queue in question
*
* Description:
* The Linux block layer assumes that a block driver will consume all
* entries on the request queue when the request_fn strategy is called.
* Often this will not happen, because of hardware limitations (queue
* depth settings). If a device driver gets a 'queue full' response,
* or if it simply chooses not to queue more I/O at one point, it can
* call this function to prevent the request_fn from being called until
* the driver has signalled it's ready to go again. This happens by calling
* blk_start_queue() to restart queue operations.
**/
void blk_stop_queue(struct request_queue *q)
{
lockdep_assert_held(q->queue_lock);
WARN_ON_ONCE(q->mq_ops);
cancel_delayed_work(&q->delay_work);
queue_flag_set(QUEUE_FLAG_STOPPED, q);
}
EXPORT_SYMBOL(blk_stop_queue);
/**
* blk_sync_queue - cancel any pending callbacks on a queue
* @q: the queue
*
* Description:
* The block layer may perform asynchronous callback activity
* on a queue, such as calling the unplug function after a timeout.
* A block device may call blk_sync_queue to ensure that any
* such activity is cancelled, thus allowing it to release resources
* that the callbacks might use. The caller must already have made sure
* that its ->make_request_fn will not re-add plugging prior to calling
* this function.
*
* This function does not cancel any asynchronous activity arising
* out of elevator or throttling code. That would require elevator_exit()
* and blkcg_exit_queue() to be called with queue lock initialized.
*
*/
void blk_sync_queue(struct request_queue *q)
{
del_timer_sync(&q->timeout);
cancel_work_sync(&q->timeout_work);
if (q->mq_ops) {
struct blk_mq_hw_ctx *hctx;
int i;
cancel_delayed_work_sync(&q->requeue_work);
queue_for_each_hw_ctx(q, hctx, i)
cancel_delayed_work_sync(&hctx->run_work);
} else {
cancel_delayed_work_sync(&q->delay_work);
}
}
EXPORT_SYMBOL(blk_sync_queue);
/**
* blk_set_preempt_only - set QUEUE_FLAG_PREEMPT_ONLY
* @q: request queue pointer
*
* Returns the previous value of the PREEMPT_ONLY flag - 0 if the flag was not
* set and 1 if the flag was already set.
*/
int blk_set_preempt_only(struct request_queue *q)
{
return blk_queue_flag_test_and_set(QUEUE_FLAG_PREEMPT_ONLY, q);
}
EXPORT_SYMBOL_GPL(blk_set_preempt_only);
void blk_clear_preempt_only(struct request_queue *q)
{
blk_queue_flag_clear(QUEUE_FLAG_PREEMPT_ONLY, q);
wake_up_all(&q->mq_freeze_wq);
}
EXPORT_SYMBOL_GPL(blk_clear_preempt_only);
/**
* __blk_run_queue_uncond - run a queue whether or not it has been stopped
* @q: The queue to run
*
* Description:
* Invoke request handling on a queue if there are any pending requests.
* May be used to restart request handling after a request has completed.
* This variant runs the queue whether or not the queue has been
* stopped. Must be called with the queue lock held and interrupts
* disabled. See also @blk_run_queue.
*/
inline void __blk_run_queue_uncond(struct request_queue *q)
{
lockdep_assert_held(q->queue_lock);
WARN_ON_ONCE(q->mq_ops);
if (unlikely(blk_queue_dead(q)))
return;
/*
* Some request_fn implementations, e.g. scsi_request_fn(), unlock
* the queue lock internally. As a result multiple threads may be
* running such a request function concurrently. Keep track of the
* number of active request_fn invocations such that blk_drain_queue()
* can wait until all these request_fn calls have finished.
*/
q->request_fn_active++;
q->request_fn(q);
q->request_fn_active--;
}
EXPORT_SYMBOL_GPL(__blk_run_queue_uncond);
/**
* __blk_run_queue - run a single device queue
* @q: The queue to run
*
* Description:
* See @blk_run_queue.
*/
void __blk_run_queue(struct request_queue *q)
{
lockdep_assert_held(q->queue_lock);
WARN_ON_ONCE(q->mq_ops);
if (unlikely(blk_queue_stopped(q)))
return;
__blk_run_queue_uncond(q);
}
EXPORT_SYMBOL(__blk_run_queue);
/**
* blk_run_queue_async - run a single device queue in workqueue context
* @q: The queue to run
*
* Description:
* Tells kblockd to perform the equivalent of @blk_run_queue on our
* behalf.
*
* Note:
* Since it is not allowed to run q->delay_work after blk_cleanup_queue()
* has canceled q->delay_work, callers must hold the queue lock to avoid
* race conditions between blk_cleanup_queue() and blk_run_queue_async().
*/
void blk_run_queue_async(struct request_queue *q)
{
lockdep_assert_held(q->queue_lock);
WARN_ON_ONCE(q->mq_ops);
if (likely(!blk_queue_stopped(q) && !blk_queue_dead(q)))
mod_delayed_work(kblockd_workqueue, &q->delay_work, 0);
}
EXPORT_SYMBOL(blk_run_queue_async);
/**
* blk_run_queue - run a single device queue
* @q: The queue to run
*
* Description:
* Invoke request handling on this queue, if it has pending work to do.
* May be used to restart queueing when a request has completed.
*/
void blk_run_queue(struct request_queue *q)
{
unsigned long flags;
WARN_ON_ONCE(q->mq_ops);
spin_lock_irqsave(q->queue_lock, flags);
__blk_run_queue(q);
spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_run_queue);
void blk_put_queue(struct request_queue *q)
{
kobject_put(&q->kobj);
}
EXPORT_SYMBOL(blk_put_queue);
/**
* __blk_drain_queue - drain requests from request_queue
* @q: queue to drain
* @drain_all: whether to drain all requests or only the ones w/ ELVPRIV
*
* Drain requests from @q. If @drain_all is set, all requests are drained.
* If not, only ELVPRIV requests are drained. The caller is responsible
* for ensuring that no new requests which need to be drained are queued.
*/
static void __blk_drain_queue(struct request_queue *q, bool drain_all)
__releases(q->queue_lock)
__acquires(q->queue_lock)
{
int i;
lockdep_assert_held(q->queue_lock);
WARN_ON_ONCE(q->mq_ops);
while (true) {
bool drain = false;
/*
* The caller might be trying to drain @q before its
* elevator is initialized.
*/
if (q->elevator)
elv_drain_elevator(q);
blkcg_drain_queue(q);
/*
* This function might be called on a queue which failed
* driver init after queue creation or is not yet fully
* active yet. Some drivers (e.g. fd and loop) get unhappy
* in such cases. Kick queue iff dispatch queue has
* something on it and @q has request_fn set.
*/
if (!list_empty(&q->queue_head) && q->request_fn)
__blk_run_queue(q);
drain |= q->nr_rqs_elvpriv;
drain |= q->request_fn_active;
/*
* Unfortunately, requests are queued at and tracked from
* multiple places and there's no single counter which can
* be drained. Check all the queues and counters.
*/
if (drain_all) {
struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);
drain |= !list_empty(&q->queue_head);
for (i = 0; i < 2; i++) {
drain |= q->nr_rqs[i];
drain |= q->in_flight[i];
if (fq)
drain |= !list_empty(&fq->flush_queue[i]);
}
}
if (!drain)
break;
spin_unlock_irq(q->queue_lock);
msleep(10);
spin_lock_irq(q->queue_lock);
}
/*
* With queue marked dead, any woken up waiter will fail the
* allocation path, so the wakeup chaining is lost and we're
* left with hung waiters. We need to wake up those waiters.
*/
if (q->request_fn) {
struct request_list *rl;
blk_queue_for_each_rl(rl, q)
for (i = 0; i < ARRAY_SIZE(rl->wait); i++)
wake_up_all(&rl->wait[i]);
}
}
void blk_drain_queue(struct request_queue *q)
{
spin_lock_irq(q->queue_lock);
__blk_drain_queue(q, true);
spin_unlock_irq(q->queue_lock);
}
/**
* blk_queue_bypass_start - enter queue bypass mode
* @q: queue of interest
*
* In bypass mode, only the dispatch FIFO queue of @q is used. This
* function makes @q enter bypass mode and drains all requests which were
* throttled or issued before. On return, it's guaranteed that no request
* is being throttled or has ELVPRIV set and blk_queue_bypass() %true
* inside queue or RCU read lock.
*/
void blk_queue_bypass_start(struct request_queue *q)
{
WARN_ON_ONCE(q->mq_ops);
spin_lock_irq(q->queue_lock);
q->bypass_depth++;
queue_flag_set(QUEUE_FLAG_BYPASS, q);
spin_unlock_irq(q->queue_lock);
/*
* Queues start drained. Skip actual draining till init is
* complete. This avoids lengthy delays during queue init which
* can happen many times during boot.
*/
if (blk_queue_init_done(q)) {
spin_lock_irq(q->queue_lock);
__blk_drain_queue(q, false);
spin_unlock_irq(q->queue_lock);
/* ensure blk_queue_bypass() is %true inside RCU read lock */
synchronize_rcu();
}
}
EXPORT_SYMBOL_GPL(blk_queue_bypass_start);
/**
* blk_queue_bypass_end - leave queue bypass mode
* @q: queue of interest
*
* Leave bypass mode and restore the normal queueing behavior.
*
* Note: although blk_queue_bypass_start() is only called for blk-sq queues,
* this function is called for both blk-sq and blk-mq queues.
*/
void blk_queue_bypass_end(struct request_queue *q)
{
spin_lock_irq(q->queue_lock);
if (!--q->bypass_depth)
queue_flag_clear(QUEUE_FLAG_BYPASS, q);
WARN_ON_ONCE(q->bypass_depth < 0);
spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
void blk_set_queue_dying(struct request_queue *q)
{
blk_queue_flag_set(QUEUE_FLAG_DYING, q);
/*
* When the queue DYING flag is set, we need to block new requests
* from entering the queue, so we call blk_freeze_queue_start() to
* prevent I/O from crossing blk_queue_enter().
*/
blk_freeze_queue_start(q);
if (q->mq_ops)
blk_mq_wake_waiters(q);
else {
struct request_list *rl;
spin_lock_irq(q->queue_lock);
blk_queue_for_each_rl(rl, q) {
if (rl->rq_pool) {
wake_up_all(&rl->wait[BLK_RW_SYNC]);
wake_up_all(&rl->wait[BLK_RW_ASYNC]);
}
}
spin_unlock_irq(q->queue_lock);
}
/* Make blk_queue_enter() reexamine the DYING flag. */
wake_up_all(&q->mq_freeze_wq);
}
EXPORT_SYMBOL_GPL(blk_set_queue_dying);
/**
* blk_cleanup_queue - shutdown a request queue
* @q: request queue to shutdown
*
* Mark @q DYING, drain all pending requests, mark @q DEAD, destroy and
* put it. All future requests will be failed immediately with -ENODEV.
*/
void blk_cleanup_queue(struct request_queue *q)
{
spinlock_t *lock = q->queue_lock;
/* mark @q DYING, no new request or merges will be allowed afterwards */
mutex_lock(&q->sysfs_lock);
blk_set_queue_dying(q);
spin_lock_irq(lock);
/*
* A dying queue is permanently in bypass mode till released. Note
* that, unlike blk_queue_bypass_start(), we aren't performing
* synchronize_rcu() after entering bypass mode to avoid the delay
* as some drivers create and destroy a lot of queues while
* probing. This is still safe because blk_release_queue() will be
* called only after the queue refcnt drops to zero and nothing,
* RCU or not, would be traversing the queue by then.
*/
q->bypass_depth++;
queue_flag_set(QUEUE_FLAG_BYPASS, q);
queue_flag_set(QUEUE_FLAG_NOMERGES, q);
queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
queue_flag_set(QUEUE_FLAG_DYING, q);
spin_unlock_irq(lock);
mutex_unlock(&q->sysfs_lock);
/*
* Drain all requests queued before DYING marking. Set DEAD flag to
* prevent that q->request_fn() gets invoked after draining finished.
*/
blk_freeze_queue(q);
spin_lock_irq(lock);
queue_flag_set(QUEUE_FLAG_DEAD, q);
spin_unlock_irq(lock);
/*
* make sure all in-progress dispatch are completed because
* blk_freeze_queue() can only complete all requests, and
* dispatch may still be in-progress since we dispatch requests
* from more than one contexts.
*
* No need to quiesce queue if it isn't initialized yet since
* blk_freeze_queue() should be enough for cases of passthrough
* request.
*/
if (q->mq_ops && blk_queue_init_done(q))
blk_mq_quiesce_queue(q);
/* for synchronous bio-based driver finish in-flight integrity i/o */
blk_flush_integrity();
/* @q won't process any more request, flush async actions */
del_timer_sync(&q->backing_dev_info->laptop_mode_wb_timer);
blk_sync_queue(q);
/*
* I/O scheduler exit is only safe after the sysfs scheduler attribute
* has been removed.
*/
WARN_ON_ONCE(q->kobj.state_in_sysfs);
/*
* Since the I/O scheduler exit code may access cgroup information,
* perform I/O scheduler exit before disassociating from the block
* cgroup controller.
*/
if (q->elevator) {
ioc_clear_queue(q);
elevator_exit(q, q->elevator);
q->elevator = NULL;
}
/*
* Remove all references to @q from the block cgroup controller before
* restoring @q->queue_lock to avoid that restoring this pointer causes
* e.g. blkcg_print_blkgs() to crash.
*/
blkcg_exit_queue(q);
/*
* Since the cgroup code may dereference the @q->backing_dev_info
* pointer, only decrease its reference count after having removed the
* association with the block cgroup controller.
*/
bdi_put(q->backing_dev_info);
if (q->mq_ops)
blk_mq_free_queue(q);
percpu_ref_exit(&q->q_usage_counter);
spin_lock_irq(lock);
if (q->queue_lock != &q->__queue_lock)
q->queue_lock = &q->__queue_lock;
spin_unlock_irq(lock);
/* @q is and will stay empty, shutdown and put */
blk_put_queue(q);
}
EXPORT_SYMBOL(blk_cleanup_queue);
/* Allocate memory local to the request queue */
static void *alloc_request_simple(gfp_t gfp_mask, void *data)
{
struct request_queue *q = data;
return kmem_cache_alloc_node(request_cachep, gfp_mask, q->node);
}
static void free_request_simple(void *element, void *data)
{
kmem_cache_free(request_cachep, element);
}
static void *alloc_request_size(gfp_t gfp_mask, void *data)
{
struct request_queue *q = data;
struct request *rq;
rq = kmalloc_node(sizeof(struct request) + q->cmd_size, gfp_mask,
q->node);
if (rq && q->init_rq_fn && q->init_rq_fn(q, rq, gfp_mask) < 0) {
kfree(rq);
rq = NULL;
}
return rq;
}
static void free_request_size(void *element, void *data)
{
struct request_queue *q = data;
if (q->exit_rq_fn)
q->exit_rq_fn(q, element);
kfree(element);
}
int blk_init_rl(struct request_list *rl, struct request_queue *q,
gfp_t gfp_mask)
{
if (unlikely(rl->rq_pool) || q->mq_ops)
return 0;
rl->q = q;
rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0;
rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0;
init_waitqueue_head(&rl->wait[BLK_RW_SYNC]);
init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]);
if (q->cmd_size) {
rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ,
alloc_request_size, free_request_size,
q, gfp_mask, q->node);
} else {
rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ,
alloc_request_simple, free_request_simple,
q, gfp_mask, q->node);
}
if (!rl->rq_pool)
return -ENOMEM;
if (rl != &q->root_rl)
WARN_ON_ONCE(!blk_get_queue(q));
return 0;
}
void blk_exit_rl(struct request_queue *q, struct request_list *rl)
{
if (rl->rq_pool) {
mempool_destroy(rl->rq_pool);
if (rl != &q->root_rl)
blk_put_queue(q);
}
}
struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
{
return blk_alloc_queue_node(gfp_mask, NUMA_NO_NODE, NULL);
}
EXPORT_SYMBOL(blk_alloc_queue);
/**
* blk_queue_enter() - try to increase q->q_usage_counter
* @q: request queue pointer
* @flags: BLK_MQ_REQ_NOWAIT and/or BLK_MQ_REQ_PREEMPT
*/
int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
{
const bool preempt = flags & BLK_MQ_REQ_PREEMPT;
while (true) {
bool success = false;
rcu_read_lock();
if (percpu_ref_tryget_live(&q->q_usage_counter)) {
/*
* The code that sets the PREEMPT_ONLY flag is
* responsible for ensuring that that flag is globally
* visible before the queue is unfrozen.
*/
if (preempt || !blk_queue_preempt_only(q)) {
success = true;
} else {
percpu_ref_put(&q->q_usage_counter);
}
}
rcu_read_unlock();
if (success)
return 0;
if (flags & BLK_MQ_REQ_NOWAIT)
return -EBUSY;
/*
* This is the read-side pairing of the barrier in
* blk_freeze_queue_start(): we need to order the read of the
* __PERCPU_REF_DEAD flag of .q_usage_counter against the reads
* of .mq_freeze_depth and the queue dying flag, otherwise the
* wait below may never return if the two reads are reordered.
*/
smp_rmb();
wait_event(q->mq_freeze_wq,
(atomic_read(&q->mq_freeze_depth) == 0 &&
(preempt || !blk_queue_preempt_only(q))) ||
blk_queue_dying(q));
if (blk_queue_dying(q))
return -ENODEV;
}
}
void blk_queue_exit(struct request_queue *q)
{
percpu_ref_put(&q->q_usage_counter);
}
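/*
 * A minimal usage sketch for the pair above (an illustration, not code
 * from this file): a caller that must pin @q while touching queue
 * state; do_something() is a hypothetical helper.
 *
 *	if (blk_queue_enter(q, BLK_MQ_REQ_NOWAIT))
 *		return -EBUSY;
 *	do_something(q);
 *	blk_queue_exit(q);
 */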
static void blk_queue_usage_counter_release(struct percpu_ref *ref)
{
struct request_queue *q =
container_of(ref, struct request_queue, q_usage_counter);
wake_up_all(&q->mq_freeze_wq);
}
static void blk_rq_timed_out_timer(struct timer_list *t)
{
struct request_queue *q = from_timer(q, t, timeout);
kblockd_schedule_work(&q->timeout_work);
}
/**
* blk_alloc_queue_node - allocate a request queue
* @gfp_mask: memory allocation flags
* @node_id: NUMA node to allocate memory from
* @lock: For legacy queues, pointer to a spinlock that will be used to e.g.
* serialize calls to the legacy .request_fn() callback. Ignored for
* blk-mq request queues.
*
* Note: pass the queue lock as the third argument to this function instead of
* setting the queue lock pointer explicitly to avoid triggering a sporadic
* crash in the blkcg code. The reason is that this function calls
* blkcg_init_queue(), and the queue lock pointer must be set before that call.
*/
struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id,
spinlock_t *lock)
{
struct request_queue *q;
int ret;
q = kmem_cache_alloc_node(blk_requestq_cachep,
gfp_mask | __GFP_ZERO, node_id);
if (!q)
return NULL;
INIT_LIST_HEAD(&q->queue_head);
q->last_merge = NULL;
q->end_sector = 0;
q->boundary_rq = NULL;
q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask);
if (q->id < 0)
goto fail_q;
ret = bioset_init(&q->bio_split, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
if (ret)
goto fail_id;
q->backing_dev_info = bdi_alloc_node(gfp_mask, node_id);
if (!q->backing_dev_info)
goto fail_split;
q->stats = blk_alloc_queue_stats();
if (!q->stats)
goto fail_stats;
q->backing_dev_info->ra_pages =
(VM_MAX_READAHEAD * 1024) / PAGE_SIZE;
q->backing_dev_info->capabilities = BDI_CAP_CGROUP_WRITEBACK;
q->backing_dev_info->name = "block";
q->node = node_id;
timer_setup(&q->backing_dev_info->laptop_mode_wb_timer,
laptop_mode_timer_fn, 0);
timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
INIT_WORK(&q->timeout_work, NULL);
INIT_LIST_HEAD(&q->queue_head);
INIT_LIST_HEAD(&q->timeout_list);
INIT_LIST_HEAD(&q->icq_list);
#ifdef CONFIG_BLK_CGROUP
INIT_LIST_HEAD(&q->blkg_list);
#endif
INIT_DELAYED_WORK(&q->delay_work, blk_delay_work);
kobject_init(&q->kobj, &blk_queue_ktype);
#ifdef CONFIG_BLK_DEV_IO_TRACE
mutex_init(&q->blk_trace_mutex);
#endif
mutex_init(&q->sysfs_lock);
spin_lock_init(&q->__queue_lock);
if (!q->mq_ops)
q->queue_lock = lock ? : &q->__queue_lock;
/*
* A queue starts its life with bypass turned on to avoid
* unnecessary bypass on/off overhead and nasty surprises during
* init. The initial bypass will be finished when the queue is
* registered by blk_register_queue().
*/
q->bypass_depth = 1;
queue_flag_set_unlocked(QUEUE_FLAG_BYPASS, q);
init_waitqueue_head(&q->mq_freeze_wq);
/*
* Init percpu_ref in atomic mode so that it's faster to shutdown.
* See blk_register_queue() for details.
*/
if (percpu_ref_init(&q->q_usage_counter,
blk_queue_usage_counter_release,
PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
goto fail_bdi;
if (blkcg_init_queue(q))
goto fail_ref;
return q;
fail_ref:
percpu_ref_exit(&q->q_usage_counter);
fail_bdi:
blk_free_queue_stats(q->stats);
fail_stats:
bdi_put(q->backing_dev_info);
fail_split:
bioset_exit(&q->bio_split);
fail_id:
ida_simple_remove(&blk_queue_ida, q->id);
fail_q:
kmem_cache_free(blk_requestq_cachep, q);
return NULL;
}
EXPORT_SYMBOL(blk_alloc_queue_node);
/**
* blk_init_queue - prepare a request queue for use with a block device
* @rfn: The function to be called to process requests that have been
* placed on the queue.
* @lock: Request queue spin lock
*
* Description:
* If a block device wishes to use the standard request handling procedures,
* which sort requests and coalesce adjacent requests, then it must
* call blk_init_queue(). The function @rfn will be called when there
* are requests on the queue that need to be processed. If the device
* supports plugging, then @rfn may not be called immediately when requests
* are available on the queue, but may be called at some time later instead.
* Plugged queues are generally unplugged when a buffer belonging to one
* of the requests on the queue is needed, or due to memory pressure.
*
* @rfn is not required, or even expected, to remove all requests off the
* queue, but only as many as it can handle at a time. If it does leave
* requests on the queue, it is responsible for arranging that the requests
* get dealt with eventually.
*
* The queue spin lock must be held while manipulating the requests on the
* request queue; this lock is also taken from interrupt context, so it
* must be acquired with interrupts disabled.
*
* Returns a pointer to the initialized request queue, or %NULL on failure.
*
* Note:
* blk_init_queue() must be paired with a blk_cleanup_queue() call
* when the block device is deactivated (such as at module unload).
**/
struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
{
return blk_init_queue_node(rfn, lock, NUMA_NO_NODE);
}
EXPORT_SYMBOL(blk_init_queue);
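/*
 * A minimal sketch of the pairing described above, assuming a legacy
 * (non-mq) driver; mydrv_request_fn and mydrv_lock are hypothetical
 * driver-side names.
 *
 *	static DEFINE_SPINLOCK(mydrv_lock);
 *
 *	q = blk_init_queue(mydrv_request_fn, &mydrv_lock);
 *	if (!q)
 *		return -ENOMEM;
 *	...
 *	blk_cleanup_queue(q);	(at device teardown / module unload)
 */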
struct request_queue *
blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
{
struct request_queue *q;
q = blk_alloc_queue_node(GFP_KERNEL, node_id, lock);
if (!q)
return NULL;
q->request_fn = rfn;
if (blk_init_allocated_queue(q) < 0) {
blk_cleanup_queue(q);
return NULL;
}
return q;
}
EXPORT_SYMBOL(blk_init_queue_node);
static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio);
int blk_init_allocated_queue(struct request_queue *q)
{
WARN_ON_ONCE(q->mq_ops);
q->fq = blk_alloc_flush_queue(q, NUMA_NO_NODE, q->cmd_size);
if (!q->fq)
return -ENOMEM;
if (q->init_rq_fn && q->init_rq_fn(q, q->fq->flush_rq, GFP_KERNEL))
goto out_free_flush_queue;
if (blk_init_rl(&q->root_rl, q, GFP_KERNEL))
goto out_exit_flush_rq;
INIT_WORK(&q->timeout_work, blk_timeout_work);
q->queue_flags |= QUEUE_FLAG_DEFAULT;
/*
* This also sets hw/phys segments, boundary and size
*/
blk_queue_make_request(q, blk_queue_bio);
q->sg_reserved_size = INT_MAX;
if (elevator_init(q))
goto out_exit_flush_rq;
return 0;
out_exit_flush_rq:
if (q->exit_rq_fn)
q->exit_rq_fn(q, q->fq->flush_rq);
out_free_flush_queue:
blk_free_flush_queue(q->fq);
q->fq = NULL;
return -ENOMEM;
}
EXPORT_SYMBOL(blk_init_allocated_queue);
bool blk_get_queue(struct request_queue *q)
{
if (likely(!blk_queue_dying(q))) {
__blk_get_queue(q);
return true;
}
return false;
}
EXPORT_SYMBOL(blk_get_queue);
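/*
 * A minimal sketch, assuming a caller that caches a queue pointer:
 * blk_get_queue() pairs with blk_put_queue().
 *
 *	if (!blk_get_queue(q))
 *		return -ENODEV;	(queue is dying)
 *	...use q...
 *	blk_put_queue(q);
 */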
static inline void blk_free_request(struct request_list *rl, struct request *rq)
{
if (rq->rq_flags & RQF_ELVPRIV) {
elv_put_request(rl->q, rq);
if (rq->elv.icq)
put_io_context(rq->elv.icq->ioc);
}
mempool_free(rq, rl->rq_pool);
}
/*
* ioc_batching returns true if the ioc belongs to a valid batching
* process and should be given priority access to a request.
*/
static inline int ioc_batching(struct request_queue *q, struct io_context *ioc)
{
if (!ioc)
return 0;
/*
* Make sure the process is able to allocate at least 1 request
* even if the batch times out, otherwise we could theoretically
* lose wakeups.
*/
return ioc->nr_batch_requests == q->nr_batching ||
(ioc->nr_batch_requests > 0
&& time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME));
}
/*
* ioc_set_batching sets ioc to be a new "batcher" if it is not one. This
* will cause the process to be a "batcher" on all queues in the system. This
* is the behaviour we want though - once it gets a wakeup it should be given
* a nice run.
*/
static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
{
if (!ioc || ioc_batching(q, ioc))
return;
ioc->nr_batch_requests = q->nr_batching;
ioc->last_waited = jiffies;
}
static void __freed_request(struct request_list *rl, int sync)
{
struct request_queue *q = rl->q;
if (rl->count[sync] < queue_congestion_off_threshold(q))
blk_clear_congested(rl, sync);
if (rl->count[sync] + 1 <= q->nr_requests) {
if (waitqueue_active(&rl->wait[sync]))
wake_up(&rl->wait[sync]);
blk_clear_rl_full(rl, sync);
}
}
/*
* A request has just been released. Account for it, update the full and
* congestion status, wake up any waiters. Called under q->queue_lock.
*/
static void freed_request(struct request_list *rl, bool sync,
req_flags_t rq_flags)
{
struct request_queue *q = rl->q;
q->nr_rqs[sync]--;
rl->count[sync]--;
if (rq_flags & RQF_ELVPRIV)
q->nr_rqs_elvpriv--;
__freed_request(rl, sync);
if (unlikely(rl->starved[sync ^ 1]))
__freed_request(rl, sync ^ 1);
}
int blk_update_nr_requests(struct request_queue *q, unsigned int nr)
{
struct request_list *rl;
int on_thresh, off_thresh;
WARN_ON_ONCE(q->mq_ops);
spin_lock_irq(q->queue_lock);
q->nr_requests = nr;
blk_queue_congestion_threshold(q);
on_thresh = queue_congestion_on_threshold(q);
off_thresh = queue_congestion_off_threshold(q);
blk_queue_for_each_rl(rl, q) {
if (rl->count[BLK_RW_SYNC] >= on_thresh)
blk_set_congested(rl, BLK_RW_SYNC);
else if (rl->count[BLK_RW_SYNC] < off_thresh)
blk_clear_congested(rl, BLK_RW_SYNC);
if (rl->count[BLK_RW_ASYNC] >= on_thresh)
blk_set_congested(rl, BLK_RW_ASYNC);
else if (rl->count[BLK_RW_ASYNC] < off_thresh)
blk_clear_congested(rl, BLK_RW_ASYNC);
if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
blk_set_rl_full(rl, BLK_RW_SYNC);
} else {
blk_clear_rl_full(rl, BLK_RW_SYNC);
wake_up(&rl->wait[BLK_RW_SYNC]);
}
if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
blk_set_rl_full(rl, BLK_RW_ASYNC);
} else {
blk_clear_rl_full(rl, BLK_RW_ASYNC);
wake_up(&rl->wait[BLK_RW_ASYNC]);
}
}
spin_unlock_irq(q->queue_lock);
return 0;
}
/**
* __get_request - get a free request
* @rl: request list to allocate from
* @op: operation and flags
* @bio: bio to allocate request for (can be %NULL)
* @flags: BLK_MQ_REQ_* flags
* @gfp_mask: allocator flags
*
* Get a free request from @q. This function may fail under memory
* pressure or if @q is dead.
*
* Must be called with @q->queue_lock held.
* Returns an ERR_PTR on failure, with @q->queue_lock still held.
* Returns a request pointer on success, with @q->queue_lock *not* held.
*/
static struct request *__get_request(struct request_list *rl, unsigned int op,
struct bio *bio, blk_mq_req_flags_t flags, gfp_t gfp_mask)
{
struct request_queue *q = rl->q;
struct request *rq;
struct elevator_type *et = q->elevator->type;
struct io_context *ioc = rq_ioc(bio);
struct io_cq *icq = NULL;
const bool is_sync = op_is_sync(op);
int may_queue;
req_flags_t rq_flags = RQF_ALLOCED;
lockdep_assert_held(q->queue_lock);
if (unlikely(blk_queue_dying(q)))
return ERR_PTR(-ENODEV);
may_queue = elv_may_queue(q, op);
if (may_queue == ELV_MQUEUE_NO)
goto rq_starved;
if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) {
if (rl->count[is_sync]+1 >= q->nr_requests) {
/*
* The queue will fill after this allocation, so set
* it as full, and mark this process as "batching".
* This process will be allowed to complete a batch of
* requests, others will be blocked.
*/
if (!blk_rl_full(rl, is_sync)) {
ioc_set_batching(q, ioc);
blk_set_rl_full(rl, is_sync);
} else {
if (may_queue != ELV_MQUEUE_MUST
&& !ioc_batching(q, ioc)) {
/*
* The queue is full and the allocating
* process is not a "batcher", and not
* exempted by the IO scheduler
*/
return ERR_PTR(-ENOMEM);
}
}
}
blk_set_congested(rl, is_sync);
}
/*
* Only allow batching queuers to allocate up to 50% over the defined
* limit of requests, otherwise we could end up with thousands of
* requests allocated regardless of the setting of ->nr_requests.
*/
if (rl->count[is_sync] >= (3 * q->nr_requests / 2))
return ERR_PTR(-ENOMEM);
q->nr_rqs[is_sync]++;
rl->count[is_sync]++;
rl->starved[is_sync] = 0;
/*
* Decide whether the new request will be managed by elevator. If
* so, mark @rq_flags and increment elvpriv. Non-zero elvpriv will
* prevent the current elevator from being destroyed until the new
* request is freed. This guarantees icq's won't be destroyed and
* makes creating new ones safe.
*
* Flush requests do not use the elevator so skip initialization.
* This allows a request to share the flush and elevator data.
*
* Also, lookup icq while holding queue_lock. If it doesn't exist,
* it will be created after releasing queue_lock.
*/
if (!op_is_flush(op) && !blk_queue_bypass(q)) {
rq_flags |= RQF_ELVPRIV;
q->nr_rqs_elvpriv++;
if (et->icq_cache && ioc)
icq = ioc_lookup_icq(ioc, q);
}
if (blk_queue_io_stat(q))
rq_flags |= RQF_IO_STAT;
spin_unlock_irq(q->queue_lock);
/* allocate and init request */
rq = mempool_alloc(rl->rq_pool, gfp_mask);
if (!rq)
goto fail_alloc;
blk_rq_init(q, rq);
blk_rq_set_rl(rq, rl);
rq->cmd_flags = op;
rq->rq_flags = rq_flags;
if (flags & BLK_MQ_REQ_PREEMPT)
rq->rq_flags |= RQF_PREEMPT;
/* init elvpriv */
if (rq_flags & RQF_ELVPRIV) {
if (unlikely(et->icq_cache && !icq)) {
if (ioc)
icq = ioc_create_icq(ioc, q, gfp_mask);
if (!icq)
goto fail_elvpriv;
}
rq->elv.icq = icq;
if (unlikely(elv_set_request(q, rq, bio, gfp_mask)))
goto fail_elvpriv;
/* @rq->elv.icq holds io_context until @rq is freed */
if (icq)
get_io_context(icq->ioc);
}
out:
/*
* ioc may be NULL here, and ioc_batching will be false. That's
* OK, if the queue is under the request limit then requests need
* not count toward the nr_batch_requests limit. There will always
* be some limit enforced by BLK_BATCH_TIME.
*/
if (ioc_batching(q, ioc))
ioc->nr_batch_requests--;
trace_block_getrq(q, bio, op);
return rq;
fail_elvpriv:
/*
* elvpriv init failed. ioc, icq and elvpriv aren't mempool backed
* and may fail indefinitely under memory pressure and thus
* shouldn't stall IO. Treat this request as !elvpriv. This will
* disturb iosched and blkcg, but weird is better than dead.
*/
printk_ratelimited(KERN_WARNING "%s: dev %s: request aux data allocation failed, iosched may be disturbed\n",
__func__, dev_name(q->backing_dev_info->dev));
rq->rq_flags &= ~RQF_ELVPRIV;
rq->elv.icq = NULL;
spin_lock_irq(q->queue_lock);
q->nr_rqs_elvpriv--;
spin_unlock_irq(q->queue_lock);
goto out;
fail_alloc:
/*
* Allocation failed presumably due to memory. Undo anything we
* might have messed up.
*
* Allocating task should really be put onto the front of the wait
* queue, but this is pretty rare.
*/
spin_lock_irq(q->queue_lock);
freed_request(rl, is_sync, rq_flags);
/*
* In the very unlikely event that allocation failed and no
* requests for this direction were pending, mark us starved so that
* freeing of a request in the other direction will notice
* us. Another possible fix would be to split the rq mempool into
* READ and WRITE.
*/
rq_starved:
if (unlikely(rl->count[is_sync] == 0))
rl->starved[is_sync] = 1;
return ERR_PTR(-ENOMEM);
}
/**
* get_request - get a free request
* @q: request_queue to allocate request from
* @op: operation and flags
* @bio: bio to allocate request for (can be %NULL)
* @flags: BLK_MQ_REQ_* flags.
* @gfp: allocator flags
*
* Get a free request from @q. Unless %BLK_MQ_REQ_NOWAIT is set in @flags,
* this function keeps retrying under memory pressure and fails iff @q is dead.
*
* Must be called with @q->queue_lock held.
* Returns an ERR_PTR on failure, with @q->queue_lock still held.
* Returns a request pointer on success, with @q->queue_lock *not* held.
*/
static struct request *get_request(struct request_queue *q, unsigned int op,
struct bio *bio, blk_mq_req_flags_t flags, gfp_t gfp)
{
const bool is_sync = op_is_sync(op);
DEFINE_WAIT(wait);
struct request_list *rl;
struct request *rq;
lockdep_assert_held(q->queue_lock);
WARN_ON_ONCE(q->mq_ops);
rl = blk_get_rl(q, bio); /* transferred to @rq on success */
retry:
rq = __get_request(rl, op, bio, flags, gfp);
if (!IS_ERR(rq))
return rq;
if (op & REQ_NOWAIT) {
blk_put_rl(rl);
return ERR_PTR(-EAGAIN);
}
if ((flags & BLK_MQ_REQ_NOWAIT) || unlikely(blk_queue_dying(q))) {
blk_put_rl(rl);
return rq;
}
/* wait on @rl and retry */
prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
TASK_UNINTERRUPTIBLE);
trace_block_sleeprq(q, bio, op);
spin_unlock_irq(q->queue_lock);
io_schedule();
/*
* After sleeping, we become a "batching" process and will be able
* to allocate at least one request, and up to a big batch of them
* for a small period of time. See ioc_batching, ioc_set_batching.
*/
ioc_set_batching(q, current->io_context);
spin_lock_irq(q->queue_lock);
finish_wait(&rl->wait[is_sync], &wait);
goto retry;
}
/* flags: BLK_MQ_REQ_PREEMPT and/or BLK_MQ_REQ_NOWAIT. */
static struct request *blk_old_get_request(struct request_queue *q,
unsigned int op, blk_mq_req_flags_t flags)
{
struct request *rq;
gfp_t gfp_mask = flags & BLK_MQ_REQ_NOWAIT ? GFP_ATOMIC : GFP_NOIO;
int ret = 0;
WARN_ON_ONCE(q->mq_ops);
/* create ioc upfront */
create_io_context(gfp_mask, q->node);
ret = blk_queue_enter(q, flags);
if (ret)
return ERR_PTR(ret);
spin_lock_irq(q->queue_lock);
rq = get_request(q, op, NULL, flags, gfp_mask);
if (IS_ERR(rq)) {
spin_unlock_irq(q->queue_lock);
blk_queue_exit(q);
return rq;
}
/* q->queue_lock is unlocked at this point */
rq->__data_len = 0;
rq->__sector = (sector_t) -1;
rq->bio = rq->biotail = NULL;
return rq;
}
/**
* blk_get_request - allocate a request
* @q: request queue to allocate a request for
* @op: operation (REQ_OP_*) and REQ_* flags, e.g. REQ_SYNC.
* @flags: BLK_MQ_REQ_* flags, e.g. BLK_MQ_REQ_NOWAIT.
*/
struct request *blk_get_request(struct request_queue *q, unsigned int op,
blk_mq_req_flags_t flags)
{
struct request *req;
WARN_ON_ONCE(op & REQ_NOWAIT);
WARN_ON_ONCE(flags & ~(BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_PREEMPT));
if (q->mq_ops) {
req = blk_mq_alloc_request(q, op, flags);
if (!IS_ERR(req) && q->mq_ops->initialize_rq_fn)
q->mq_ops->initialize_rq_fn(req);
} else {
req = blk_old_get_request(q, op, flags);
if (!IS_ERR(req) && q->initialize_rq_fn)
q->initialize_rq_fn(req);
}
return req;
}
EXPORT_SYMBOL(blk_get_request);
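/*
 * A minimal sketch, assuming a passthrough user of the API above;
 * setup and execution of the request are elided.
 *
 *	struct request *rq;
 *
 *	rq = blk_get_request(q, REQ_OP_DRV_IN, 0);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	...prepare and execute rq...
 *	blk_put_request(rq);
 */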
/**
* blk_requeue_request - put a request back on queue
* @q: request queue where request should be inserted
* @rq: request to be inserted
*
* Description:
* Drivers often keep queueing requests until the hardware cannot accept
* more, when that condition happens we need to put the request back
* on the queue. Must be called with queue lock held.
*/
void blk_requeue_request(struct request_queue *q, struct request *rq)
{
lockdep_assert_held(q->queue_lock);
WARN_ON_ONCE(q->mq_ops);
blk_delete_timer(rq);
blk_clear_rq_complete(rq);
trace_block_rq_requeue(q, rq);
rq_qos_requeue(q, rq);
if (rq->rq_flags & RQF_QUEUED)
blk_queue_end_tag(q, rq);
BUG_ON(blk_queued_rq(rq));
elv_requeue_request(q, rq);
}
EXPORT_SYMBOL(blk_requeue_request);
static void add_acct_request(struct request_queue *q, struct request *rq,
int where)
{
blk_account_io_start(rq, true);
__elv_add_request(q, rq, where);
}
static void part_round_stats_single(struct request_queue *q, int cpu,
struct hd_struct *part, unsigned long now,
unsigned int inflight)
{
if (inflight) {
__part_stat_add(cpu, part, time_in_queue,
inflight * (now - part->stamp));
__part_stat_add(cpu, part, io_ticks, (now - part->stamp));
}
part->stamp = now;
}
/**
* part_round_stats() - Round off the performance stats on a struct disk_stats.
* @q: target block queue
* @cpu: cpu number for stats access
* @part: target partition
*
* The average IO queue length and utilisation statistics are maintained
* by observing the current state of the queue length and the amount of
* time it has been in this state for.
*
* Normally, that accounting is done on IO completion, but that can result
* in more than a second's worth of IO being accounted for within any one
* second, leading to >100% utilisation. To deal with that, we call this
* function to do a round-off before returning the results when reading
* /proc/diskstats. This accounts immediately for all queue usage up to
* the current jiffies and restarts the counters again.
*/
void part_round_stats(struct request_queue *q, int cpu, struct hd_struct *part)
{
struct hd_struct *part2 = NULL;
unsigned long now = jiffies;
unsigned int inflight[2];
int stats = 0;
if (part->stamp != now)
stats |= 1;
if (part->partno) {
part2 = &part_to_disk(part)->part0;
if (part2->stamp != now)
stats |= 2;
}
if (!stats)
return;
part_in_flight(q, part, inflight);
if (stats & 2)
part_round_stats_single(q, cpu, part2, now, inflight[1]);
if (stats & 1)
part_round_stats_single(q, cpu, part, now, inflight[0]);
}
EXPORT_SYMBOL_GPL(part_round_stats);
#ifdef CONFIG_PM
static void blk_pm_put_request(struct request *rq)
{
if (rq->q->dev && !(rq->rq_flags & RQF_PM) && !--rq->q->nr_pending)
pm_runtime_mark_last_busy(rq->q->dev);
}
#else
static inline void blk_pm_put_request(struct request *rq) {}
#endif
void __blk_put_request(struct request_queue *q, struct request *req)
{
req_flags_t rq_flags = req->rq_flags;
if (unlikely(!q))
return;
if (q->mq_ops) {
blk_mq_free_request(req);
return;
}
lockdep_assert_held(q->queue_lock);
blk_req_zone_write_unlock(req);
blk_pm_put_request(req);
elv_completed_request(q, req);
/* this is a bio leak */
WARN_ON(req->bio != NULL);
rq_qos_done(q, req);
/*
* Request may not have originated from ll_rw_blk. If not,
* it didn't come out of our reserved rq pools.
*/
if (rq_flags & RQF_ALLOCED) {
struct request_list *rl = blk_rq_rl(req);
bool sync = op_is_sync(req->cmd_flags);
BUG_ON(!list_empty(&req->queuelist));
BUG_ON(ELV_ON_HASH(req));
blk_free_request(rl, req);
freed_request(rl, sync, rq_flags);
blk_put_rl(rl);
blk_queue_exit(q);
}
}
EXPORT_SYMBOL_GPL(__blk_put_request);
void blk_put_request(struct request *req)
{
struct request_queue *q = req->q;
if (q->mq_ops)
blk_mq_free_request(req);
else {
unsigned long flags;
spin_lock_irqsave(q->queue_lock, flags);
__blk_put_request(q, req);
spin_unlock_irqrestore(q->queue_lock, flags);
}
}
EXPORT_SYMBOL(blk_put_request);
bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
struct bio *bio)
{
const int ff = bio->bi_opf & REQ_FAILFAST_MASK;
if (!ll_back_merge_fn(q, req, bio))
return false;
trace_block_bio_backmerge(q, req, bio);
if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
blk_rq_set_mixed_merge(req);
req->biotail->bi_next = bio;
req->biotail = bio;
req->__data_len += bio->bi_iter.bi_size;
req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
blk_account_io_start(req, false);
return true;
}
bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
struct bio *bio)
{
const int ff = bio->bi_opf & REQ_FAILFAST_MASK;
if (!ll_front_merge_fn(q, req, bio))
return false;
trace_block_bio_frontmerge(q, req, bio);
if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
blk_rq_set_mixed_merge(req);
bio->bi_next = req->bio;
req->bio = bio;
req->__sector = bio->bi_iter.bi_sector;
req->__data_len += bio->bi_iter.bi_size;
req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
blk_account_io_start(req, false);
return true;
}
bool bio_attempt_discard_merge(struct request_queue *q, struct request *req,
struct bio *bio)
{
unsigned short segments = blk_rq_nr_discard_segments(req);
if (segments >= queue_max_discard_segments(q))
goto no_merge;
if (blk_rq_sectors(req) + bio_sectors(bio) >
blk_rq_get_max_sectors(req, blk_rq_pos(req)))
goto no_merge;
req->biotail->bi_next = bio;
req->biotail = bio;
req->__data_len += bio->bi_iter.bi_size;
req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
req->nr_phys_segments = segments + 1;
blk_account_io_start(req, false);
return true;
no_merge:
req_set_nomerge(q, req);
return false;
}
/**
* blk_attempt_plug_merge - try to merge with %current's plugged list
* @q: request_queue new bio is being queued at
* @bio: new bio being queued
* @request_count: out parameter for number of traversed plugged requests
* @same_queue_rq: pointer to &struct request that gets filled in when
* another request associated with @q is found on the plug list
* (optional, may be %NULL)
*
* Determine whether @bio being queued on @q can be merged with a request
* on %current's plugged list. Returns %true if merge was successful,
* otherwise %false.
*
* Plugging coalesces IOs from the same issuer for the same purpose without
* going through @q->queue_lock. As such it's more of an issuing mechanism
* than scheduling, and the request, while it may have elvpriv data, is not
* added to the elevator at this point. In addition, we don't have
* reliable access to the elevator outside queue lock. Only check basic
* merging parameters without querying the elevator.
*
* Caller must ensure !blk_queue_nomerges(q) beforehand.
*/
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
unsigned int *request_count,
struct request **same_queue_rq)
{
struct blk_plug *plug;
struct request *rq;
struct list_head *plug_list;
plug = current->plug;
if (!plug)
return false;
*request_count = 0;
if (q->mq_ops)
plug_list = &plug->mq_list;
else
plug_list = &plug->list;
list_for_each_entry_reverse(rq, plug_list, queuelist) {
bool merged = false;
if (rq->q == q) {
(*request_count)++;
/*
* Only the blk-mq multiple-hardware-queue case checks for
* an rq on the same queue; there should be only one such
* rq in a queue.
*/
if (same_queue_rq)
*same_queue_rq = rq;
}
if (rq->q != q || !blk_rq_merge_ok(rq, bio))
continue;
switch (blk_try_merge(rq, bio)) {
case ELEVATOR_BACK_MERGE:
merged = bio_attempt_back_merge(q, rq, bio);
break;
case ELEVATOR_FRONT_MERGE:
merged = bio_attempt_front_merge(q, rq, bio);
break;
case ELEVATOR_DISCARD_MERGE:
merged = bio_attempt_discard_merge(q, rq, bio);
break;
default:
break;
}
if (merged)
return true;
}
return false;
}
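/*
 * A minimal sketch of how the plug list scanned above gets populated,
 * assuming a submitter batching IO; bio1 and bio2 are hypothetical,
 * fully prepared bios.
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	submit_bio(bio1);
 *	submit_bio(bio2);	(may merge with bio1 on the plug list)
 *	blk_finish_plug(&plug);
 */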
unsigned int blk_plug_queued_count(struct request_queue *q)
{
struct blk_plug *plug;
struct request *rq;
struct list_head *plug_list;
unsigned int ret = 0;
plug = current->plug;
if (!plug)
goto out;
if (q->mq_ops)
plug_list = &plug->mq_list;
else
plug_list = &plug->list;
list_for_each_entry(rq, plug_list, queuelist) {
if (rq->q == q)
ret++;
}
out:
return ret;
}
void blk_init_request_from_bio(struct request *req, struct bio *bio)
{
struct io_context *ioc = rq_ioc(bio);
if (bio->bi_opf & REQ_RAHEAD)
req->cmd_flags |= REQ_FAILFAST_MASK;
req->__sector = bio->bi_iter.bi_sector;
if (ioprio_valid(bio_prio(bio)))
req->ioprio = bio_prio(bio);
else if (ioc)
req->ioprio = ioc->ioprio;
else
req->ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0);
req->write_hint = bio->bi_write_hint;
blk_rq_bio_prep(req->q, req, bio);
}
EXPORT_SYMBOL_GPL(blk_init_request_from_bio);
static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
{
struct blk_plug *plug;
int where = ELEVATOR_INSERT_SORT;
struct request *req, *free;
unsigned int request_count = 0;
/*
* The low-level driver can indicate that it wants pages above a
* certain limit bounced to low memory (i.e. for highmem, or even
* ISA DMA in theory).
*/
blk_queue_bounce(q, &bio);
blk_queue_split(q, &bio);
if (!bio_integrity_prep(bio))
return BLK_QC_T_NONE;
if (op_is_flush(bio->bi_opf)) {
spin_lock_irq(q->queue_lock);
where = ELEVATOR_INSERT_FLUSH;
goto get_rq;
}
/*
* Check if we can merge with the plugged list before grabbing
* any locks.
*/
if (!blk_queue_nomerges(q)) {
if (blk_attempt_plug_merge(q, bio, &request_count, NULL))
return BLK_QC_T_NONE;
} else
request_count = blk_plug_queued_count(q);
spin_lock_irq(q->queue_lock);
switch (elv_merge(q, &req, bio)) {
case ELEVATOR_BACK_MERGE:
if (!bio_attempt_back_merge(q, req, bio))
break;
elv_bio_merged(q, req, bio);
free = attempt_back_merge(q, req);
if (free)
__blk_put_request(q, free);
else
elv_merged_request(q, req, ELEVATOR_BACK_MERGE);
goto out_unlock;
case ELEVATOR_FRONT_MERGE:
if (!bio_attempt_front_merge(q, req, bio))
break;
elv_bio_merged(q, req, bio);
free = attempt_front_merge(q, req);
if (free)
__blk_put_request(q, free);
else
elv_merged_request(q, req, ELEVATOR_FRONT_MERGE);
goto out_unlock;
default:
break;
}
get_rq:
rq_qos_throttle(q, bio, q->queue_lock);
/*
* Grab a free request. This might sleep but cannot fail.
* Returns with the queue unlocked.
*/
blk_queue_enter_live(q);
req = get_request(q, bio->bi_opf, bio, 0, GFP_NOIO);
if (IS_ERR(req)) {
blk_queue_exit(q);
rq_qos_cleanup(q, bio);
if (PTR_ERR(req) == -ENOMEM)
bio->bi_status = BLK_STS_RESOURCE;
else
bio->bi_status = BLK_STS_IOERR;
bio_endio(bio);
goto out_unlock;
}
rq_qos_track(q, req, bio);
/*
* After dropping the lock and possibly sleeping here, our request
* may now be mergeable after it had proven unmergeable (above).
* We don't worry about that case for efficiency. It won't happen
* often, and the elevators are able to handle it.
*/
blk_init_request_from_bio(req, bio);
if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags))
req->cpu = raw_smp_processor_id();
plug = current->plug;
if (plug) {
/*
* If this is the first request added after a plug, fire
* off a plug trace.
*
* @request_count may become stale because we may get
* scheduled out, so check the plug list again.
*/
if (!request_count || list_empty(&plug->list))
trace_block_plug(q);
else {
struct request *last = list_entry_rq(plug->list.prev);
if (request_count >= BLK_MAX_REQUEST_COUNT ||
blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE) {
blk_flush_plug_list(plug, false);
trace_block_plug(q);
}
}
list_add_tail(&req->queuelist, &plug->list);
blk_account_io_start(req, true);
} else {
spin_lock_irq(q->queue_lock);
add_acct_request(q, req, where);
__blk_run_queue(q);
out_unlock:
spin_unlock_irq(q->queue_lock);
}
return BLK_QC_T_NONE;
}
static void handle_bad_sector(struct bio *bio, sector_t maxsector)
{
char b[BDEVNAME_SIZE];
printk(KERN_INFO "attempt to access beyond end of device\n");
printk(KERN_INFO "%s: rw=%d, want=%Lu, limit=%Lu\n",
bio_devname(bio, b), bio->bi_opf,
(unsigned long long)bio_end_sector(bio),
(long long)maxsector);
}
#ifdef CONFIG_FAIL_MAKE_REQUEST
static DECLARE_FAULT_ATTR(fail_make_request);
static int __init setup_fail_make_request(char *str)
{
return setup_fault_attr(&fail_make_request, str);
}
__setup("fail_make_request=", setup_fail_make_request);
static bool should_fail_request(struct hd_struct *part, unsigned int bytes)
{
return part->make_it_fail && should_fail(&fail_make_request, bytes);
}
static int __init fail_make_request_debugfs(void)
{
struct dentry *dir = fault_create_debugfs_attr("fail_make_request",
NULL, &fail_make_request);
return PTR_ERR_OR_ZERO(dir);
}
late_initcall(fail_make_request_debugfs);
#else /* CONFIG_FAIL_MAKE_REQUEST */
static inline bool should_fail_request(struct hd_struct *part,
unsigned int bytes)
{
return false;
}
#endif /* CONFIG_FAIL_MAKE_REQUEST */
static inline bool bio_check_ro(struct bio *bio, struct hd_struct *part)
{
if (part->policy && op_is_write(bio_op(bio))) {
char b[BDEVNAME_SIZE];
printk(KERN_ERR
"generic_make_request: Trying to write "
"to read-only block-device %s (partno %d)\n",
bio_devname(bio, b), part->partno);
return true;
}
return false;
}
static noinline int should_fail_bio(struct bio *bio)
{
if (should_fail_request(&bio->bi_disk->part0, bio->bi_iter.bi_size))
return -EIO;
return 0;
}
ALLOW_ERROR_INJECTION(should_fail_bio, ERRNO);
/*
* Check whether this bio extends beyond the end of the device or partition.
* This may well happen - the kernel calls bread() without checking the size of
* the device, e.g., when mounting a file system.
*/
static inline int bio_check_eod(struct bio *bio, sector_t maxsector)
{
unsigned int nr_sectors = bio_sectors(bio);
if (nr_sectors && maxsector &&
(nr_sectors > maxsector ||
bio->bi_iter.bi_sector > maxsector - nr_sectors)) {
handle_bad_sector(bio, maxsector);
return -EIO;
}
return 0;
}
/*
* Remap block n of partition p to block n+start(p) of the disk.
*/
static inline int blk_partition_remap(struct bio *bio)
{
struct hd_struct *p;
int ret = -EIO;
rcu_read_lock();
p = __disk_get_part(bio->bi_disk, bio->bi_partno);
if (unlikely(!p))
goto out;
if (unlikely(should_fail_request(p, bio->bi_iter.bi_size)))
goto out;
if (unlikely(bio_check_ro(bio, p)))
goto out;
/*
* Zone reset does not include bi_size so bio_sectors() is always 0.
* Include a test for the reset op code and perform the remap if needed.
*/
if (bio_sectors(bio) || bio_op(bio) == REQ_OP_ZONE_RESET) {
if (bio_check_eod(bio, part_nr_sects_read(p)))
goto out;
bio->bi_iter.bi_sector += p->start_sect;
trace_block_bio_remap(bio->bi_disk->queue, bio, part_devt(p),
bio->bi_iter.bi_sector - p->start_sect);
}
bio->bi_partno = 0;
ret = 0;
out:
rcu_read_unlock();
return ret;
}
static noinline_for_stack bool
generic_make_request_checks(struct bio *bio)
{
struct request_queue *q;
int nr_sectors = bio_sectors(bio);
blk_status_t status = BLK_STS_IOERR;
char b[BDEVNAME_SIZE];
might_sleep();
q = bio->bi_disk->queue;
if (unlikely(!q)) {
printk(KERN_ERR
"generic_make_request: Trying to access "
"nonexistent block-device %s (%Lu)\n",
bio_devname(bio, b), (long long)bio->bi_iter.bi_sector);
goto end_io;
}
/*
* For a REQ_NOWAIT based request, return -EOPNOTSUPP
* if queue is not a request based queue.
*/
if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_rq_based(q))
goto not_supported;
if (should_fail_bio(bio))
goto end_io;
if (bio->bi_partno) {
if (unlikely(blk_partition_remap(bio)))
goto end_io;
} else {
if (unlikely(bio_check_ro(bio, &bio->bi_disk->part0)))
goto end_io;
if (unlikely(bio_check_eod(bio, get_capacity(bio->bi_disk))))
goto end_io;
}
/*
* Filter flush bios early so that make_request based
* drivers without flush support don't have to worry
* about them.
*/
if (op_is_flush(bio->bi_opf) &&
!test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
if (!nr_sectors) {
status = BLK_STS_OK;
goto end_io;
}
}
switch (bio_op(bio)) {
case REQ_OP_DISCARD:
if (!blk_queue_discard(q))
goto not_supported;
break;
case REQ_OP_SECURE_ERASE:
if (!blk_queue_secure_erase(q))
goto not_supported;
break;
case REQ_OP_WRITE_SAME:
if (!q->limits.max_write_same_sectors)
goto not_supported;
break;
case REQ_OP_ZONE_REPORT:
case REQ_OP_ZONE_RESET:
if (!blk_queue_is_zoned(q))
goto not_supported;
break;
case REQ_OP_WRITE_ZEROES:
if (!q->limits.max_write_zeroes_sectors)
goto not_supported;
break;
default:
break;
}
/*
* Various block parts want %current->io_context, and lazy ioc
* allocation ends up trading a lot of pain for a small amount of
* memory. Just allocate it upfront. This may fail, and the block
* layer knows how to live with it.
*/
create_io_context(GFP_ATOMIC, q->node);
if (!blkcg_bio_issue_check(q, bio))
return false;
if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) {
trace_block_bio_queue(q, bio);
/* Now that enqueuing has been traced, we need to trace
* completion as well.
*/
bio_set_flag(bio, BIO_TRACE_COMPLETION);
}
return true;
not_supported:
status = BLK_STS_NOTSUPP;
end_io:
bio->bi_status = status;
bio_endio(bio);
return false;
}
/**
* generic_make_request - hand a buffer to its device driver for I/O
* @bio: The bio describing the location in memory and on the device.
*
* generic_make_request() is used to make I/O requests of block
* devices. It is passed a &struct bio, which describes the I/O that needs
* to be done.
*
* generic_make_request() does not return any status. The
* success/failure status of the request, along with notification of
* completion, is delivered asynchronously through the bio->bi_end_io
* function described (one day) elsewhere.
*
* The caller of generic_make_request must make sure that bi_io_vec
* is set to describe the memory buffer, that bi_disk and bi_iter.bi_sector
* are set to describe the device address, and that
* bi_end_io and optionally bi_private are set to describe how
* completion notification should be signaled.
*
* generic_make_request and the drivers it calls may use bi_next if this
* bio happens to be merged with someone else, and may resubmit the bio to
* a lower device by calling into generic_make_request recursively, which
* means the bio should NOT be touched after the call to ->make_request_fn.
*/
blk_qc_t generic_make_request(struct bio *bio)
{
/*
* bio_list_on_stack[0] contains bios submitted by the current
* make_request_fn.
* bio_list_on_stack[1] contains bios that were submitted before
* the current make_request_fn, but that haven't been processed
* yet.
*/
struct bio_list bio_list_on_stack[2];
blk_mq_req_flags_t flags = 0;
struct request_queue *q = bio->bi_disk->queue;
blk_qc_t ret = BLK_QC_T_NONE;
if (bio->bi_opf & REQ_NOWAIT)
flags = BLK_MQ_REQ_NOWAIT;
if (bio_flagged(bio, BIO_QUEUE_ENTERED))
blk_queue_enter_live(q);
else if (blk_queue_enter(q, flags) < 0) {
if (!blk_queue_dying(q) && (bio->bi_opf & REQ_NOWAIT))
bio_wouldblock_error(bio);
else
bio_io_error(bio);
return ret;
}
if (!generic_make_request_checks(bio))
goto out;
/*
* We only want one ->make_request_fn to be active at a time, else
* stack usage with stacked devices could be a problem. So use
* current->bio_list to keep a list of requests submitted by a
* make_request_fn function. current->bio_list is also used as a
* flag to say if generic_make_request is currently active in this
* task or not. If it is NULL, then no make_request is active. If
* it is non-NULL, then a make_request is active, and new requests
* should be added at the tail.
*/
if (current->bio_list) {
bio_list_add(&current->bio_list[0], bio);
goto out;
}
/* The following loop may be a bit non-obvious, and so deserves some
* explanation.
* Before entering the loop, bio->bi_next is NULL (as all callers
* ensure that) so we have a list with a single bio.
* We pretend that we have just taken it off a longer list, so
* we assign bio_list to a pointer to the bio_list_on_stack,
* thus initialising the bio_list of new bios to be
* added. ->make_request() may indeed add some more bios
* through a recursive call to generic_make_request. If it
* did, we find a non-NULL value in bio_list and re-enter the loop
* from the top. In this case we really did just take the bio
* off the top of the list (no pretending) and so remove it from
* bio_list, and call into ->make_request() again.
*/
BUG_ON(bio->bi_next);
bio_list_init(&bio_list_on_stack[0]);
current->bio_list = bio_list_on_stack;
do {
bool enter_succeeded = true;
if (unlikely(q != bio->bi_disk->queue)) {
if (q)
blk_queue_exit(q);
q = bio->bi_disk->queue;
flags = 0;
if (bio->bi_opf & REQ_NOWAIT)
flags = BLK_MQ_REQ_NOWAIT;
if (blk_queue_enter(q, flags) < 0) {
enter_succeeded = false;
q = NULL;
}
}
if (enter_succeeded) {
struct bio_list lower, same;
/* Create a fresh bio_list for all subordinate requests */
bio_list_on_stack[1] = bio_list_on_stack[0];
bio_list_init(&bio_list_on_stack[0]);
ret = q->make_request_fn(q, bio);
/* sort new bios into those for a lower level
* and those for the same level
*/
bio_list_init(&lower);
bio_list_init(&same);
while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
if (q == bio->bi_disk->queue)
bio_list_add(&same, bio);
else
bio_list_add(&lower, bio);
/* now assemble so we handle the lowest level first */
bio_list_merge(&bio_list_on_stack[0], &lower);
bio_list_merge(&bio_list_on_stack[0], &same);
bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
} else {
if (unlikely(!blk_queue_dying(q) &&
(bio->bi_opf & REQ_NOWAIT)))
bio_wouldblock_error(bio);
else
bio_io_error(bio);
}
bio = bio_list_pop(&bio_list_on_stack[0]);
} while (bio);
current->bio_list = NULL; /* deactivate */
out:
if (q)
blk_queue_exit(q);
return ret;
}
EXPORT_SYMBOL(generic_make_request);
/**
* direct_make_request - hand a buffer directly to its device driver for I/O
* @bio: The bio describing the location in memory and on the device.
*
* This function behaves like generic_make_request(), but does not protect
* against recursion. Must only be used if the called driver is known
* to not call generic_make_request (or direct_make_request) again from
* its make_request function. (Calling direct_make_request again from
* a workqueue is perfectly fine as that doesn't recurse).
*/
blk_qc_t direct_make_request(struct bio *bio)
{
struct request_queue *q = bio->bi_disk->queue;
bool nowait = bio->bi_opf & REQ_NOWAIT;
blk_qc_t ret;
if (!generic_make_request_checks(bio))
return BLK_QC_T_NONE;
if (unlikely(blk_queue_enter(q, nowait ? BLK_MQ_REQ_NOWAIT : 0))) {
if (nowait && !blk_queue_dying(q))
bio->bi_status = BLK_STS_AGAIN;
else
bio->bi_status = BLK_STS_IOERR;
bio_endio(bio);
return BLK_QC_T_NONE;
}
ret = q->make_request_fn(q, bio);
blk_queue_exit(q);
return ret;
}
EXPORT_SYMBOL_GPL(direct_make_request);
/**
* submit_bio - submit a bio to the block device layer for I/O
* @bio: The &struct bio which describes the I/O
*
* submit_bio() is very similar in purpose to generic_make_request(), and
* uses that function to do most of the work. Both are fairly rough
* interfaces; @bio must be set up beforehand and ready for I/O.
*
*/
blk_qc_t submit_bio(struct bio *bio)
{
/*
* If it's a regular read/write or a barrier with data attached,
* go through the normal accounting stuff before submission.
*/
if (bio_has_data(bio)) {
unsigned int count;
if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
count = queue_logical_block_size(bio->bi_disk->queue) >> 9;
else
count = bio_sectors(bio);
if (op_is_write(bio_op(bio))) {
count_vm_events(PGPGOUT, count);
} else {
task_io_account_read(bio->bi_iter.bi_size);
count_vm_events(PGPGIN, count);
}
if (unlikely(block_dump)) {
char b[BDEVNAME_SIZE];
printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n",
current->comm, task_pid_nr(current),
op_is_write(bio_op(bio)) ? "WRITE" : "READ",
(unsigned long long)bio->bi_iter.bi_sector,
bio_devname(bio, b), count);
}
}
return generic_make_request(bio);
}
EXPORT_SYMBOL(submit_bio);
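/*
 * A minimal sketch of preparing and submitting a read bio; bdev, page,
 * sector and my_end_io are hypothetical caller-side names.
 *
 *	struct bio *bio = bio_alloc(GFP_NOIO, 1);
 *
 *	bio_set_dev(bio, bdev);
 *	bio->bi_iter.bi_sector = sector;
 *	bio->bi_end_io = my_end_io;
 *	bio_set_op_attrs(bio, REQ_OP_READ, 0);
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	submit_bio(bio);
 */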
bool blk_poll(struct request_queue *q, blk_qc_t cookie)
{
if (!q->poll_fn || !blk_qc_t_valid(cookie))
return false;
if (current->plug)
blk_flush_plug_list(current->plug, false);
return q->poll_fn(q, cookie);
}
EXPORT_SYMBOL_GPL(blk_poll);
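/*
 * A minimal sketch, assuming a synchronous submitter that polls for
 * completion; "done" is a hypothetical flag set by the bio's end_io.
 *
 *	blk_qc_t cookie = submit_bio(bio);
 *
 *	while (!READ_ONCE(done))
 *		blk_poll(q, cookie);
 */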
/**
* blk_cloned_rq_check_limits - Helper function to check a cloned request
* against the new queue limits
* @q: the queue
* @rq: the request being checked
*
* Description:
* @rq may have been made based on weaker limitations of upper-level queues
* in request stacking drivers, and it may violate the limitations of @q.
* Since the block layer and the underlying device driver trust @rq
* after it is inserted into @q, it should be checked against @q before
* the insertion using this generic function.
*
* Request stacking drivers like request-based dm may change the queue
* limits when retrying requests on other queues. Those requests need
* to be checked against the new queue limits again during dispatch.
*/
static int blk_cloned_rq_check_limits(struct request_queue *q,
struct request *rq)
{
if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, req_op(rq))) {
printk(KERN_ERR "%s: over max size limit.\n", __func__);
return -EIO;
}
/*
* This queue's settings related to segment counting, like q->bounce_pfn,
* may differ from those of other stacking queues.
* Recalculate the segment count to check the request correctly against
* this queue's limits.
*/
blk_recalc_rq_segments(rq);
if (rq->nr_phys_segments > queue_max_segments(q)) {
printk(KERN_ERR "%s: over max segments limit.\n", __func__);
return -EIO;
}
return 0;
}
/**
* blk_insert_cloned_request - Helper for stacking drivers to submit a request
* @q: the queue to submit the request
* @rq: the request being queued
*/
blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq)
{
unsigned long flags;
int where = ELEVATOR_INSERT_BACK;
if (blk_cloned_rq_check_limits(q, rq))
return BLK_STS_IOERR;
if (rq->rq_disk &&
should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq)))
return BLK_STS_IOERR;
if (q->mq_ops) {
if (blk_queue_io_stat(q))
blk_account_io_start(rq, true);
/*
* Since we have a scheduler attached on the top device,
* bypass a potential scheduler on the bottom device for
* insert.
*/
return blk_mq_request_issue_directly(rq);
}
spin_lock_irqsave(q->queue_lock, flags);
if (unlikely(blk_queue_dying(q))) {
spin_unlock_irqrestore(q->queue_lock, flags);
return BLK_STS_IOERR;
}
/*
* The submitted request must be dequeued before calling this function
* because it will be linked to another request_queue.
*/
BUG_ON(blk_queued_rq(rq));
if (op_is_flush(rq->cmd_flags))
where = ELEVATOR_INSERT_FLUSH;
add_acct_request(q, rq, where);
if (where == ELEVATOR_INSERT_FLUSH)
__blk_run_queue(q);
spin_unlock_irqrestore(q->queue_lock, flags);
return BLK_STS_OK;
}
EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
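/*
 * A rough sketch of the stacking pattern this helper serves (simplified
 * from request-based dm); bottom_q and the clone preparation are
 * assumptions for illustration.
 *
 *	ret = blk_insert_cloned_request(bottom_q, clone);
 *	if (ret != BLK_STS_OK)
 *		...requeue or fail the original request...
 */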
/**
* blk_rq_err_bytes - determine number of bytes until the next failure boundary
* @rq: request to examine
*
* Description:
* A request could be a merge of IOs which require different failure
* handling. This function determines the number of bytes which
* can be failed from the beginning of the request without
* crossing into an area which needs to be retried further.
*
* Return:
* The number of bytes to fail.
*/
unsigned int blk_rq_err_bytes(const struct request *rq)
{
unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
unsigned int bytes = 0;
struct bio *bio;
if (!(rq->rq_flags & RQF_MIXED_MERGE))
return blk_rq_bytes(rq);
/*
* Currently the only 'mixing' which can happen is between
* different failfast types. We can safely fail portions
* which have all the failfast bits that the first one has -
* the ones which are at least as eager to fail as the first
* one.
*/
for (bio = rq->bio; bio; bio = bio->bi_next) {
if ((bio->bi_opf & ff) != ff)
break;
bytes += bio->bi_iter.bi_size;
}
/* this could lead to infinite loop */
BUG_ON(blk_rq_bytes(rq) && !bytes);
return bytes;
}
EXPORT_SYMBOL_GPL(blk_rq_err_bytes);
void blk_account_io_completion(struct request *req, unsigned int bytes)
{
if (blk_do_io_stat(req)) {
const int sgrp = op_stat_group(req_op(req));
struct hd_struct *part;
int cpu;
cpu = part_stat_lock();
part = req->part;
part_stat_add(cpu, part, sectors[sgrp], bytes >> 9);
part_stat_unlock();
}
}
void blk_account_io_done(struct request *req, u64 now)
{
/*
* Account IO completion. flush_rq isn't accounted as a
* normal IO on queueing or completion. Accounting the
* containing request is enough.
*/
if (blk_do_io_stat(req) && !(req->rq_flags & RQF_FLUSH_SEQ)) {
unsigned long duration;
const int sgrp = op_stat_group(req_op(req));
struct hd_struct *part;
int cpu;
duration = nsecs_to_jiffies(now - req->start_time_ns);
cpu = part_stat_lock();
part = req->part;
part_stat_inc(cpu, part, ios[sgrp]);
part_stat_add(cpu, part, ticks[sgrp], duration);
part_round_stats(req->q, cpu, part);
part_dec_in_flight(req->q, part, rq_data_dir(req));
hd_struct_put(part);
part_stat_unlock();
}
}
#ifdef CONFIG_PM
/*
* Don't process normal requests when queue is suspended
* or in the process of suspending/resuming
*/
static bool blk_pm_allow_request(struct request *rq)
{
switch (rq->q->rpm_status) {
case RPM_RESUMING:
case RPM_SUSPENDING:
return rq->rq_flags & RQF_PM;
case RPM_SUSPENDED:
return false;
default:
return true;
}
}
#else
static bool blk_pm_allow_request(struct request *rq)
{
return true;
}
#endif
void blk_account_io_start(struct request *rq, bool new_io)
{
struct hd_struct *part;
int rw = rq_data_dir(rq);
int cpu;
if (!blk_do_io_stat(rq))
return;
cpu = part_stat_lock();
if (!new_io) {
part = rq->part;
part_stat_inc(cpu, part, merges[rw]);
} else {
part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
if (!hd_struct_try_get(part)) {
/*
* The partition is already being removed, so
* the request will be accounted on the disk only.
*
* We take a reference on disk->part0 although that
* partition will never be deleted, so we can treat
* it as any other partition.
*/
part = &rq->rq_disk->part0;
hd_struct_get(part);
}
part_round_stats(rq->q, cpu, part);
part_inc_in_flight(rq->q, part, rw);
rq->part = part;
}
part_stat_unlock();
}
static struct request *elv_next_request(struct request_queue *q)
{
struct request *rq;
struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);
WARN_ON_ONCE(q->mq_ops);
while (1) {
list_for_each_entry(rq, &q->queue_head, queuelist) {
if (blk_pm_allow_request(rq))
return rq;
if (rq->rq_flags & RQF_SOFTBARRIER)
break;
}
/*
* If a flush request is running and flush requests aren't
* queueable in the drive, we can hold the queue until the flush
* request is finished. Even if we didn't do this, the driver
* couldn't dispatch the next requests and would have to requeue
* them anyway. Holding the queue can also improve throughput.
* For example, take the requests flush1, write1, flush2: flush1
* is dispatched, then the queue is held and write1 isn't inserted
* into the queue. After flush1 is finished, flush2 will be
* dispatched. Since the disk cache is already clean, flush2 will
* finish very soon, so it looks like flush2 is folded into flush1.
* While the queue is held, a flag is set to indicate that the
* queue should be restarted later. Please see flush_end_io() for
* details.
*/
if (fq->flush_pending_idx != fq->flush_running_idx &&
!queue_flush_queueable(q)) {
fq->flush_queue_delayed = 1;
return NULL;
}
if (unlikely(blk_queue_bypass(q)) ||
!q->elevator->type->ops.sq.elevator_dispatch_fn(q, 0))
return NULL;
}
}
/**
* blk_peek_request - peek at the top of a request queue
* @q: request queue to peek at
*
* Description:
* Return the request at the top of @q. The returned request
* should be started using blk_start_request() before LLD starts
* processing it.
*
* Return:
* Pointer to the request at the top of @q if available. Null
* otherwise.
*/
struct request *blk_peek_request(struct request_queue *q)
{
struct request *rq;
int ret;
lockdep_assert_held(q->queue_lock);
WARN_ON_ONCE(q->mq_ops);
while ((rq = elv_next_request(q)) != NULL) {
if (!(rq->rq_flags & RQF_STARTED)) {
/*
* This is the first time the device driver
* sees this request (possibly after
* requeueing). Notify IO scheduler.
*/
if (rq->rq_flags & RQF_SORTED)
elv_activate_rq(q, rq);
/*
* Just mark it as started even if we don't start
* it: a request that has been delayed should
* not be passed by new incoming requests.
*/
rq->rq_flags |= RQF_STARTED;
trace_block_rq_issue(q, rq);
}
if (!q->boundary_rq || q->boundary_rq == rq) {
q->end_sector = rq_end_sector(rq);
q->boundary_rq = NULL;
}
if (rq->rq_flags & RQF_DONTPREP)
break;
if (q->dma_drain_size && blk_rq_bytes(rq)) {
/*
* Make sure space for the drain appears. We
* know we can do this because max_hw_segments
* has been adjusted to be one fewer than the
* device can handle.
*/
rq->nr_phys_segments++;
}
if (!q->prep_rq_fn)
break;
ret = q->prep_rq_fn(q, rq);
if (ret == BLKPREP_OK) {
break;
} else if (ret == BLKPREP_DEFER) {
/*
* The request may have been (partially) prepped.
* We need to keep this request at the front to
* avoid resource deadlock. RQF_STARTED will
* prevent other fs requests from passing this one.
*/
if (q->dma_drain_size && blk_rq_bytes(rq) &&
!(rq->rq_flags & RQF_DONTPREP)) {
/*
* Remove the space for the drain we added
* so that we don't add it again.
*/
--rq->nr_phys_segments;
}
rq = NULL;
break;
} else if (ret == BLKPREP_KILL || ret == BLKPREP_INVALID) {
rq->rq_flags |= RQF_QUIET;
/*
* Mark this request as started so we don't trigger
* any debug logic in the end I/O path.
*/
blk_start_request(rq);
__blk_end_request_all(rq, ret == BLKPREP_INVALID ?
BLK_STS_TARGET : BLK_STS_IOERR);
} else {
printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
break;
}
}
return rq;
}
EXPORT_SYMBOL(blk_peek_request);
static void blk_dequeue_request(struct request *rq)
{
struct request_queue *q = rq->q;
BUG_ON(list_empty(&rq->queuelist));
BUG_ON(ELV_ON_HASH(rq));
list_del_init(&rq->queuelist);
/*
* The time frame between a request being removed from the lists
* and being freed is accounted as IO that is in progress on
* the driver side.
*/
if (blk_account_rq(rq))
q->in_flight[rq_is_sync(rq)]++;
}
/**
* blk_start_request - start request processing on the driver
* @req: request to dequeue
*
* Description:
* Dequeue @req and start timeout timer on it. This hands off the
* request to the driver.
*/
void blk_start_request(struct request *req)
{
lockdep_assert_held(req->q->queue_lock);
WARN_ON_ONCE(req->q->mq_ops);
blk_dequeue_request(req);
if (test_bit(QUEUE_FLAG_STATS, &req->q->queue_flags)) {
req->io_start_time_ns = ktime_get_ns();
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
req->throtl_size = blk_rq_sectors(req);
#endif
req->rq_flags |= RQF_STATS;
rq_qos_issue(req->q, req);
}
BUG_ON(blk_rq_is_complete(req));
blk_add_timer(req);
}
EXPORT_SYMBOL(blk_start_request);
/**
* blk_fetch_request - fetch a request from a request queue
* @q: request queue to fetch a request from
*
* Description:
* Return the request at the top of @q. The request is started on
* return and LLD can start processing it immediately.
*
* Return:
* Pointer to the request at the top of @q if available. Null
* otherwise.
*/
struct request *blk_fetch_request(struct request_queue *q)
{
struct request *rq;
lockdep_assert_held(q->queue_lock);
WARN_ON_ONCE(q->mq_ops);
rq = blk_peek_request(q);
if (rq)
blk_start_request(rq);
return rq;
}
EXPORT_SYMBOL(blk_fetch_request);
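/*
 * A minimal sketch of a legacy .request_fn built on the helper above;
 * mydrv_handle() is a hypothetical per-request handler. The queue lock
 * is held on entry, as required by __blk_end_request_all().
 *
 *	static void mydrv_request_fn(struct request_queue *q)
 *	{
 *		struct request *rq;
 *
 *		while ((rq = blk_fetch_request(q)) != NULL)
 *			__blk_end_request_all(rq, mydrv_handle(rq));
 *	}
 */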
/*
* Steal bios from a request and add them to a bio list.
* The request must not have been partially completed before.
*/
void blk_steal_bios(struct bio_list *list, struct request *rq)
{
if (rq->bio) {
if (list->tail)
list->tail->bi_next = rq->bio;
else
list->head = rq->bio;
list->tail = rq->biotail;
rq->bio = NULL;
rq->biotail = NULL;
}
rq->__data_len = 0;
}
EXPORT_SYMBOL_GPL(blk_steal_bios);
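/*
 * A minimal sketch, assuming a caller that resubmits the bios of a
 * request elsewhere; requeue_bio() is a hypothetical helper.
 *
 *	struct bio_list list;
 *	struct bio *bio;
 *
 *	bio_list_init(&list);
 *	blk_steal_bios(&list, rq);
 *	while ((bio = bio_list_pop(&list)) != NULL)
 *		requeue_bio(bio);
 */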
/**
* blk_update_request - Special helper function for request stacking drivers
* @req: the request being processed
* @error: block status code
* @nr_bytes: number of bytes to complete @req
*
* Description:
* Ends I/O on a number of bytes attached to @req, but doesn't complete
* the request structure even if @req doesn't have leftover.
* If @req has leftover, sets it up for the next range of segments.
*
* This special helper function is only for request stacking drivers
* (e.g. request-based dm) so that they can handle partial completion.
* Actual device drivers should use blk_end_request instead.
*
* Passing the result of blk_rq_bytes() as @nr_bytes guarantees
* %false return from this function.
*
* Note:
* The RQF_SPECIAL_PAYLOAD flag is ignored on purpose in both
* blk_rq_bytes() and in blk_update_request().
*
* Return:
* %false - this request doesn't have any more data
* %true - this request has more data
**/
bool blk_update_request(struct request *req, blk_status_t error,
unsigned int nr_bytes)
{
int total_bytes;
trace_block_rq_complete(req, blk_status_to_errno(error), nr_bytes);
if (!req->bio)
return false;
if (unlikely(error && !blk_rq_is_passthrough(req) &&
!(req->rq_flags & RQF_QUIET)))
print_req_error(req, error);
blk_account_io_completion(req, nr_bytes);
total_bytes = 0;
while (req->bio) {
struct bio *bio = req->bio;
unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);
if (bio_bytes == bio->bi_iter.bi_size)
req->bio = bio->bi_next;
/* Completion has already been traced */
bio_clear_flag(bio, BIO_TRACE_COMPLETION);
req_bio_endio(req, bio, bio_bytes, error);
total_bytes += bio_bytes;
nr_bytes -= bio_bytes;
if (!nr_bytes)
break;
}
/*
* completely done
*/
if (!req->bio) {
/*
* Reset counters so that the request stacking driver
* can find how many bytes remain in the request
* later.
*/
req->__data_len = 0;
return false;
}
req->__data_len -= total_bytes;
/* update sector only for requests with clear definition of sector */
if (!blk_rq_is_passthrough(req))
req->__sector += total_bytes >> 9;
/* mixed attributes always follow the first bio */
if (req->rq_flags & RQF_MIXED_MERGE) {
req->cmd_flags &= ~REQ_FAILFAST_MASK;
req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK;
}
if (!(req->rq_flags & RQF_SPECIAL_PAYLOAD)) {
/*
* If total number of sectors is less than the first segment
* size, something has gone terribly wrong.
*/
if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
blk_dump_rq_flags(req, "request botched");
req->__data_len = blk_rq_cur_bytes(req);
}
/* recalculate the number of segments */
blk_recalc_rq_segments(req);
}
return true;
}
EXPORT_SYMBOL_GPL(blk_update_request);
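/*
 * A minimal sketch of the partial-completion contract described above,
 * assuming a stacking driver that has completed "done" bytes.
 *
 *	if (!blk_update_request(rq, BLK_STS_OK, done))
 *		...all bytes done; finish rq (e.g. blk_finish_request)...
 *	else
 *		...rq now describes the remaining segments; resubmit...
 */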
static bool blk_update_bidi_request(struct request *rq, blk_status_t error,
unsigned int nr_bytes,
unsigned int bidi_bytes)
{
if (blk_update_request(rq, error, nr_bytes))
return true;
/* Bidi request must be completed as a whole */
if (unlikely(blk_bidi_rq(rq)) &&
blk_update_request(rq->next_rq, error, bidi_bytes))
return true;
if (blk_queue_add_random(rq->q))
add_disk_randomness(rq->rq_disk);
return false;
}
/**
* blk_unprep_request - unprepare a request
* @req: the request
*
* This function makes a request ready for complete resubmission (or
* completion). It happens only after all error handling is complete,
* so represents the appropriate moment to deallocate any resources
* that were allocated to the request in the prep_rq_fn. The queue
* lock is held when calling this.
*/
void blk_unprep_request(struct request *req)
{
struct request_queue *q = req->q;
req->rq_flags &= ~RQF_DONTPREP;
if (q->unprep_rq_fn)
q->unprep_rq_fn(q, req);
}
EXPORT_SYMBOL_GPL(blk_unprep_request);
void blk_finish_request(struct request *req, blk_status_t error)
{
struct request_queue *q = req->q;
u64 now = ktime_get_ns();
lockdep_assert_held(req->q->queue_lock);
WARN_ON_ONCE(q->mq_ops);
if (req->rq_flags & RQF_STATS)
blk_stat_add(req, now);
if (req->rq_flags & RQF_QUEUED)
blk_queue_end_tag(q, req);
BUG_ON(blk_queued_rq(req));
if (unlikely(laptop_mode) && !blk_rq_is_passthrough(req))
laptop_io_completion(req->q->backing_dev_info);
blk_delete_timer(req);
if (req->rq_flags & RQF_DONTPREP)
blk_unprep_request(req);
blk_account_io_done(req, now);
if (req->end_io) {
rq_qos_done(q, req);
req->end_io(req, error);
} else {
if (blk_bidi_rq(req))
__blk_put_request(req->next_rq->q, req->next_rq);
__blk_put_request(q, req);
}
}
EXPORT_SYMBOL(blk_finish_request);
/**
* blk_end_bidi_request - Complete a bidi request
* @rq: the request to complete
* @error: block status code
* @nr_bytes: number of bytes to complete @rq
* @bidi_bytes: number of bytes to complete @rq->next_rq
*
* Description:
* Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
* Drivers that support bidi can safely call this function for any
* type of request, bidi or uni. In the latter case @bidi_bytes is
* just ignored.
*
* Return:
* %false - we are done with this request
* %true - still buffers pending for this request
**/
static bool blk_end_bidi_request(struct request *rq, blk_status_t error,
unsigned int nr_bytes, unsigned int bidi_bytes)
{
struct request_queue *q = rq->q;
unsigned long flags;
WARN_ON_ONCE(q->mq_ops);
if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
return true;
spin_lock_irqsave(q->queue_lock, flags);
blk_finish_request(rq, error);
spin_unlock_irqrestore(q->queue_lock, flags);
return false;
}
/**
* __blk_end_bidi_request - Complete a bidi request with queue lock held
* @rq: the request to complete
* @error: block status code
* @nr_bytes: number of bytes to complete @rq
* @bidi_bytes: number of bytes to complete @rq->next_rq
*
* Description:
* Identical to blk_end_bidi_request() except that queue lock is
* assumed to be locked on entry and remains so on return.
*
* Return:
* %false - we are done with this request
* %true - still buffers pending for this request
**/
static bool __blk_end_bidi_request(struct request *rq, blk_status_t error,
unsigned int nr_bytes, unsigned int bidi_bytes)
{
lockdep_assert_held(rq->q->queue_lock);
WARN_ON_ONCE(rq->q->mq_ops);
if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
return true;
blk_finish_request(rq, error);
return false;
}
/**
* blk_end_request - Helper function for drivers to complete the request.
* @rq: the request being processed
* @error: block status code
* @nr_bytes: number of bytes to complete
*
* Description:
* Ends I/O on a number of bytes attached to @rq.
* If @rq has leftover, sets it up for the next range of segments.
*
* Return:
* %false - we are done with this request
* %true - still buffers pending for this request
**/
bool blk_end_request(struct request *rq, blk_status_t error,
unsigned int nr_bytes)
{
WARN_ON_ONCE(rq->q->mq_ops);
return blk_end_bidi_request(rq, error, nr_bytes, 0);
}
EXPORT_SYMBOL(blk_end_request);
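/*
 * Illustrative usage sketch (hypothetical legacy driver completion
 * handler; bytes_done is whatever the hardware reported):
 *
 *	if (!blk_end_request(rq, BLK_STS_OK, bytes_done)) {
 *		// rq is fully completed and has been finished
 *	} else {
 *		// partial: rq was set up for its remaining segments,
 *		// so reissue it for the rest of the transfer
 *	}
 */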
/**
* blk_end_request_all - Helper function for drivers to finish the request.
* @rq: the request to finish
* @error: block status code
*
* Description:
* Completely finish @rq.
*/
void blk_end_request_all(struct request *rq, blk_status_t error)
{
bool pending;
unsigned int bidi_bytes = 0;
if (unlikely(blk_bidi_rq(rq)))
bidi_bytes = blk_rq_bytes(rq->next_rq);
pending = blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
BUG_ON(pending);
}
EXPORT_SYMBOL(blk_end_request_all);
/**
* __blk_end_request - Helper function for drivers to complete the request.
* @rq: the request being processed
* @error: block status code
* @nr_bytes: number of bytes to complete
*
* Description:
* Must be called with queue lock held unlike blk_end_request().
*
* Return:
* %false - we are done with this request
* %true - still buffers pending for this request
**/
bool __blk_end_request(struct request *rq, blk_status_t error,
unsigned int nr_bytes)
{
lockdep_assert_held(rq->q->queue_lock);
WARN_ON_ONCE(rq->q->mq_ops);
return __blk_end_bidi_request(rq, error, nr_bytes, 0);
}
EXPORT_SYMBOL(__blk_end_request);
/**
* __blk_end_request_all - Helper function for drivers to finish the request.
* @rq: the request to finish
* @error: block status code
*
* Description:
* Completely finish @rq. Must be called with queue lock held.
*/
void __blk_end_request_all(struct request *rq, blk_status_t error)
{
bool pending;
unsigned int bidi_bytes = 0;
lockdep_assert_held(rq->q->queue_lock);
WARN_ON_ONCE(rq->q->mq_ops);
if (unlikely(blk_bidi_rq(rq)))
bidi_bytes = blk_rq_bytes(rq->next_rq);
pending = __blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
BUG_ON(pending);
}
EXPORT_SYMBOL(__blk_end_request_all);
/**
* __blk_end_request_cur - Helper function to finish the current request chunk.
* @rq: the request to finish the current chunk for
* @error: block status code
*
* Description:
* Complete the current consecutively mapped chunk from @rq. Must
* be called with queue lock held.
*
* Return:
* %false - we are done with this request
* %true - still buffers pending for this request
*/
bool __blk_end_request_cur(struct request *rq, blk_status_t error)
{
return __blk_end_request(rq, error, blk_rq_cur_bytes(rq));
}
EXPORT_SYMBOL(__blk_end_request_cur);
void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
struct bio *bio)
{
if (bio_has_data(bio))
rq->nr_phys_segments = bio_phys_segments(q, bio);
else if (bio_op(bio) == REQ_OP_DISCARD)
rq->nr_phys_segments = 1;
rq->__data_len = bio->bi_iter.bi_size;
rq->bio = rq->biotail = bio;
if (bio->bi_disk)
rq->rq_disk = bio->bi_disk;
}
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
/**
* rq_flush_dcache_pages - Helper function to flush all pages in a request
* @rq: the request to be flushed
*
* Description:
* Flush all pages in @rq.
*/
void rq_flush_dcache_pages(struct request *rq)
{
struct req_iterator iter;
struct bio_vec bvec;
rq_for_each_segment(bvec, rq, iter)
flush_dcache_page(bvec.bv_page);
}
EXPORT_SYMBOL_GPL(rq_flush_dcache_pages);
#endif
/**
* blk_lld_busy - Check if underlying low-level drivers of a device are busy
* @q : the queue of the device being checked
*
* Description:
* Check if underlying low-level drivers of a device are busy.
* If the drivers want to export their busy state, they must set their
* own exporting function using blk_queue_lld_busy() first.
*
* Basically, this function is used only by request stacking drivers
* to stop dispatching requests to underlying devices when underlying
* devices are busy. This behavior allows more I/O merging on the queue
* of the request stacking driver and prevents I/O throughput regressions
* under bursty I/O load.
*
* Return:
* 0 - Not busy (The request stacking driver should dispatch request)
* 1 - Busy (The request stacking driver should stop dispatching request)
*/
int blk_lld_busy(struct request_queue *q)
{
if (q->lld_busy_fn)
return q->lld_busy_fn(q);
return 0;
}
EXPORT_SYMBOL_GPL(blk_lld_busy);
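/*
 * Illustrative sketch of exporting a busy state (my_lld_busy_fn and
 * struct my_dev are hypothetical):
 *
 *	static int my_lld_busy_fn(struct request_queue *q)
 *	{
 *		struct my_dev *dev = q->queuedata;
 *
 *		return dev->in_flight >= dev->queue_depth;
 *	}
 *
 *	blk_queue_lld_busy(q, my_lld_busy_fn);	// during device setup
 *
 * A stacking driver then polls blk_lld_busy(bottom_q) before dispatching.
 */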
/**
* blk_rq_unprep_clone - Helper function to free all bios in a cloned request
* @rq: the clone request to be cleaned up
*
* Description:
* Free all bios in @rq for a cloned request.
*/
void blk_rq_unprep_clone(struct request *rq)
{
struct bio *bio;
while ((bio = rq->bio) != NULL) {
rq->bio = bio->bi_next;
bio_put(bio);
}
}
EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);
/*
* Copy attributes of the original request to the clone request.
* The actual data parts (e.g. ->cmd, ->sense) are not copied.
*/
static void __blk_rq_prep_clone(struct request *dst, struct request *src)
{
dst->cpu = src->cpu;
dst->__sector = blk_rq_pos(src);
dst->__data_len = blk_rq_bytes(src);
if (src->rq_flags & RQF_SPECIAL_PAYLOAD) {
dst->rq_flags |= RQF_SPECIAL_PAYLOAD;
dst->special_vec = src->special_vec;
}
dst->nr_phys_segments = src->nr_phys_segments;
dst->ioprio = src->ioprio;
dst->extra_len = src->extra_len;
}
/**
* blk_rq_prep_clone - Helper function to setup clone request
* @rq: the request to be setup
* @rq_src: original request to be cloned
* @bs: bio_set that bios for clone are allocated from
* @gfp_mask: memory allocation mask for bio
* @bio_ctr: setup function to be called for each clone bio.
* Returns %0 for success, non %0 for failure.
* @data: private data to be passed to @bio_ctr
*
* Description:
* Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq.
* The actual data parts of @rq_src (e.g. ->cmd, ->sense)
* are not copied, and copying such parts is the caller's responsibility.
* Also, pages which the original bios are pointing to are not copied
* and the cloned bios just point to the same pages.
* So cloned bios must be completed before original bios, which means
* the caller must complete @rq before @rq_src.
*/
int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
struct bio_set *bs, gfp_t gfp_mask,
int (*bio_ctr)(struct bio *, struct bio *, void *),
void *data)
{
struct bio *bio, *bio_src;
if (!bs)
bs = &fs_bio_set;
__rq_for_each_bio(bio_src, rq_src) {
bio = bio_clone_fast(bio_src, gfp_mask, bs);
if (!bio)
goto free_and_out;
if (bio_ctr && bio_ctr(bio, bio_src, data))
goto free_and_out;
if (rq->bio) {
rq->biotail->bi_next = bio;
rq->biotail = bio;
} else
rq->bio = rq->biotail = bio;
}
__blk_rq_prep_clone(rq, rq_src);
return 0;
free_and_out:
if (bio)
bio_put(bio);
blk_rq_unprep_clone(rq);
return -ENOMEM;
}
EXPORT_SYMBOL_GPL(blk_rq_prep_clone);
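/*
 * Illustrative sketch of cloning in a stacking driver (error handling
 * trimmed; 'clone' is an already-allocated request on the underlying
 * queue, and no per-bio constructor is needed, so @bio_ctr is NULL):
 *
 *	if (blk_rq_prep_clone(clone, rq, NULL, GFP_NOIO, NULL, NULL))
 *		return -ENOMEM;
 *	// dispatch 'clone' to the bottom device; because the cloned bios
 *	// share pages with 'rq', 'clone' must complete before 'rq' does
 */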
int kblockd_schedule_work(struct work_struct *work)
{
return queue_work(kblockd_workqueue, work);
}
EXPORT_SYMBOL(kblockd_schedule_work);
int kblockd_schedule_work_on(int cpu, struct work_struct *work)
{
return queue_work_on(cpu, kblockd_workqueue, work);
}
EXPORT_SYMBOL(kblockd_schedule_work_on);
int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork,
unsigned long delay)
{
return mod_delayed_work_on(cpu, kblockd_workqueue, dwork, delay);
}
EXPORT_SYMBOL(kblockd_mod_delayed_work_on);
/**
* blk_start_plug - initialize blk_plug and track it inside the task_struct
* @plug: The &struct blk_plug that needs to be initialized
*
* Description:
* Tracking blk_plug inside the task_struct will help with auto-flushing the
* pending I/O should the task end up blocking between blk_start_plug() and
* blk_finish_plug(). This is important from a performance perspective, but
* also ensures that we don't deadlock. For instance, if the task is blocking
* for a memory allocation, memory reclaim could end up wanting to free a
* page belonging to that request that is currently residing in our private
* plug. By flushing the pending I/O when the process goes to sleep, we avoid
* this kind of deadlock.
*/
void blk_start_plug(struct blk_plug *plug)
{
struct task_struct *tsk = current;
/*
* If this is a nested plug, don't actually assign it.
*/
if (tsk->plug)
return;
INIT_LIST_HEAD(&plug->list);
INIT_LIST_HEAD(&plug->mq_list);
INIT_LIST_HEAD(&plug->cb_list);
/*
* Store ordering should not be needed here, since a potential
* preempt will imply a full memory barrier
*/
tsk->plug = plug;
}
EXPORT_SYMBOL(blk_start_plug);
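/*
 * Illustrative usage sketch, the canonical pattern for batching
 * submissions (typically with an on-stack plug):
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	// submit a batch of bios/requests
 *	blk_finish_plug(&plug);
 */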
static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
{
struct request *rqa = container_of(a, struct request, queuelist);
struct request *rqb = container_of(b, struct request, queuelist);
return !(rqa->q < rqb->q ||
(rqa->q == rqb->q && blk_rq_pos(rqa) < blk_rq_pos(rqb)));
}
/*
* If 'from_schedule' is true, then postpone the dispatch of requests
* until a safe kblockd context. We do this to avoid accidentally large
* additional stack usage in driver dispatch, in places where the original
* plugger did not intend it.
*/
static void queue_unplugged(struct request_queue *q, unsigned int depth,
bool from_schedule)
__releases(q->queue_lock)
{
lockdep_assert_held(q->queue_lock);
trace_block_unplug(q, depth, !from_schedule);
if (from_schedule)
blk_run_queue_async(q);
else
__blk_run_queue(q);
spin_unlock_irq(q->queue_lock);
}
static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
{
LIST_HEAD(callbacks);
while (!list_empty(&plug->cb_list)) {
list_splice_init(&plug->cb_list, &callbacks);
while (!list_empty(&callbacks)) {
struct blk_plug_cb *cb = list_first_entry(&callbacks,
struct blk_plug_cb,
list);
list_del(&cb->list);
cb->callback(cb, from_schedule);
}
}
}
struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data,
int size)
{
struct blk_plug *plug = current->plug;
struct blk_plug_cb *cb;
if (!plug)
return NULL;
list_for_each_entry(cb, &plug->cb_list, list)
if (cb->callback == unplug && cb->data == data)
return cb;
/* Not currently on the callback list */
BUG_ON(size < sizeof(*cb));
cb = kzalloc(size, GFP_ATOMIC);
if (cb) {
cb->data = data;
cb->callback = unplug;
list_add(&cb->list, &plug->cb_list);
}
return cb;
}
EXPORT_SYMBOL(blk_check_plugged);
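/*
 * Illustrative sketch, modeled loosely on callers such as md; the
 * structure and callback names are hypothetical:
 *
 *	struct my_plug_cb {
 *		struct blk_plug_cb cb;	// embedded, returned by the helper
 *		// driver-private batching state follows
 *	};
 *
 *	cb = blk_check_plugged(my_unplug, dev, sizeof(struct my_plug_cb));
 *	if (cb) {
 *		// plugged: queue the work, my_unplug() runs at unplug time
 *	} else {
 *		// no plug in progress: issue the work immediately
 *	}
 */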
void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
{
struct request_queue *q;
struct request *rq;
LIST_HEAD(list);
unsigned int depth;
flush_plug_callbacks(plug, from_schedule);
if (!list_empty(&plug->mq_list))
blk_mq_flush_plug_list(plug, from_schedule);
if (list_empty(&plug->list))
return;
list_splice_init(&plug->list, &list);
list_sort(NULL, &list, plug_rq_cmp);
q = NULL;
depth = 0;
while (!list_empty(&list)) {
rq = list_entry_rq(list.next);
list_del_init(&rq->queuelist);
BUG_ON(!rq->q);
if (rq->q != q) {
/*
* This drops the queue lock
*/
if (q)
queue_unplugged(q, depth, from_schedule);
q = rq->q;
depth = 0;
spin_lock_irq(q->queue_lock);
}
/*
* Short-circuit if @q is dead
*/
if (unlikely(blk_queue_dying(q))) {
__blk_end_request_all(rq, BLK_STS_IOERR);
continue;
}
/*
* rq is already accounted, so use raw insert
*/
if (op_is_flush(rq->cmd_flags))
__elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH);
else
__elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE);
depth++;
}
/*
* This drops the queue lock
*/
if (q)
queue_unplugged(q, depth, from_schedule);
}
void blk_finish_plug(struct blk_plug *plug)
{
if (plug != current->plug)
return;
blk_flush_plug_list(plug, false);
current->plug = NULL;
}
EXPORT_SYMBOL(blk_finish_plug);
#ifdef CONFIG_PM
/**
* blk_pm_runtime_init - Block layer runtime PM initialization routine
* @q: the queue of the device
* @dev: the device the queue belongs to
*
* Description:
* Initialize runtime-PM-related fields for @q and start auto suspend for
* @dev. Drivers that want to take advantage of request-based runtime PM
* should call this function after @dev has been initialized and its
* request queue @q has been allocated, and while runtime PM for it cannot
* happen yet (either because it is disabled/forbidden or because its
* usage_count > 0). In most cases, the driver should call this function
* before any I/O has taken place.
*
* This function takes care of setting up autosuspend for the device;
* the autosuspend delay is set to -1 to make runtime suspend impossible
* until an updated value is set either by the user or by the driver.
* Drivers do not need to touch other autosuspend settings.
*
* The block layer runtime PM is request based, so it only works for
* drivers that use requests as their I/O unit, not for those that
* directly use bios.
*/
void blk_pm_runtime_init(struct request_queue *q, struct device *dev)
{
/* RQF_PM and ->rpm_status are not supported in blk-mq yet */
if (q->mq_ops)
return;
q->dev = dev;
q->rpm_status = RPM_ACTIVE;
pm_runtime_set_autosuspend_delay(q->dev, -1);
pm_runtime_use_autosuspend(q->dev);
}
EXPORT_SYMBOL(blk_pm_runtime_init);
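/*
 * Illustrative probe-time sequence (hypothetical driver code; the delay
 * value is a driver/user policy choice):
 *
 *	blk_pm_runtime_init(q, dev);
 *	pm_runtime_set_autosuspend_delay(dev, 5000);
 *	pm_runtime_allow(dev);
 */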
/**
* blk_pre_runtime_suspend - Pre runtime suspend check
* @q: the queue of the device
*
* Description:
* This function will check if runtime suspend is allowed for the device
* by examining if there are any requests pending in the queue. If there
* are requests pending, the device cannot be runtime suspended; otherwise,
* the queue's status will be updated to SUSPENDING and the driver can
* proceed to suspend the device.
*
* In the not-allowed case, we mark the device as last busy so that the
* runtime PM core will try to autosuspend it again some time later.
*
* This function should be called near the start of the device's
* runtime_suspend callback.
*
* Return:
* 0 - OK to runtime suspend the device
* -EBUSY - Device should not be runtime suspended
*/
int blk_pre_runtime_suspend(struct request_queue *q)
{
int ret = 0;
if (!q->dev)
return ret;
spin_lock_irq(q->queue_lock);
if (q->nr_pending) {
ret = -EBUSY;
pm_runtime_mark_last_busy(q->dev);
} else {
q->rpm_status = RPM_SUSPENDING;
}
spin_unlock_irq(q->queue_lock);
return ret;
}
EXPORT_SYMBOL(blk_pre_runtime_suspend);
/**
* blk_post_runtime_suspend - Post runtime suspend processing
* @q: the queue of the device
* @err: return value of the device's runtime_suspend function
*
* Description:
* Update the queue's runtime status according to the return value of the
* device's runtime suspend function and mark last busy for the device so
* that PM core will try to auto suspend the device at a later time.
*
* This function should be called near the end of the device's
* runtime_suspend callback.
*/
void blk_post_runtime_suspend(struct request_queue *q, int err)
{
if (!q->dev)
return;
spin_lock_irq(q->queue_lock);
if (!err) {
q->rpm_status = RPM_SUSPENDED;
} else {
q->rpm_status = RPM_ACTIVE;
pm_runtime_mark_last_busy(q->dev);
}
spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL(blk_post_runtime_suspend);
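/*
 * Illustrative runtime_suspend callback wiring the pre/post helpers
 * together (dev_to_queue() and my_device_quiesce() are hypothetical):
 *
 *	static int my_runtime_suspend(struct device *dev)
 *	{
 *		struct request_queue *q = dev_to_queue(dev);
 *		int err;
 *
 *		err = blk_pre_runtime_suspend(q);
 *		if (err)
 *			return err;	// -EBUSY: requests still pending
 *		err = my_device_quiesce(dev);
 *		blk_post_runtime_suspend(q, err);
 *		return err;
 *	}
 */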
/**
* blk_pre_runtime_resume - Pre runtime resume processing
* @q: the queue of the device
*
* Description:
* Update the queue's runtime status to RESUMING in preparation for the
* runtime resume of the device.
*
* This function should be called near the start of the device's
* runtime_resume callback.
*/
void blk_pre_runtime_resume(struct request_queue *q)
{
if (!q->dev)
return;
spin_lock_irq(q->queue_lock);
q->rpm_status = RPM_RESUMING;
spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL(blk_pre_runtime_resume);
/**
* blk_post_runtime_resume - Post runtime resume processing
* @q: the queue of the device
* @err: return value of the device's runtime_resume function
*
* Description:
* Update the queue's runtime status according to the return value of the
* device's runtime_resume function. If it is successfully resumed, process
* the requests that were queued while the device was resuming, and then
* mark last busy and initiate autosuspend for it.
*
* This function should be called near the end of the device's
* runtime_resume callback.
*/
void blk_post_runtime_resume(struct request_queue *q, int err)
{
if (!q->dev)
return;
spin_lock_irq(q->queue_lock);
if (!err) {
q->rpm_status = RPM_ACTIVE;
__blk_run_queue(q);
pm_runtime_mark_last_busy(q->dev);
pm_request_autosuspend(q->dev);
} else {
q->rpm_status = RPM_SUSPENDED;
}
spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL(blk_post_runtime_resume);
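/*
 * Illustrative runtime_resume counterpart (same hypothetical names as
 * the suspend sketch above):
 *
 *	static int my_runtime_resume(struct device *dev)
 *	{
 *		struct request_queue *q = dev_to_queue(dev);
 *		int err;
 *
 *		blk_pre_runtime_resume(q);
 *		err = my_device_wakeup(dev);
 *		blk_post_runtime_resume(q, err);  // restarts the queue on success
 *		return err;
 *	}
 */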
/**
* blk_set_runtime_active - Force runtime status of the queue to be active
* @q: the queue of the device
*
* If the device is left runtime suspended during system suspend the resume
* hook typically resumes the device and corrects runtime status
* accordingly. However, that does not affect the queue runtime PM status
* which is still "suspended". This prevents processing requests from the
* queue.
*
* This function can be used in driver's resume hook to correct queue
* runtime PM status and re-enable peeking requests from the queue. It
* should be called before the first request is added to the queue.
*/
void blk_set_runtime_active(struct request_queue *q)
{
spin_lock_irq(q->queue_lock);
q->rpm_status = RPM_ACTIVE;
pm_runtime_mark_last_busy(q->dev);
pm_request_autosuspend(q->dev);
spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL(blk_set_runtime_active);
#endif
int __init blk_dev_init(void)
{
BUILD_BUG_ON(REQ_OP_LAST >= (1 << REQ_OP_BITS));
BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
FIELD_SIZEOF(struct request, cmd_flags));
BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
FIELD_SIZEOF(struct bio, bi_opf));
/* used for unplugging and affects IO latency/throughput - HIGHPRI */
kblockd_workqueue = alloc_workqueue("kblockd",
WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
if (!kblockd_workqueue)
panic("Failed to create kblockd\n");
request_cachep = kmem_cache_create("blkdev_requests",
sizeof(struct request), 0, SLAB_PANIC, NULL);
blk_requestq_cachep = kmem_cache_create("request_queue",
sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
#ifdef CONFIG_DEBUG_FS
blk_debugfs_root = debugfs_create_dir("block", NULL);
#endif
return 0;
}
| ./CrossVul/dataset_final_sorted/CWE-416/c/good_545_0 |
crossvul-cpp_data_good_3045_0 | /******************************************************************************
* emulate.c
*
* Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
*
* Copyright (c) 2005 Keir Fraser
*
* Linux coding style, mod r/m decoder, segment base fixes, real-mode
* privileged instructions:
*
* Copyright (C) 2006 Qumranet
* Copyright 2010 Red Hat, Inc. and/or its affiliates.
*
* Avi Kivity <avi@qumranet.com>
* Yaniv Kamay <yaniv@qumranet.com>
*
* This work is licensed under the terms of the GNU GPL, version 2. See
* the COPYING file in the top-level directory.
*
* From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
*/
#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include <asm/kvm_emulate.h>
#include <linux/stringify.h>
#include <asm/debugreg.h>
#include "x86.h"
#include "tss.h"
/*
* Operand types
*/
#define OpNone 0ull
#define OpImplicit 1ull /* No generic decode */
#define OpReg 2ull /* Register */
#define OpMem 3ull /* Memory */
#define OpAcc 4ull /* Accumulator: AL/AX/EAX/RAX */
#define OpDI 5ull /* ES:DI/EDI/RDI */
#define OpMem64 6ull /* Memory, 64-bit */
#define OpImmUByte 7ull /* Zero-extended 8-bit immediate */
#define OpDX 8ull /* DX register */
#define OpCL 9ull /* CL register (for shifts) */
#define OpImmByte 10ull /* 8-bit sign extended immediate */
#define OpOne 11ull /* Implied 1 */
#define OpImm 12ull /* Sign extended up to 32-bit immediate */
#define OpMem16 13ull /* Memory operand (16-bit). */
#define OpMem32 14ull /* Memory operand (32-bit). */
#define OpImmU 15ull /* Immediate operand, zero extended */
#define OpSI 16ull /* SI/ESI/RSI */
#define OpImmFAddr 17ull /* Immediate far address */
#define OpMemFAddr 18ull /* Far address in memory */
#define OpImmU16 19ull /* Immediate operand, 16 bits, zero extended */
#define OpES 20ull /* ES */
#define OpCS 21ull /* CS */
#define OpSS 22ull /* SS */
#define OpDS 23ull /* DS */
#define OpFS 24ull /* FS */
#define OpGS 25ull /* GS */
#define OpMem8 26ull /* 8-bit zero extended memory operand */
#define OpImm64 27ull /* Sign extended 16/32/64-bit immediate */
#define OpXLat 28ull /* memory at BX/EBX/RBX + zero-extended AL */
#define OpAccLo 29ull /* Low part of extended acc (AX/AX/EAX/RAX) */
#define OpAccHi 30ull /* High part of extended acc (-/DX/EDX/RDX) */
#define OpBits 5 /* Width of operand field */
#define OpMask ((1ull << OpBits) - 1)
/*
* Opcode effective-address decode tables.
* Note that we only emulate instructions that have at least one memory
* operand (excluding implicit stack references). We assume that stack
* references and instruction fetches will never occur in special memory
* areas that require emulation. So, for example, 'mov <imm>,<reg>' need
* not be handled.
*/
/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp (1<<0) /* 8-bit operands. */
/* Destination operand type. */
#define DstShift 1
#define ImplicitOps (OpImplicit << DstShift)
#define DstReg (OpReg << DstShift)
#define DstMem (OpMem << DstShift)
#define DstAcc (OpAcc << DstShift)
#define DstDI (OpDI << DstShift)
#define DstMem64 (OpMem64 << DstShift)
#define DstMem16 (OpMem16 << DstShift)
#define DstImmUByte (OpImmUByte << DstShift)
#define DstDX (OpDX << DstShift)
#define DstAccLo (OpAccLo << DstShift)
#define DstMask (OpMask << DstShift)
/* Source operand type. */
#define SrcShift 6
#define SrcNone (OpNone << SrcShift)
#define SrcReg (OpReg << SrcShift)
#define SrcMem (OpMem << SrcShift)
#define SrcMem16 (OpMem16 << SrcShift)
#define SrcMem32 (OpMem32 << SrcShift)
#define SrcImm (OpImm << SrcShift)
#define SrcImmByte (OpImmByte << SrcShift)
#define SrcOne (OpOne << SrcShift)
#define SrcImmUByte (OpImmUByte << SrcShift)
#define SrcImmU (OpImmU << SrcShift)
#define SrcSI (OpSI << SrcShift)
#define SrcXLat (OpXLat << SrcShift)
#define SrcImmFAddr (OpImmFAddr << SrcShift)
#define SrcMemFAddr (OpMemFAddr << SrcShift)
#define SrcAcc (OpAcc << SrcShift)
#define SrcImmU16 (OpImmU16 << SrcShift)
#define SrcImm64 (OpImm64 << SrcShift)
#define SrcDX (OpDX << SrcShift)
#define SrcMem8 (OpMem8 << SrcShift)
#define SrcAccHi (OpAccHi << SrcShift)
#define SrcMask (OpMask << SrcShift)
#define BitOp (1<<11)
#define MemAbs (1<<12) /* Memory operand is absolute displacement */
#define String (1<<13) /* String instruction (rep capable) */
#define Stack (1<<14) /* Stack instruction (push/pop) */
#define GroupMask (7<<15) /* Opcode uses one of the group mechanisms */
#define Group (1<<15) /* Bits 3:5 of modrm byte extend opcode */
#define GroupDual (2<<15) /* Alternate decoding of mod == 3 */
#define Prefix (3<<15) /* Instruction varies with 66/f2/f3 prefix */
#define RMExt (4<<15) /* Opcode extension in ModRM r/m if mod == 3 */
#define Escape (5<<15) /* Escape to coprocessor instruction */
#define InstrDual (6<<15) /* Alternate instruction decoding of mod == 3 */
#define ModeDual (7<<15) /* Different instruction for 32/64 bit */
#define Sse (1<<18) /* SSE Vector instruction */
/* Generic ModRM decode. */
#define ModRM (1<<19)
/* Destination is only written; never read. */
#define Mov (1<<20)
/* Misc flags */
#define Prot (1<<21) /* instruction generates #UD if not in prot-mode */
#define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */
#define NoAccess (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
#define Op3264 (1<<24) /* Operand is 64b in long mode, 32b otherwise */
#define Undefined (1<<25) /* No Such Instruction */
#define Lock (1<<26) /* lock prefix is allowed for the instruction */
#define Priv (1<<27) /* instruction generates #GP if current CPL != 0 */
#define No64 (1<<28)
#define PageTable (1 << 29) /* instruction used to write page table */
#define NotImpl (1 << 30) /* instruction is not implemented */
/* Source 2 operand type */
#define Src2Shift (31)
#define Src2None (OpNone << Src2Shift)
#define Src2Mem (OpMem << Src2Shift)
#define Src2CL (OpCL << Src2Shift)
#define Src2ImmByte (OpImmByte << Src2Shift)
#define Src2One (OpOne << Src2Shift)
#define Src2Imm (OpImm << Src2Shift)
#define Src2ES (OpES << Src2Shift)
#define Src2CS (OpCS << Src2Shift)
#define Src2SS (OpSS << Src2Shift)
#define Src2DS (OpDS << Src2Shift)
#define Src2FS (OpFS << Src2Shift)
#define Src2GS (OpGS << Src2Shift)
#define Src2Mask (OpMask << Src2Shift)
#define Mmx ((u64)1 << 40) /* MMX Vector instruction */
#define AlignMask ((u64)7 << 41)
#define Aligned ((u64)1 << 41) /* Explicitly aligned (e.g. MOVDQA) */
#define Unaligned ((u64)2 << 41) /* Explicitly unaligned (e.g. MOVDQU) */
#define Avx ((u64)3 << 41) /* Advanced Vector Extensions */
#define Aligned16 ((u64)4 << 41) /* Aligned to 16 byte boundary (e.g. FXSAVE) */
#define Fastop ((u64)1 << 44) /* Use opcode::u.fastop */
#define NoWrite ((u64)1 << 45) /* No writeback */
#define SrcWrite ((u64)1 << 46) /* Write back src operand */
#define NoMod ((u64)1 << 47) /* Mod field is ignored */
#define Intercept ((u64)1 << 48) /* Has valid intercept field */
#define CheckPerm ((u64)1 << 49) /* Has valid check_perm field */
#define PrivUD ((u64)1 << 51) /* #UD instead of #GP on CPL > 0 */
#define NearBranch ((u64)1 << 52) /* Near branches */
#define No16 ((u64)1 << 53) /* No 16 bit operand */
#define IncSP ((u64)1 << 54) /* SP is incremented before ModRM calc */
#define DstXacc (DstAccLo | SrcAccHi | SrcWrite)
#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)
#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
#define FASTOP_SIZE 8
/*
* fastop functions have a special calling convention:
*
* dst: rax (in/out)
* src: rdx (in/out)
* src2: rcx (in)
* flags: rflags (in/out)
* ex: rsi (in:fastop pointer, out:zero if exception)
*
* Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
* different operand sizes can be reached by calculation, rather than a jump
* table (which would be bigger than the code).
*
* fastop functions are declared as taking a never-defined fastop parameter,
* so they can't be called from C directly.
*/
struct fastop;
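/*
 * Illustrative sketch of the layout the FASTOP* macros further below
 * generate; e.g. FASTOP2(add) conceptually emits four 8-byte-aligned
 * stubs (exact encodings aside):
 *
 *	em_add:
 *		addb %dl, %al; ret	// +0*FASTOP_SIZE: byte operands
 *		addw %dx, %ax; ret	// +1*FASTOP_SIZE: word operands
 *		addl %edx, %eax; ret	// +2*FASTOP_SIZE: dword operands
 *		addq %rdx, %rax; ret	// +3*FASTOP_SIZE: qword (64-bit only)
 *
 * so the variant for a given operand size is reached by adding a
 * multiple of FASTOP_SIZE to em_add rather than through a jump table.
 */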
struct opcode {
u64 flags : 56;
u64 intercept : 8;
union {
int (*execute)(struct x86_emulate_ctxt *ctxt);
const struct opcode *group;
const struct group_dual *gdual;
const struct gprefix *gprefix;
const struct escape *esc;
const struct instr_dual *idual;
const struct mode_dual *mdual;
void (*fastop)(struct fastop *fake);
} u;
int (*check_perm)(struct x86_emulate_ctxt *ctxt);
};
struct group_dual {
struct opcode mod012[8];
struct opcode mod3[8];
};
struct gprefix {
struct opcode pfx_no;
struct opcode pfx_66;
struct opcode pfx_f2;
struct opcode pfx_f3;
};
struct escape {
struct opcode op[8];
struct opcode high[64];
};
struct instr_dual {
struct opcode mod012;
struct opcode mod3;
};
struct mode_dual {
struct opcode mode32;
struct opcode mode64;
};
#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
enum x86_transfer_type {
X86_TRANSFER_NONE,
X86_TRANSFER_CALL_JMP,
X86_TRANSFER_RET,
X86_TRANSFER_TASK_SWITCH,
};
static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
if (!(ctxt->regs_valid & (1 << nr))) {
ctxt->regs_valid |= 1 << nr;
ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
}
return ctxt->_regs[nr];
}
static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
ctxt->regs_valid |= 1 << nr;
ctxt->regs_dirty |= 1 << nr;
return &ctxt->_regs[nr];
}
static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
reg_read(ctxt, nr);
return reg_write(ctxt, nr);
}
static void writeback_registers(struct x86_emulate_ctxt *ctxt)
{
unsigned reg;
for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
}
static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
{
ctxt->regs_dirty = 0;
ctxt->regs_valid = 0;
}
/*
* These EFLAGS bits are restored from the saved value during emulation, and
* any changes are written back to the saved value after emulation.
*/
#define EFLAGS_MASK (X86_EFLAGS_OF|X86_EFLAGS_SF|X86_EFLAGS_ZF|X86_EFLAGS_AF|\
X86_EFLAGS_PF|X86_EFLAGS_CF)
#ifdef CONFIG_X86_64
#define ON64(x) x
#else
#define ON64(x)
#endif
static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));
#define FOP_FUNC(name) \
".align " __stringify(FASTOP_SIZE) " \n\t" \
".type " name ", @function \n\t" \
name ":\n\t"
#define FOP_RET "ret \n\t"
#define FOP_START(op) \
extern void em_##op(struct fastop *fake); \
asm(".pushsection .text, \"ax\" \n\t" \
".global em_" #op " \n\t" \
FOP_FUNC("em_" #op)
#define FOP_END \
".popsection")
#define FOPNOP() \
FOP_FUNC(__stringify(__UNIQUE_ID(nop))) \
FOP_RET
#define FOP1E(op, dst) \
FOP_FUNC(#op "_" #dst) \
"10: " #op " %" #dst " \n\t" FOP_RET
#define FOP1EEX(op, dst) \
FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)
#define FASTOP1(op) \
FOP_START(op) \
FOP1E(op##b, al) \
FOP1E(op##w, ax) \
FOP1E(op##l, eax) \
ON64(FOP1E(op##q, rax)) \
FOP_END
/* 1-operand, using src2 (for MUL/DIV r/m) */
#define FASTOP1SRC2(op, name) \
FOP_START(name) \
FOP1E(op, cl) \
FOP1E(op, cx) \
FOP1E(op, ecx) \
ON64(FOP1E(op, rcx)) \
FOP_END
/* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
#define FASTOP1SRC2EX(op, name) \
FOP_START(name) \
FOP1EEX(op, cl) \
FOP1EEX(op, cx) \
FOP1EEX(op, ecx) \
ON64(FOP1EEX(op, rcx)) \
FOP_END
#define FOP2E(op, dst, src) \
FOP_FUNC(#op "_" #dst "_" #src) \
#op " %" #src ", %" #dst " \n\t" FOP_RET
#define FASTOP2(op) \
FOP_START(op) \
FOP2E(op##b, al, dl) \
FOP2E(op##w, ax, dx) \
FOP2E(op##l, eax, edx) \
ON64(FOP2E(op##q, rax, rdx)) \
FOP_END
/* 2 operand, word only */
#define FASTOP2W(op) \
FOP_START(op) \
FOPNOP() \
FOP2E(op##w, ax, dx) \
FOP2E(op##l, eax, edx) \
ON64(FOP2E(op##q, rax, rdx)) \
FOP_END
/* 2 operand, src is CL */
#define FASTOP2CL(op) \
FOP_START(op) \
FOP2E(op##b, al, cl) \
FOP2E(op##w, ax, cl) \
FOP2E(op##l, eax, cl) \
ON64(FOP2E(op##q, rax, cl)) \
FOP_END
/* 2 operand, src and dest are reversed */
#define FASTOP2R(op, name) \
FOP_START(name) \
FOP2E(op##b, dl, al) \
FOP2E(op##w, dx, ax) \
FOP2E(op##l, edx, eax) \
ON64(FOP2E(op##q, rdx, rax)) \
FOP_END
#define FOP3E(op, dst, src, src2) \
FOP_FUNC(#op "_" #dst "_" #src "_" #src2) \
#op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET
/* 3-operand, word-only, src2=cl */
#define FASTOP3WCL(op) \
FOP_START(op) \
FOPNOP() \
FOP3E(op##w, ax, dx, cl) \
FOP3E(op##l, eax, edx, cl) \
ON64(FOP3E(op##q, rax, rdx, cl)) \
FOP_END
/* Special case for SETcc - 1 instruction per cc */
#define FOP_SETCC(op) \
".align 4 \n\t" \
".type " #op ", @function \n\t" \
#op ": \n\t" \
#op " %al \n\t" \
FOP_RET
asm(".global kvm_fastop_exception \n"
"kvm_fastop_exception: xor %esi, %esi; ret");
FOP_START(setcc)
FOP_SETCC(seto)
FOP_SETCC(setno)
FOP_SETCC(setc)
FOP_SETCC(setnc)
FOP_SETCC(setz)
FOP_SETCC(setnz)
FOP_SETCC(setbe)
FOP_SETCC(setnbe)
FOP_SETCC(sets)
FOP_SETCC(setns)
FOP_SETCC(setp)
FOP_SETCC(setnp)
FOP_SETCC(setl)
FOP_SETCC(setnl)
FOP_SETCC(setle)
FOP_SETCC(setnle)
FOP_END;
FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
FOP_END;
/*
* XXX: inoutclob user must know where the argument is being expanded.
* Relying on CC_HAVE_ASM_GOTO would allow us to remove _fault.
*/
#define asm_safe(insn, inoutclob...) \
({ \
int _fault = 0; \
\
asm volatile("1:" insn "\n" \
"2:\n" \
".pushsection .fixup, \"ax\"\n" \
"3: movl $1, %[_fault]\n" \
" jmp 2b\n" \
".popsection\n" \
_ASM_EXTABLE(1b, 3b) \
: [_fault] "+qm"(_fault) inoutclob ); \
\
_fault ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE; \
})
static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
enum x86_intercept intercept,
enum x86_intercept_stage stage)
{
struct x86_instruction_info info = {
.intercept = intercept,
.rep_prefix = ctxt->rep_prefix,
.modrm_mod = ctxt->modrm_mod,
.modrm_reg = ctxt->modrm_reg,
.modrm_rm = ctxt->modrm_rm,
.src_val = ctxt->src.val64,
.dst_val = ctxt->dst.val64,
.src_bytes = ctxt->src.bytes,
.dst_bytes = ctxt->dst.bytes,
.ad_bytes = ctxt->ad_bytes,
.next_rip = ctxt->eip,
};
return ctxt->ops->intercept(ctxt, &info, stage);
}
static void assign_masked(ulong *dest, ulong src, ulong mask)
{
*dest = (*dest & ~mask) | (src & mask);
}
static void assign_register(unsigned long *reg, u64 val, int bytes)
{
/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
switch (bytes) {
case 1:
*(u8 *)reg = (u8)val;
break;
case 2:
*(u16 *)reg = (u16)val;
break;
case 4:
*reg = (u32)val;
break; /* 64b: zero-extend */
case 8:
*reg = val;
break;
}
}
static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
{
return (1UL << (ctxt->ad_bytes << 3)) - 1;
}
static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
{
u16 sel;
struct desc_struct ss;
if (ctxt->mode == X86EMUL_MODE_PROT64)
return ~0UL;
ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
return ~0U >> ((ss.d ^ 1) * 16); /* d=0: 0xffff; d=1: 0xffffffff */
}
static int stack_size(struct x86_emulate_ctxt *ctxt)
{
return (__fls(stack_mask(ctxt)) + 1) >> 3;
}
/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
if (ctxt->ad_bytes == sizeof(unsigned long))
return reg;
else
return reg & ad_mask(ctxt);
}
static inline unsigned long
register_address(struct x86_emulate_ctxt *ctxt, int reg)
{
return address_mask(ctxt, reg_read(ctxt, reg));
}
static void masked_increment(ulong *reg, ulong mask, int inc)
{
assign_masked(reg, *reg + inc, mask);
}
static inline void
register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
{
ulong *preg = reg_rmw(ctxt, reg);
assign_register(preg, *preg + inc, ctxt->ad_bytes);
}
static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
{
masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
}
static u32 desc_limit_scaled(struct desc_struct *desc)
{
u32 limit = get_desc_limit(desc);
return desc->g ? (limit << 12) | 0xfff : limit;
}
static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{
if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
return 0;
return ctxt->ops->get_cached_segment_base(ctxt, seg);
}
static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
u32 error, bool valid)
{
WARN_ON(vec > 0x1f);
ctxt->exception.vector = vec;
ctxt->exception.error_code = error;
ctxt->exception.error_code_valid = valid;
return X86EMUL_PROPAGATE_FAULT;
}
static int emulate_db(struct x86_emulate_ctxt *ctxt)
{
return emulate_exception(ctxt, DB_VECTOR, 0, false);
}
static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
return emulate_exception(ctxt, GP_VECTOR, err, true);
}
static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
{
return emulate_exception(ctxt, SS_VECTOR, err, true);
}
static int emulate_ud(struct x86_emulate_ctxt *ctxt)
{
return emulate_exception(ctxt, UD_VECTOR, 0, false);
}
static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
return emulate_exception(ctxt, TS_VECTOR, err, true);
}
static int emulate_de(struct x86_emulate_ctxt *ctxt)
{
return emulate_exception(ctxt, DE_VECTOR, 0, false);
}
static int emulate_nm(struct x86_emulate_ctxt *ctxt)
{
return emulate_exception(ctxt, NM_VECTOR, 0, false);
}
static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
{
u16 selector;
struct desc_struct desc;
ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
return selector;
}
static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
unsigned seg)
{
u16 dummy;
u32 base3;
struct desc_struct desc;
ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
}
/*
* x86 defines three classes of vector instructions: explicitly
* aligned, explicitly unaligned, and the rest, which change behaviour
* depending on whether they're AVX encoded or not.
*
* Also included is CMPXCHG16B which is not a vector instruction, yet it is
* subject to the same check. FXSAVE and FXRSTOR are checked here too as their
* 512 bytes of data must be aligned to a 16 byte boundary.
*/
static unsigned insn_alignment(struct x86_emulate_ctxt *ctxt, unsigned size)
{
u64 alignment = ctxt->d & AlignMask;
if (likely(size < 16))
return 1;
switch (alignment) {
case Unaligned:
case Avx:
return 1;
case Aligned16:
return 16;
case Aligned:
default:
return size;
}
}
static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
struct segmented_address addr,
unsigned *max_size, unsigned size,
bool write, bool fetch,
enum x86emul_mode mode, ulong *linear)
{
struct desc_struct desc;
bool usable;
ulong la;
u32 lim;
u16 sel;
la = seg_base(ctxt, addr.seg) + addr.ea;
*max_size = 0;
switch (mode) {
case X86EMUL_MODE_PROT64:
*linear = la;
if (is_noncanonical_address(la))
goto bad;
*max_size = min_t(u64, ~0u, (1ull << 48) - la);
if (size > *max_size)
goto bad;
break;
default:
*linear = la = (u32)la;
usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
addr.seg);
if (!usable)
goto bad;
/* code segment in protected mode or read-only data segment */
if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
|| !(desc.type & 2)) && write)
goto bad;
/* unreadable code segment */
if (!fetch && (desc.type & 8) && !(desc.type & 2))
goto bad;
lim = desc_limit_scaled(&desc);
if (!(desc.type & 8) && (desc.type & 4)) {
/* expand-down segment */
if (addr.ea <= lim)
goto bad;
lim = desc.d ? 0xffffffff : 0xffff;
}
if (addr.ea > lim)
goto bad;
if (lim == 0xffffffff)
*max_size = ~0u;
else {
*max_size = (u64)lim + 1 - addr.ea;
if (size > *max_size)
goto bad;
}
break;
}
if (la & (insn_alignment(ctxt, size) - 1))
return emulate_gp(ctxt, 0);
return X86EMUL_CONTINUE;
bad:
if (addr.seg == VCPU_SREG_SS)
return emulate_ss(ctxt, 0);
else
return emulate_gp(ctxt, 0);
}
static int linearize(struct x86_emulate_ctxt *ctxt,
struct segmented_address addr,
unsigned size, bool write,
ulong *linear)
{
unsigned max_size;
return __linearize(ctxt, addr, &max_size, size, write, false,
ctxt->mode, linear);
}
static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
enum x86emul_mode mode)
{
ulong linear;
int rc;
unsigned max_size;
struct segmented_address addr = { .seg = VCPU_SREG_CS,
.ea = dst };
if (ctxt->op_bytes != sizeof(unsigned long))
addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
if (rc == X86EMUL_CONTINUE)
ctxt->_eip = addr.ea;
return rc;
}
static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
{
return assign_eip(ctxt, dst, ctxt->mode);
}
static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
const struct desc_struct *cs_desc)
{
enum x86emul_mode mode = ctxt->mode;
int rc;
#ifdef CONFIG_X86_64
if (ctxt->mode >= X86EMUL_MODE_PROT16) {
if (cs_desc->l) {
u64 efer = 0;
ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
if (efer & EFER_LMA)
mode = X86EMUL_MODE_PROT64;
} else
mode = X86EMUL_MODE_PROT32; /* temporary value */
}
#endif
if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
rc = assign_eip(ctxt, dst, mode);
if (rc == X86EMUL_CONTINUE)
ctxt->mode = mode;
return rc;
}
static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
{
return assign_eip_near(ctxt, ctxt->_eip + rel);
}
static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
struct segmented_address addr,
void *data,
unsigned size)
{
int rc;
ulong linear;
rc = linearize(ctxt, addr, size, false, &linear);
if (rc != X86EMUL_CONTINUE)
return rc;
return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
}
static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
struct segmented_address addr,
void *data,
unsigned int size)
{
int rc;
ulong linear;
rc = linearize(ctxt, addr, size, true, &linear);
if (rc != X86EMUL_CONTINUE)
return rc;
return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception);
}
/*
* Prefetch the remaining bytes of the instruction without crossing a page
* boundary if they are not in fetch_cache yet.
*/
static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
{
int rc;
unsigned size, max_size;
unsigned long linear;
int cur_size = ctxt->fetch.end - ctxt->fetch.data;
struct segmented_address addr = { .seg = VCPU_SREG_CS,
.ea = ctxt->eip + cur_size };
/*
* We do not know exactly how many bytes will be needed, and
* __linearize is expensive, so fetch as much as possible. We
* just have to avoid going beyond the 15 byte limit, the end
* of the segment, or the end of the page.
*
* __linearize is called with size 0 so that it does not do any
* boundary check itself. Instead, we use max_size to check
* against op_size.
*/
rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
&linear);
if (unlikely(rc != X86EMUL_CONTINUE))
return rc;
/* 15UL ^ cur_size == 15 - cur_size, since cur_size is at most 15 */
size = min_t(unsigned, 15UL ^ cur_size, max_size);
size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));
/*
* One instruction can only straddle two pages,
* and one page has been loaded at the beginning of
* x86_decode_insn. So, if we still do not have enough
* bytes, we must have hit the 15-byte limit.
*/
if (unlikely(size < op_size))
return emulate_gp(ctxt, 0);
rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
size, &ctxt->exception);
if (unlikely(rc != X86EMUL_CONTINUE))
return rc;
ctxt->fetch.end += size;
return X86EMUL_CONTINUE;
}
static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
unsigned size)
{
unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;
if (unlikely(done_size < size))
return __do_insn_fetch_bytes(ctxt, size - done_size);
else
return X86EMUL_CONTINUE;
}
/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _ctxt) \
({ _type _x; \
\
rc = do_insn_fetch_bytes(_ctxt, sizeof(_type)); \
if (rc != X86EMUL_CONTINUE) \
goto done; \
ctxt->_eip += sizeof(_type); \
_x = *(_type __aligned(1) *) ctxt->fetch.ptr; \
ctxt->fetch.ptr += sizeof(_type); \
_x; \
})
#define insn_fetch_arr(_arr, _size, _ctxt) \
({ \
rc = do_insn_fetch_bytes(_ctxt, _size); \
if (rc != X86EMUL_CONTINUE) \
goto done; \
ctxt->_eip += (_size); \
memcpy(_arr, ctxt->fetch.ptr, _size); \
ctxt->fetch.ptr += (_size); \
})
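/*
 * Illustrative sketch of how the decoder uses these macros; note that
 * they assume an 'rc' variable and a 'done' label in scope, as in
 * decode_modrm() and decode_abs() below:
 *
 *	u8 sib = insn_fetch(u8, ctxt);		// one byte, advances _eip
 *	s32 disp = insn_fetch(s32, ctxt);	// four-byte displacement
 */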
/*
* Given the 'reg' portion of a ModRM byte, and a register block, return a
* pointer into the block that addresses the relevant register.
* @byteop specifies whether the register is accessed as a byte operand;
* AH,CH,DH,BH are decoded only for byte ops without a REX prefix.
*/
static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
int byteop)
{
void *p;
int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;
if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
else
p = reg_rmw(ctxt, modrm_reg);
return p;
}
static int read_descriptor(struct x86_emulate_ctxt *ctxt,
struct segmented_address addr,
u16 *size, unsigned long *address, int op_bytes)
{
int rc;
if (op_bytes == 2)
op_bytes = 3;
*address = 0;
rc = segmented_read_std(ctxt, addr, size, 2);
if (rc != X86EMUL_CONTINUE)
return rc;
addr.ea += 2;
rc = segmented_read_std(ctxt, addr, address, op_bytes);
return rc;
}
FASTOP2(add);
FASTOP2(or);
FASTOP2(adc);
FASTOP2(sbb);
FASTOP2(and);
FASTOP2(sub);
FASTOP2(xor);
FASTOP2(cmp);
FASTOP2(test);
FASTOP1SRC2(mul, mul_ex);
FASTOP1SRC2(imul, imul_ex);
FASTOP1SRC2EX(div, div_ex);
FASTOP1SRC2EX(idiv, idiv_ex);
FASTOP3WCL(shld);
FASTOP3WCL(shrd);
FASTOP2W(imul);
FASTOP1(not);
FASTOP1(neg);
FASTOP1(inc);
FASTOP1(dec);
FASTOP2CL(rol);
FASTOP2CL(ror);
FASTOP2CL(rcl);
FASTOP2CL(rcr);
FASTOP2CL(shl);
FASTOP2CL(shr);
FASTOP2CL(sar);
FASTOP2W(bsf);
FASTOP2W(bsr);
FASTOP2W(bt);
FASTOP2W(bts);
FASTOP2W(btr);
FASTOP2W(btc);
FASTOP2(xadd);
FASTOP2R(cmp, cmp_r);
static int em_bsf_c(struct x86_emulate_ctxt *ctxt)
{
/* If src is zero, do not writeback, but update flags */
if (ctxt->src.val == 0)
ctxt->dst.type = OP_NONE;
return fastop(ctxt, em_bsf);
}
static int em_bsr_c(struct x86_emulate_ctxt *ctxt)
{
/* If src is zero, do not writeback, but update flags */
if (ctxt->src.val == 0)
ctxt->dst.type = OP_NONE;
return fastop(ctxt, em_bsr);
}
static __always_inline u8 test_cc(unsigned int condition, unsigned long flags)
{
u8 rc;
/* each setcc stub in em_setcc is 4 bytes long (see FOP_SETCC) */
void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);
flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
asm("push %[flags]; popf; call *%[fastop]"
: "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags));
return rc;
}
static void fetch_register_operand(struct operand *op)
{
switch (op->bytes) {
case 1:
op->val = *(u8 *)op->addr.reg;
break;
case 2:
op->val = *(u16 *)op->addr.reg;
break;
case 4:
op->val = *(u32 *)op->addr.reg;
break;
case 8:
op->val = *(u64 *)op->addr.reg;
break;
}
}
static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
{
ctxt->ops->get_fpu(ctxt);
switch (reg) {
case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
#ifdef CONFIG_X86_64
case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
#endif
default: BUG();
}
ctxt->ops->put_fpu(ctxt);
}
static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
int reg)
{
ctxt->ops->get_fpu(ctxt);
switch (reg) {
case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
#ifdef CONFIG_X86_64
case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
#endif
default: BUG();
}
ctxt->ops->put_fpu(ctxt);
}
static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
ctxt->ops->get_fpu(ctxt);
switch (reg) {
case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
default: BUG();
}
ctxt->ops->put_fpu(ctxt);
}
static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
ctxt->ops->get_fpu(ctxt);
switch (reg) {
case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
default: BUG();
}
ctxt->ops->put_fpu(ctxt);
}
static int em_fninit(struct x86_emulate_ctxt *ctxt)
{
if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
return emulate_nm(ctxt);
ctxt->ops->get_fpu(ctxt);
asm volatile("fninit");
ctxt->ops->put_fpu(ctxt);
return X86EMUL_CONTINUE;
}
static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
{
u16 fcw;
if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
return emulate_nm(ctxt);
ctxt->ops->get_fpu(ctxt);
asm volatile("fnstcw %0": "+m"(fcw));
ctxt->ops->put_fpu(ctxt);
ctxt->dst.val = fcw;
return X86EMUL_CONTINUE;
}
static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
{
u16 fsw;
if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
return emulate_nm(ctxt);
ctxt->ops->get_fpu(ctxt);
asm volatile("fnstsw %0": "+m"(fsw));
ctxt->ops->put_fpu(ctxt);
ctxt->dst.val = fsw;
return X86EMUL_CONTINUE;
}
static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
struct operand *op)
{
unsigned reg = ctxt->modrm_reg;
if (!(ctxt->d & ModRM))
reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);
if (ctxt->d & Sse) {
op->type = OP_XMM;
op->bytes = 16;
op->addr.xmm = reg;
read_sse_reg(ctxt, &op->vec_val, reg);
return;
}
if (ctxt->d & Mmx) {
reg &= 7;
op->type = OP_MM;
op->bytes = 8;
op->addr.mm = reg;
return;
}
op->type = OP_REG;
op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);
fetch_register_operand(op);
op->orig_val = op->val;
}
static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
{
if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
ctxt->modrm_seg = VCPU_SREG_SS;
}
static int decode_modrm(struct x86_emulate_ctxt *ctxt,
struct operand *op)
{
u8 sib;
int index_reg, base_reg, scale;
int rc = X86EMUL_CONTINUE;
ulong modrm_ea = 0;
ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */
ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
ctxt->modrm_seg = VCPU_SREG_DS;
if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
op->type = OP_REG;
op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
ctxt->d & ByteOp);
if (ctxt->d & Sse) {
op->type = OP_XMM;
op->bytes = 16;
op->addr.xmm = ctxt->modrm_rm;
read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
return rc;
}
if (ctxt->d & Mmx) {
op->type = OP_MM;
op->bytes = 8;
op->addr.mm = ctxt->modrm_rm & 7;
return rc;
}
fetch_register_operand(op);
return rc;
}
op->type = OP_MEM;
if (ctxt->ad_bytes == 2) {
unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
unsigned di = reg_read(ctxt, VCPU_REGS_RDI);
/* 16-bit ModR/M decode. */
switch (ctxt->modrm_mod) {
case 0:
if (ctxt->modrm_rm == 6)
modrm_ea += insn_fetch(u16, ctxt);
break;
case 1:
modrm_ea += insn_fetch(s8, ctxt);
break;
case 2:
modrm_ea += insn_fetch(u16, ctxt);
break;
}
switch (ctxt->modrm_rm) {
case 0:
modrm_ea += bx + si;
break;
case 1:
modrm_ea += bx + di;
break;
case 2:
modrm_ea += bp + si;
break;
case 3:
modrm_ea += bp + di;
break;
case 4:
modrm_ea += si;
break;
case 5:
modrm_ea += di;
break;
case 6:
if (ctxt->modrm_mod != 0)
modrm_ea += bp;
break;
case 7:
modrm_ea += bx;
break;
}
if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
(ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
ctxt->modrm_seg = VCPU_SREG_SS;
modrm_ea = (u16)modrm_ea;
} else {
/* 32/64-bit ModR/M decode. */
if ((ctxt->modrm_rm & 7) == 4) {
sib = insn_fetch(u8, ctxt);
index_reg |= (sib >> 3) & 7;
base_reg |= sib & 7;
scale = sib >> 6;
if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
modrm_ea += insn_fetch(s32, ctxt);
else {
modrm_ea += reg_read(ctxt, base_reg);
adjust_modrm_seg(ctxt, base_reg);
/* Increment ESP on POP [ESP] */
if ((ctxt->d & IncSP) &&
base_reg == VCPU_REGS_RSP)
modrm_ea += ctxt->op_bytes;
}
if (index_reg != 4)
modrm_ea += reg_read(ctxt, index_reg) << scale;
} else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
modrm_ea += insn_fetch(s32, ctxt);
if (ctxt->mode == X86EMUL_MODE_PROT64)
ctxt->rip_relative = 1;
} else {
base_reg = ctxt->modrm_rm;
modrm_ea += reg_read(ctxt, base_reg);
adjust_modrm_seg(ctxt, base_reg);
}
switch (ctxt->modrm_mod) {
case 1:
modrm_ea += insn_fetch(s8, ctxt);
break;
case 2:
modrm_ea += insn_fetch(s32, ctxt);
break;
}
}
op->addr.mem.ea = modrm_ea;
if (ctxt->ad_bytes != 8)
ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;
done:
return rc;
}
static int decode_abs(struct x86_emulate_ctxt *ctxt,
struct operand *op)
{
int rc = X86EMUL_CONTINUE;
op->type = OP_MEM;
switch (ctxt->ad_bytes) {
case 2:
op->addr.mem.ea = insn_fetch(u16, ctxt);
break;
case 4:
op->addr.mem.ea = insn_fetch(u32, ctxt);
break;
case 8:
op->addr.mem.ea = insn_fetch(u64, ctxt);
break;
}
done:
return rc;
}
static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
{
long sv = 0, mask;
if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
mask = ~((long)ctxt->dst.bytes * 8 - 1);
if (ctxt->src.bytes == 2)
sv = (s16)ctxt->src.val & (s16)mask;
else if (ctxt->src.bytes == 4)
sv = (s32)ctxt->src.val & (s32)mask;
else
sv = (s64)ctxt->src.val & (s64)mask;
ctxt->dst.addr.mem.ea = address_mask(ctxt,
ctxt->dst.addr.mem.ea + (sv >> 3));
}
/* only subword offset */
ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
}
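/*
 * Worked example for fetch_bit_operand() above (illustrative): for a
 * memory-destination bit op with dst.bytes == 4 and src.val == 100,
 * mask = ~31, so sv = 100 & ~31 = 96 and the effective address moves
 * forward by 96 >> 3 = 12 bytes, while src.val &= 31 leaves bit index 4
 * within the newly addressed dword.
 */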
static int read_emulated(struct x86_emulate_ctxt *ctxt,
unsigned long addr, void *dest, unsigned size)
{
int rc;
struct read_cache *mc = &ctxt->mem_read;
if (mc->pos < mc->end)
goto read_cached;
WARN_ON((mc->end + size) >= sizeof(mc->data));
rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
&ctxt->exception);
if (rc != X86EMUL_CONTINUE)
return rc;
mc->end += size;
read_cached:
memcpy(dest, mc->data + mc->pos, size);
mc->pos += size;
return X86EMUL_CONTINUE;
}
static int segmented_read(struct x86_emulate_ctxt *ctxt,
struct segmented_address addr,
void *data,
unsigned size)
{
int rc;
ulong linear;
rc = linearize(ctxt, addr, size, false, &linear);
if (rc != X86EMUL_CONTINUE)
return rc;
return read_emulated(ctxt, linear, data, size);
}
static int segmented_write(struct x86_emulate_ctxt *ctxt,
struct segmented_address addr,
const void *data,
unsigned size)
{
int rc;
ulong linear;
rc = linearize(ctxt, addr, size, true, &linear);
if (rc != X86EMUL_CONTINUE)
return rc;
return ctxt->ops->write_emulated(ctxt, linear, data, size,
&ctxt->exception);
}
static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
struct segmented_address addr,
const void *orig_data, const void *data,
unsigned size)
{
int rc;
ulong linear;
rc = linearize(ctxt, addr, size, true, &linear);
if (rc != X86EMUL_CONTINUE)
return rc;
return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
size, &ctxt->exception);
}
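/*
 * Batch REP INS by reading ahead up to a page (bounded by the cache size
 * and the remaining count) from the port in a single call; later
 * iterations are then served from the io_read cache. Returns 0 if the PIO
 * access must first be completed in userspace, 1 on success.
 */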
static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
unsigned int size, unsigned short port,
void *dest)
{
struct read_cache *rc = &ctxt->io_read;
if (rc->pos == rc->end) { /* refill pio read ahead */
unsigned int in_page, n;
unsigned int count = ctxt->rep_prefix ?
address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
in_page = (ctxt->eflags & X86_EFLAGS_DF) ?
offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
if (n == 0)
n = 1;
rc->pos = rc->end = 0;
if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
return 0;
rc->end = n * size;
}
if (ctxt->rep_prefix && (ctxt->d & String) &&
!(ctxt->eflags & X86_EFLAGS_DF)) {
ctxt->dst.data = rc->data + rc->pos;
ctxt->dst.type = OP_MEM_STR;
ctxt->dst.count = (rc->end - rc->pos) / size;
rc->pos = rc->end;
} else {
memcpy(dest, rc->data + rc->pos, size);
rc->pos += size;
}
return 1;
}
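/*
 * Read an 8-byte gate descriptor from the IDT; a reference past the IDT
 * limit raises #GP with the IDT-sourced error code (index << 3 | 2).
 */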
static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
u16 index, struct desc_struct *desc)
{
struct desc_ptr dt;
ulong addr;
ctxt->ops->get_idt(ctxt, &dt);
if (dt.size < index * 8 + 7)
return emulate_gp(ctxt, index << 3 | 0x2);
addr = dt.address + index * 8;
return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
&ctxt->exception);
}
static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
u16 selector, struct desc_ptr *dt)
{
const struct x86_emulate_ops *ops = ctxt->ops;
u32 base3 = 0;
if (selector & 1 << 2) {
struct desc_struct desc;
u16 sel;
memset (dt, 0, sizeof *dt);
if (!ops->get_segment(ctxt, &sel, &desc, &base3,
VCPU_SREG_LDTR))
return;
dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
} else
ops->get_gdt(ctxt, dt);
}
static int get_descriptor_ptr(struct x86_emulate_ctxt *ctxt,
u16 selector, ulong *desc_addr_p)
{
struct desc_ptr dt;
u16 index = selector >> 3;
ulong addr;
get_descriptor_table_ptr(ctxt, selector, &dt);
if (dt.size < index * 8 + 7)
return emulate_gp(ctxt, selector & 0xfffc);
addr = dt.address + index * 8;
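/*
 * Outside long mode (EFER.LMA clear) the upper 32 bits of the
 * descriptor-table base are ignored, so truncate the address.
 */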
#ifdef CONFIG_X86_64
if (addr >> 32 != 0) {
u64 efer = 0;
ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
if (!(efer & EFER_LMA))
addr &= (u32)-1;
}
#endif
*desc_addr_p = addr;
return X86EMUL_CONTINUE;
}
/* Allowed only for 8-byte segment descriptors */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
u16 selector, struct desc_struct *desc,
ulong *desc_addr_p)
{
int rc;
rc = get_descriptor_ptr(ctxt, selector, desc_addr_p);
if (rc != X86EMUL_CONTINUE)
return rc;
return ctxt->ops->read_std(ctxt, *desc_addr_p, desc, sizeof(*desc),
&ctxt->exception);
}
/* Allowed only for 8-byte segment descriptors */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
u16 selector, struct desc_struct *desc)
{
int rc;
ulong addr;
rc = get_descriptor_ptr(ctxt, selector, &addr);
if (rc != X86EMUL_CONTINUE)
return rc;
return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
&ctxt->exception);
}
/* Does not support long mode */
static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
u16 selector, int seg, u8 cpl,
enum x86_transfer_type transfer,
struct desc_struct *desc)
{
struct desc_struct seg_desc, old_desc;
u8 dpl, rpl;
unsigned err_vec = GP_VECTOR;
u32 err_code = 0;
bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
ulong desc_addr;
int ret;
u16 dummy;
u32 base3 = 0;
memset(&seg_desc, 0, sizeof seg_desc);
if (ctxt->mode == X86EMUL_MODE_REAL) {
/* set real mode segment descriptor (keep limit etc. for
* unreal mode) */
ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
set_desc_base(&seg_desc, selector << 4);
goto load;
} else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
/* VM86 needs a clean new segment descriptor */
set_desc_base(&seg_desc, selector << 4);
set_desc_limit(&seg_desc, 0xffff);
seg_desc.type = 3;
seg_desc.p = 1;
seg_desc.s = 1;
seg_desc.dpl = 3;
goto load;
}
rpl = selector & 3;
/* NULL selector is not valid for TR, CS and SS (except SS in long mode) */
if ((seg == VCPU_SREG_CS
|| (seg == VCPU_SREG_SS
&& (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl))
|| seg == VCPU_SREG_TR)
&& null_selector)
goto exception;
/* TR should be in GDT only */
if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
goto exception;
if (null_selector) /* for NULL selector skip all following checks */
goto load;
ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
if (ret != X86EMUL_CONTINUE)
return ret;
err_code = selector & 0xfffc;
err_vec = (transfer == X86_TRANSFER_TASK_SWITCH) ? TS_VECTOR :
GP_VECTOR;
/* can't load a system descriptor into a segment register */
if (seg <= VCPU_SREG_GS && !seg_desc.s) {
if (transfer == X86_TRANSFER_CALL_JMP)
return X86EMUL_UNHANDLEABLE;
goto exception;
}
if (!seg_desc.p) {
err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
goto exception;
}
dpl = seg_desc.dpl;
switch (seg) {
case VCPU_SREG_SS:
/*
* segment is not a writable data segment, or the segment
* selector's RPL != CPL, or the descriptor's DPL != CPL
*/
if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
goto exception;
break;
case VCPU_SREG_CS:
if (!(seg_desc.type & 8))
goto exception;
if (seg_desc.type & 4) {
/* conforming */
if (dpl > cpl)
goto exception;
} else {
/* nonconforming */
if (rpl > cpl || dpl != cpl)
goto exception;
}
/* in long-mode d/b must be clear if l is set */
if (seg_desc.d && seg_desc.l) {
u64 efer = 0;
ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
if (efer & EFER_LMA)
goto exception;
}
/* CS(RPL) <- CPL */
selector = (selector & 0xfffc) | cpl;
break;
case VCPU_SREG_TR:
if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
goto exception;
old_desc = seg_desc;
seg_desc.type |= 2; /* busy */
ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
sizeof(seg_desc), &ctxt->exception);
if (ret != X86EMUL_CONTINUE)
return ret;
break;
case VCPU_SREG_LDTR:
if (seg_desc.s || seg_desc.type != 2)
goto exception;
break;
default: /* DS, ES, FS, or GS */
/*
* segment is not a data or readable code segment or
* ((segment is a data or nonconforming code segment)
* and (both RPL and CPL > DPL))
*/
if ((seg_desc.type & 0xa) == 0x8 ||
(((seg_desc.type & 0xc) != 0xc) &&
(rpl > dpl && cpl > dpl)))
goto exception;
break;
}
if (seg_desc.s) {
/* mark segment as accessed */
if (!(seg_desc.type & 1)) {
seg_desc.type |= 1;
ret = write_segment_descriptor(ctxt, selector,
&seg_desc);
if (ret != X86EMUL_CONTINUE)
return ret;
}
} else if (ctxt->mode == X86EMUL_MODE_PROT64) {
ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3,
sizeof(base3), &ctxt->exception);
if (ret != X86EMUL_CONTINUE)
return ret;
if (is_noncanonical_address(get_desc_base(&seg_desc) |
((u64)base3 << 32)))
return emulate_gp(ctxt, 0);
}
load:
ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
if (desc)
*desc = seg_desc;
return X86EMUL_CONTINUE;
exception:
return emulate_exception(ctxt, err_vec, err_code, true);
}
static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
u16 selector, int seg)
{
u8 cpl = ctxt->ops->cpl(ctxt);
return __load_segment_descriptor(ctxt, selector, seg, cpl,
X86_TRANSFER_NONE, NULL);
}
static void write_register_operand(struct operand *op)
{
return assign_register(op->addr.reg, op->val, op->bytes);
}
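/*
 * Commit the instruction's result to its destination. A LOCK prefix on a
 * memory destination is implemented as an emulated cmpxchg against the
 * original value, so the store only succeeds if memory was not modified
 * concurrently.
 */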
static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
{
switch (op->type) {
case OP_REG:
write_register_operand(op);
break;
case OP_MEM:
if (ctxt->lock_prefix)
return segmented_cmpxchg(ctxt,
op->addr.mem,
&op->orig_val,
&op->val,
op->bytes);
else
return segmented_write(ctxt,
op->addr.mem,
&op->val,
op->bytes);
break;
case OP_MEM_STR:
return segmented_write(ctxt,
op->addr.mem,
op->data,
op->bytes * op->count);
break;
case OP_XMM:
write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
break;
case OP_MM:
write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
break;
case OP_NONE:
/* no writeback */
break;
default:
break;
}
return X86EMUL_CONTINUE;
}
static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
{
struct segmented_address addr;
rsp_increment(ctxt, -bytes);
addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
addr.seg = VCPU_SREG_SS;
return segmented_write(ctxt, addr, data, bytes);
}
static int em_push(struct x86_emulate_ctxt *ctxt)
{
/* Disable writeback. */
ctxt->dst.type = OP_NONE;
return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
}
static int emulate_pop(struct x86_emulate_ctxt *ctxt,
void *dest, int len)
{
int rc;
struct segmented_address addr;
addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
addr.seg = VCPU_SREG_SS;
rc = segmented_read(ctxt, addr, dest, len);
if (rc != X86EMUL_CONTINUE)
return rc;
rsp_increment(ctxt, len);
return rc;
}
static int em_pop(struct x86_emulate_ctxt *ctxt)
{
return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}
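/*
 * POPF may change only a mode-dependent subset of EFLAGS: IOPL is
 * writable only at CPL 0, IF only when CPL <= IOPL, and in VM86 mode
 * POPF with IOPL < 3 raises #GP.
 */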
static int emulate_popf(struct x86_emulate_ctxt *ctxt,
void *dest, int len)
{
int rc;
unsigned long val, change_mask;
int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
int cpl = ctxt->ops->cpl(ctxt);
rc = emulate_pop(ctxt, &val, len);
if (rc != X86EMUL_CONTINUE)
return rc;
change_mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF |
X86_EFLAGS_TF | X86_EFLAGS_DF | X86_EFLAGS_NT |
X86_EFLAGS_AC | X86_EFLAGS_ID;
switch(ctxt->mode) {
case X86EMUL_MODE_PROT64:
case X86EMUL_MODE_PROT32:
case X86EMUL_MODE_PROT16:
if (cpl == 0)
change_mask |= X86_EFLAGS_IOPL;
if (cpl <= iopl)
change_mask |= X86_EFLAGS_IF;
break;
case X86EMUL_MODE_VM86:
if (iopl < 3)
return emulate_gp(ctxt, 0);
change_mask |= X86_EFLAGS_IF;
break;
default: /* real mode */
change_mask |= (X86_EFLAGS_IOPL | X86_EFLAGS_IF);
break;
}
*(unsigned long *)dest =
(ctxt->eflags & ~change_mask) | (val & change_mask);
return rc;
}
static int em_popf(struct x86_emulate_ctxt *ctxt)
{
ctxt->dst.type = OP_REG;
ctxt->dst.addr.reg = &ctxt->eflags;
ctxt->dst.bytes = ctxt->op_bytes;
return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}
static int em_enter(struct x86_emulate_ctxt *ctxt)
{
int rc;
unsigned frame_size = ctxt->src.val;
unsigned nesting_level = ctxt->src2.val & 31;
ulong rbp;
if (nesting_level)
return X86EMUL_UNHANDLEABLE;
rbp = reg_read(ctxt, VCPU_REGS_RBP);
rc = push(ctxt, &rbp, stack_size(ctxt));
if (rc != X86EMUL_CONTINUE)
return rc;
assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
stack_mask(ctxt));
assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
stack_mask(ctxt));
return X86EMUL_CONTINUE;
}
static int em_leave(struct x86_emulate_ctxt *ctxt)
{
assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
stack_mask(ctxt));
return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
}
static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
{
int seg = ctxt->src2.val;
ctxt->src.val = get_segment_selector(ctxt, seg);
if (ctxt->op_bytes == 4) {
rsp_increment(ctxt, -2);
ctxt->op_bytes = 2;
}
return em_push(ctxt);
}
static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
{
int seg = ctxt->src2.val;
unsigned long selector;
int rc;
rc = emulate_pop(ctxt, &selector, 2);
if (rc != X86EMUL_CONTINUE)
return rc;
if (ctxt->modrm_reg == VCPU_SREG_SS)
ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
if (ctxt->op_bytes > 2)
rsp_increment(ctxt, ctxt->op_bytes - 2);
rc = load_segment_descriptor(ctxt, (u16)selector, seg);
return rc;
}
static int em_pusha(struct x86_emulate_ctxt *ctxt)
{
unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
int rc = X86EMUL_CONTINUE;
int reg = VCPU_REGS_RAX;
while (reg <= VCPU_REGS_RDI) {
(reg == VCPU_REGS_RSP) ?
(ctxt->src.val = old_esp) : (ctxt->src.val = reg_read(ctxt, reg));
rc = em_push(ctxt);
if (rc != X86EMUL_CONTINUE)
return rc;
++reg;
}
return rc;
}
static int em_pushf(struct x86_emulate_ctxt *ctxt)
{
ctxt->src.val = (unsigned long)ctxt->eflags & ~X86_EFLAGS_VM;
return em_push(ctxt);
}
static int em_popa(struct x86_emulate_ctxt *ctxt)
{
int rc = X86EMUL_CONTINUE;
int reg = VCPU_REGS_RDI;
u32 val;
while (reg >= VCPU_REGS_RAX) {
if (reg == VCPU_REGS_RSP) {
rsp_increment(ctxt, ctxt->op_bytes);
--reg;
}
rc = emulate_pop(ctxt, &val, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
break;
assign_register(reg_rmw(ctxt, reg), val, ctxt->op_bytes);
--reg;
}
return rc;
}
static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
const struct x86_emulate_ops *ops = ctxt->ops;
int rc;
struct desc_ptr dt;
gva_t cs_addr;
gva_t eip_addr;
u16 cs, eip;
/* TODO: Add limit checks */
ctxt->src.val = ctxt->eflags;
rc = em_push(ctxt);
if (rc != X86EMUL_CONTINUE)
return rc;
ctxt->eflags &= ~(X86_EFLAGS_IF | X86_EFLAGS_TF | X86_EFLAGS_AC);
ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
rc = em_push(ctxt);
if (rc != X86EMUL_CONTINUE)
return rc;
ctxt->src.val = ctxt->_eip;
rc = em_push(ctxt);
if (rc != X86EMUL_CONTINUE)
return rc;
ops->get_idt(ctxt, &dt);
eip_addr = dt.address + (irq << 2);
cs_addr = dt.address + (irq << 2) + 2;
rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
if (rc != X86EMUL_CONTINUE)
return rc;
rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
if (rc != X86EMUL_CONTINUE)
return rc;
rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
if (rc != X86EMUL_CONTINUE)
return rc;
ctxt->_eip = eip;
return rc;
}
int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
int rc;
invalidate_registers(ctxt);
rc = __emulate_int_real(ctxt, irq);
if (rc == X86EMUL_CONTINUE)
writeback_registers(ctxt);
return rc;
}
static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
{
switch(ctxt->mode) {
case X86EMUL_MODE_REAL:
return __emulate_int_real(ctxt, irq);
case X86EMUL_MODE_VM86:
case X86EMUL_MODE_PROT16:
case X86EMUL_MODE_PROT32:
case X86EMUL_MODE_PROT64:
default:
/* Protected mode interrupts are not yet implemented */
return X86EMUL_UNHANDLEABLE;
}
}
static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
{
int rc = X86EMUL_CONTINUE;
unsigned long temp_eip = 0;
unsigned long temp_eflags = 0;
unsigned long cs = 0;
unsigned long mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_TF |
X86_EFLAGS_IF | X86_EFLAGS_DF | X86_EFLAGS_OF |
X86_EFLAGS_IOPL | X86_EFLAGS_NT | X86_EFLAGS_RF |
X86_EFLAGS_AC | X86_EFLAGS_ID |
X86_EFLAGS_FIXED;
unsigned long vm86_mask = X86_EFLAGS_VM | X86_EFLAGS_VIF |
X86_EFLAGS_VIP;
/* TODO: Add stack limit check */
rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
return rc;
if (temp_eip & ~0xffff)
return emulate_gp(ctxt, 0);
rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
return rc;
rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
return rc;
rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
if (rc != X86EMUL_CONTINUE)
return rc;
ctxt->_eip = temp_eip;
if (ctxt->op_bytes == 4)
ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
else if (ctxt->op_bytes == 2) {
ctxt->eflags &= ~0xffff;
ctxt->eflags |= temp_eflags;
}
ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
ctxt->eflags |= X86_EFLAGS_FIXED;
ctxt->ops->set_nmi_mask(ctxt, false);
return rc;
}
static int em_iret(struct x86_emulate_ctxt *ctxt)
{
switch(ctxt->mode) {
case X86EMUL_MODE_REAL:
return emulate_iret_real(ctxt);
case X86EMUL_MODE_VM86:
case X86EMUL_MODE_PROT16:
case X86EMUL_MODE_PROT32:
case X86EMUL_MODE_PROT64:
default:
/* iret from protected mode is not yet implemented */
return X86EMUL_UNHANDLEABLE;
}
}
static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
{
int rc;
unsigned short sel;
struct desc_struct new_desc;
u8 cpl = ctxt->ops->cpl(ctxt);
memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
X86_TRANSFER_CALL_JMP,
&new_desc);
if (rc != X86EMUL_CONTINUE)
return rc;
rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
/* Error handling is not implemented. */
if (rc != X86EMUL_CONTINUE)
return X86EMUL_UNHANDLEABLE;
return rc;
}
static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
{
return assign_eip_near(ctxt, ctxt->src.val);
}
static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
{
int rc;
long int old_eip;
old_eip = ctxt->_eip;
rc = assign_eip_near(ctxt, ctxt->src.val);
if (rc != X86EMUL_CONTINUE)
return rc;
ctxt->src.val = old_eip;
rc = em_push(ctxt);
return rc;
}
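/*
 * CMPXCHG8B: if EDX:EAX matches the 64-bit destination, store ECX:EBX
 * there and set ZF; otherwise load the destination into EDX:EAX and
 * clear ZF. CMPXCHG16B (dst.bytes == 16) is not handled here.
 */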
static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
{
u64 old = ctxt->dst.orig_val64;
if (ctxt->dst.bytes == 16)
return X86EMUL_UNHANDLEABLE;
if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
*reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
*reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
ctxt->eflags &= ~X86_EFLAGS_ZF;
} else {
ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
(u32) reg_read(ctxt, VCPU_REGS_RBX);
ctxt->eflags |= X86_EFLAGS_ZF;
}
return X86EMUL_CONTINUE;
}
static int em_ret(struct x86_emulate_ctxt *ctxt)
{
int rc;
unsigned long eip;
rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
return rc;
return assign_eip_near(ctxt, eip);
}
static int em_ret_far(struct x86_emulate_ctxt *ctxt)
{
int rc;
unsigned long eip, cs;
int cpl = ctxt->ops->cpl(ctxt);
struct desc_struct new_desc;
rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
return rc;
rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
return rc;
/* Outer-privilege level return is not implemented */
if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
return X86EMUL_UNHANDLEABLE;
rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl,
X86_TRANSFER_RET,
&new_desc);
if (rc != X86EMUL_CONTINUE)
return rc;
rc = assign_eip_far(ctxt, eip, &new_desc);
/* Error handling is not implemented. */
if (rc != X86EMUL_CONTINUE)
return X86EMUL_UNHANDLEABLE;
return rc;
}
static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
{
int rc;
rc = em_ret_far(ctxt);
if (rc != X86EMUL_CONTINUE)
return rc;
rsp_increment(ctxt, ctxt->src.val);
return X86EMUL_CONTINUE;
}
static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
{
/* Save real source value, then compare EAX against destination. */
ctxt->dst.orig_val = ctxt->dst.val;
ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
ctxt->src.orig_val = ctxt->src.val;
ctxt->src.val = ctxt->dst.orig_val;
fastop(ctxt, em_cmp);
if (ctxt->eflags & X86_EFLAGS_ZF) {
/* Success: write back to memory; no update of EAX */
ctxt->src.type = OP_NONE;
ctxt->dst.val = ctxt->src.orig_val;
} else {
/* Failure: write the value we saw to EAX. */
ctxt->src.type = OP_REG;
ctxt->src.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
ctxt->src.val = ctxt->dst.orig_val;
/* Create write-cycle to dest by writing the same value */
ctxt->dst.val = ctxt->dst.orig_val;
}
return X86EMUL_CONTINUE;
}
static int em_lseg(struct x86_emulate_ctxt *ctxt)
{
int seg = ctxt->src2.val;
unsigned short sel;
int rc;
memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
rc = load_segment_descriptor(ctxt, sel, seg);
if (rc != X86EMUL_CONTINUE)
return rc;
ctxt->dst.val = ctxt->src.val;
return rc;
}
static int emulator_has_longmode(struct x86_emulate_ctxt *ctxt)
{
u32 eax, ebx, ecx, edx;
eax = 0x80000001;
ecx = 0;
ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
return edx & bit(X86_FEATURE_LM);
}
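/*
 * Fetch a value of the given type from the SMRAM save state area. This is
 * a statement expression that returns X86EMUL_UNHANDLEABLE from the
 * enclosing function if the physical read fails.
 */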
#define GET_SMSTATE(type, smbase, offset) \
({ \
type __val; \
int r = ctxt->ops->read_phys(ctxt, smbase + offset, &__val, \
sizeof(__val)); \
if (r != X86EMUL_CONTINUE) \
return X86EMUL_UNHANDLEABLE; \
__val; \
})
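/*
 * SMRAM stores segment attributes in the layout of the high dword of a
 * descriptor; unpack the individual fields into a desc_struct.
 */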
static void rsm_set_desc_flags(struct desc_struct *desc, u32 flags)
{
desc->g = (flags >> 23) & 1;
desc->d = (flags >> 22) & 1;
desc->l = (flags >> 21) & 1;
desc->avl = (flags >> 20) & 1;
desc->p = (flags >> 15) & 1;
desc->dpl = (flags >> 13) & 3;
desc->s = (flags >> 12) & 1;
desc->type = (flags >> 8) & 15;
}
static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
{
struct desc_struct desc;
int offset;
u16 selector;
selector = GET_SMSTATE(u32, smbase, 0x7fa8 + n * 4);
if (n < 3)
offset = 0x7f84 + n * 12;
else
offset = 0x7f2c + (n - 3) * 12;
set_desc_base(&desc, GET_SMSTATE(u32, smbase, offset + 8));
set_desc_limit(&desc, GET_SMSTATE(u32, smbase, offset + 4));
rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, offset));
ctxt->ops->set_segment(ctxt, selector, &desc, 0, n);
return X86EMUL_CONTINUE;
}
static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
{
struct desc_struct desc;
int offset;
u16 selector;
u32 base3;
offset = 0x7e00 + n * 16;
selector = GET_SMSTATE(u16, smbase, offset);
rsm_set_desc_flags(&desc, GET_SMSTATE(u16, smbase, offset + 2) << 8);
set_desc_limit(&desc, GET_SMSTATE(u32, smbase, offset + 4));
set_desc_base(&desc, GET_SMSTATE(u32, smbase, offset + 8));
base3 = GET_SMSTATE(u32, smbase, offset + 12);
ctxt->ops->set_segment(ctxt, selector, &desc, base3, n);
return X86EMUL_CONTINUE;
}
static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
u64 cr0, u64 cr4)
{
int bad;
/*
* First enable PAE, long mode needs it before CR0.PG = 1 is set.
* Then enable protected mode. However, PCID cannot be enabled
* if EFER.LMA=0, so set it separately.
*/
bad = ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
if (bad)
return X86EMUL_UNHANDLEABLE;
bad = ctxt->ops->set_cr(ctxt, 0, cr0);
if (bad)
return X86EMUL_UNHANDLEABLE;
if (cr4 & X86_CR4_PCIDE) {
bad = ctxt->ops->set_cr(ctxt, 4, cr4);
if (bad)
return X86EMUL_UNHANDLEABLE;
}
return X86EMUL_CONTINUE;
}
static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase)
{
struct desc_struct desc;
struct desc_ptr dt;
u16 selector;
u32 val, cr0, cr4;
int i;
cr0 = GET_SMSTATE(u32, smbase, 0x7ffc);
ctxt->ops->set_cr(ctxt, 3, GET_SMSTATE(u32, smbase, 0x7ff8));
ctxt->eflags = GET_SMSTATE(u32, smbase, 0x7ff4) | X86_EFLAGS_FIXED;
ctxt->_eip = GET_SMSTATE(u32, smbase, 0x7ff0);
for (i = 0; i < 8; i++)
*reg_write(ctxt, i) = GET_SMSTATE(u32, smbase, 0x7fd0 + i * 4);
val = GET_SMSTATE(u32, smbase, 0x7fcc);
ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
val = GET_SMSTATE(u32, smbase, 0x7fc8);
ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
selector = GET_SMSTATE(u32, smbase, 0x7fc4);
set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7f64));
set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7f60));
rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7f5c));
ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_TR);
selector = GET_SMSTATE(u32, smbase, 0x7fc0);
set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7f80));
set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7f7c));
rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7f78));
ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_LDTR);
dt.address = GET_SMSTATE(u32, smbase, 0x7f74);
dt.size = GET_SMSTATE(u32, smbase, 0x7f70);
ctxt->ops->set_gdt(ctxt, &dt);
dt.address = GET_SMSTATE(u32, smbase, 0x7f58);
dt.size = GET_SMSTATE(u32, smbase, 0x7f54);
ctxt->ops->set_idt(ctxt, &dt);
for (i = 0; i < 6; i++) {
int r = rsm_load_seg_32(ctxt, smbase, i);
if (r != X86EMUL_CONTINUE)
return r;
}
cr4 = GET_SMSTATE(u32, smbase, 0x7f14);
ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7ef8));
return rsm_enter_protected_mode(ctxt, cr0, cr4);
}
static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
{
struct desc_struct desc;
struct desc_ptr dt;
u64 val, cr0, cr4;
u32 base3;
u16 selector;
int i, r;
for (i = 0; i < 16; i++)
*reg_write(ctxt, i) = GET_SMSTATE(u64, smbase, 0x7ff8 - i * 8);
ctxt->_eip = GET_SMSTATE(u64, smbase, 0x7f78);
ctxt->eflags = GET_SMSTATE(u32, smbase, 0x7f70) | X86_EFLAGS_FIXED;
val = GET_SMSTATE(u32, smbase, 0x7f68);
ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
val = GET_SMSTATE(u32, smbase, 0x7f60);
ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
cr0 = GET_SMSTATE(u64, smbase, 0x7f58);
ctxt->ops->set_cr(ctxt, 3, GET_SMSTATE(u64, smbase, 0x7f50));
cr4 = GET_SMSTATE(u64, smbase, 0x7f48);
ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7f00));
val = GET_SMSTATE(u64, smbase, 0x7ed0);
ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA);
selector = GET_SMSTATE(u32, smbase, 0x7e90);
rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7e92) << 8);
set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7e94));
set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7e98));
base3 = GET_SMSTATE(u32, smbase, 0x7e9c);
ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_TR);
dt.size = GET_SMSTATE(u32, smbase, 0x7e84);
dt.address = GET_SMSTATE(u64, smbase, 0x7e88);
ctxt->ops->set_idt(ctxt, &dt);
selector = GET_SMSTATE(u32, smbase, 0x7e70);
rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7e72) << 8);
set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7e74));
set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7e78));
base3 = GET_SMSTATE(u32, smbase, 0x7e7c);
ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_LDTR);
dt.size = GET_SMSTATE(u32, smbase, 0x7e64);
dt.address = GET_SMSTATE(u64, smbase, 0x7e68);
ctxt->ops->set_gdt(ctxt, &dt);
r = rsm_enter_protected_mode(ctxt, cr0, cr4);
if (r != X86EMUL_CONTINUE)
return r;
for (i = 0; i < 6; i++) {
r = rsm_load_seg_64(ctxt, smbase, i);
if (r != X86EMUL_CONTINUE)
return r;
}
return X86EMUL_CONTINUE;
}
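/*
 * RSM: leave System Management Mode. CR0/CR4/EFER are first forced into a
 * safe 32-bit, paging-off state, then the saved image at SMBASE + 0x8000
 * is reloaded using the 32-bit or 64-bit layout as appropriate.
 */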
static int em_rsm(struct x86_emulate_ctxt *ctxt)
{
unsigned long cr0, cr4, efer;
u64 smbase;
int ret;
if ((ctxt->emul_flags & X86EMUL_SMM_MASK) == 0)
return emulate_ud(ctxt);
/*
* Get back to real mode, to prepare a safe state in which to load
* CR0/CR3/CR4/EFER. It's all a bit more complicated if the vCPU
* supports long mode.
*/
cr4 = ctxt->ops->get_cr(ctxt, 4);
if (emulator_has_longmode(ctxt)) {
struct desc_struct cs_desc;
/* Zero CR4.PCIDE before CR0.PG. */
if (cr4 & X86_CR4_PCIDE) {
ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
cr4 &= ~X86_CR4_PCIDE;
}
/* A 32-bit code segment is required to clear EFER.LMA. */
memset(&cs_desc, 0, sizeof(cs_desc));
cs_desc.type = 0xb;
cs_desc.s = cs_desc.g = cs_desc.p = 1;
ctxt->ops->set_segment(ctxt, 0, &cs_desc, 0, VCPU_SREG_CS);
}
/* For the 64-bit case, this will clear EFER.LMA. */
cr0 = ctxt->ops->get_cr(ctxt, 0);
if (cr0 & X86_CR0_PE)
ctxt->ops->set_cr(ctxt, 0, cr0 & ~(X86_CR0_PG | X86_CR0_PE));
/* Now clear CR4.PAE (which must be done before clearing EFER.LME). */
if (cr4 & X86_CR4_PAE)
ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);
/* And finally go back to 32-bit mode. */
efer = 0;
ctxt->ops->set_msr(ctxt, MSR_EFER, efer);
smbase = ctxt->ops->get_smbase(ctxt);
if (emulator_has_longmode(ctxt))
ret = rsm_load_state_64(ctxt, smbase + 0x8000);
else
ret = rsm_load_state_32(ctxt, smbase + 0x8000);
if (ret != X86EMUL_CONTINUE) {
/* FIXME: should triple fault */
return X86EMUL_UNHANDLEABLE;
}
if ((ctxt->emul_flags & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
ctxt->ops->set_nmi_mask(ctxt, false);
ctxt->emul_flags &= ~X86EMUL_SMM_INSIDE_NMI_MASK;
ctxt->emul_flags &= ~X86EMUL_SMM_MASK;
return X86EMUL_CONTINUE;
}
static void
setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
struct desc_struct *cs, struct desc_struct *ss)
{
cs->l = 0; /* will be adjusted later */
set_desc_base(cs, 0); /* flat segment */
cs->g = 1; /* 4kb granularity */
set_desc_limit(cs, 0xfffff); /* 4GB limit */
cs->type = 0x0b; /* Read, Execute, Accessed */
cs->s = 1;
cs->dpl = 0; /* will be adjusted later */
cs->p = 1;
cs->d = 1;
cs->avl = 0;
set_desc_base(ss, 0); /* flat segment */
set_desc_limit(ss, 0xfffff); /* 4GB limit */
ss->g = 1; /* 4kb granularity */
ss->s = 1;
ss->type = 0x03; /* Read/Write, Accessed */
ss->d = 1; /* 32bit stack segment */
ss->dpl = 0;
ss->p = 1;
ss->l = 0;
ss->avl = 0;
}
static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
{
u32 eax, ebx, ecx, edx;
eax = ecx = 0;
ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
&& ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
&& edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
}
static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
{
const struct x86_emulate_ops *ops = ctxt->ops;
u32 eax, ebx, ecx, edx;
/*
* syscall should always be enabled in long mode, so the check only
* needs to become vendor-specific (via cpuid) when other modes are
* active...
*/
if (ctxt->mode == X86EMUL_MODE_PROT64)
return true;
eax = 0x00000000;
ecx = 0x00000000;
ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
/*
* Intel ("GenuineIntel")
* Note: Intel CPUs only support "syscall" in 64-bit long mode;
* even a 32-bit compat application on a 64-bit guest will #UD.
* While this behaviour could be fixed by emulating the AMD
* response, AMD CPUs cannot be made to behave like Intel ones.
*/
if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
return false;
/* AMD ("AuthenticAMD") */
if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
return true;
/* AMD ("AMDisbetter!") */
if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
return true;
/* default: (not Intel, not AMD), apply Intel's stricter rules... */
return false;
}
static int em_syscall(struct x86_emulate_ctxt *ctxt)
{
const struct x86_emulate_ops *ops = ctxt->ops;
struct desc_struct cs, ss;
u64 msr_data;
u16 cs_sel, ss_sel;
u64 efer = 0;
/* syscall is not available in real mode */
if (ctxt->mode == X86EMUL_MODE_REAL ||
ctxt->mode == X86EMUL_MODE_VM86)
return emulate_ud(ctxt);
if (!(em_syscall_is_enabled(ctxt)))
return emulate_ud(ctxt);
ops->get_msr(ctxt, MSR_EFER, &efer);
setup_syscalls_segments(ctxt, &cs, &ss);
if (!(efer & EFER_SCE))
return emulate_ud(ctxt);
ops->get_msr(ctxt, MSR_STAR, &msr_data);
msr_data >>= 32;
cs_sel = (u16)(msr_data & 0xfffc);
ss_sel = (u16)(msr_data + 8);
if (efer & EFER_LMA) {
cs.d = 0;
cs.l = 1;
}
ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
*reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
if (efer & EFER_LMA) {
#ifdef CONFIG_X86_64
*reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;
ops->get_msr(ctxt,
ctxt->mode == X86EMUL_MODE_PROT64 ?
MSR_LSTAR : MSR_CSTAR, &msr_data);
ctxt->_eip = msr_data;
ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
ctxt->eflags &= ~msr_data;
ctxt->eflags |= X86_EFLAGS_FIXED;
#endif
} else {
/* legacy mode */
ops->get_msr(ctxt, MSR_STAR, &msr_data);
ctxt->_eip = (u32)msr_data;
ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
}
return X86EMUL_CONTINUE;
}
static int em_sysenter(struct x86_emulate_ctxt *ctxt)
{
const struct x86_emulate_ops *ops = ctxt->ops;
struct desc_struct cs, ss;
u64 msr_data;
u16 cs_sel, ss_sel;
u64 efer = 0;
ops->get_msr(ctxt, MSR_EFER, &efer);
/* inject #GP if in real mode */
if (ctxt->mode == X86EMUL_MODE_REAL)
return emulate_gp(ctxt, 0);
/*
* Not recognized on AMD in compat mode (but is recognized in legacy
* mode).
*/
if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA)
&& !vendor_intel(ctxt))
return emulate_ud(ctxt);
/* sysenter/sysexit have not been tested in 64bit mode. */
if (ctxt->mode == X86EMUL_MODE_PROT64)
return X86EMUL_UNHANDLEABLE;
setup_syscalls_segments(ctxt, &cs, &ss);
ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
if ((msr_data & 0xfffc) == 0x0)
return emulate_gp(ctxt, 0);
ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
cs_sel = (u16)msr_data & ~SEGMENT_RPL_MASK;
ss_sel = cs_sel + 8;
if (efer & EFER_LMA) {
cs.d = 0;
cs.l = 1;
}
ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;
ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
*reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
(u32)msr_data;
return X86EMUL_CONTINUE;
}
static int em_sysexit(struct x86_emulate_ctxt *ctxt)
{
const struct x86_emulate_ops *ops = ctxt->ops;
struct desc_struct cs, ss;
u64 msr_data, rcx, rdx;
int usermode;
u16 cs_sel = 0, ss_sel = 0;
/* inject #GP if in real mode or Virtual 8086 mode */
if (ctxt->mode == X86EMUL_MODE_REAL ||
ctxt->mode == X86EMUL_MODE_VM86)
return emulate_gp(ctxt, 0);
setup_syscalls_segments(ctxt, &cs, &ss);
if ((ctxt->rex_prefix & 0x8) != 0x0)
usermode = X86EMUL_MODE_PROT64;
else
usermode = X86EMUL_MODE_PROT32;
rcx = reg_read(ctxt, VCPU_REGS_RCX);
rdx = reg_read(ctxt, VCPU_REGS_RDX);
cs.dpl = 3;
ss.dpl = 3;
ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
switch (usermode) {
case X86EMUL_MODE_PROT32:
cs_sel = (u16)(msr_data + 16);
if ((msr_data & 0xfffc) == 0x0)
return emulate_gp(ctxt, 0);
ss_sel = (u16)(msr_data + 24);
rcx = (u32)rcx;
rdx = (u32)rdx;
break;
case X86EMUL_MODE_PROT64:
cs_sel = (u16)(msr_data + 32);
if (msr_data == 0x0)
return emulate_gp(ctxt, 0);
ss_sel = cs_sel + 8;
cs.d = 0;
cs.l = 1;
if (is_noncanonical_address(rcx) ||
is_noncanonical_address(rdx))
return emulate_gp(ctxt, 0);
break;
}
cs_sel |= SEGMENT_RPL_MASK;
ss_sel |= SEGMENT_RPL_MASK;
ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
ctxt->_eip = rdx;
*reg_write(ctxt, VCPU_REGS_RSP) = rcx;
return X86EMUL_CONTINUE;
}
static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
{
int iopl;
if (ctxt->mode == X86EMUL_MODE_REAL)
return false;
if (ctxt->mode == X86EMUL_MODE_VM86)
return true;
iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
return ctxt->ops->cpl(ctxt) > iopl;
}
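/*
 * When the IOPL check fails (CPL > IOPL, or VM86 mode), I/O is permitted
 * only if every bit covering the port range is clear in the TSS I/O
 * permission bitmap, whose offset is stored at byte 102 of the TSS.
 */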
static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
u16 port, u16 len)
{
const struct x86_emulate_ops *ops = ctxt->ops;
struct desc_struct tr_seg;
u32 base3;
int r;
u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
unsigned mask = (1 << len) - 1;
unsigned long base;
ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
if (!tr_seg.p)
return false;
if (desc_limit_scaled(&tr_seg) < 103)
return false;
base = get_desc_base(&tr_seg);
#ifdef CONFIG_X86_64
base |= ((u64)base3) << 32;
#endif
r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
if (r != X86EMUL_CONTINUE)
return false;
if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
return false;
r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
if (r != X86EMUL_CONTINUE)
return false;
if ((perm >> bit_idx) & mask)
return false;
return true;
}
static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
u16 port, u16 len)
{
if (ctxt->perm_ok)
return true;
if (emulator_bad_iopl(ctxt))
if (!emulator_io_port_access_allowed(ctxt, port, len))
return false;
ctxt->perm_ok = true;
return true;
}
static void string_registers_quirk(struct x86_emulate_ctxt *ctxt)
{
/*
* Intel CPUs mask the counter and pointers in a rather strange
* manner when ECX is zero, due to REP-string optimizations.
*/
#ifdef CONFIG_X86_64
if (ctxt->ad_bytes != 4 || !vendor_intel(ctxt))
return;
*reg_write(ctxt, VCPU_REGS_RCX) = 0;
switch (ctxt->b) {
case 0xa4: /* movsb */
case 0xa5: /* movsd/w */
*reg_rmw(ctxt, VCPU_REGS_RSI) &= (u32)-1;
/* fall through */
case 0xaa: /* stosb */
case 0xab: /* stosd/w */
*reg_rmw(ctxt, VCPU_REGS_RDI) &= (u32)-1;
}
#endif
}
static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
struct tss_segment_16 *tss)
{
tss->ip = ctxt->_eip;
tss->flag = ctxt->eflags;
tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
tss->si = reg_read(ctxt, VCPU_REGS_RSI);
tss->di = reg_read(ctxt, VCPU_REGS_RDI);
tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
}
static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
struct tss_segment_16 *tss)
{
int ret;
u8 cpl;
ctxt->_eip = tss->ip;
ctxt->eflags = tss->flag | 2;
*reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
*reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
*reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
*reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
*reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
*reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
*reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
*reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
/*
* SDM says that segment selectors are loaded before segment
* descriptors
*/
set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
cpl = tss->cs & 3;
/*
* Now load the segment descriptors. If a fault happens at this
* stage, it is handled in the context of the new task.
*/
ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
X86_TRANSFER_TASK_SWITCH, NULL);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
X86_TRANSFER_TASK_SWITCH, NULL);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
X86_TRANSFER_TASK_SWITCH, NULL);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
X86_TRANSFER_TASK_SWITCH, NULL);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
X86_TRANSFER_TASK_SWITCH, NULL);
if (ret != X86EMUL_CONTINUE)
return ret;
return X86EMUL_CONTINUE;
}
static int task_switch_16(struct x86_emulate_ctxt *ctxt,
u16 tss_selector, u16 old_tss_sel,
ulong old_tss_base, struct desc_struct *new_desc)
{
const struct x86_emulate_ops *ops = ctxt->ops;
struct tss_segment_16 tss_seg;
int ret;
u32 new_tss_base = get_desc_base(new_desc);
ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
&ctxt->exception);
if (ret != X86EMUL_CONTINUE)
return ret;
save_state_to_tss16(ctxt, &tss_seg);
ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
&ctxt->exception);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
&ctxt->exception);
if (ret != X86EMUL_CONTINUE)
return ret;
if (old_tss_sel != 0xffff) {
tss_seg.prev_task_link = old_tss_sel;
ret = ops->write_std(ctxt, new_tss_base,
&tss_seg.prev_task_link,
sizeof tss_seg.prev_task_link,
&ctxt->exception);
if (ret != X86EMUL_CONTINUE)
return ret;
}
return load_state_from_tss16(ctxt, &tss_seg);
}
static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
struct tss_segment_32 *tss)
{
/* CR3 and the LDT selector are intentionally not saved */
tss->eip = ctxt->_eip;
tss->eflags = ctxt->eflags;
tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
}
static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
struct tss_segment_32 *tss)
{
int ret;
u8 cpl;
if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
return emulate_gp(ctxt, 0);
ctxt->_eip = tss->eip;
ctxt->eflags = tss->eflags | 2;
/* General purpose registers */
*reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
*reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
*reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
*reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
*reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
*reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
*reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
*reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
/*
* SDM says that segment selectors are loaded before segment
* descriptors. This is important because CPL checks will
* use CS.RPL.
*/
set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
/*
* If we're switching between Protected Mode and VM86, we need to make
* sure to update the mode before loading the segment descriptors so
* that the selectors are interpreted correctly.
*/
if (ctxt->eflags & X86_EFLAGS_VM) {
ctxt->mode = X86EMUL_MODE_VM86;
cpl = 3;
} else {
ctxt->mode = X86EMUL_MODE_PROT32;
cpl = tss->cs & 3;
}
/*
* Now load the segment descriptors. If a fault happens at this
* stage, it is handled in the context of the new task.
*/
ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
cpl, X86_TRANSFER_TASK_SWITCH, NULL);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
X86_TRANSFER_TASK_SWITCH, NULL);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
X86_TRANSFER_TASK_SWITCH, NULL);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
X86_TRANSFER_TASK_SWITCH, NULL);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
X86_TRANSFER_TASK_SWITCH, NULL);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
X86_TRANSFER_TASK_SWITCH, NULL);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
X86_TRANSFER_TASK_SWITCH, NULL);
return ret;
}
static int task_switch_32(struct x86_emulate_ctxt *ctxt,
u16 tss_selector, u16 old_tss_sel,
ulong old_tss_base, struct desc_struct *new_desc)
{
const struct x86_emulate_ops *ops = ctxt->ops;
struct tss_segment_32 tss_seg;
int ret;
u32 new_tss_base = get_desc_base(new_desc);
u32 eip_offset = offsetof(struct tss_segment_32, eip);
u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
&ctxt->exception);
if (ret != X86EMUL_CONTINUE)
return ret;
save_state_to_tss32(ctxt, &tss_seg);
/* Only GP registers and segment selectors are saved */
ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
ldt_sel_offset - eip_offset, &ctxt->exception);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
&ctxt->exception);
if (ret != X86EMUL_CONTINUE)
return ret;
if (old_tss_sel != 0xffff) {
tss_seg.prev_task_link = old_tss_sel;
ret = ops->write_std(ctxt, new_tss_base,
&tss_seg.prev_task_link,
sizeof tss_seg.prev_task_link,
&ctxt->exception);
if (ret != X86EMUL_CONTINUE)
return ret;
}
return load_state_from_tss32(ctxt, &tss_seg);
}
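/*
 * Common task-switch emulation: validate the new TSS, save the outgoing
 * context into the old TSS, load the incoming one (16- or 32-bit form
 * depending on the descriptor type), and keep the busy-flag, NT and
 * back-link bookkeeping consistent.
 */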
static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
u16 tss_selector, int idt_index, int reason,
bool has_error_code, u32 error_code)
{
const struct x86_emulate_ops *ops = ctxt->ops;
struct desc_struct curr_tss_desc, next_tss_desc;
int ret;
u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
ulong old_tss_base =
ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
u32 desc_limit;
ulong desc_addr, dr7;
/* FIXME: old_tss_base == ~0 ? */
ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
if (ret != X86EMUL_CONTINUE)
return ret;
/* FIXME: check that next_tss_desc is tss */
/*
* Check privileges. The three cases are task switch caused by...
*
* 1. jmp/call/int to task gate: Check against DPL of the task gate
* 2. Exception/IRQ/iret: No check is performed
* 3. jmp/call to TSS/task-gate: No check is performed since the
* hardware checks it before exiting.
*/
if (reason == TASK_SWITCH_GATE) {
if (idt_index != -1) {
/* Software interrupts */
struct desc_struct task_gate_desc;
int dpl;
ret = read_interrupt_descriptor(ctxt, idt_index,
&task_gate_desc);
if (ret != X86EMUL_CONTINUE)
return ret;
dpl = task_gate_desc.dpl;
if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
return emulate_gp(ctxt, (idt_index << 3) | 0x2);
}
}
desc_limit = desc_limit_scaled(&next_tss_desc);
if (!next_tss_desc.p ||
((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
desc_limit < 0x2b)) {
return emulate_ts(ctxt, tss_selector & 0xfffc);
}
if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
}
if (reason == TASK_SWITCH_IRET)
ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
/* Set the back link to the previous task only if the NT bit is set
in EFLAGS; note that old_tss_sel is not used after this point */
if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
old_tss_sel = 0xffff;
if (next_tss_desc.type & 8)
ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
old_tss_base, &next_tss_desc);
else
ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
old_tss_base, &next_tss_desc);
if (ret != X86EMUL_CONTINUE)
return ret;
if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
if (reason != TASK_SWITCH_IRET) {
next_tss_desc.type |= (1 << 1); /* set busy flag */
write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
}
ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
if (has_error_code) {
ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
ctxt->lock_prefix = 0;
ctxt->src.val = (unsigned long) error_code;
ret = em_push(ctxt);
}
ops->get_dr(ctxt, 7, &dr7);
ops->set_dr(ctxt, 7, dr7 & ~(DR_LOCAL_ENABLE_MASK | DR_LOCAL_SLOWDOWN));
return ret;
}
int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
u16 tss_selector, int idt_index, int reason,
bool has_error_code, u32 error_code)
{
int rc;
invalidate_registers(ctxt);
ctxt->_eip = ctxt->eip;
ctxt->dst.type = OP_NONE;
rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
has_error_code, error_code);
if (rc == X86EMUL_CONTINUE) {
ctxt->eip = ctxt->_eip;
writeback_registers(ctxt);
}
return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
}
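/*
 * Advance a string-instruction index register (SI/DI) by the number of
 * bytes processed, moving backwards when EFLAGS.DF is set, and recompute
 * the operand's effective address.
 */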
static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
struct operand *op)
{
int df = (ctxt->eflags & X86_EFLAGS_DF) ? -op->count : op->count;
register_address_increment(ctxt, reg, df * op->bytes);
op->addr.mem.ea = register_address(ctxt, reg);
}
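/*
 * DAS: decimal adjust AL after subtraction. Subtract 6 if the low nibble
 * is above 9 or AF is set, and 0x60 if AL was above 0x99 or CF was set;
 * PF/ZF/SF are then recomputed via a dummy OR with zero.
 */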
static int em_das(struct x86_emulate_ctxt *ctxt)
{
u8 al, old_al;
bool af, cf, old_cf;
cf = ctxt->eflags & X86_EFLAGS_CF;
al = ctxt->dst.val;
old_al = al;
old_cf = cf;
cf = false;
af = ctxt->eflags & X86_EFLAGS_AF;
if ((al & 0x0f) > 9 || af) {
al -= 6;
cf = old_cf | (al >= 250);
af = true;
} else {
af = false;
}
if (old_al > 0x99 || old_cf) {
al -= 0x60;
cf = true;
}
ctxt->dst.val = al;
/* Set PF, ZF, SF */
ctxt->src.type = OP_IMM;
ctxt->src.val = 0;
ctxt->src.bytes = 1;
fastop(ctxt, em_or);
ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
if (cf)
ctxt->eflags |= X86_EFLAGS_CF;
if (af)
ctxt->eflags |= X86_EFLAGS_AF;
return X86EMUL_CONTINUE;
}
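/* AAM imm8: AH = AL / imm8, AL = AL % imm8; a zero divisor raises #DE. */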
static int em_aam(struct x86_emulate_ctxt *ctxt)
{
u8 al, ah;
if (ctxt->src.val == 0)
return emulate_de(ctxt);
al = ctxt->dst.val & 0xff;
ah = al / ctxt->src.val;
al %= ctxt->src.val;
ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
/* Set PF, ZF, SF */
ctxt->src.type = OP_IMM;
ctxt->src.val = 0;
ctxt->src.bytes = 1;
fastop(ctxt, em_or);
return X86EMUL_CONTINUE;
}
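/* AAD imm8: AL = (AL + AH * imm8) & 0xff, AH = 0; flags set via OR. */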
static int em_aad(struct x86_emulate_ctxt *ctxt)
{
u8 al = ctxt->dst.val & 0xff;
u8 ah = (ctxt->dst.val >> 8) & 0xff;
al = (al + (ah * ctxt->src.val)) & 0xff;
ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
/* Set PF, ZF, SF */
ctxt->src.type = OP_IMM;
ctxt->src.val = 0;
ctxt->src.bytes = 1;
fastop(ctxt, em_or);
return X86EMUL_CONTINUE;
}
static int em_call(struct x86_emulate_ctxt *ctxt)
{
int rc;
long rel = ctxt->src.val;
ctxt->src.val = (unsigned long)ctxt->_eip;
rc = jmp_rel(ctxt, rel);
if (rc != X86EMUL_CONTINUE)
return rc;
return em_push(ctxt);
}
static int em_call_far(struct x86_emulate_ctxt *ctxt)
{
u16 sel, old_cs;
ulong old_eip;
int rc;
struct desc_struct old_desc, new_desc;
const struct x86_emulate_ops *ops = ctxt->ops;
int cpl = ctxt->ops->cpl(ctxt);
enum x86emul_mode prev_mode = ctxt->mode;
old_eip = ctxt->_eip;
ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
X86_TRANSFER_CALL_JMP, &new_desc);
if (rc != X86EMUL_CONTINUE)
return rc;
rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
if (rc != X86EMUL_CONTINUE)
goto fail;
ctxt->src.val = old_cs;
rc = em_push(ctxt);
if (rc != X86EMUL_CONTINUE)
goto fail;
ctxt->src.val = old_eip;
rc = em_push(ctxt);
/* If we failed, we tainted the memory, but at the very least we
should restore cs */
if (rc != X86EMUL_CONTINUE) {
pr_warn_once("faulting far call emulation tainted memory\n");
goto fail;
}
return rc;
fail:
ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
ctxt->mode = prev_mode;
return rc;
}
static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
{
int rc;
unsigned long eip;
rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
return rc;
rc = assign_eip_near(ctxt, eip);
if (rc != X86EMUL_CONTINUE)
return rc;
rsp_increment(ctxt, ctxt->src.val);
return X86EMUL_CONTINUE;
}
static int em_xchg(struct x86_emulate_ctxt *ctxt)
{
/* Write back the register source. */
ctxt->src.val = ctxt->dst.val;
write_register_operand(&ctxt->src);
/* Write back the memory destination with implicit LOCK prefix. */
ctxt->dst.val = ctxt->src.orig_val;
ctxt->lock_prefix = 1;
return X86EMUL_CONTINUE;
}
static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
{
ctxt->dst.val = ctxt->src2.val;
return fastop(ctxt, em_imul);
}
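/*
 * CWD/CDQ/CQO: replicate the accumulator's sign bit into DX/EDX/RDX.
 * ~((src >> (bits - 1)) - 1) yields 0 when the sign bit is clear and
 * all-ones when it is set.
 */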
static int em_cwd(struct x86_emulate_ctxt *ctxt)
{
ctxt->dst.type = OP_REG;
ctxt->dst.bytes = ctxt->src.bytes;
ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
return X86EMUL_CONTINUE;
}
static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
{
u64 tsc = 0;
ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
*reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
*reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
return X86EMUL_CONTINUE;
}
static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
{
u64 pmc;
if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
return emulate_gp(ctxt, 0);
*reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
*reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
return X86EMUL_CONTINUE;
}
static int em_mov(struct x86_emulate_ctxt *ctxt)
{
memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
return X86EMUL_CONTINUE;
}
#define FFL(x) bit(X86_FEATURE_##x)
static int em_movbe(struct x86_emulate_ctxt *ctxt)
{
u32 ebx, ecx, edx, eax = 1;
u16 tmp;
/*
* Check that MOVBE is set in the guest-visible CPUID leaf.
*/
ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
if (!(ecx & FFL(MOVBE)))
return emulate_ud(ctxt);
switch (ctxt->op_bytes) {
case 2:
/*
* From MOVBE definition: "...When the operand size is 16 bits,
* the upper word of the destination register remains unchanged
* ..."
*
* Both casting ->valptr and ->val to u16 break strict aliasing
* rules, so we have to do the operation almost by hand.
*/
tmp = (u16)ctxt->src.val;
ctxt->dst.val &= ~0xffffUL;
ctxt->dst.val |= (unsigned long)swab16(tmp);
break;
case 4:
ctxt->dst.val = swab32((u32)ctxt->src.val);
break;
case 8:
ctxt->dst.val = swab64(ctxt->src.val);
break;
default:
BUG();
}
return X86EMUL_CONTINUE;
}
static int em_cr_write(struct x86_emulate_ctxt *ctxt)
{
if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
return emulate_gp(ctxt, 0);
/* Disable writeback. */
ctxt->dst.type = OP_NONE;
return X86EMUL_CONTINUE;
}
static int em_dr_write(struct x86_emulate_ctxt *ctxt)
{
unsigned long val;
if (ctxt->mode == X86EMUL_MODE_PROT64)
val = ctxt->src.val & ~0ULL;
else
val = ctxt->src.val & ~0U;
/* #UD condition is already handled. */
if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
return emulate_gp(ctxt, 0);
/* Disable writeback. */
ctxt->dst.type = OP_NONE;
return X86EMUL_CONTINUE;
}
static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
{
u64 msr_data;
msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
| ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
return emulate_gp(ctxt, 0);
return X86EMUL_CONTINUE;
}
static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
{
u64 msr_data;
if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
return emulate_gp(ctxt, 0);
*reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
*reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
return X86EMUL_CONTINUE;
}
static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
{
if (ctxt->modrm_reg > VCPU_SREG_GS)
return emulate_ud(ctxt);
ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
ctxt->dst.bytes = 2;
return X86EMUL_CONTINUE;
}
static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
{
u16 sel = ctxt->src.val;
if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
return emulate_ud(ctxt);
if (ctxt->modrm_reg == VCPU_SREG_SS)
ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
/* Disable writeback. */
ctxt->dst.type = OP_NONE;
return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
}
static int em_lldt(struct x86_emulate_ctxt *ctxt)
{
u16 sel = ctxt->src.val;
/* Disable writeback. */
ctxt->dst.type = OP_NONE;
return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
}
static int em_ltr(struct x86_emulate_ctxt *ctxt)
{
u16 sel = ctxt->src.val;
/* Disable writeback. */
ctxt->dst.type = OP_NONE;
return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
}
static int em_invlpg(struct x86_emulate_ctxt *ctxt)
{
int rc;
ulong linear;
rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
if (rc == X86EMUL_CONTINUE)
ctxt->ops->invlpg(ctxt, linear);
/* Disable writeback. */
ctxt->dst.type = OP_NONE;
return X86EMUL_CONTINUE;
}
static int em_clts(struct x86_emulate_ctxt *ctxt)
{
ulong cr0;
cr0 = ctxt->ops->get_cr(ctxt, 0);
cr0 &= ~X86_CR0_TS;
ctxt->ops->set_cr(ctxt, 0, cr0);
return X86EMUL_CONTINUE;
}
static int em_hypercall(struct x86_emulate_ctxt *ctxt)
{
int rc = ctxt->ops->fix_hypercall(ctxt);
if (rc != X86EMUL_CONTINUE)
return rc;
/* Let the processor re-execute the fixed hypercall */
ctxt->_eip = ctxt->eip;
/* Disable writeback. */
ctxt->dst.type = OP_NONE;
return X86EMUL_CONTINUE;
}
static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
void (*get)(struct x86_emulate_ctxt *ctxt,
struct desc_ptr *ptr))
{
struct desc_ptr desc_ptr;
if (ctxt->mode == X86EMUL_MODE_PROT64)
ctxt->op_bytes = 8;
get(ctxt, &desc_ptr);
if (ctxt->op_bytes == 2) {
ctxt->op_bytes = 4;
desc_ptr.address &= 0x00ffffff;
}
/* Disable writeback. */
ctxt->dst.type = OP_NONE;
return segmented_write_std(ctxt, ctxt->dst.addr.mem,
&desc_ptr, 2 + ctxt->op_bytes);
}
static int em_sgdt(struct x86_emulate_ctxt *ctxt)
{
return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
}
static int em_sidt(struct x86_emulate_ctxt *ctxt)
{
return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
}
static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
{
struct desc_ptr desc_ptr;
int rc;
if (ctxt->mode == X86EMUL_MODE_PROT64)
ctxt->op_bytes = 8;
rc = read_descriptor(ctxt, ctxt->src.addr.mem,
&desc_ptr.size, &desc_ptr.address,
ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
return rc;
if (ctxt->mode == X86EMUL_MODE_PROT64 &&
is_noncanonical_address(desc_ptr.address))
return emulate_gp(ctxt, 0);
if (lgdt)
ctxt->ops->set_gdt(ctxt, &desc_ptr);
else
ctxt->ops->set_idt(ctxt, &desc_ptr);
/* Disable writeback. */
ctxt->dst.type = OP_NONE;
return X86EMUL_CONTINUE;
}
static int em_lgdt(struct x86_emulate_ctxt *ctxt)
{
return em_lgdt_lidt(ctxt, true);
}
static int em_lidt(struct x86_emulate_ctxt *ctxt)
{
return em_lgdt_lidt(ctxt, false);
}
static int em_smsw(struct x86_emulate_ctxt *ctxt)
{
if (ctxt->dst.type == OP_MEM)
ctxt->dst.bytes = 2;
ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
return X86EMUL_CONTINUE;
}
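/*
 * LMSW updates only the low four CR0 bits (PE/MP/EM/TS); since PE is
 * OR'ed with its current value, LMSW can set but never clear CR0.PE.
 */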
static int em_lmsw(struct x86_emulate_ctxt *ctxt)
{
ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
| (ctxt->src.val & 0x0f));
ctxt->dst.type = OP_NONE;
return X86EMUL_CONTINUE;
}
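/*
 * LOOP/LOOPE/LOOPNE: decrement *CX and branch while it is non-zero;
 * 0xe2 is plain LOOP, while the E/NE variants additionally test ZF via
 * the condition code derived from the opcode's low bits.
 */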
static int em_loop(struct x86_emulate_ctxt *ctxt)
{
int rc = X86EMUL_CONTINUE;
register_address_increment(ctxt, VCPU_REGS_RCX, -1);
if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
(ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
rc = jmp_rel(ctxt, ctxt->src.val);
return rc;
}
static int em_jcxz(struct x86_emulate_ctxt *ctxt)
{
int rc = X86EMUL_CONTINUE;
if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
rc = jmp_rel(ctxt, ctxt->src.val);
return rc;
}
static int em_in(struct x86_emulate_ctxt *ctxt)
{
if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
&ctxt->dst.val))
return X86EMUL_IO_NEEDED;
return X86EMUL_CONTINUE;
}
static int em_out(struct x86_emulate_ctxt *ctxt)
{
ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
&ctxt->src.val, 1);
/* Disable writeback. */
ctxt->dst.type = OP_NONE;
return X86EMUL_CONTINUE;
}
static int em_cli(struct x86_emulate_ctxt *ctxt)
{
if (emulator_bad_iopl(ctxt))
return emulate_gp(ctxt, 0);
ctxt->eflags &= ~X86_EFLAGS_IF;
return X86EMUL_CONTINUE;
}
static int em_sti(struct x86_emulate_ctxt *ctxt)
{
if (emulator_bad_iopl(ctxt))
return emulate_gp(ctxt, 0);
ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
ctxt->eflags |= X86_EFLAGS_IF;
return X86EMUL_CONTINUE;
}
static int em_cpuid(struct x86_emulate_ctxt *ctxt)
{
u32 eax, ebx, ecx, edx;
eax = reg_read(ctxt, VCPU_REGS_RAX);
ecx = reg_read(ctxt, VCPU_REGS_RCX);
ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
*reg_write(ctxt, VCPU_REGS_RAX) = eax;
*reg_write(ctxt, VCPU_REGS_RBX) = ebx;
*reg_write(ctxt, VCPU_REGS_RCX) = ecx;
*reg_write(ctxt, VCPU_REGS_RDX) = edx;
return X86EMUL_CONTINUE;
}
static int em_sahf(struct x86_emulate_ctxt *ctxt)
{
u32 flags;
flags = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
X86_EFLAGS_SF;
flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
ctxt->eflags &= ~0xffUL;
ctxt->eflags |= flags | X86_EFLAGS_FIXED;
return X86EMUL_CONTINUE;
}
static int em_lahf(struct x86_emulate_ctxt *ctxt)
{
*reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
*reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
return X86EMUL_CONTINUE;
}
static int em_bswap(struct x86_emulate_ctxt *ctxt)
{
switch (ctxt->op_bytes) {
#ifdef CONFIG_X86_64
case 8:
asm("bswap %0" : "+r"(ctxt->dst.val));
break;
#endif
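/*
 * BSWAP with a 16-bit operand is undefined on real silicon; the
 * emulator simply performs a 32-bit swap for any operand size below
 * eight bytes.
 */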
default:
asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
break;
}
return X86EMUL_CONTINUE;
}
static int em_clflush(struct x86_emulate_ctxt *ctxt)
{
/* emulating clflush regardless of cpuid */
return X86EMUL_CONTINUE;
}
static int em_movsxd(struct x86_emulate_ctxt *ctxt)
{
ctxt->dst.val = (s32) ctxt->src.val;
return X86EMUL_CONTINUE;
}
static int check_fxsr(struct x86_emulate_ctxt *ctxt)
{
u32 eax = 1, ebx, ecx = 0, edx;
ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
if (!(edx & FFL(FXSR)))
return emulate_ud(ctxt);
if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
return emulate_nm(ctxt);
/*
* Don't emulate a case that should never be hit, instead of working
* around a lack of fxsave64/fxrstor64 on old compilers.
*/
if (ctxt->mode >= X86EMUL_MODE_PROT64)
return X86EMUL_UNHANDLEABLE;
return X86EMUL_CONTINUE;
}
/*
* FXSAVE and FXRSTOR have 4 different formats depending on execution mode,
* 1) 16 bit mode
* 2) 32 bit mode
* - like (1), but FIP and FDP (and the "foo" opcode field) are only
* 16 bit. At least Intel CPUs preserve the whole 32 bit values, though,
* so (1) and (2) are the same wrt. save and restore
* 3) 64-bit mode with REX.W prefix
* - like (2), but XMM 8-15 are being saved and restored
* 4) 64-bit mode without REX.W prefix
* - like (3), but FIP and FDP are 64 bit
*
* Emulation uses (3) for (1) and (2) and preserves XMM 8-15 to reach the
* desired result. (4) is not emulated.
*
* Note: Guest and host CPUID.(EAX=07H,ECX=0H):EBX[bit 13] (deprecate FPU CS
* and FPU DS) should match.
*/
static int em_fxsave(struct x86_emulate_ctxt *ctxt)
{
struct fxregs_state fx_state;
size_t size;
int rc;
rc = check_fxsr(ctxt);
if (rc != X86EMUL_CONTINUE)
return rc;
ctxt->ops->get_fpu(ctxt);
rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));
ctxt->ops->put_fpu(ctxt);
if (rc != X86EMUL_CONTINUE)
return rc;
if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR)
size = offsetof(struct fxregs_state, xmm_space[8 * 16/4]);
else
size = offsetof(struct fxregs_state, xmm_space[0]);
return segmented_write_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
}
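/*
 * On the standard 512-byte fxsave layout (a 32-byte header including
 * MXCSR, 128 bytes of x87 register state, then xmm_space at offset
 * 160) the sizes above work out to 288 bytes with CR4.OSFXSR set
 * (header + x87 + XMM0-7) and 160 bytes without it (header + x87
 * only).
 */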
static int fxrstor_fixup(struct x86_emulate_ctxt *ctxt,
struct fxregs_state *new)
{
int rc = X86EMUL_CONTINUE;
struct fxregs_state old;
rc = asm_safe("fxsave %[fx]", , [fx] "+m"(old));
if (rc != X86EMUL_CONTINUE)
return rc;
/*
* 64 bit host will restore XMM 8-15, which is not correct on non-64
* bit guests. Load the current values in order to preserve 64 bit
* XMMs after fxrstor.
*/
#ifdef CONFIG_X86_64
/* XXX: accessing XMM 8-15 very awkwardly */
memcpy(&new->xmm_space[8 * 16/4], &old.xmm_space[8 * 16/4], 8 * 16);
#endif
/*
* Hardware doesn't save and restore XMM 0-7 without CR4.OSFXSR, but
* does save and restore MXCSR.
*/
if (!(ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))
memcpy(new->xmm_space, old.xmm_space, 8 * 16);
return rc;
}
static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
{
struct fxregs_state fx_state;
int rc;
rc = check_fxsr(ctxt);
if (rc != X86EMUL_CONTINUE)
return rc;
rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, 512);
if (rc != X86EMUL_CONTINUE)
return rc;
if (fx_state.mxcsr >> 16)
return emulate_gp(ctxt, 0);
ctxt->ops->get_fpu(ctxt);
if (ctxt->mode < X86EMUL_MODE_PROT64)
rc = fxrstor_fixup(ctxt, &fx_state);
if (rc == X86EMUL_CONTINUE)
rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state));
ctxt->ops->put_fpu(ctxt);
return rc;
}
static bool valid_cr(int nr)
{
switch (nr) {
case 0:
case 2 ... 4:
case 8:
return true;
default:
return false;
}
}
static int check_cr_read(struct x86_emulate_ctxt *ctxt)
{
if (!valid_cr(ctxt->modrm_reg))
return emulate_ud(ctxt);
return X86EMUL_CONTINUE;
}
static int check_cr_write(struct x86_emulate_ctxt *ctxt)
{
u64 new_val = ctxt->src.val64;
int cr = ctxt->modrm_reg;
u64 efer = 0;
static u64 cr_reserved_bits[] = {
0xffffffff00000000ULL,
0, 0, 0, /* CR3 checked later */
CR4_RESERVED_BITS,
0, 0, 0,
CR8_RESERVED_BITS,
};
if (!valid_cr(cr))
return emulate_ud(ctxt);
if (new_val & cr_reserved_bits[cr])
return emulate_gp(ctxt, 0);
switch (cr) {
case 0: {
u64 cr4;
if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
return emulate_gp(ctxt, 0);
cr4 = ctxt->ops->get_cr(ctxt, 4);
ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
!(cr4 & X86_CR4_PAE))
return emulate_gp(ctxt, 0);
break;
}
case 3: {
u64 rsvd = 0;
ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
if (efer & EFER_LMA)
rsvd = CR3_L_MODE_RESERVED_BITS & ~CR3_PCID_INVD;
if (new_val & rsvd)
return emulate_gp(ctxt, 0);
break;
}
case 4: {
ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
return emulate_gp(ctxt, 0);
break;
}
}
return X86EMUL_CONTINUE;
}
static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
{
unsigned long dr7;
ctxt->ops->get_dr(ctxt, 7, &dr7);
/* Check if DR7.GD (general detect, bit 13) is set */
return dr7 & (1 << 13);
}
static int check_dr_read(struct x86_emulate_ctxt *ctxt)
{
int dr = ctxt->modrm_reg;
u64 cr4;
if (dr > 7)
return emulate_ud(ctxt);
cr4 = ctxt->ops->get_cr(ctxt, 4);
if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
return emulate_ud(ctxt);
if (check_dr7_gd(ctxt)) {
ulong dr6;
ctxt->ops->get_dr(ctxt, 6, &dr6);
dr6 &= ~15;
dr6 |= DR6_BD | DR6_RTM;
ctxt->ops->set_dr(ctxt, 6, dr6);
return emulate_db(ctxt);
}
return X86EMUL_CONTINUE;
}
static int check_dr_write(struct x86_emulate_ctxt *ctxt)
{
u64 new_val = ctxt->src.val64;
int dr = ctxt->modrm_reg;
if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
return emulate_gp(ctxt, 0);
return check_dr_read(ctxt);
}
static int check_svme(struct x86_emulate_ctxt *ctxt)
{
u64 efer;
ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
if (!(efer & EFER_SVME))
return emulate_ud(ctxt);
return X86EMUL_CONTINUE;
}
static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
{
u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
/* Valid physical address? */
if (rax & 0xffff000000000000ULL)
return emulate_gp(ctxt, 0);
return check_svme(ctxt);
}
static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
{
u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
return emulate_ud(ctxt);
return X86EMUL_CONTINUE;
}
static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
{
u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
ctxt->ops->check_pmc(ctxt, rcx))
return emulate_gp(ctxt, 0);
return X86EMUL_CONTINUE;
}
static int check_perm_in(struct x86_emulate_ctxt *ctxt)
{
ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
return emulate_gp(ctxt, 0);
return X86EMUL_CONTINUE;
}
static int check_perm_out(struct x86_emulate_ctxt *ctxt)
{
ctxt->src.bytes = min(ctxt->src.bytes, 4u);
if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
return emulate_gp(ctxt, 0);
return X86EMUL_CONTINUE;
}
#define D(_y) { .flags = (_y) }
#define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
#define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
.intercept = x86_intercept_##_i, .check_perm = (_p) }
#define N D(NotImpl)
#define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
#define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
#define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
#define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) }
#define MD(_f, _m) { .flags = ((_f) | ModeDual), .u.mdual = (_m) }
#define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
#define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
#define II(_f, _e, _i) \
{ .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
#define IIP(_f, _e, _i, _p) \
{ .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
.intercept = x86_intercept_##_i, .check_perm = (_p) }
#define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
#define D2bv(_f) D((_f) | ByteOp), D(_f)
#define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
#define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
#define F2bv(_f, _e) F((_f) | ByteOp, _e), F(_f, _e)
#define I2bvIP(_f, _e, _i, _p) \
IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
#define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \
F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
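/*
 * F6ALU() emits the six classic ALU encodings in table order; for ADD,
 * for example, that is 0x00 r/m8,r8; 0x01 r/m,r; 0x02 r8,r/m8;
 * 0x03 r,r/m; 0x04 al,imm8 and 0x05 ax/eax/rax,imm.
 */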
static const struct opcode group7_rm0[] = {
N,
I(SrcNone | Priv | EmulateOnUD, em_hypercall),
N, N, N, N, N, N,
};
static const struct opcode group7_rm1[] = {
DI(SrcNone | Priv, monitor),
DI(SrcNone | Priv, mwait),
N, N, N, N, N, N,
};
static const struct opcode group7_rm3[] = {
DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
II(SrcNone | Prot | EmulateOnUD, em_hypercall, vmmcall),
DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
DIP(SrcNone | Prot | Priv, stgi, check_svme),
DIP(SrcNone | Prot | Priv, clgi, check_svme),
DIP(SrcNone | Prot | Priv, skinit, check_svme),
DIP(SrcNone | Prot | Priv, invlpga, check_svme),
};
static const struct opcode group7_rm7[] = {
N,
DIP(SrcNone, rdtscp, check_rdtsc),
N, N, N, N, N, N,
};
static const struct opcode group1[] = {
F(Lock, em_add),
F(Lock | PageTable, em_or),
F(Lock, em_adc),
F(Lock, em_sbb),
F(Lock | PageTable, em_and),
F(Lock, em_sub),
F(Lock, em_xor),
F(NoWrite, em_cmp),
};
static const struct opcode group1A[] = {
I(DstMem | SrcNone | Mov | Stack | IncSP, em_pop), N, N, N, N, N, N, N,
};
static const struct opcode group2[] = {
F(DstMem | ModRM, em_rol),
F(DstMem | ModRM, em_ror),
F(DstMem | ModRM, em_rcl),
F(DstMem | ModRM, em_rcr),
F(DstMem | ModRM, em_shl),
F(DstMem | ModRM, em_shr),
F(DstMem | ModRM, em_shl),
F(DstMem | ModRM, em_sar),
};
static const struct opcode group3[] = {
F(DstMem | SrcImm | NoWrite, em_test),
F(DstMem | SrcImm | NoWrite, em_test),
F(DstMem | SrcNone | Lock, em_not),
F(DstMem | SrcNone | Lock, em_neg),
F(DstXacc | Src2Mem, em_mul_ex),
F(DstXacc | Src2Mem, em_imul_ex),
F(DstXacc | Src2Mem, em_div_ex),
F(DstXacc | Src2Mem, em_idiv_ex),
};
static const struct opcode group4[] = {
F(ByteOp | DstMem | SrcNone | Lock, em_inc),
F(ByteOp | DstMem | SrcNone | Lock, em_dec),
N, N, N, N, N, N,
};
static const struct opcode group5[] = {
F(DstMem | SrcNone | Lock, em_inc),
F(DstMem | SrcNone | Lock, em_dec),
I(SrcMem | NearBranch, em_call_near_abs),
I(SrcMemFAddr | ImplicitOps, em_call_far),
I(SrcMem | NearBranch, em_jmp_abs),
I(SrcMemFAddr | ImplicitOps, em_jmp_far),
I(SrcMem | Stack, em_push), D(Undefined),
};
static const struct opcode group6[] = {
DI(Prot | DstMem, sldt),
DI(Prot | DstMem, str),
II(Prot | Priv | SrcMem16, em_lldt, lldt),
II(Prot | Priv | SrcMem16, em_ltr, ltr),
N, N, N, N,
};
static const struct group_dual group7 = { {
II(Mov | DstMem, em_sgdt, sgdt),
II(Mov | DstMem, em_sidt, sidt),
II(SrcMem | Priv, em_lgdt, lgdt),
II(SrcMem | Priv, em_lidt, lidt),
II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
}, {
EXT(0, group7_rm0),
EXT(0, group7_rm1),
N, EXT(0, group7_rm3),
II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
EXT(0, group7_rm7),
} };
static const struct opcode group8[] = {
N, N, N, N,
F(DstMem | SrcImmByte | NoWrite, em_bt),
F(DstMem | SrcImmByte | Lock | PageTable, em_bts),
F(DstMem | SrcImmByte | Lock, em_btr),
F(DstMem | SrcImmByte | Lock | PageTable, em_btc),
};
static const struct group_dual group9 = { {
N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
}, {
N, N, N, N, N, N, N, N,
} };
static const struct opcode group11[] = {
I(DstMem | SrcImm | Mov | PageTable, em_mov),
X7(D(Undefined)),
};
static const struct gprefix pfx_0f_ae_7 = {
I(SrcMem | ByteOp, em_clflush), N, N, N,
};
static const struct group_dual group15 = { {
I(ModRM | Aligned16, em_fxsave),
I(ModRM | Aligned16, em_fxrstor),
N, N, N, N, N, GP(0, &pfx_0f_ae_7),
}, {
N, N, N, N, N, N, N, N,
} };
static const struct gprefix pfx_0f_6f_0f_7f = {
I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
};
static const struct instr_dual instr_dual_0f_2b = {
I(0, em_mov), N
};
static const struct gprefix pfx_0f_2b = {
ID(0, &instr_dual_0f_2b), ID(0, &instr_dual_0f_2b), N, N,
};
static const struct gprefix pfx_0f_28_0f_29 = {
I(Aligned, em_mov), I(Aligned, em_mov), N, N,
};
static const struct gprefix pfx_0f_e7 = {
N, I(Sse, em_mov), N, N,
};
static const struct escape escape_d9 = { {
N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstcw),
}, {
/* 0xC0 - 0xC7 */
N, N, N, N, N, N, N, N,
/* 0xC8 - 0xCF */
N, N, N, N, N, N, N, N,
/* 0xD0 - 0xD7 */
N, N, N, N, N, N, N, N,
/* 0xD8 - 0xDF */
N, N, N, N, N, N, N, N,
/* 0xE0 - 0xE7 */
N, N, N, N, N, N, N, N,
/* 0xE8 - 0xEF */
N, N, N, N, N, N, N, N,
/* 0xF0 - 0xF7 */
N, N, N, N, N, N, N, N,
/* 0xF8 - 0xFF */
N, N, N, N, N, N, N, N,
} };
static const struct escape escape_db = { {
N, N, N, N, N, N, N, N,
}, {
/* 0xC0 - 0xC7 */
N, N, N, N, N, N, N, N,
/* 0xC8 - 0xCF */
N, N, N, N, N, N, N, N,
/* 0xD0 - 0xD7 */
N, N, N, N, N, N, N, N,
/* 0xD8 - 0xDF */
N, N, N, N, N, N, N, N,
/* 0xE0 - 0xE7 */
N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
/* 0xE8 - 0xEF */
N, N, N, N, N, N, N, N,
/* 0xF0 - 0xF7 */
N, N, N, N, N, N, N, N,
/* 0xF8 - 0xFF */
N, N, N, N, N, N, N, N,
} };
static const struct escape escape_dd = { {
N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstsw),
}, {
/* 0xC0 - 0xC7 */
N, N, N, N, N, N, N, N,
/* 0xC8 - 0xCF */
N, N, N, N, N, N, N, N,
/* 0xD0 - 0xD7 */
N, N, N, N, N, N, N, N,
/* 0xD8 - 0xDF */
N, N, N, N, N, N, N, N,
/* 0xE0 - 0xE7 */
N, N, N, N, N, N, N, N,
/* 0xE8 - 0xEF */
N, N, N, N, N, N, N, N,
/* 0xF0 - 0xF7 */
N, N, N, N, N, N, N, N,
/* 0xF8 - 0xFF */
N, N, N, N, N, N, N, N,
} };
static const struct instr_dual instr_dual_0f_c3 = {
I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov), N
};
static const struct mode_dual mode_dual_63 = {
N, I(DstReg | SrcMem32 | ModRM | Mov, em_movsxd)
};
static const struct opcode opcode_table[256] = {
/* 0x00 - 0x07 */
F6ALU(Lock, em_add),
I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
/* 0x08 - 0x0F */
F6ALU(Lock | PageTable, em_or),
I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
N,
/* 0x10 - 0x17 */
F6ALU(Lock, em_adc),
I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
/* 0x18 - 0x1F */
F6ALU(Lock, em_sbb),
I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
/* 0x20 - 0x27 */
F6ALU(Lock | PageTable, em_and), N, N,
/* 0x28 - 0x2F */
F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
/* 0x30 - 0x37 */
F6ALU(Lock, em_xor), N, N,
/* 0x38 - 0x3F */
F6ALU(NoWrite, em_cmp), N, N,
/* 0x40 - 0x4F */
X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
/* 0x50 - 0x57 */
X8(I(SrcReg | Stack, em_push)),
/* 0x58 - 0x5F */
X8(I(DstReg | Stack, em_pop)),
/* 0x60 - 0x67 */
I(ImplicitOps | Stack | No64, em_pusha),
I(ImplicitOps | Stack | No64, em_popa),
N, MD(ModRM, &mode_dual_63),
N, N, N, N,
/* 0x68 - 0x6F */
I(SrcImm | Mov | Stack, em_push),
I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
I(SrcImmByte | Mov | Stack, em_push),
I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
/* 0x70 - 0x7F */
X16(D(SrcImmByte | NearBranch)),
/* 0x80 - 0x87 */
G(ByteOp | DstMem | SrcImm, group1),
G(DstMem | SrcImm, group1),
G(ByteOp | DstMem | SrcImm | No64, group1),
G(DstMem | SrcImmByte, group1),
F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
/* 0x88 - 0x8F */
I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
D(ModRM | SrcMem | NoAccess | DstReg),
I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
G(0, group1A),
/* 0x90 - 0x97 */
DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
/* 0x98 - 0x9F */
D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
I(SrcImmFAddr | No64, em_call_far), N,
II(ImplicitOps | Stack, em_pushf, pushf),
II(ImplicitOps | Stack, em_popf, popf),
I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
/* 0xA0 - 0xA7 */
I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
I2bv(SrcSI | DstDI | Mov | String, em_mov),
F2bv(SrcSI | DstDI | String | NoWrite, em_cmp_r),
/* 0xA8 - 0xAF */
F2bv(DstAcc | SrcImm | NoWrite, em_test),
I2bv(SrcAcc | DstDI | Mov | String, em_mov),
I2bv(SrcSI | DstAcc | Mov | String, em_mov),
F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
/* 0xB0 - 0xB7 */
X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
/* 0xB8 - 0xBF */
X8(I(DstReg | SrcImm64 | Mov, em_mov)),
/* 0xC0 - 0xC7 */
G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm),
I(ImplicitOps | NearBranch, em_ret),
I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
G(ByteOp, group11), G(0, group11),
/* 0xC8 - 0xCF */
I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
I(ImplicitOps | SrcImmU16, em_ret_far_imm),
I(ImplicitOps, em_ret_far),
D(ImplicitOps), DI(SrcImmByte, intn),
D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
/* 0xD0 - 0xD7 */
G(Src2One | ByteOp, group2), G(Src2One, group2),
G(Src2CL | ByteOp, group2), G(Src2CL, group2),
I(DstAcc | SrcImmUByte | No64, em_aam),
I(DstAcc | SrcImmUByte | No64, em_aad),
F(DstAcc | ByteOp | No64, em_salc),
I(DstAcc | SrcXLat | ByteOp, em_mov),
/* 0xD8 - 0xDF */
N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
/* 0xE0 - 0xE7 */
X3(I(SrcImmByte | NearBranch, em_loop)),
I(SrcImmByte | NearBranch, em_jcxz),
I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
/* 0xE8 - 0xEF */
I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps | NearBranch),
I(SrcImmFAddr | No64, em_jmp_far),
D(SrcImmByte | ImplicitOps | NearBranch),
I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
/* 0xF0 - 0xF7 */
N, DI(ImplicitOps, icebp), N, N,
DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
G(ByteOp, group3), G(0, group3),
/* 0xF8 - 0xFF */
D(ImplicitOps), D(ImplicitOps),
I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
};
static const struct opcode twobyte_table[256] = {
/* 0x00 - 0x0F */
G(0, group6), GD(0, &group7), N, N,
N, I(ImplicitOps | EmulateOnUD, em_syscall),
II(ImplicitOps | Priv, em_clts, clts), N,
DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
/* 0x10 - 0x1F */
N, N, N, N, N, N, N, N,
D(ImplicitOps | ModRM | SrcMem | NoAccess),
N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
/* 0x20 - 0x2F */
DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
check_cr_write),
IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
check_dr_write),
N, N, N, N,
GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
N, N, N, N,
/* 0x30 - 0x3F */
II(ImplicitOps | Priv, em_wrmsr, wrmsr),
IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
II(ImplicitOps | Priv, em_rdmsr, rdmsr),
IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
I(ImplicitOps | EmulateOnUD, em_sysenter),
I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
N, N,
N, N, N, N, N, N, N, N,
/* 0x40 - 0x4F */
X16(D(DstReg | SrcMem | ModRM)),
/* 0x50 - 0x5F */
N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
/* 0x60 - 0x6F */
N, N, N, N,
N, N, N, N,
N, N, N, N,
N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
/* 0x70 - 0x7F */
N, N, N, N,
N, N, N, N,
N, N, N, N,
N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
/* 0x80 - 0x8F */
X16(D(SrcImm | NearBranch)),
/* 0x90 - 0x9F */
X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)),
/* 0xA0 - 0xA7 */
I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
II(ImplicitOps, em_cpuid, cpuid),
F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
/* 0xA8 - 0xAF */
I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
II(EmulateOnUD | ImplicitOps, em_rsm, rsm),
F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
/* 0xB0 - 0xB7 */
I2bv(DstMem | SrcReg | ModRM | Lock | PageTable | SrcWrite, em_cmpxchg),
I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
/* 0xB8 - 0xBF */
N, N,
G(BitOp, group8),
F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
I(DstReg | SrcMem | ModRM, em_bsf_c),
I(DstReg | SrcMem | ModRM, em_bsr_c),
D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
/* 0xC0 - 0xC7 */
F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
N, ID(0, &instr_dual_0f_c3),
N, N, N, GD(0, &group9),
/* 0xC8 - 0xCF */
X8(I(DstReg, em_bswap)),
/* 0xD0 - 0xDF */
N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
/* 0xE0 - 0xEF */
N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
N, N, N, N, N, N, N, N,
/* 0xF0 - 0xFF */
N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
};
static const struct instr_dual instr_dual_0f_38_f0 = {
I(DstReg | SrcMem | Mov, em_movbe), N
};
static const struct instr_dual instr_dual_0f_38_f1 = {
I(DstMem | SrcReg | Mov, em_movbe), N
};
static const struct gprefix three_byte_0f_38_f0 = {
ID(0, &instr_dual_0f_38_f0), N, N, N
};
static const struct gprefix three_byte_0f_38_f1 = {
ID(0, &instr_dual_0f_38_f1), N, N, N
};
/*
* Insns below are indexed by the third opcode byte and then selected by
* the mandatory prefix.
*/
static const struct opcode opcode_map_0f_38[256] = {
/* 0x00 - 0x7f */
X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
/* 0x80 - 0xef */
X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
/* 0xf0 - 0xf1 */
GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0),
GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1),
/* 0xf2 - 0xff */
N, N, X4(N), X8(N)
};
#undef D
#undef N
#undef G
#undef GD
#undef I
#undef GP
#undef EXT
#undef MD
#undef ID
#undef D2bv
#undef D2bvIP
#undef I2bv
#undef I2bvIP
#undef F6ALU
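/*
 * In 64-bit mode immediates never grow past 32 bits: an 8-byte operand
 * size still fetches a 4-byte immediate, which decode_imm() then
 * sign-extends. The one exception, MOV r64,imm64, is routed through
 * OpImm64 and so bypasses imm_size().
 */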
static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
{
unsigned size;
size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
if (size == 8)
size = 4;
return size;
}
static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
unsigned size, bool sign_extension)
{
int rc = X86EMUL_CONTINUE;
op->type = OP_IMM;
op->bytes = size;
op->addr.mem.ea = ctxt->_eip;
/* NB. Immediates are sign-extended as necessary. */
switch (op->bytes) {
case 1:
op->val = insn_fetch(s8, ctxt);
break;
case 2:
op->val = insn_fetch(s16, ctxt);
break;
case 4:
op->val = insn_fetch(s32, ctxt);
break;
case 8:
op->val = insn_fetch(s64, ctxt);
break;
}
if (!sign_extension) {
switch (op->bytes) {
case 1:
op->val &= 0xff;
break;
case 2:
op->val &= 0xffff;
break;
case 4:
op->val &= 0xffffffff;
break;
}
}
done:
return rc;
}
static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
unsigned d)
{
int rc = X86EMUL_CONTINUE;
switch (d) {
case OpReg:
decode_register_operand(ctxt, op);
break;
case OpImmUByte:
rc = decode_imm(ctxt, op, 1, false);
break;
case OpMem:
ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
mem_common:
*op = ctxt->memop;
ctxt->memopp = op;
if (ctxt->d & BitOp)
fetch_bit_operand(ctxt);
op->orig_val = op->val;
break;
case OpMem64:
ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
goto mem_common;
case OpAcc:
op->type = OP_REG;
op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
fetch_register_operand(op);
op->orig_val = op->val;
break;
case OpAccLo:
op->type = OP_REG;
op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
fetch_register_operand(op);
op->orig_val = op->val;
break;
case OpAccHi:
if (ctxt->d & ByteOp) {
op->type = OP_NONE;
break;
}
op->type = OP_REG;
op->bytes = ctxt->op_bytes;
op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
fetch_register_operand(op);
op->orig_val = op->val;
break;
case OpDI:
op->type = OP_MEM;
op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
op->addr.mem.ea =
register_address(ctxt, VCPU_REGS_RDI);
op->addr.mem.seg = VCPU_SREG_ES;
op->val = 0;
op->count = 1;
break;
case OpDX:
op->type = OP_REG;
op->bytes = 2;
op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
fetch_register_operand(op);
break;
case OpCL:
op->type = OP_IMM;
op->bytes = 1;
op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
break;
case OpImmByte:
rc = decode_imm(ctxt, op, 1, true);
break;
case OpOne:
op->type = OP_IMM;
op->bytes = 1;
op->val = 1;
break;
case OpImm:
rc = decode_imm(ctxt, op, imm_size(ctxt), true);
break;
case OpImm64:
rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
break;
case OpMem8:
ctxt->memop.bytes = 1;
if (ctxt->memop.type == OP_REG) {
ctxt->memop.addr.reg = decode_register(ctxt,
ctxt->modrm_rm, true);
fetch_register_operand(&ctxt->memop);
}
goto mem_common;
case OpMem16:
ctxt->memop.bytes = 2;
goto mem_common;
case OpMem32:
ctxt->memop.bytes = 4;
goto mem_common;
case OpImmU16:
rc = decode_imm(ctxt, op, 2, false);
break;
case OpImmU:
rc = decode_imm(ctxt, op, imm_size(ctxt), false);
break;
case OpSI:
op->type = OP_MEM;
op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
op->addr.mem.ea =
register_address(ctxt, VCPU_REGS_RSI);
op->addr.mem.seg = ctxt->seg_override;
op->val = 0;
op->count = 1;
break;
case OpXLat:
op->type = OP_MEM;
op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
op->addr.mem.ea =
address_mask(ctxt,
reg_read(ctxt, VCPU_REGS_RBX) +
(reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
op->addr.mem.seg = ctxt->seg_override;
op->val = 0;
break;
case OpImmFAddr:
op->type = OP_IMM;
op->addr.mem.ea = ctxt->_eip;
op->bytes = ctxt->op_bytes + 2;
insn_fetch_arr(op->valptr, op->bytes, ctxt);
break;
case OpMemFAddr:
ctxt->memop.bytes = ctxt->op_bytes + 2;
goto mem_common;
case OpES:
op->type = OP_IMM;
op->val = VCPU_SREG_ES;
break;
case OpCS:
op->type = OP_IMM;
op->val = VCPU_SREG_CS;
break;
case OpSS:
op->type = OP_IMM;
op->val = VCPU_SREG_SS;
break;
case OpDS:
op->type = OP_IMM;
op->val = VCPU_SREG_DS;
break;
case OpFS:
op->type = OP_IMM;
op->val = VCPU_SREG_FS;
break;
case OpGS:
op->type = OP_IMM;
op->val = VCPU_SREG_GS;
break;
case OpImplicit:
/* Special instructions do their own operand decoding. */
default:
op->type = OP_NONE; /* Disable writeback. */
break;
}
done:
return rc;
}
int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
{
int rc = X86EMUL_CONTINUE;
int mode = ctxt->mode;
int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
bool op_prefix = false;
bool has_seg_override = false;
struct opcode opcode;
ctxt->memop.type = OP_NONE;
ctxt->memopp = NULL;
ctxt->_eip = ctxt->eip;
ctxt->fetch.ptr = ctxt->fetch.data;
ctxt->fetch.end = ctxt->fetch.data + insn_len;
ctxt->opcode_len = 1;
if (insn_len > 0)
memcpy(ctxt->fetch.data, insn, insn_len);
else {
rc = __do_insn_fetch_bytes(ctxt, 1);
if (rc != X86EMUL_CONTINUE)
return rc;
}
switch (mode) {
case X86EMUL_MODE_REAL:
case X86EMUL_MODE_VM86:
case X86EMUL_MODE_PROT16:
def_op_bytes = def_ad_bytes = 2;
break;
case X86EMUL_MODE_PROT32:
def_op_bytes = def_ad_bytes = 4;
break;
#ifdef CONFIG_X86_64
case X86EMUL_MODE_PROT64:
def_op_bytes = 4;
def_ad_bytes = 8;
break;
#endif
default:
return EMULATION_FAILED;
}
ctxt->op_bytes = def_op_bytes;
ctxt->ad_bytes = def_ad_bytes;
/* Legacy prefixes. */
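/*
 * The XOR tricks below work because the sizes are powers of two:
 * 2 ^ 6 == 4 and 4 ^ 6 == 2 toggle the operand size, while
 * 4 ^ 12 == 8 and 8 ^ 12 == 4 toggle the address size in long mode.
 */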
for (;;) {
switch (ctxt->b = insn_fetch(u8, ctxt)) {
case 0x66: /* operand-size override */
op_prefix = true;
/* switch between 2/4 bytes */
ctxt->op_bytes = def_op_bytes ^ 6;
break;
case 0x67: /* address-size override */
if (mode == X86EMUL_MODE_PROT64)
/* switch between 4/8 bytes */
ctxt->ad_bytes = def_ad_bytes ^ 12;
else
/* switch between 2/4 bytes */
ctxt->ad_bytes = def_ad_bytes ^ 6;
break;
case 0x26: /* ES override */
case 0x2e: /* CS override */
case 0x36: /* SS override */
case 0x3e: /* DS override */
has_seg_override = true;
ctxt->seg_override = (ctxt->b >> 3) & 3;
break;
case 0x64: /* FS override */
case 0x65: /* GS override */
has_seg_override = true;
ctxt->seg_override = ctxt->b & 7;
break;
case 0x40 ... 0x4f: /* REX */
if (mode != X86EMUL_MODE_PROT64)
goto done_prefixes;
ctxt->rex_prefix = ctxt->b;
continue;
case 0xf0: /* LOCK */
ctxt->lock_prefix = 1;
break;
case 0xf2: /* REPNE/REPNZ */
case 0xf3: /* REP/REPE/REPZ */
ctxt->rep_prefix = ctxt->b;
break;
default:
goto done_prefixes;
}
/* Any legacy prefix after a REX prefix nullifies its effect. */
ctxt->rex_prefix = 0;
}
done_prefixes:
/* REX prefix. */
if (ctxt->rex_prefix & 8)
ctxt->op_bytes = 8; /* REX.W */
/* Opcode byte(s). */
opcode = opcode_table[ctxt->b];
/* Two-byte opcode? */
if (ctxt->b == 0x0f) {
ctxt->opcode_len = 2;
ctxt->b = insn_fetch(u8, ctxt);
opcode = twobyte_table[ctxt->b];
/* 0F_38 opcode map */
if (ctxt->b == 0x38) {
ctxt->opcode_len = 3;
ctxt->b = insn_fetch(u8, ctxt);
opcode = opcode_map_0f_38[ctxt->b];
}
}
ctxt->d = opcode.flags;
if (ctxt->d & ModRM)
ctxt->modrm = insn_fetch(u8, ctxt);
/* vex-prefix instructions are not implemented */
if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
(mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
ctxt->d = NotImpl;
}
while (ctxt->d & GroupMask) {
switch (ctxt->d & GroupMask) {
case Group:
goffset = (ctxt->modrm >> 3) & 7;
opcode = opcode.u.group[goffset];
break;
case GroupDual:
goffset = (ctxt->modrm >> 3) & 7;
if ((ctxt->modrm >> 6) == 3)
opcode = opcode.u.gdual->mod3[goffset];
else
opcode = opcode.u.gdual->mod012[goffset];
break;
case RMExt:
goffset = ctxt->modrm & 7;
opcode = opcode.u.group[goffset];
break;
case Prefix:
if (ctxt->rep_prefix && op_prefix)
return EMULATION_FAILED;
simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
switch (simd_prefix) {
case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
}
break;
case Escape:
if (ctxt->modrm > 0xbf)
opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
else
opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
break;
case InstrDual:
if ((ctxt->modrm >> 6) == 3)
opcode = opcode.u.idual->mod3;
else
opcode = opcode.u.idual->mod012;
break;
case ModeDual:
if (ctxt->mode == X86EMUL_MODE_PROT64)
opcode = opcode.u.mdual->mode64;
else
opcode = opcode.u.mdual->mode32;
break;
default:
return EMULATION_FAILED;
}
ctxt->d &= ~(u64)GroupMask;
ctxt->d |= opcode.flags;
}
/* Unrecognised? */
if (ctxt->d == 0)
return EMULATION_FAILED;
ctxt->execute = opcode.u.execute;
if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
return EMULATION_FAILED;
if (unlikely(ctxt->d &
(NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
No16))) {
/*
* These are copied here whenever one of the flags above is set;
* x86_emulate_insn only consults them when CheckPerm or Intercept is
* set, so the copy is guaranteed to have happened by then.
*/
ctxt->check_perm = opcode.check_perm;
ctxt->intercept = opcode.intercept;
if (ctxt->d & NotImpl)
return EMULATION_FAILED;
if (mode == X86EMUL_MODE_PROT64) {
if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
ctxt->op_bytes = 8;
else if (ctxt->d & NearBranch)
ctxt->op_bytes = 8;
}
if (ctxt->d & Op3264) {
if (mode == X86EMUL_MODE_PROT64)
ctxt->op_bytes = 8;
else
ctxt->op_bytes = 4;
}
if ((ctxt->d & No16) && ctxt->op_bytes == 2)
ctxt->op_bytes = 4;
if (ctxt->d & Sse)
ctxt->op_bytes = 16;
else if (ctxt->d & Mmx)
ctxt->op_bytes = 8;
}
/* ModRM and SIB bytes. */
if (ctxt->d & ModRM) {
rc = decode_modrm(ctxt, &ctxt->memop);
if (!has_seg_override) {
has_seg_override = true;
ctxt->seg_override = ctxt->modrm_seg;
}
} else if (ctxt->d & MemAbs)
rc = decode_abs(ctxt, &ctxt->memop);
if (rc != X86EMUL_CONTINUE)
goto done;
if (!has_seg_override)
ctxt->seg_override = VCPU_SREG_DS;
ctxt->memop.addr.mem.seg = ctxt->seg_override;
/*
* Decode and fetch the source operand: register, memory
* or immediate.
*/
rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
if (rc != X86EMUL_CONTINUE)
goto done;
/*
* Decode and fetch the second source operand: register, memory
* or immediate.
*/
rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
if (rc != X86EMUL_CONTINUE)
goto done;
/* Decode and fetch the destination operand: register or memory. */
rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
if (ctxt->rip_relative && likely(ctxt->memopp))
ctxt->memopp->addr.mem.ea = address_mask(ctxt,
ctxt->memopp->addr.mem.ea + ctxt->_eip);
done:
return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
}
bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
{
return ctxt->d & PageTable;
}
static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
{
/* The second termination condition only applies to REPE
* and REPNE. Test whether the repeat string operation prefix is
* REPE/REPZ or REPNE/REPNZ and, if so, check the
* corresponding termination condition:
* - if REPE/REPZ and ZF = 0 then done
* - if REPNE/REPNZ and ZF = 1 then done
*/
if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
(ctxt->b == 0xae) || (ctxt->b == 0xaf))
&& (((ctxt->rep_prefix == REPE_PREFIX) &&
((ctxt->eflags & X86_EFLAGS_ZF) == 0))
|| ((ctxt->rep_prefix == REPNE_PREFIX) &&
((ctxt->eflags & X86_EFLAGS_ZF) == X86_EFLAGS_ZF))))
return true;
return false;
}
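/*
 * fwait forces any pending x87 exception to be raised on the host;
 * asm_safe() traps the resulting #MF so it can be reflected into the
 * guest as a math fault rather than taken in host context.
 */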
static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
{
int rc;
ctxt->ops->get_fpu(ctxt);
rc = asm_safe("fwait");
ctxt->ops->put_fpu(ctxt);
if (unlikely(rc != X86EMUL_CONTINUE))
return emulate_exception(ctxt, MF_VECTOR, 0, false);
return X86EMUL_CONTINUE;
}
static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
struct operand *op)
{
if (op->type == OP_MM)
read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
}
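/*
 * Fastop handlers use a bare register calling convention: dst.val goes
 * in RAX, src.val in RDX, src2.val in RCX, and the guest's arithmetic
 * flags are installed around the call with push/popf. If the handler
 * faults, the exception fixup set up earlier in this file zeroes the
 * fastop pointer register, which is why a NULL fop after the call is
 * turned into #DE.
 */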
static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
{
register void *__sp asm(_ASM_SP);
ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
if (!(ctxt->d & ByteOp))
fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n"
: "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
[fastop]"+S"(fop), "+r"(__sp)
: "c"(ctxt->src2.val));
ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
if (!fop) /* exception is returned in fop variable */
return emulate_de(ctxt);
return X86EMUL_CONTINUE;
}
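/*
 * Zero every decode-cache field with a single memset over the byte
 * range from rip_relative up to (but not including) modrm. This relies
 * on those fields being laid out contiguously in struct
 * x86_emulate_ctxt, so reordering that struct silently breaks it.
 */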
void init_decode_cache(struct x86_emulate_ctxt *ctxt)
{
memset(&ctxt->rip_relative, 0,
(void *)&ctxt->modrm - (void *)&ctxt->rip_relative);
ctxt->io_read.pos = 0;
ctxt->io_read.end = 0;
ctxt->mem_read.end = 0;
}
int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
{
const struct x86_emulate_ops *ops = ctxt->ops;
int rc = X86EMUL_CONTINUE;
int saved_dst_type = ctxt->dst.type;
ctxt->mem_read.pos = 0;
/* LOCK prefix is allowed only with some instructions */
if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
rc = emulate_ud(ctxt);
goto done;
}
if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
rc = emulate_ud(ctxt);
goto done;
}
if (unlikely(ctxt->d &
(No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
(ctxt->d & Undefined)) {
rc = emulate_ud(ctxt);
goto done;
}
if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
|| ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
rc = emulate_ud(ctxt);
goto done;
}
if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
rc = emulate_nm(ctxt);
goto done;
}
if (ctxt->d & Mmx) {
rc = flush_pending_x87_faults(ctxt);
if (rc != X86EMUL_CONTINUE)
goto done;
/*
* Now that we know the fpu is exception safe, we can fetch
* operands from it.
*/
fetch_possible_mmx_operand(ctxt, &ctxt->src);
fetch_possible_mmx_operand(ctxt, &ctxt->src2);
if (!(ctxt->d & Mov))
fetch_possible_mmx_operand(ctxt, &ctxt->dst);
}
if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) {
rc = emulator_check_intercept(ctxt, ctxt->intercept,
X86_ICPT_PRE_EXCEPT);
if (rc != X86EMUL_CONTINUE)
goto done;
}
/* Instruction can only be executed in protected mode */
if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
rc = emulate_ud(ctxt);
goto done;
}
/* Privileged instruction can be executed only in CPL=0 */
if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
if (ctxt->d & PrivUD)
rc = emulate_ud(ctxt);
else
rc = emulate_gp(ctxt, 0);
goto done;
}
/* Do instruction specific permission checks */
if (ctxt->d & CheckPerm) {
rc = ctxt->check_perm(ctxt);
if (rc != X86EMUL_CONTINUE)
goto done;
}
if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
rc = emulator_check_intercept(ctxt, ctxt->intercept,
X86_ICPT_POST_EXCEPT);
if (rc != X86EMUL_CONTINUE)
goto done;
}
if (ctxt->rep_prefix && (ctxt->d & String)) {
/* All REP prefixes have the same first termination condition */
if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
string_registers_quirk(ctxt);
ctxt->eip = ctxt->_eip;
ctxt->eflags &= ~X86_EFLAGS_RF;
goto done;
}
}
}
if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
rc = segmented_read(ctxt, ctxt->src.addr.mem,
ctxt->src.valptr, ctxt->src.bytes);
if (rc != X86EMUL_CONTINUE)
goto done;
ctxt->src.orig_val64 = ctxt->src.val64;
}
if (ctxt->src2.type == OP_MEM) {
rc = segmented_read(ctxt, ctxt->src2.addr.mem,
&ctxt->src2.val, ctxt->src2.bytes);
if (rc != X86EMUL_CONTINUE)
goto done;
}
if ((ctxt->d & DstMask) == ImplicitOps)
goto special_insn;
if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
/* optimisation - avoid slow emulated read if Mov */
rc = segmented_read(ctxt, ctxt->dst.addr.mem,
&ctxt->dst.val, ctxt->dst.bytes);
if (rc != X86EMUL_CONTINUE) {
if (!(ctxt->d & NoWrite) &&
rc == X86EMUL_PROPAGATE_FAULT &&
ctxt->exception.vector == PF_VECTOR)
ctxt->exception.error_code |= PFERR_WRITE_MASK;
goto done;
}
}
/* Copy full 64-bit value for CMPXCHG8B. */
ctxt->dst.orig_val64 = ctxt->dst.val64;
special_insn:
if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
rc = emulator_check_intercept(ctxt, ctxt->intercept,
X86_ICPT_POST_MEMACCESS);
if (rc != X86EMUL_CONTINUE)
goto done;
}
if (ctxt->rep_prefix && (ctxt->d & String))
ctxt->eflags |= X86_EFLAGS_RF;
else
ctxt->eflags &= ~X86_EFLAGS_RF;
if (ctxt->execute) {
if (ctxt->d & Fastop) {
void (*fop)(struct fastop *) = (void *)ctxt->execute;
rc = fastop(ctxt, fop);
if (rc != X86EMUL_CONTINUE)
goto done;
goto writeback;
}
rc = ctxt->execute(ctxt);
if (rc != X86EMUL_CONTINUE)
goto done;
goto writeback;
}
if (ctxt->opcode_len == 2)
goto twobyte_insn;
else if (ctxt->opcode_len == 3)
goto threebyte_insn;
switch (ctxt->b) {
case 0x70 ... 0x7f: /* jcc (short) */
if (test_cc(ctxt->b, ctxt->eflags))
rc = jmp_rel(ctxt, ctxt->src.val);
break;
case 0x8d: /* lea r16/r32, m */
ctxt->dst.val = ctxt->src.addr.mem.ea;
break;
case 0x90 ... 0x97: /* nop / xchg reg, rax */
if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
ctxt->dst.type = OP_NONE;
else
rc = em_xchg(ctxt);
break;
case 0x98: /* cbw/cwde/cdqe */
switch (ctxt->op_bytes) {
case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
}
break;
case 0xcc: /* int3 */
rc = emulate_int(ctxt, 3);
break;
case 0xcd: /* int n */
rc = emulate_int(ctxt, ctxt->src.val);
break;
case 0xce: /* into */
if (ctxt->eflags & X86_EFLAGS_OF)
rc = emulate_int(ctxt, 4);
break;
case 0xe9: /* jmp rel */
case 0xeb: /* jmp rel short */
rc = jmp_rel(ctxt, ctxt->src.val);
ctxt->dst.type = OP_NONE; /* Disable writeback. */
break;
case 0xf4: /* hlt */
ctxt->ops->halt(ctxt);
break;
case 0xf5: /* cmc */
/* complement carry flag from eflags reg */
ctxt->eflags ^= X86_EFLAGS_CF;
break;
case 0xf8: /* clc */
ctxt->eflags &= ~X86_EFLAGS_CF;
break;
case 0xf9: /* stc */
ctxt->eflags |= X86_EFLAGS_CF;
break;
case 0xfc: /* cld */
ctxt->eflags &= ~X86_EFLAGS_DF;
break;
case 0xfd: /* std */
ctxt->eflags |= X86_EFLAGS_DF;
break;
default:
goto cannot_emulate;
}
if (rc != X86EMUL_CONTINUE)
goto done;
writeback:
if (ctxt->d & SrcWrite) {
BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
rc = writeback(ctxt, &ctxt->src);
if (rc != X86EMUL_CONTINUE)
goto done;
}
if (!(ctxt->d & NoWrite)) {
rc = writeback(ctxt, &ctxt->dst);
if (rc != X86EMUL_CONTINUE)
goto done;
}
/*
* Restore dst type in case the decoding will be reused
* (happens for string instructions).
*/
ctxt->dst.type = saved_dst_type;
if ((ctxt->d & SrcMask) == SrcSI)
string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);
if ((ctxt->d & DstMask) == DstDI)
string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);
if (ctxt->rep_prefix && (ctxt->d & String)) {
unsigned int count;
struct read_cache *r = &ctxt->io_read;
if ((ctxt->d & SrcMask) == SrcSI)
count = ctxt->src.count;
else
count = ctxt->dst.count;
register_address_increment(ctxt, VCPU_REGS_RCX, -count);
if (!string_insn_completed(ctxt)) {
/*
* Re-enter the guest when the pio read-ahead buffer is empty
* or, if it is not used, after every 1024 iterations.
*/
if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
(r->end == 0 || r->end != r->pos)) {
/*
* Reset the read cache. This usually happens before
* decode, but since the instruction is restarted
* we have to do it here.
*/
ctxt->mem_read.end = 0;
writeback_registers(ctxt);
return EMULATION_RESTART;
}
goto done; /* skip rip writeback */
}
ctxt->eflags &= ~X86_EFLAGS_RF;
}
ctxt->eip = ctxt->_eip;
done:
if (rc == X86EMUL_PROPAGATE_FAULT) {
WARN_ON(ctxt->exception.vector > 0x1f);
ctxt->have_exception = true;
}
if (rc == X86EMUL_INTERCEPTED)
return EMULATION_INTERCEPTED;
if (rc == X86EMUL_CONTINUE)
writeback_registers(ctxt);
return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
twobyte_insn:
switch (ctxt->b) {
case 0x09: /* wbinvd */
(ctxt->ops->wbinvd)(ctxt);
break;
case 0x08: /* invd */
case 0x0d: /* GrpP (prefetch) */
case 0x18: /* Grp16 (prefetch/nop) */
case 0x1f: /* nop */
break;
case 0x20: /* mov cr, reg */
ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
break;
case 0x21: /* mov from dr to reg */
ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
break;
case 0x40 ... 0x4f: /* cmov */
if (test_cc(ctxt->b, ctxt->eflags))
ctxt->dst.val = ctxt->src.val;
else if (ctxt->op_bytes != 4)
ctxt->dst.type = OP_NONE; /* no writeback */
break;
case 0x80 ... 0x8f: /* jnz rel, etc*/
if (test_cc(ctxt->b, ctxt->eflags))
rc = jmp_rel(ctxt, ctxt->src.val);
break;
case 0x90 ... 0x9f: /* setcc r/m8 */
ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
break;
case 0xb6 ... 0xb7: /* movzx */
ctxt->dst.bytes = ctxt->op_bytes;
ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
: (u16) ctxt->src.val;
break;
case 0xbe ... 0xbf: /* movsx */
ctxt->dst.bytes = ctxt->op_bytes;
ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
(s16) ctxt->src.val;
break;
default:
goto cannot_emulate;
}
threebyte_insn:
if (rc != X86EMUL_CONTINUE)
goto done;
goto writeback;
cannot_emulate:
return EMULATION_FAILED;
}
void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
{
invalidate_registers(ctxt);
}
void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
{
writeback_registers(ctxt);
}
| ./CrossVul/dataset_final_sorted/CWE-416/c/good_3045_0 |
crossvul-cpp_data_good_1179_0 | /* load a GIF with giflib
*
* 10/2/16
* - from svgload.c
* 25/4/16
* - add giflib5 support
* 26/7/16
* - transparency was wrong if there was no EXTENSION_RECORD
* - write 1, 2, 3, or 4 bands depending on file contents
* 17/8/16
* - support unicode on win
* 19/8/16
* - better transparency detection, thanks diegocsandrim
* 25/11/16
* - support @n, page-height
* 5/10/17
* - colormap can be missing thanks Kleis
* 21/11/17
* - add "gif-delay", "gif-loop", "gif-comment" metadata
* - add dispose handling
* 13/8/18
* - init pages to 0 before load
* 14/2/19
* - rework as a sequential loader ... simpler, much lower mem use
* 23/8/18
* - allow GIF read errors during header scan
* - better feof() handling
*/
/*
This file is part of VIPS.
VIPS is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
*/
/*
These files are distributed with VIPS - http://www.vips.ecs.soton.ac.uk
*/
/*
#define DEBUG_VERBOSE
#define VIPS_DEBUG
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif /*HAVE_CONFIG_H*/
#include <vips/intl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <ctype.h>
#include <vips/vips.h>
#include <vips/buf.h>
#include <vips/internal.h>
#include <vips/debug.h>
#ifdef HAVE_GIFLIB
#include <gif_lib.h>
/* giflib 5 is rather different :-( functions have error returns and there's
* no LastError().
*
* GIFLIB_MAJOR was introduced in 4.1.6. Use it to test for giflib 5.x.
*/
#ifdef GIFLIB_MAJOR
# if GIFLIB_MAJOR > 4
# define HAVE_GIFLIB_5
# endif
#endif
/* Added in giflib5.
*/
#ifndef HAVE_GIFLIB_5
#define DISPOSAL_UNSPECIFIED 0
#define DISPOSE_DO_NOT 1
#define DISPOSE_BACKGROUND 2
#define DISPOSE_PREVIOUS 3
#endif
#define VIPS_TYPE_FOREIGN_LOAD_GIF (vips_foreign_load_gif_get_type())
#define VIPS_FOREIGN_LOAD_GIF( obj ) \
(G_TYPE_CHECK_INSTANCE_CAST( (obj), \
VIPS_TYPE_FOREIGN_LOAD_GIF, VipsForeignLoadGif ))
#define VIPS_FOREIGN_LOAD_GIF_CLASS( klass ) \
(G_TYPE_CHECK_CLASS_CAST( (klass), \
VIPS_TYPE_FOREIGN_LOAD_GIF, VipsForeignLoadGifClass))
#define VIPS_IS_FOREIGN_LOAD_GIF( obj ) \
(G_TYPE_CHECK_INSTANCE_TYPE( (obj), VIPS_TYPE_FOREIGN_LOAD_GIF ))
#define VIPS_IS_FOREIGN_LOAD_GIF_CLASS( klass ) \
(G_TYPE_CHECK_CLASS_TYPE( (klass), VIPS_TYPE_FOREIGN_LOAD_GIF ))
#define VIPS_FOREIGN_LOAD_GIF_GET_CLASS( obj ) \
(G_TYPE_INSTANCE_GET_CLASS( (obj), \
VIPS_TYPE_FOREIGN_LOAD_GIF, VipsForeignLoadGifClass ))
typedef struct _VipsForeignLoadGif {
VipsForeignLoad parent_object;
/* Load from this page (frame number).
*/
int page;
/* Load this many pages.
*/
int n;
GifFileType *file;
/* We decompress the whole thing to a huge RGBA memory image, and
* as we render, watch for bands and transparency. At the end of
* loading, we copy 1 or 3 bands, with or without transparency, to the
* output.
*/
gboolean has_transparency;
gboolean has_colour;
/* Delay in 1/100ths of a second. We only track a single delay
* value for the whole file, and we report the first delay we see. Some
* GIFs have a long delay on the final frame.
*/
gboolean has_delay;
int delay;
/* Number of times to loop the animation.
*/
int loop;
/* The GIF comment, if any.
*/
char *comment;
/* The number of pages (frame) in the image.
*/
int n_pages;
/* A memory image the size of one frame ... we accumulate to this as
* we scan the image, and copy lines to the output on generate.
*/
VipsImage *frame;
/* A copy of the previous frame, in case we need a DISPOSE_PREVIOUS.
*/
VipsImage *previous;
/* The position of @frame, in pages.
*/
int current_page;
/* Decompress lines of the gif file to here.
*/
GifPixelType *line;
/* The current dispose method.
*/
int dispose;
/* Set for EOF detected.
*/
gboolean eof;
/* The current cmap unpacked to a simple LUT. Each uint32 is really an
* RGBA pixel ready to be blasted into @frame.
*/
guint32 cmap[256];
/* As we scan the file, the index of the transparent pixel for this
* frame.
*/
int transparency;
/* Params for DGifOpen(). Set by subclasses, called by base class in
* _open().
*/
InputFunc read_func;
} VipsForeignLoadGif;
typedef struct _VipsForeignLoadGifClass {
VipsForeignLoadClass parent_class;
/* Close and reopen gif->file.
*/
int (*open)( VipsForeignLoadGif *gif );
} VipsForeignLoadGifClass;
G_DEFINE_ABSTRACT_TYPE( VipsForeignLoadGif, vips_foreign_load_gif,
VIPS_TYPE_FOREIGN_LOAD );
/* From gif2rgb.c ... offsets and jumps for interlaced GIF images.
*/
static int
InterlacedOffset[] = { 0, 4, 2, 1 },
InterlacedJumps[] = { 8, 8, 4, 2 };
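/* An interlaced GIF delivers rows in four passes: every 8th row
 * starting from row 0, every 8th starting from row 4, every 4th
 * starting from row 2, then every 2nd starting from row 1. The
 * offset/jump pairs above encode exactly those passes.
 */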
/* giflib4 was missing this.
*/
static const char *
vips_foreign_load_gif_errstr( int error_code )
{
#ifdef HAVE_GIFLIB_5
return( GifErrorString( error_code ) );
#else /*!HAVE_GIFLIB_5*/
switch( error_code ) {
case D_GIF_ERR_OPEN_FAILED:
return( _( "Failed to open given file" ) );
case D_GIF_ERR_READ_FAILED:
return( _( "Failed to read from given file" ) );
case D_GIF_ERR_NOT_GIF_FILE:
return( _( "Data is not a GIF file" ) );
case D_GIF_ERR_NO_SCRN_DSCR:
return( _( "No screen descriptor detected" ) );
case D_GIF_ERR_NO_IMAG_DSCR:
return( _( "No image descriptor detected" ) );
case D_GIF_ERR_NO_COLOR_MAP:
return( _( "Neither global nor local color map" ) );
case D_GIF_ERR_WRONG_RECORD:
return( _( "Wrong record type detected" ) );
case D_GIF_ERR_DATA_TOO_BIG:
return( _( "Number of pixels bigger than width * height" ) );
case D_GIF_ERR_NOT_ENOUGH_MEM:
return( _( "Failed to allocate required memory" ) );
case D_GIF_ERR_CLOSE_FAILED:
return( _( "Failed to close given file" ) );
case D_GIF_ERR_NOT_READABLE:
return( _( "Given file was not opened for read" ) );
case D_GIF_ERR_IMAGE_DEFECT:
return( _( "Image is defective, decoding aborted" ) );
case D_GIF_ERR_EOF_TOO_SOON:
return( _( "Image EOF detected, before image complete" ) );
default:
return( _( "Unknown error" ) );
}
#endif /*HAVE_GIFLIB_5*/
}
static void
vips_foreign_load_gif_error_vips( VipsForeignLoadGif *gif, int error )
{
VipsObjectClass *class = VIPS_OBJECT_GET_CLASS( gif );
const char *message;
if( (message = vips_foreign_load_gif_errstr( error )) )
vips_error( class->nickname, "%s", message );
}
static void
vips_foreign_load_gif_error( VipsForeignLoadGif *gif )
{
int error;
error = 0;
#ifdef HAVE_GIFLIB_5
if( gif->file )
error = gif->file->Error;
#else
error = GifLastError();
#endif
if( error )
vips_foreign_load_gif_error_vips( gif, error );
}
static void
vips_foreign_load_gif_close( VipsForeignLoadGif *gif )
{
#ifdef HAVE_GIFLIB_5
if( gif->file ) {
int error;
if( DGifCloseFile( gif->file, &error ) == GIF_ERROR )
vips_foreign_load_gif_error_vips( gif, error );
gif->file = NULL;
}
#else
if( gif->file ) {
if( DGifCloseFile( gif->file ) == GIF_ERROR )
vips_foreign_load_gif_error_vips( gif, GifLastError() );
gif->file = NULL;
}
#endif
}
static void
vips_foreign_load_gif_dispose( GObject *gobject )
{
VipsForeignLoadGif *gif = (VipsForeignLoadGif *) gobject;
vips_foreign_load_gif_close( gif );
VIPS_UNREF( gif->frame );
VIPS_UNREF( gif->previous );
VIPS_FREE( gif->comment );
VIPS_FREE( gif->line );
G_OBJECT_CLASS( vips_foreign_load_gif_parent_class )->
dispose( gobject );
}
static VipsForeignFlags
vips_foreign_load_gif_get_flags_filename( const char *filename )
{
return( VIPS_FOREIGN_SEQUENTIAL );
}
static VipsForeignFlags
vips_foreign_load_gif_get_flags( VipsForeignLoad *load )
{
return( VIPS_FOREIGN_SEQUENTIAL );
}
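/* Sniff the first four bytes. Both GIF87a and GIF89a begin with
 * "GIF8", so this matches either version.
 */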
static gboolean
vips_foreign_load_gif_is_a_buffer( const void *buf, size_t len )
{
const guchar *str = (const guchar *) buf;
if( len >= 4 &&
str[0] == 'G' &&
str[1] == 'I' &&
str[2] == 'F' &&
str[3] == '8' )
return( 1 );
return( 0 );
}
static gboolean
vips_foreign_load_gif_is_a( const char *filename )
{
unsigned char buf[4];
if( vips__get_bytes( filename, buf, 4 ) == 4 &&
vips_foreign_load_gif_is_a_buffer( buf, 4 ) )
return( 1 );
return( 0 );
}
static int
vips_foreign_load_gif_ext_next( VipsForeignLoadGif *gif,
GifByteType **extension )
{
if( DGifGetExtensionNext( gif->file, extension ) == GIF_ERROR ) {
vips_foreign_load_gif_error( gif );
return( -1 );
}
if( *extension )
VIPS_DEBUG_MSG( "gifload: EXTENSION_NEXT\n" );
return( 0 );
}
static int
vips_foreign_load_gif_code_next( VipsForeignLoadGif *gif,
GifByteType **extension )
{
if( DGifGetCodeNext( gif->file, extension ) == GIF_ERROR ) {
vips_foreign_load_gif_error( gif );
return( -1 );
}
if( *extension )
VIPS_DEBUG_MSG( "gifload: CODE_NEXT\n" );
return( 0 );
}
/* Quickly scan an image record.
*/
static int
vips_foreign_load_gif_scan_image( VipsForeignLoadGif *gif )
{
VipsObjectClass *class = VIPS_OBJECT_GET_CLASS( gif );
GifFileType *file = gif->file;
ColorMapObject *map;
GifByteType *extension;
if( DGifGetImageDesc( gif->file ) == GIF_ERROR ) {
vips_foreign_load_gif_error( gif );
return( -1 );
}
/* Check that the frame looks sane. Perhaps giflib checks
* this for us.
*/
if( file->Image.Left < 0 ||
file->Image.Width < 1 ||
file->Image.Width > 10000 ||
file->Image.Left + file->Image.Width > file->SWidth ||
file->Image.Top < 0 ||
file->Image.Height < 1 ||
file->Image.Height > 10000 ||
file->Image.Top + file->Image.Height > file->SHeight ) {
vips_error( class->nickname, "%s", _( "bad frame size" ) );
return( -1 );
}
/* Test for a non-greyscale colourmap for this frame.
*/
map = file->Image.ColorMap ? file->Image.ColorMap : file->SColorMap;
if( !gif->has_colour &&
map ) {
int i;
for( i = 0; i < map->ColorCount; i++ )
if( map->Colors[i].Red != map->Colors[i].Green ||
map->Colors[i].Green != map->Colors[i].Blue ) {
gif->has_colour = TRUE;
break;
}
}
/* Step over compressed image data.
*/
do {
if( vips_foreign_load_gif_code_next( gif, &extension ) )
return( -1 );
} while( extension != NULL );
return( 0 );
}
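/* The NETSCAPE2.0 application extension carries the loop count in a
 * follow-on sub-block laid out as { 3, 1, lo, hi }, with the count
 * little-endian in the final two bytes. That is what the extension[0],
 * extension[1] and shift tests below unpick.
 */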
static int
vips_foreign_load_gif_scan_application_ext( VipsForeignLoadGif *gif,
GifByteType *extension )
{
gboolean have_netscape;
/* The 11-byte NETSCAPE extension.
*/
have_netscape = FALSE;
if( extension[0] == 11 &&
vips_isprefix( "NETSCAPE2.0", (const char*) (extension + 1) ) )
have_netscape = TRUE;
while( extension != NULL ) {
if( vips_foreign_load_gif_ext_next( gif, &extension ) )
return( -1 );
if( have_netscape &&
extension &&
extension[0] == 3 &&
extension[1] == 1 )
gif->loop = extension[2] | (extension[3] << 8);
}
return( 0 );
}
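/* Layout note (per the GIF89a application extension spec): the first
 * sub-block carries the 11-byte "NETSCAPE2.0" identifier; a later
 * 3-byte sub-block of { 1, lo, hi } gives the loop count, which is why
 * the loop above accepts extension[0] == 3 and extension[1] == 1 and
 * decodes extension[2] | (extension[3] << 8).
 */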
static int
vips_foreign_load_gif_scan_comment_ext( VipsForeignLoadGif *gif,
GifByteType *extension )
{
VIPS_DEBUG_MSG( "gifload: type: comment\n" );
if( !gif->comment ) {
		/* A comment sub-block holds at most 255 bytes, so 257 leaves
		 * room for the longest comment plus a NUL terminator.
		 */
char comment[257];
vips_strncpy( comment, (char *) (extension + 1), 256 );
comment[extension[0]] = '\0';
gif->comment = g_strdup( comment );
}
while( extension != NULL )
if( vips_foreign_load_gif_ext_next( gif, &extension ) )
return( -1 );
return( 0 );
}
static int
vips_foreign_load_gif_scan_extension( VipsForeignLoadGif *gif )
{
GifByteType *extension;
int ext_code;
if( DGifGetExtension( gif->file, &ext_code, &extension ) ==
GIF_ERROR ) {
vips_foreign_load_gif_error( gif );
return( -1 );
}
if( extension )
switch( ext_code ) {
case GRAPHICS_EXT_FUNC_CODE:
if( extension[0] == 4 &&
extension[1] & 0x1 ) {
VIPS_DEBUG_MSG( "gifload: has transp.\n" );
gif->has_transparency = TRUE;
}
if( !gif->has_delay ) {
VIPS_DEBUG_MSG( "gifload: has delay\n" );
gif->has_delay = TRUE;
gif->delay = extension[2] | (extension[3] << 8);
}
while( extension != NULL )
if( vips_foreign_load_gif_ext_next( gif,
&extension ) )
return( -1 );
break;
case APPLICATION_EXT_FUNC_CODE:
if( vips_foreign_load_gif_scan_application_ext( gif,
extension ) )
return( -1 );
break;
case COMMENT_EXT_FUNC_CODE:
if( vips_foreign_load_gif_scan_comment_ext( gif,
extension ) )
return( -1 );
break;
default:
/* Step over any NEXT blocks for unknown extensions.
*/
while( extension != NULL )
if( vips_foreign_load_gif_ext_next( gif,
&extension ) )
return( -1 );
break;
}
return( 0 );
}
static int
vips_foreign_load_gif_set_header( VipsForeignLoadGif *gif, VipsImage *image )
{
vips_image_init_fields( image,
gif->file->SWidth, gif->file->SHeight * gif->n,
(gif->has_colour ? 3 : 1) + (gif->has_transparency ? 1 : 0),
VIPS_FORMAT_UCHAR, VIPS_CODING_NONE,
gif->has_colour ?
VIPS_INTERPRETATION_sRGB : VIPS_INTERPRETATION_B_W,
1.0, 1.0 );
vips_image_pipelinev( image, VIPS_DEMAND_STYLE_FATSTRIP, NULL );
if( vips_object_argument_isset( VIPS_OBJECT( gif ), "n" ) )
vips_image_set_int( image,
VIPS_META_PAGE_HEIGHT, gif->file->SHeight );
vips_image_set_int( image, VIPS_META_N_PAGES, gif->n_pages );
vips_image_set_int( image, "gif-delay", gif->delay );
vips_image_set_int( image, "gif-loop", gif->loop );
if( gif->comment )
vips_image_set_string( image, "gif-comment", gif->comment );
return( 0 );
}
/* Attempt to quickly scan a GIF and discover what we need for our header. We
* need to scan the whole file to get n_pages, transparency and colour.
*
* Don't flag errors during header scan. Many GIFs do not follow spec.
*/
static int
vips_foreign_load_gif_header( VipsForeignLoad *load )
{
VipsObjectClass *class = VIPS_OBJECT_GET_CLASS( load );
VipsForeignLoadGifClass *gif_class =
(VipsForeignLoadGifClass *) VIPS_OBJECT_GET_CLASS( load );
VipsForeignLoadGif *gif = (VipsForeignLoadGif *) load;
GifRecordType record;
if( gif_class->open( gif ) )
return( -1 );
gif->n_pages = 0;
do {
if( DGifGetRecordType( gif->file, &record ) == GIF_ERROR )
continue;
switch( record ) {
case IMAGE_DESC_RECORD_TYPE:
(void) vips_foreign_load_gif_scan_image( gif );
gif->n_pages += 1;
break;
case EXTENSION_RECORD_TYPE:
/* We will need to fetch the extensions to check for
* cmaps and transparency.
*/
(void) vips_foreign_load_gif_scan_extension( gif );
break;
case TERMINATE_RECORD_TYPE:
gif->eof = TRUE;
break;
case SCREEN_DESC_RECORD_TYPE:
case UNDEFINED_RECORD_TYPE:
break;
default:
break;
}
} while( !gif->eof );
if( gif->n == -1 )
gif->n = gif->n_pages - gif->page;
if( gif->page < 0 ||
gif->n <= 0 ||
gif->page + gif->n > gif->n_pages ) {
vips_error( class->nickname, "%s", _( "bad page number" ) );
return( -1 );
}
/* And set the output vips header from what we've learned.
*/
if( vips_foreign_load_gif_set_header( gif, load->out ) )
return( -1 );
return( 0 );
}
static void
vips_foreign_load_gif_build_cmap( VipsForeignLoadGif *gif )
{
ColorMapObject *map = gif->file->Image.ColorMap ?
gif->file->Image.ColorMap : gif->file->SColorMap;
int v;
for( v = 0; v < 256; v++ ) {
VipsPel *q = (VipsPel *) &gif->cmap[v];
if( map &&
v < map->ColorCount ) {
q[0] = map->Colors[v].Red;
q[1] = map->Colors[v].Green;
q[2] = map->Colors[v].Blue;
q[3] = 255;
}
else {
/* If there's no map, just save the index.
*/
q[0] = v;
q[1] = v;
q[2] = v;
q[3] = 255;
}
}
}
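/* Note on the packing above: gif->cmap is an array of 256 guint32s, so
 * writing R, G, B, 255 through the VipsPel cast stores one RGBA pixel
 * in memory byte order, and vips_foreign_load_gif_render_line() below
 * can copy a whole pixel with a single 32-bit store.
 */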
static void
vips_foreign_load_gif_render_line( VipsForeignLoadGif *gif,
int width, VipsPel * restrict q, VipsPel * restrict p )
{
guint32 *iq;
int x;
iq = (guint32 *) q;
for( x = 0; x < width; x++ ) {
VipsPel v = p[x];
if( v == gif->transparency ) {
/* In DISPOSE_DO_NOT mode, the previous frame shows
* through (ie. we do nothing). In all other modes,
* it's just transparent.
*/
if( gif->dispose != DISPOSE_DO_NOT )
iq[x] = 0;
}
else
/* Blast in the RGBA for this value.
*/
iq[x] = gif->cmap[v];
}
}
/* Render the current gif frame into an RGBA buffer. GIFs can accumulate,
* depending on the current dispose mode.
*/
static int
vips_foreign_load_gif_render( VipsForeignLoadGif *gif )
{
GifFileType *file = gif->file;
/* Update the colour map for this frame.
*/
vips_foreign_load_gif_build_cmap( gif );
/* BACKGROUND means we reset the frame to 0 (transparent) before we
* render the next set of pixels.
*/
if( gif->dispose == DISPOSE_BACKGROUND )
memset( VIPS_IMAGE_ADDR( gif->frame, 0, 0 ), 0,
VIPS_IMAGE_SIZEOF_IMAGE( gif->frame ) );
/* PREVIOUS means we init the frame with the frame before last, ie. we
* undo the last render.
*
	 * For anything other than PREVIOUS, we must update the previous buffer.
*/
if( gif->dispose == DISPOSE_PREVIOUS )
memcpy( VIPS_IMAGE_ADDR( gif->frame, 0, 0 ),
VIPS_IMAGE_ADDR( gif->previous, 0, 0 ),
VIPS_IMAGE_SIZEOF_IMAGE( gif->frame ) );
else
memcpy( VIPS_IMAGE_ADDR( gif->previous, 0, 0 ),
VIPS_IMAGE_ADDR( gif->frame, 0, 0 ),
VIPS_IMAGE_SIZEOF_IMAGE( gif->frame ) );
if( file->Image.Interlace ) {
int i;
VIPS_DEBUG_MSG( "vips_foreign_load_gif_render: "
"interlaced frame of %d x %d pixels at %d x %d\n",
file->Image.Width, file->Image.Height,
file->Image.Left, file->Image.Top );
for( i = 0; i < 4; i++ ) {
int y;
for( y = InterlacedOffset[i];
y < file->Image.Height;
y += InterlacedJumps[i] ) {
VipsPel *q = VIPS_IMAGE_ADDR( gif->frame,
file->Image.Left, file->Image.Top + y );
if( DGifGetLine( gif->file, gif->line,
file->Image.Width ) == GIF_ERROR ) {
vips_foreign_load_gif_error( gif );
return( -1 );
}
vips_foreign_load_gif_render_line( gif,
file->Image.Width, q, gif->line );
}
}
}
else {
int y;
VIPS_DEBUG_MSG( "vips_foreign_load_gif_render: "
"non-interlaced frame of %d x %d pixels at %d x %d\n",
file->Image.Width, file->Image.Height,
file->Image.Left, file->Image.Top );
for( y = 0; y < file->Image.Height; y++ ) {
VipsPel *q = VIPS_IMAGE_ADDR( gif->frame,
file->Image.Left, file->Image.Top + y );
if( DGifGetLine( gif->file, gif->line,
file->Image.Width ) == GIF_ERROR ) {
vips_foreign_load_gif_error( gif );
return( -1 );
}
vips_foreign_load_gif_render_line( gif,
file->Image.Width, q, gif->line );
}
}
return( 0 );
}
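/* Background on the interlaced path above: the InterlacedOffset[] and
 * InterlacedJumps[] tables (conventionally { 0, 4, 2, 1 } and
 * { 8, 8, 4, 2 }) make the four passes write rows 0, 8, 16, ... then
 * 4, 12, 20, ... then 2, 6, 10, ... and finally every odd row -- the
 * standard GIF interlace ordering.
 */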
static int
vips_foreign_load_gif_extension( VipsForeignLoadGif *gif )
{
GifByteType *extension;
int ext_code;
VIPS_DEBUG_MSG( "vips_foreign_load_gif_extension:\n" );
if( DGifGetExtension( gif->file, &ext_code, &extension ) ==
GIF_ERROR ) {
vips_foreign_load_gif_error( gif );
return( -1 );
}
if( extension &&
ext_code == GRAPHICS_EXT_FUNC_CODE &&
extension[0] == 4 ) {
/* Bytes are flags, delay low, delay high,
* transparency. Flag bit 1 means transparency
* is being set.
*/
gif->transparency = -1;
if( extension[1] & 0x1 )
gif->transparency = extension[4];
/* Set the current dispose mode. This is read during frame load
* to set the meaning of background and transparent pixels.
*/
gif->dispose = (extension[1] >> 2) & 0x7;
VIPS_DEBUG_MSG( "vips_foreign_load_gif_extension: "
"dispose = %d\n", gif->dispose );
}
while( extension != NULL )
if( vips_foreign_load_gif_ext_next( gif, &extension ) )
return( -1 );
return( 0 );
}
/* Read the next page from the file into @frame.
*/
static int
vips_foreign_load_gif_next_page( VipsForeignLoadGif *gif )
{
GifRecordType record;
gboolean have_read_frame;
have_read_frame = FALSE;
do {
if( DGifGetRecordType( gif->file, &record ) == GIF_ERROR ) {
vips_foreign_load_gif_error( gif );
return( -1 );
}
switch( record ) {
case IMAGE_DESC_RECORD_TYPE:
VIPS_DEBUG_MSG( "vips_foreign_load_gif_next_page: "
"IMAGE_DESC_RECORD_TYPE\n" );
if( DGifGetImageDesc( gif->file ) == GIF_ERROR ) {
vips_foreign_load_gif_error( gif );
return( -1 );
}
if( vips_foreign_load_gif_render( gif ) )
return( -1 );
have_read_frame = TRUE;
break;
case EXTENSION_RECORD_TYPE:
if( vips_foreign_load_gif_extension( gif ) )
return( -1 );
break;
case TERMINATE_RECORD_TYPE:
VIPS_DEBUG_MSG( "vips_foreign_load_gif_next_page: "
"TERMINATE_RECORD_TYPE\n" );
gif->eof = TRUE;
break;
case SCREEN_DESC_RECORD_TYPE:
VIPS_DEBUG_MSG( "vips_foreign_load_gif_next_page: "
"SCREEN_DESC_RECORD_TYPE\n" );
break;
case UNDEFINED_RECORD_TYPE:
VIPS_DEBUG_MSG( "vips_foreign_load_gif_next_page: "
"UNDEFINED_RECORD_TYPE\n" );
break;
default:
break;
}
} while( !have_read_frame &&
!gif->eof );
return( 0 );
}
static int
vips_foreign_load_gif_generate( VipsRegion *or,
void *seq, void *a, void *b, gboolean *stop )
{
VipsRect *r = &or->valid;
VipsForeignLoadGif *gif = (VipsForeignLoadGif *) a;
int y;
for( y = 0; y < r->height; y++ ) {
/* The page for this output line, and the line number in page.
*/
int page = (r->top + y) / gif->file->SHeight + gif->page;
int line = (r->top + y) % gif->file->SHeight;
VipsPel *p, *q;
int x;
g_assert( line >= 0 && line < gif->frame->Ysize );
g_assert( page >= 0 && page < gif->n_pages );
		/* gif->current_page counts the pages loaded so far (0 means
		 * none yet), so keep loading until it is one past the page
		 * we want.
		 */
while( gif->current_page <= page ) {
if( vips_foreign_load_gif_next_page( gif ) )
return( -1 );
gif->current_page += 1;
}
		/* @frame is always RGBA, but the output region (or) may be G,
		 * GA, RGB or RGBA. We have to pick out the bands we want.
		 */
p = VIPS_IMAGE_ADDR( gif->frame, 0, line );
q = VIPS_REGION_ADDR( or, 0, r->top + y );
switch( or->im->Bands ) {
case 1:
for( x = 0; x < gif->frame->Xsize; x++ ) {
q[0] = p[1];
q += 1;
p += 4;
}
break;
case 2:
for( x = 0; x < gif->frame->Xsize; x++ ) {
q[0] = p[1];
q[1] = p[3];
q += 2;
p += 4;
}
break;
case 3:
for( x = 0; x < gif->frame->Xsize; x++ ) {
q[0] = p[0];
q[1] = p[1];
q[2] = p[2];
q += 3;
p += 4;
}
break;
case 4:
memcpy( q, p, VIPS_IMAGE_SIZEOF_LINE( gif->frame ) );
break;
default:
g_assert_not_reached();
break;
}
}
return( 0 );
}
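/* Note on the band cases above: for 1- and 2-band output we read p[1],
 * the G byte of the RGBA frame. That is safe because the header scan
 * only reports a greyscale image when every palette entry has
 * R == G == B, so any single channel is the grey value.
 */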
static int
vips_foreign_load_gif_load( VipsForeignLoad *load )
{
VipsForeignLoadGifClass *class =
(VipsForeignLoadGifClass *) VIPS_OBJECT_GET_CLASS( load );
VipsForeignLoadGif *gif = (VipsForeignLoadGif *) load;
VipsImage **t = (VipsImage **)
vips_object_local_array( VIPS_OBJECT( load ), 4 );
/* Rewind.
*/
if( class->open( gif ) )
return( -1 );
VIPS_DEBUG_MSG( "vips_foreign_load_gif_load:\n" );
/* Make the memory image we accumulate pixels in. We always accumulate
* to RGBA, then trim down to whatever the output image needs on
* _generate.
*/
gif->frame = vips_image_new_memory();
vips_image_init_fields( gif->frame,
gif->file->SWidth, gif->file->SHeight, 4, VIPS_FORMAT_UCHAR,
VIPS_CODING_NONE, VIPS_INTERPRETATION_sRGB, 1.0, 1.0 );
if( vips_image_write_prepare( gif->frame ) )
return( -1 );
/* A copy of the previous state of the frame, in case we have to
* process a DISPOSE_PREVIOUS.
*/
gif->previous = vips_image_new_memory();
vips_image_init_fields( gif->previous,
gif->file->SWidth, gif->file->SHeight, 4, VIPS_FORMAT_UCHAR,
VIPS_CODING_NONE, VIPS_INTERPRETATION_sRGB, 1.0, 1.0 );
if( vips_image_write_prepare( gif->previous ) )
return( -1 );
/* Make the output pipeline.
*/
t[0] = vips_image_new();
if( vips_foreign_load_gif_set_header( gif, t[0] ) )
return( -1 );
/* Strips 8 pixels high to avoid too many tiny regions.
*/
if( vips_image_generate( t[0],
NULL, vips_foreign_load_gif_generate, NULL, gif, NULL ) ||
vips_sequential( t[0], &t[1],
"tile_height", VIPS__FATSTRIP_HEIGHT,
NULL ) ||
vips_image_write( t[1], load->real ) )
return( -1 );
return( 0 );
}
static int
vips_foreign_load_gif_open( VipsForeignLoadGif *gif )
{
#ifdef HAVE_GIFLIB_5
{
int error;
if( !(gif->file = DGifOpen( gif, gif->read_func, &error )) ) {
vips_foreign_load_gif_error_vips( gif, error );
return( -1 );
}
}
#else
if( !(gif->file = DGifOpen( gif, gif->read_func )) ) {
vips_foreign_load_gif_error_vips( gif, GifLastError() );
return( -1 );
}
#endif
gif->eof = FALSE;
gif->current_page = 0;
/* Allocate a line buffer now that we have the GIF width.
*/
	VIPS_FREE( gif->line );
if( !(gif->line = VIPS_ARRAY( NULL, gif->file->SWidth, GifPixelType )) )
return( -1 );
return( 0 );
}
static void
vips_foreign_load_gif_class_init( VipsForeignLoadGifClass *class )
{
GObjectClass *gobject_class = G_OBJECT_CLASS( class );
VipsObjectClass *object_class = (VipsObjectClass *) class;
VipsForeignLoadClass *load_class = (VipsForeignLoadClass *) class;
VipsForeignLoadGifClass *gif_class = (VipsForeignLoadGifClass *) class;
gobject_class->dispose = vips_foreign_load_gif_dispose;
gobject_class->set_property = vips_object_set_property;
gobject_class->get_property = vips_object_get_property;
gif_class->open = vips_foreign_load_gif_open;
load_class->header = vips_foreign_load_gif_header;
load_class->load = vips_foreign_load_gif_load;
object_class->nickname = "gifload_base";
object_class->description = _( "load GIF with giflib" );
load_class->get_flags_filename =
vips_foreign_load_gif_get_flags_filename;
load_class->get_flags = vips_foreign_load_gif_get_flags;
VIPS_ARG_INT( class, "page", 20,
_( "Page" ),
_( "Load this page from the file" ),
VIPS_ARGUMENT_OPTIONAL_INPUT,
G_STRUCT_OFFSET( VipsForeignLoadGif, page ),
0, 100000, 0 );
VIPS_ARG_INT( class, "n", 21,
_( "n" ),
_( "Load this many pages" ),
VIPS_ARGUMENT_OPTIONAL_INPUT,
G_STRUCT_OFFSET( VipsForeignLoadGif, n ),
-1, 100000, 1 );
}
static void
vips_foreign_load_gif_init( VipsForeignLoadGif *gif )
{
gif->n = 1;
gif->transparency = -1;
gif->delay = 4;
gif->loop = 0;
gif->comment = NULL;
gif->dispose = 0;
}
typedef struct _VipsForeignLoadGifFile {
VipsForeignLoadGif parent_object;
/* Filename for load.
*/
char *filename;
/* The FILE* we read from.
*/
FILE *fp;
} VipsForeignLoadGifFile;
typedef VipsForeignLoadGifClass VipsForeignLoadGifFileClass;
G_DEFINE_TYPE( VipsForeignLoadGifFile, vips_foreign_load_gif_file,
vips_foreign_load_gif_get_type() );
static void
vips_foreign_load_gif_file_dispose( GObject *gobject )
{
VipsForeignLoadGifFile *file = (VipsForeignLoadGifFile *) gobject;
VIPS_FREEF( fclose, file->fp );
G_OBJECT_CLASS( vips_foreign_load_gif_file_parent_class )->
dispose( gobject );
}
/* Our input function for file open. We can't use DGifOpenFileName(), since
* that just calls open() and won't work with unicode on win32. We can't use
* DGifOpenFileHandle() since that's an fd from open() and you can't pass those
* across DLL boundaries on Windows.
*/
static int
vips_giflib_file_read( GifFileType *gfile, GifByteType *buffer, int n )
{
VipsForeignLoadGif *gif = (VipsForeignLoadGif *) gfile->UserData;
VipsForeignLoadGifFile *file = (VipsForeignLoadGifFile *) gif;
if( feof( file->fp ) )
gif->eof = TRUE;
return( (int) fread( (void *) buffer, 1, n, file->fp ) );
}
static int
vips_foreign_load_gif_file_open( VipsForeignLoadGif *gif )
{
VipsForeignLoad *load = (VipsForeignLoad *) gif;
VipsForeignLoadGifFile *file = (VipsForeignLoadGifFile *) gif;
if( !file->fp ) {
if( !(file->fp =
vips__file_open_read( file->filename, NULL, FALSE )) )
return( -1 );
VIPS_SETSTR( load->out->filename, file->filename );
}
else
rewind( file->fp );
vips_foreign_load_gif_close( gif );
gif->read_func = vips_giflib_file_read;
return( VIPS_FOREIGN_LOAD_GIF_CLASS(
vips_foreign_load_gif_file_parent_class )->open( gif ) );
}
static const char *vips_foreign_gif_suffs[] = {
".gif",
NULL
};
static void
vips_foreign_load_gif_file_class_init(
VipsForeignLoadGifFileClass *class )
{
GObjectClass *gobject_class = G_OBJECT_CLASS( class );
VipsObjectClass *object_class = (VipsObjectClass *) class;
VipsForeignClass *foreign_class = (VipsForeignClass *) class;
VipsForeignLoadClass *load_class = (VipsForeignLoadClass *) class;
VipsForeignLoadGifClass *gif_class = (VipsForeignLoadGifClass *) class;
gobject_class->dispose = vips_foreign_load_gif_file_dispose;
gobject_class->set_property = vips_object_set_property;
gobject_class->get_property = vips_object_get_property;
object_class->nickname = "gifload";
object_class->description = _( "load GIF with giflib" );
foreign_class->suffs = vips_foreign_gif_suffs;
load_class->is_a = vips_foreign_load_gif_is_a;
gif_class->open = vips_foreign_load_gif_file_open;
VIPS_ARG_STRING( class, "filename", 1,
_( "Filename" ),
_( "Filename to load from" ),
VIPS_ARGUMENT_REQUIRED_INPUT,
G_STRUCT_OFFSET( VipsForeignLoadGifFile, filename ),
NULL );
}
static void
vips_foreign_load_gif_file_init( VipsForeignLoadGifFile *file )
{
}
typedef struct _VipsForeignLoadGifBuffer {
VipsForeignLoadGif parent_object;
/* Load from a buffer.
*/
VipsArea *buf;
/* Current read point, bytes left in buffer.
*/
VipsPel *p;
size_t bytes_to_go;
} VipsForeignLoadGifBuffer;
typedef VipsForeignLoadGifClass VipsForeignLoadGifBufferClass;
G_DEFINE_TYPE( VipsForeignLoadGifBuffer, vips_foreign_load_gif_buffer,
vips_foreign_load_gif_get_type() );
/* Callback from the gif loader.
*
 * Read up to n bytes into buf, return number of bytes read, 0 for EOF.
*/
static int
vips_giflib_buffer_read( GifFileType *file, GifByteType *buf, int n )
{
VipsForeignLoadGif *gif = (VipsForeignLoadGif *) file->UserData;
VipsForeignLoadGifBuffer *buffer = (VipsForeignLoadGifBuffer *) gif;
size_t will_read = VIPS_MIN( n, buffer->bytes_to_go );
memcpy( buf, buffer->p, will_read );
buffer->p += will_read;
buffer->bytes_to_go -= will_read;
if( will_read == 0 )
gif->eof = TRUE;
return( will_read );
}
static int
vips_foreign_load_gif_buffer_open( VipsForeignLoadGif *gif )
{
VipsForeignLoadGifBuffer *buffer = (VipsForeignLoadGifBuffer *) gif;
vips_foreign_load_gif_close( gif );
buffer->p = buffer->buf->data;
buffer->bytes_to_go = buffer->buf->length;
	gif->read_func = vips_giflib_buffer_read;
	return( VIPS_FOREIGN_LOAD_GIF_CLASS(
		vips_foreign_load_gif_buffer_parent_class )->open( gif ) );
}
static void
vips_foreign_load_gif_buffer_class_init(
VipsForeignLoadGifBufferClass *class )
{
GObjectClass *gobject_class = G_OBJECT_CLASS( class );
VipsObjectClass *object_class = (VipsObjectClass *) class;
VipsForeignLoadClass *load_class = (VipsForeignLoadClass *) class;
VipsForeignLoadGifClass *gif_class = (VipsForeignLoadGifClass *) class;
gobject_class->set_property = vips_object_set_property;
gobject_class->get_property = vips_object_get_property;
object_class->nickname = "gifload_buffer";
object_class->description = _( "load GIF with giflib" );
load_class->is_a_buffer = vips_foreign_load_gif_is_a_buffer;
gif_class->open = vips_foreign_load_gif_buffer_open;
VIPS_ARG_BOXED( class, "buffer", 1,
_( "Buffer" ),
_( "Buffer to load from" ),
VIPS_ARGUMENT_REQUIRED_INPUT,
G_STRUCT_OFFSET( VipsForeignLoadGifBuffer, buf ),
VIPS_TYPE_BLOB );
}
static void
vips_foreign_load_gif_buffer_init( VipsForeignLoadGifBuffer *buffer )
{
}
#endif /*HAVE_GIFLIB*/
/**
* vips_gifload:
* @filename: file to load
* @out: (out): output image
* @...: %NULL-terminated list of optional named arguments
*
* Optional arguments:
*
* * @page: %gint, page (frame) to read
* * @n: %gint, load this many pages
*
* Read a GIF file into a VIPS image.
*
* Use @page to select a page to render, numbering from zero.
*
* Use @n to select the number of pages to render. The default is 1. Pages are
* rendered in a vertical column, with each individual page aligned to the
* left. Set to -1 to mean "until the end of the document". Use vips_grid()
* to change page layout.
*
* The whole GIF is rendered into memory on header access. The output image
* will be 1, 2, 3 or 4 bands depending on what the reader finds in the file.
*
* See also: vips_image_new_from_file().
*
* Returns: 0 on success, -1 on error.
*/
int
vips_gifload( const char *filename, VipsImage **out, ... )
{
va_list ap;
int result;
va_start( ap, out );
result = vips_call_split( "gifload", ap, filename, out );
va_end( ap );
return( result );
}
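/* A minimal usage sketch (filename and error handling are illustrative
 * only):
 *
 *	VipsImage *image;
 *
 *	if( vips_gifload( "animation.gif", &image, "n", -1, NULL ) )
 *		vips_error_exit( NULL );
 *	printf( "%d x %d\n", vips_image_get_width( image ),
 *		vips_image_get_height( image ) );
 *	g_object_unref( image );
 */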
/**
* vips_gifload_buffer:
* @buf: (array length=len) (element-type guint8): memory area to load
* @len: (type gsize): size of memory area
* @out: (out): image to write
* @...: %NULL-terminated list of optional named arguments
*
* Optional arguments:
*
* * @page: %gint, page (frame) to read
* * @n: %gint, load this many pages
*
* Read a GIF-formatted memory block into a VIPS image. Exactly as
* vips_gifload(), but read from a memory buffer.
*
* You must not free the buffer while @out is active. The
* #VipsObject::postclose signal on @out is a good place to free.
*
* See also: vips_gifload().
*
* Returns: 0 on success, -1 on error.
*/
int
vips_gifload_buffer( void *buf, size_t len, VipsImage **out, ... )
{
va_list ap;
VipsBlob *blob;
int result;
/* We don't take a copy of the data or free it.
*/
blob = vips_blob_new( NULL, buf, len );
va_start( ap, out );
result = vips_call_split( "gifload_buffer", ap, blob, out );
va_end( ap );
vips_area_unref( VIPS_AREA( blob ) );
return( result );
}
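/* A buffer-use sketch (data, size and my_free_cb are illustrative; the
 * buffer must outlive @out, so free it from the ::postclose handler):
 *
 *	VipsImage *image;
 *
 *	if( vips_gifload_buffer( data, size, &image, NULL ) )
 *		vips_error_exit( NULL );
 *	g_signal_connect( image, "postclose",
 *		G_CALLBACK( my_free_cb ), data );
 */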
| ./CrossVul/dataset_final_sorted/CWE-416/c/good_1179_0 |
crossvul-cpp_data_bad_832_2 | // SPDX-License-Identifier: GPL-2.0+
#include <linux/io.h>
#include "ipmi_si.h"
static unsigned char port_inb(const struct si_sm_io *io, unsigned int offset)
{
unsigned int addr = io->addr_data;
return inb(addr + (offset * io->regspacing));
}
static void port_outb(const struct si_sm_io *io, unsigned int offset,
unsigned char b)
{
unsigned int addr = io->addr_data;
outb(b, addr + (offset * io->regspacing));
}
static unsigned char port_inw(const struct si_sm_io *io, unsigned int offset)
{
unsigned int addr = io->addr_data;
return (inw(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
}
static void port_outw(const struct si_sm_io *io, unsigned int offset,
unsigned char b)
{
unsigned int addr = io->addr_data;
outw(b << io->regshift, addr + (offset * io->regspacing));
}
static unsigned char port_inl(const struct si_sm_io *io, unsigned int offset)
{
unsigned int addr = io->addr_data;
return (inl(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
}
static void port_outl(const struct si_sm_io *io, unsigned int offset,
unsigned char b)
{
unsigned int addr = io->addr_data;
outl(b << io->regshift, addr+(offset * io->regspacing));
}
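/*
 * Worked example (illustrative numbers, not from a real board): with
 * regsize = 4, regspacing = 4 and regshift = 8, register 1 lives at
 * addr + 4 and port_inl() returns bits 8..15 of the 32-bit read:
 *
 *	(inl(addr + 1 * 4) >> 8) & 0xff
 */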
static void port_cleanup(struct si_sm_io *io)
{
unsigned int addr = io->addr_data;
int idx;
if (addr) {
for (idx = 0; idx < io->io_size; idx++)
release_region(addr + idx * io->regspacing,
io->regsize);
}
}
int ipmi_si_port_setup(struct si_sm_io *io)
{
unsigned int addr = io->addr_data;
int idx;
if (!addr)
return -ENODEV;
io->io_cleanup = port_cleanup;
/*
* Figure out the actual inb/inw/inl/etc routine to use based
* upon the register size.
*/
switch (io->regsize) {
case 1:
io->inputb = port_inb;
io->outputb = port_outb;
break;
case 2:
io->inputb = port_inw;
io->outputb = port_outw;
break;
case 4:
io->inputb = port_inl;
io->outputb = port_outl;
break;
default:
dev_warn(io->dev, "Invalid register size: %d\n",
io->regsize);
return -EINVAL;
}
/*
* Some BIOSes reserve disjoint I/O regions in their ACPI
* tables. This causes problems when trying to register the
* entire I/O region. Therefore we must register each I/O
* port separately.
*/
for (idx = 0; idx < io->io_size; idx++) {
if (request_region(addr + idx * io->regspacing,
io->regsize, DEVICE_NAME) == NULL) {
/* Undo allocations */
while (idx--)
release_region(addr + idx * io->regspacing,
io->regsize);
return -EIO;
}
}
return 0;
}
| ./CrossVul/dataset_final_sorted/CWE-416/c/bad_832_2 |
crossvul-cpp_data_good_913_0 | /**********************************************************************
regext.c - Oniguruma (regular expression library)
**********************************************************************/
/*-
* Copyright (c) 2002-2019 K.Kosako <sndgk393 AT ybb DOT ne DOT jp>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include "regint.h"
#if 0
static void
conv_ext0be32(const UChar* s, const UChar* end, UChar* conv)
{
while (s < end) {
*conv++ = '\0';
*conv++ = '\0';
*conv++ = '\0';
*conv++ = *s++;
}
}
static void
conv_ext0le32(const UChar* s, const UChar* end, UChar* conv)
{
while (s < end) {
*conv++ = *s++;
*conv++ = '\0';
*conv++ = '\0';
*conv++ = '\0';
}
}
static void
conv_ext0be(const UChar* s, const UChar* end, UChar* conv)
{
while (s < end) {
*conv++ = '\0';
*conv++ = *s++;
}
}
static void
conv_ext0le(const UChar* s, const UChar* end, UChar* conv)
{
while (s < end) {
*conv++ = *s++;
*conv++ = '\0';
}
}
static void
conv_swap4bytes(const UChar* s, const UChar* end, UChar* conv)
{
while (s < end) {
*conv++ = s[3];
*conv++ = s[2];
*conv++ = s[1];
*conv++ = s[0];
s += 4;
}
}
static void
conv_swap2bytes(const UChar* s, const UChar* end, UChar* conv)
{
while (s < end) {
*conv++ = s[1];
*conv++ = s[0];
s += 2;
}
}
static int
conv_encoding(OnigEncoding from, OnigEncoding to, const UChar* s, const UChar* end,
UChar** conv, UChar** conv_end)
{
int len = (int )(end - s);
if (to == ONIG_ENCODING_UTF16_BE) {
if (from == ONIG_ENCODING_ASCII || from == ONIG_ENCODING_ISO_8859_1) {
*conv = (UChar* )xmalloc(len * 2);
CHECK_NULL_RETURN_MEMERR(*conv);
*conv_end = *conv + (len * 2);
conv_ext0be(s, end, *conv);
return 0;
}
else if (from == ONIG_ENCODING_UTF16_LE) {
swap16:
*conv = (UChar* )xmalloc(len);
CHECK_NULL_RETURN_MEMERR(*conv);
*conv_end = *conv + len;
conv_swap2bytes(s, end, *conv);
return 0;
}
}
else if (to == ONIG_ENCODING_UTF16_LE) {
if (from == ONIG_ENCODING_ASCII || from == ONIG_ENCODING_ISO_8859_1) {
*conv = (UChar* )xmalloc(len * 2);
CHECK_NULL_RETURN_MEMERR(*conv);
*conv_end = *conv + (len * 2);
conv_ext0le(s, end, *conv);
return 0;
}
else if (from == ONIG_ENCODING_UTF16_BE) {
goto swap16;
}
}
if (to == ONIG_ENCODING_UTF32_BE) {
if (from == ONIG_ENCODING_ASCII || from == ONIG_ENCODING_ISO_8859_1) {
*conv = (UChar* )xmalloc(len * 4);
CHECK_NULL_RETURN_MEMERR(*conv);
*conv_end = *conv + (len * 4);
conv_ext0be32(s, end, *conv);
return 0;
}
else if (from == ONIG_ENCODING_UTF32_LE) {
swap32:
*conv = (UChar* )xmalloc(len);
CHECK_NULL_RETURN_MEMERR(*conv);
*conv_end = *conv + len;
conv_swap4bytes(s, end, *conv);
return 0;
}
}
else if (to == ONIG_ENCODING_UTF32_LE) {
if (from == ONIG_ENCODING_ASCII || from == ONIG_ENCODING_ISO_8859_1) {
*conv = (UChar* )xmalloc(len * 4);
CHECK_NULL_RETURN_MEMERR(*conv);
*conv_end = *conv + (len * 4);
conv_ext0le32(s, end, *conv);
return 0;
}
else if (from == ONIG_ENCODING_UTF32_BE) {
goto swap32;
}
}
return ONIGERR_NOT_SUPPORTED_ENCODING_COMBINATION;
}
#endif
extern int
onig_new_deluxe(regex_t** reg, const UChar* pattern, const UChar* pattern_end,
OnigCompileInfo* ci, OnigErrorInfo* einfo)
{
int r;
UChar *cpat, *cpat_end;
if (IS_NOT_NULL(einfo)) einfo->par = (UChar* )NULL;
if (ci->pattern_enc != ci->target_enc) {
return ONIGERR_NOT_SUPPORTED_ENCODING_COMBINATION;
}
else {
cpat = (UChar* )pattern;
cpat_end = (UChar* )pattern_end;
}
*reg = (regex_t* )xmalloc(sizeof(regex_t));
if (IS_NULL(*reg)) {
r = ONIGERR_MEMORY;
goto err2;
}
r = onig_reg_init(*reg, ci->option, ci->case_fold_flag, ci->target_enc,
ci->syntax);
if (r != 0) goto err;
r = onig_compile(*reg, cpat, cpat_end, einfo);
if (r != 0) {
err:
onig_free(*reg);
*reg = NULL;
}
err2:
if (cpat != pattern) xfree(cpat);
return r;
}
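/* A call sketch (standard Oniguruma constants; error handling elided;
   per the API docs, num_of_elements must be 5):

     OnigCompileInfo ci;
     regex_t* reg;
     OnigErrorInfo einfo;

     ci.num_of_elements = 5;
     ci.pattern_enc     = ONIG_ENCODING_UTF8;
     ci.target_enc      = ONIG_ENCODING_UTF8;  (must equal pattern_enc)
     ci.syntax          = ONIG_SYNTAX_DEFAULT;
     ci.option          = ONIG_OPTION_DEFAULT;
     ci.case_fold_flag  = ONIGENC_CASE_FOLD_DEFAULT;

     onig_new_deluxe(&reg, pattern, pattern_end, &ci, &einfo);
*/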
| ./CrossVul/dataset_final_sorted/CWE-416/c/good_913_0 |
crossvul-cpp_data_good_4231_0 | /*
** $Id: ldo.c $
** Stack and Call structure of Lua
** See Copyright Notice in lua.h
*/
#define ldo_c
#define LUA_CORE
#include "lprefix.h"
#include <setjmp.h>
#include <stdlib.h>
#include <string.h>
#include "lua.h"
#include "lapi.h"
#include "ldebug.h"
#include "ldo.h"
#include "lfunc.h"
#include "lgc.h"
#include "lmem.h"
#include "lobject.h"
#include "lopcodes.h"
#include "lparser.h"
#include "lstate.h"
#include "lstring.h"
#include "ltable.h"
#include "ltm.h"
#include "lundump.h"
#include "lvm.h"
#include "lzio.h"
#define errorstatus(s) ((s) > LUA_YIELD)
/*
** {======================================================
** Error-recovery functions
** =======================================================
*/
/*
** LUAI_THROW/LUAI_TRY define how Lua does exception handling. By
** default, Lua handles errors with exceptions when compiling as
** C++ code, with _longjmp/_setjmp when asked to use them, and with
** longjmp/setjmp otherwise.
*/
#if !defined(LUAI_THROW) /* { */
#if defined(__cplusplus) && !defined(LUA_USE_LONGJMP) /* { */
/* C++ exceptions */
#define LUAI_THROW(L,c) throw(c)
#define LUAI_TRY(L,c,a) \
try { a } catch(...) { if ((c)->status == 0) (c)->status = -1; }
#define luai_jmpbuf int /* dummy variable */
#elif defined(LUA_USE_POSIX) /* }{ */
/* in POSIX, try _longjmp/_setjmp (more efficient) */
#define LUAI_THROW(L,c) _longjmp((c)->b, 1)
#define LUAI_TRY(L,c,a) if (_setjmp((c)->b) == 0) { a }
#define luai_jmpbuf jmp_buf
#else /* }{ */
/* ISO C handling with long jumps */
#define LUAI_THROW(L,c) longjmp((c)->b, 1)
#define LUAI_TRY(L,c,a) if (setjmp((c)->b) == 0) { a }
#define luai_jmpbuf jmp_buf
#endif /* } */
#endif /* } */
/* chain list of long jump buffers */
struct lua_longjmp {
struct lua_longjmp *previous;
luai_jmpbuf b;
volatile int status; /* error code */
};
void luaD_seterrorobj (lua_State *L, int errcode, StkId oldtop) {
switch (errcode) {
case LUA_ERRMEM: { /* memory error? */
setsvalue2s(L, oldtop, G(L)->memerrmsg); /* reuse preregistered msg. */
break;
}
case LUA_ERRERR: {
setsvalue2s(L, oldtop, luaS_newliteral(L, "error in error handling"));
break;
}
case CLOSEPROTECT: {
setnilvalue(s2v(oldtop)); /* no error message */
break;
}
default: {
setobjs2s(L, oldtop, L->top - 1); /* error message on current top */
break;
}
}
L->top = oldtop + 1;
}
l_noret luaD_throw (lua_State *L, int errcode) {
if (L->errorJmp) { /* thread has an error handler? */
L->errorJmp->status = errcode; /* set status */
LUAI_THROW(L, L->errorJmp); /* jump to it */
}
else { /* thread has no error handler */
global_State *g = G(L);
errcode = luaF_close(L, L->stack, errcode); /* close all upvalues */
L->status = cast_byte(errcode); /* mark it as dead */
if (g->mainthread->errorJmp) { /* main thread has a handler? */
setobjs2s(L, g->mainthread->top++, L->top - 1); /* copy error obj. */
luaD_throw(g->mainthread, errcode); /* re-throw in main thread */
}
else { /* no handler at all; abort */
if (g->panic) { /* panic function? */
luaD_seterrorobj(L, errcode, L->top); /* assume EXTRA_STACK */
if (L->ci->top < L->top)
L->ci->top = L->top; /* pushing msg. can break this invariant */
lua_unlock(L);
g->panic(L); /* call panic function (last chance to jump out) */
}
abort();
}
}
}
int luaD_rawrunprotected (lua_State *L, Pfunc f, void *ud) {
global_State *g = G(L);
l_uint32 oldnCcalls = g->Cstacklimit - (L->nCcalls + L->nci);
struct lua_longjmp lj;
lj.status = LUA_OK;
lj.previous = L->errorJmp; /* chain new error handler */
L->errorJmp = &lj;
LUAI_TRY(L, &lj,
(*f)(L, ud);
);
L->errorJmp = lj.previous; /* restore old error handler */
L->nCcalls = g->Cstacklimit - oldnCcalls - L->nci;
return lj.status;
}
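/*
** Note: under the default ISO C configuration, the TRY/THROW pair used
** above reduces roughly to (a sketch, not the literal expansion):
**
**   if (setjmp(lj.b) == 0) { (*f)(L, ud); }
**   ... while luaD_throw does: lj.status = errcode; longjmp(lj.b, 1);
**
** so control returns here with 'lj.status' already set on error.
*/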
/* }====================================================== */
/*
** {==================================================================
** Stack reallocation
** ===================================================================
*/
static void correctstack (lua_State *L, StkId oldstack, StkId newstack) {
CallInfo *ci;
UpVal *up;
if (oldstack == newstack)
return; /* stack address did not change */
L->top = (L->top - oldstack) + newstack;
for (up = L->openupval; up != NULL; up = up->u.open.next)
up->v = s2v((uplevel(up) - oldstack) + newstack);
for (ci = L->ci; ci != NULL; ci = ci->previous) {
ci->top = (ci->top - oldstack) + newstack;
ci->func = (ci->func - oldstack) + newstack;
if (isLua(ci))
ci->u.l.trap = 1; /* signal to update 'trap' in 'luaV_execute' */
}
}
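/*
** Rebasing example (illustrative addresses): if the old stack began at
** 0x1000 and the new one at 0x2000, a saved StkId 0x1058 becomes
** (0x1058 - 0x1000) + 0x2000 == 0x2058. Every cached pointer into the
** stack must be corrected with the same (p - oldstack) + newstack form.
*/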
/* some space for error handling */
#define ERRORSTACKSIZE (LUAI_MAXSTACK + 200)
int luaD_reallocstack (lua_State *L, int newsize, int raiseerror) {
int lim = L->stacksize;
StkId newstack = luaM_reallocvector(L, L->stack, lim, newsize, StackValue);
lua_assert(newsize <= LUAI_MAXSTACK || newsize == ERRORSTACKSIZE);
lua_assert(L->stack_last - L->stack == L->stacksize - EXTRA_STACK);
if (unlikely(newstack == NULL)) { /* reallocation failed? */
if (raiseerror)
luaM_error(L);
else return 0; /* do not raise an error */
}
for (; lim < newsize; lim++)
setnilvalue(s2v(newstack + lim)); /* erase new segment */
correctstack(L, L->stack, newstack);
L->stack = newstack;
L->stacksize = newsize;
L->stack_last = L->stack + newsize - EXTRA_STACK;
return 1;
}
/*
** Try to grow the stack by at least 'n' elements. When 'raiseerror'
** is true, raise any error; otherwise, return 0 in case of errors.
*/
int luaD_growstack (lua_State *L, int n, int raiseerror) {
int size = L->stacksize;
int newsize = 2 * size; /* tentative new size */
if (unlikely(size > LUAI_MAXSTACK)) { /* need more space after extra size? */
if (raiseerror)
luaD_throw(L, LUA_ERRERR); /* error inside message handler */
else return 0;
}
else {
int needed = cast_int(L->top - L->stack) + n + EXTRA_STACK;
if (newsize > LUAI_MAXSTACK) /* cannot cross the limit */
newsize = LUAI_MAXSTACK;
if (newsize < needed) /* but must respect what was asked for */
newsize = needed;
if (unlikely(newsize > LUAI_MAXSTACK)) { /* stack overflow? */
/* add extra size to be able to handle the error message */
luaD_reallocstack(L, ERRORSTACKSIZE, raiseerror);
if (raiseerror)
luaG_runerror(L, "stack overflow");
else return 0;
}
} /* else no errors */
return luaD_reallocstack(L, newsize, raiseerror);
}
static int stackinuse (lua_State *L) {
CallInfo *ci;
StkId lim = L->top;
for (ci = L->ci; ci != NULL; ci = ci->previous) {
if (lim < ci->top) lim = ci->top;
}
lua_assert(lim <= L->stack_last);
return cast_int(lim - L->stack) + 1; /* part of stack in use */
}
void luaD_shrinkstack (lua_State *L) {
int inuse = stackinuse(L);
int goodsize = inuse + BASIC_STACK_SIZE;
if (goodsize > LUAI_MAXSTACK)
goodsize = LUAI_MAXSTACK; /* respect stack limit */
/* if thread is currently not handling a stack overflow and its
good size is smaller than current size, shrink its stack */
if (inuse <= (LUAI_MAXSTACK - EXTRA_STACK) && goodsize < L->stacksize)
luaD_reallocstack(L, goodsize, 0); /* ok if that fails */
else /* don't change stack */
condmovestack(L,{},{}); /* (change only for debugging) */
luaE_shrinkCI(L); /* shrink CI list */
}
void luaD_inctop (lua_State *L) {
luaD_checkstack(L, 1);
L->top++;
}
/* }================================================================== */
/*
** Call a hook for the given event. Make sure there is a hook to be
** called. (Both 'L->hook' and 'L->hookmask', which trigger this
** function, can be changed asynchronously by signals.)
*/
void luaD_hook (lua_State *L, int event, int line,
int ftransfer, int ntransfer) {
lua_Hook hook = L->hook;
if (hook && L->allowhook) { /* make sure there is a hook */
int mask = CIST_HOOKED;
CallInfo *ci = L->ci;
ptrdiff_t top = savestack(L, L->top);
ptrdiff_t ci_top = savestack(L, ci->top);
lua_Debug ar;
ar.event = event;
ar.currentline = line;
ar.i_ci = ci;
if (ntransfer != 0) {
mask |= CIST_TRAN; /* 'ci' has transfer information */
ci->u2.transferinfo.ftransfer = ftransfer;
ci->u2.transferinfo.ntransfer = ntransfer;
}
luaD_checkstack(L, LUA_MINSTACK); /* ensure minimum stack size */
if (L->top + LUA_MINSTACK > ci->top)
ci->top = L->top + LUA_MINSTACK;
L->allowhook = 0; /* cannot call hooks inside a hook */
ci->callstatus |= mask;
lua_unlock(L);
(*hook)(L, &ar);
lua_lock(L);
lua_assert(!L->allowhook);
L->allowhook = 1;
ci->top = restorestack(L, ci_top);
L->top = restorestack(L, top);
ci->callstatus &= ~mask;
}
}
/*
** Executes a call hook for Lua functions. This function is called
** whenever 'hookmask' is not zero, so it checks whether call hooks are
** active.
*/
void luaD_hookcall (lua_State *L, CallInfo *ci) {
int hook = (ci->callstatus & CIST_TAIL) ? LUA_HOOKTAILCALL : LUA_HOOKCALL;
Proto *p;
if (!(L->hookmask & LUA_MASKCALL)) /* some other hook? */
return; /* don't call hook */
p = clLvalue(s2v(ci->func))->p;
L->top = ci->top; /* prepare top */
ci->u.l.savedpc++; /* hooks assume 'pc' is already incremented */
luaD_hook(L, hook, -1, 1, p->numparams);
ci->u.l.savedpc--; /* correct 'pc' */
}
static StkId rethook (lua_State *L, CallInfo *ci, StkId firstres, int nres) {
ptrdiff_t oldtop = savestack(L, L->top); /* hook may change top */
int delta = 0;
if (isLuacode(ci)) {
Proto *p = clLvalue(s2v(ci->func))->p;
if (p->is_vararg)
delta = ci->u.l.nextraargs + p->numparams + 1;
if (L->top < ci->top)
L->top = ci->top; /* correct top to run hook */
}
if (L->hookmask & LUA_MASKRET) { /* is return hook on? */
int ftransfer;
ci->func += delta; /* if vararg, back to virtual 'func' */
ftransfer = cast(unsigned short, firstres - ci->func);
luaD_hook(L, LUA_HOOKRET, -1, ftransfer, nres); /* call it */
ci->func -= delta;
}
if (isLua(ci->previous))
L->oldpc = ci->previous->u.l.savedpc; /* update 'oldpc' */
return restorestack(L, oldtop);
}
/*
** Check whether 'func' has a '__call' metafield. If so, put it in the
** stack, below original 'func', so that 'luaD_call' can call it. Raise
** an error if there is no '__call' metafield.
*/
void luaD_tryfuncTM (lua_State *L, StkId func) {
const TValue *tm = luaT_gettmbyobj(L, s2v(func), TM_CALL);
StkId p;
if (unlikely(ttisnil(tm)))
luaG_typeerror(L, s2v(func), "call"); /* nothing to call */
for (p = L->top; p > func; p--) /* open space for metamethod */
setobjs2s(L, p, p-1);
L->top++; /* stack space pre-allocated by the caller */
setobj2s(L, func, tm); /* metamethod is the new function to be called */
}
/*
** Given 'nres' results at 'firstResult', move 'wanted' of them to 'res'.
** Handle most typical cases (zero results for commands, one result for
** expressions, multiple results for tail calls/single parameters)
** separated.
*/
static void moveresults (lua_State *L, StkId res, int nres, int wanted) {
StkId firstresult;
int i;
switch (wanted) { /* handle typical cases separately */
case 0: /* no values needed */
L->top = res;
return;
case 1: /* one value needed */
if (nres == 0) /* no results? */
setnilvalue(s2v(res)); /* adjust with nil */
else
setobjs2s(L, res, L->top - nres); /* move it to proper place */
L->top = res + 1;
return;
case LUA_MULTRET:
wanted = nres; /* we want all results */
break;
default: /* multiple results (or to-be-closed variables) */
if (hastocloseCfunc(wanted)) { /* to-be-closed variables? */
ptrdiff_t savedres = savestack(L, res);
luaF_close(L, res, LUA_OK); /* may change the stack */
res = restorestack(L, savedres);
wanted = codeNresults(wanted); /* correct value */
if (wanted == LUA_MULTRET)
wanted = nres;
}
break;
}
firstresult = L->top - nres; /* index of first result */
/* move all results to correct place */
for (i = 0; i < nres && i < wanted; i++)
setobjs2s(L, res + i, firstresult + i);
for (; i < wanted; i++) /* complete wanted number of results */
setnilvalue(s2v(res + i));
L->top = res + wanted; /* top points after the last result */
}
/*
** Finishes a function call: calls hook if necessary, removes CallInfo,
** moves current number of results to proper place.
*/
void luaD_poscall (lua_State *L, CallInfo *ci, int nres) {
if (L->hookmask)
L->top = rethook(L, ci, L->top - nres, nres);
L->ci = ci->previous; /* back to caller */
/* move results to proper place */
moveresults(L, ci->func, nres, ci->nresults);
}
#define next_ci(L) (L->ci->next ? L->ci->next : luaE_extendCI(L))
/*
** Prepare a function for a tail call, building its call info on top
** of the current call info. 'narg1' is the number of arguments plus 1
** (so that it includes the function itself).
*/
void luaD_pretailcall (lua_State *L, CallInfo *ci, StkId func, int narg1) {
Proto *p = clLvalue(s2v(func))->p;
int fsize = p->maxstacksize; /* frame size */
int nfixparams = p->numparams;
int i;
for (i = 0; i < narg1; i++) /* move down function and arguments */
setobjs2s(L, ci->func + i, func + i);
checkstackGC(L, fsize);
func = ci->func; /* moved-down function */
for (; narg1 <= nfixparams; narg1++)
setnilvalue(s2v(func + narg1)); /* complete missing arguments */
ci->top = func + 1 + fsize; /* top for new function */
lua_assert(ci->top <= L->stack_last);
ci->u.l.savedpc = p->code; /* starting point */
ci->callstatus |= CIST_TAIL;
L->top = func + narg1; /* set top */
}
/*
** Call a function (C or Lua). The function to be called is at *func.
** The arguments are on the stack, right after the function.
** When returns, all the results are on the stack, starting at the original
** function position.
*/
void luaD_call (lua_State *L, StkId func, int nresults) {
lua_CFunction f;
retry:
switch (ttypetag(s2v(func))) {
case LUA_VCCL: /* C closure */
f = clCvalue(s2v(func))->f;
goto Cfunc;
case LUA_VLCF: /* light C function */
f = fvalue(s2v(func));
Cfunc: {
int n; /* number of returns */
CallInfo *ci;
checkstackGCp(L, LUA_MINSTACK, func); /* ensure minimum stack size */
L->ci = ci = next_ci(L);
ci->nresults = nresults;
ci->callstatus = CIST_C;
ci->top = L->top + LUA_MINSTACK;
ci->func = func;
lua_assert(ci->top <= L->stack_last);
if (L->hookmask & LUA_MASKCALL) {
int narg = cast_int(L->top - func) - 1;
luaD_hook(L, LUA_HOOKCALL, -1, 1, narg);
}
lua_unlock(L);
n = (*f)(L); /* do the actual call */
lua_lock(L);
api_checknelems(L, n);
luaD_poscall(L, ci, n);
break;
}
case LUA_VLCL: { /* Lua function */
CallInfo *ci;
Proto *p = clLvalue(s2v(func))->p;
int narg = cast_int(L->top - func) - 1; /* number of real arguments */
int nfixparams = p->numparams;
int fsize = p->maxstacksize; /* frame size */
checkstackGCp(L, fsize, func);
L->ci = ci = next_ci(L);
ci->nresults = nresults;
ci->u.l.savedpc = p->code; /* starting point */
ci->callstatus = 0;
ci->top = func + 1 + fsize;
ci->func = func;
L->ci = ci;
for (; narg < nfixparams; narg++)
setnilvalue(s2v(L->top++)); /* complete missing arguments */
lua_assert(ci->top <= L->stack_last);
luaV_execute(L, ci); /* run the function */
break;
}
default: { /* not a function */
checkstackGCp(L, 1, func); /* space for metamethod */
luaD_tryfuncTM(L, func); /* try to get '__call' metamethod */
goto retry; /* try again with metamethod */
}
}
}
/*
** Similar to 'luaD_call', but does not allow yields during the call.
** If there is a stack overflow, freeing all CI structures will
** force the subsequent call to invoke 'luaE_extendCI', which then
** will raise any errors.
*/
void luaD_callnoyield (lua_State *L, StkId func, int nResults) {
incXCcalls(L);
if (getCcalls(L) <= CSTACKERR) /* possible stack overflow? */
luaE_freeCI(L);
luaD_call(L, func, nResults);
decXCcalls(L);
}
/*
** Completes the execution of an interrupted C function, calling its
** continuation function.
*/
static void finishCcall (lua_State *L, int status) {
CallInfo *ci = L->ci;
int n;
/* must have a continuation and must be able to call it */
lua_assert(ci->u.c.k != NULL && yieldable(L));
/* error status can only happen in a protected call */
lua_assert((ci->callstatus & CIST_YPCALL) || status == LUA_YIELD);
if (ci->callstatus & CIST_YPCALL) { /* was inside a pcall? */
ci->callstatus &= ~CIST_YPCALL; /* continuation is also inside it */
L->errfunc = ci->u.c.old_errfunc; /* with the same error function */
}
/* finish 'lua_callk'/'lua_pcall'; CIST_YPCALL and 'errfunc' already
handled */
adjustresults(L, ci->nresults);
lua_unlock(L);
n = (*ci->u.c.k)(L, status, ci->u.c.ctx); /* call continuation function */
lua_lock(L);
api_checknelems(L, n);
luaD_poscall(L, ci, n); /* finish 'luaD_call' */
}
/*
** Executes "full continuation" (everything in the stack) of a
** previously interrupted coroutine until the stack is empty (or another
** interruption long-jumps out of the loop). If the coroutine is
** recovering from an error, 'ud' points to the error status, which must
** be passed to the first continuation function (otherwise the default
** status is LUA_YIELD).
*/
static void unroll (lua_State *L, void *ud) {
CallInfo *ci;
if (ud != NULL) /* error status? */
finishCcall(L, *(int *)ud); /* finish 'lua_pcallk' callee */
while ((ci = L->ci) != &L->base_ci) { /* something in the stack */
if (!isLua(ci)) /* C function? */
finishCcall(L, LUA_YIELD); /* complete its execution */
else { /* Lua function */
luaV_finishOp(L); /* finish interrupted instruction */
luaV_execute(L, ci); /* execute down to higher C 'boundary' */
}
}
}
/*
** Try to find a suspended protected call (a "recover point") for the
** given thread.
*/
static CallInfo *findpcall (lua_State *L) {
CallInfo *ci;
for (ci = L->ci; ci != NULL; ci = ci->previous) { /* search for a pcall */
if (ci->callstatus & CIST_YPCALL)
return ci;
}
return NULL; /* no pending pcall */
}
/*
** Recovers from an error in a coroutine. Finds a recover point (if
** there is one) and completes the execution of the interrupted
** 'luaD_pcall'. If there is no recover point, returns zero.
*/
static int recover (lua_State *L, int status) {
StkId oldtop;
CallInfo *ci = findpcall(L);
if (ci == NULL) return 0; /* no recovery point */
/* "finish" luaD_pcall */
oldtop = restorestack(L, ci->u2.funcidx);
luaF_close(L, oldtop, status); /* may change the stack */
oldtop = restorestack(L, ci->u2.funcidx);
luaD_seterrorobj(L, status, oldtop);
L->ci = ci;
L->allowhook = getoah(ci->callstatus); /* restore original 'allowhook' */
luaD_shrinkstack(L);
L->errfunc = ci->u.c.old_errfunc;
return 1; /* continue running the coroutine */
}
/*
** Signal an error in the call to 'lua_resume', not in the execution
** of the coroutine itself. (Such errors should not be handled by any
** coroutine error handler and should not kill the coroutine.)
*/
static int resume_error (lua_State *L, const char *msg, int narg) {
L->top -= narg; /* remove args from the stack */
setsvalue2s(L, L->top, luaS_new(L, msg)); /* push error message */
api_incr_top(L);
lua_unlock(L);
return LUA_ERRRUN;
}
/*
** Do the work for 'lua_resume' in protected mode. Most of the work
** depends on the status of the coroutine: initial state, suspended
** inside a hook, or regularly suspended (optionally with a continuation
** function), plus erroneous cases: non-suspended coroutine or dead
** coroutine.
*/
static void resume (lua_State *L, void *ud) {
int n = *(cast(int*, ud)); /* number of arguments */
StkId firstArg = L->top - n; /* first argument */
CallInfo *ci = L->ci;
if (L->status == LUA_OK) { /* starting a coroutine? */
luaD_call(L, firstArg - 1, LUA_MULTRET);
}
else { /* resuming from previous yield */
lua_assert(L->status == LUA_YIELD);
L->status = LUA_OK; /* mark that it is running (again) */
if (isLua(ci)) /* yielded inside a hook? */
luaV_execute(L, ci); /* just continue running Lua code */
else { /* 'common' yield */
if (ci->u.c.k != NULL) { /* does it have a continuation function? */
lua_unlock(L);
n = (*ci->u.c.k)(L, LUA_YIELD, ci->u.c.ctx); /* call continuation */
lua_lock(L);
api_checknelems(L, n);
}
luaD_poscall(L, ci, n); /* finish 'luaD_call' */
}
unroll(L, NULL); /* run continuation */
}
}
LUA_API int lua_resume (lua_State *L, lua_State *from, int nargs,
int *nresults) {
int status;
lua_lock(L);
if (L->status == LUA_OK) { /* may be starting a coroutine */
if (L->ci != &L->base_ci) /* not in base level? */
return resume_error(L, "cannot resume non-suspended coroutine", nargs);
else if (L->top - (L->ci->func + 1) == nargs) /* no function? */
return resume_error(L, "cannot resume dead coroutine", nargs);
}
else if (L->status != LUA_YIELD) /* ended with errors? */
return resume_error(L, "cannot resume dead coroutine", nargs);
if (from == NULL)
L->nCcalls = CSTACKTHREAD;
else /* correct 'nCcalls' for this thread */
L->nCcalls = getCcalls(from) + from->nci - L->nci - CSTACKCF;
if (L->nCcalls <= CSTACKERR)
return resume_error(L, "C stack overflow", nargs);
luai_userstateresume(L, nargs);
api_checknelems(L, (L->status == LUA_OK) ? nargs + 1 : nargs);
status = luaD_rawrunprotected(L, resume, &nargs);
/* continue running after recoverable errors */
while (errorstatus(status) && recover(L, status)) {
/* unroll continuation */
status = luaD_rawrunprotected(L, unroll, &status);
}
if (likely(!errorstatus(status)))
lua_assert(status == L->status); /* normal end or yield */
else { /* unrecoverable error */
L->status = cast_byte(status); /* mark thread as 'dead' */
luaD_seterrorobj(L, status, L->top); /* push error message */
L->ci->top = L->top;
}
*nresults = (status == LUA_YIELD) ? L->ci->u2.nyield
: cast_int(L->top - (L->ci->func + 1));
lua_unlock(L);
return status;
}
LUA_API int lua_isyieldable (lua_State *L) {
return yieldable(L);
}
LUA_API int lua_yieldk (lua_State *L, int nresults, lua_KContext ctx,
lua_KFunction k) {
CallInfo *ci;
luai_userstateyield(L, nresults);
lua_lock(L);
ci = L->ci;
api_checknelems(L, nresults);
if (unlikely(!yieldable(L))) {
if (L != G(L)->mainthread)
luaG_runerror(L, "attempt to yield across a C-call boundary");
else
luaG_runerror(L, "attempt to yield from outside a coroutine");
}
L->status = LUA_YIELD;
if (isLua(ci)) { /* inside a hook? */
lua_assert(!isLuacode(ci));
api_check(L, k == NULL, "hooks cannot continue after yielding");
ci->u2.nyield = 0; /* no results */
}
else {
if ((ci->u.c.k = k) != NULL) /* is there a continuation? */
ci->u.c.ctx = ctx; /* save context */
ci->u2.nyield = nresults; /* save number of results */
luaD_throw(L, LUA_YIELD);
}
lua_assert(ci->callstatus & CIST_HOOKED); /* must be inside a hook */
lua_unlock(L);
return 0; /* return to 'luaD_hook' */
}
/*
** Call the C function 'func' in protected mode, restoring basic
** thread information ('allowhook', etc.) and in particular
** its stack level in case of errors.
*/
int luaD_pcall (lua_State *L, Pfunc func, void *u,
ptrdiff_t old_top, ptrdiff_t ef) {
int status;
CallInfo *old_ci = L->ci;
lu_byte old_allowhooks = L->allowhook;
ptrdiff_t old_errfunc = L->errfunc;
L->errfunc = ef;
status = luaD_rawrunprotected(L, func, u);
if (unlikely(status != LUA_OK)) { /* an error occurred? */
StkId oldtop = restorestack(L, old_top);
L->ci = old_ci;
L->allowhook = old_allowhooks;
status = luaF_close(L, oldtop, status);
oldtop = restorestack(L, old_top); /* previous call may change stack */
luaD_seterrorobj(L, status, oldtop);
luaD_shrinkstack(L);
}
L->errfunc = old_errfunc;
return status;
}
/*
** Execute a protected parser.
*/
struct SParser { /* data to 'f_parser' */
ZIO *z;
Mbuffer buff; /* dynamic structure used by the scanner */
Dyndata dyd; /* dynamic structures used by the parser */
const char *mode;
const char *name;
};
static void checkmode (lua_State *L, const char *mode, const char *x) {
if (mode && strchr(mode, x[0]) == NULL) {
luaO_pushfstring(L,
"attempt to load a %s chunk (mode is '%s')", x, mode);
luaD_throw(L, LUA_ERRSYNTAX);
}
}
static void f_parser (lua_State *L, void *ud) {
LClosure *cl;
struct SParser *p = cast(struct SParser *, ud);
int c = zgetc(p->z); /* read first character */
if (c == LUA_SIGNATURE[0]) {
checkmode(L, p->mode, "binary");
cl = luaU_undump(L, p->z, p->name);
}
else {
checkmode(L, p->mode, "text");
cl = luaY_parser(L, p->z, &p->buff, &p->dyd, p->name, c);
}
lua_assert(cl->nupvalues == cl->p->sizeupvalues);
luaF_initupvals(L, cl);
}
int luaD_protectedparser (lua_State *L, ZIO *z, const char *name,
const char *mode) {
struct SParser p;
int status;
incnny(L); /* cannot yield during parsing */
p.z = z; p.name = name; p.mode = mode;
p.dyd.actvar.arr = NULL; p.dyd.actvar.size = 0;
p.dyd.gt.arr = NULL; p.dyd.gt.size = 0;
p.dyd.label.arr = NULL; p.dyd.label.size = 0;
luaZ_initbuffer(L, &p.buff);
status = luaD_pcall(L, f_parser, &p, savestack(L, L->top), L->errfunc);
luaZ_freebuffer(L, &p.buff);
luaM_freearray(L, p.dyd.actvar.arr, p.dyd.actvar.size);
luaM_freearray(L, p.dyd.gt.arr, p.dyd.gt.size);
luaM_freearray(L, p.dyd.label.arr, p.dyd.label.size);
decnny(L);
return status;
}
| ./CrossVul/dataset_final_sorted/CWE-416/c/good_4231_0 |
crossvul-cpp_data_good_1316_0 | // SPDX-License-Identifier: GPL-2.0-or-later
/*
* Timers abstract layer
* Copyright (c) by Jaroslav Kysela <perex@perex.cz>
*/
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/mutex.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/sched/signal.h>
#include <sound/core.h>
#include <sound/timer.h>
#include <sound/control.h>
#include <sound/info.h>
#include <sound/minors.h>
#include <sound/initval.h>
#include <linux/kmod.h>
/* internal flags */
#define SNDRV_TIMER_IFLG_PAUSED 0x00010000
#define SNDRV_TIMER_IFLG_DEAD 0x00020000
#if IS_ENABLED(CONFIG_SND_HRTIMER)
#define DEFAULT_TIMER_LIMIT 4
#else
#define DEFAULT_TIMER_LIMIT 1
#endif
static int timer_limit = DEFAULT_TIMER_LIMIT;
static int timer_tstamp_monotonic = 1;
MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>, Takashi Iwai <tiwai@suse.de>");
MODULE_DESCRIPTION("ALSA timer interface");
MODULE_LICENSE("GPL");
module_param(timer_limit, int, 0444);
MODULE_PARM_DESC(timer_limit, "Maximum global timers in system.");
module_param(timer_tstamp_monotonic, int, 0444);
MODULE_PARM_DESC(timer_tstamp_monotonic, "Use posix monotonic clock source for timestamps (default).");
MODULE_ALIAS_CHARDEV(CONFIG_SND_MAJOR, SNDRV_MINOR_TIMER);
MODULE_ALIAS("devname:snd/timer");
struct snd_timer_user {
struct snd_timer_instance *timeri;
int tread; /* enhanced read with timestamps and events */
unsigned long ticks;
unsigned long overrun;
int qhead;
int qtail;
int qused;
int queue_size;
bool disconnected;
struct snd_timer_read *queue;
struct snd_timer_tread *tqueue;
spinlock_t qlock;
unsigned long last_resolution;
unsigned int filter;
struct timespec tstamp; /* trigger tstamp */
wait_queue_head_t qchange_sleep;
struct fasync_struct *fasync;
struct mutex ioctl_lock;
};
/* list of timers */
static LIST_HEAD(snd_timer_list);
/* list of slave instances */
static LIST_HEAD(snd_timer_slave_list);
/* lock for slave active lists */
static DEFINE_SPINLOCK(slave_active_lock);
static DEFINE_MUTEX(register_mutex);
static int snd_timer_free(struct snd_timer *timer);
static int snd_timer_dev_free(struct snd_device *device);
static int snd_timer_dev_register(struct snd_device *device);
static int snd_timer_dev_disconnect(struct snd_device *device);
static void snd_timer_reschedule(struct snd_timer * timer, unsigned long ticks_left);
/*
* create a timer instance with the given owner string.
* when timer is not NULL, increments the module counter
*/
static struct snd_timer_instance *snd_timer_instance_new(char *owner,
struct snd_timer *timer)
{
struct snd_timer_instance *timeri;
timeri = kzalloc(sizeof(*timeri), GFP_KERNEL);
if (timeri == NULL)
return NULL;
timeri->owner = kstrdup(owner, GFP_KERNEL);
if (! timeri->owner) {
kfree(timeri);
return NULL;
}
INIT_LIST_HEAD(&timeri->open_list);
INIT_LIST_HEAD(&timeri->active_list);
INIT_LIST_HEAD(&timeri->ack_list);
INIT_LIST_HEAD(&timeri->slave_list_head);
INIT_LIST_HEAD(&timeri->slave_active_head);
timeri->timer = timer;
if (timer && !try_module_get(timer->module)) {
kfree(timeri->owner);
kfree(timeri);
return NULL;
}
return timeri;
}
/*
* find a timer instance from the given timer id
*/
static struct snd_timer *snd_timer_find(struct snd_timer_id *tid)
{
struct snd_timer *timer = NULL;
list_for_each_entry(timer, &snd_timer_list, device_list) {
if (timer->tmr_class != tid->dev_class)
continue;
if ((timer->tmr_class == SNDRV_TIMER_CLASS_CARD ||
timer->tmr_class == SNDRV_TIMER_CLASS_PCM) &&
(timer->card == NULL ||
timer->card->number != tid->card))
continue;
if (timer->tmr_device != tid->device)
continue;
if (timer->tmr_subdevice != tid->subdevice)
continue;
return timer;
}
return NULL;
}
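/*
 * Illustrative lookup (assumed values; register_mutex must be held):
 * an id that matches the global system timer registered at init time.
 *
 *	struct snd_timer_id tid = {
 *		.dev_class = SNDRV_TIMER_CLASS_GLOBAL,
 *		.dev_sclass = SNDRV_TIMER_SCLASS_NONE,
 *		.card = -1,
 *		.device = SNDRV_TIMER_GLOBAL_SYSTEM,
 *		.subdevice = 0,
 *	};
 *	struct snd_timer *t = snd_timer_find(&tid);	// NULL if absent
 */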
#ifdef CONFIG_MODULES
static void snd_timer_request(struct snd_timer_id *tid)
{
switch (tid->dev_class) {
case SNDRV_TIMER_CLASS_GLOBAL:
if (tid->device < timer_limit)
request_module("snd-timer-%i", tid->device);
break;
case SNDRV_TIMER_CLASS_CARD:
case SNDRV_TIMER_CLASS_PCM:
if (tid->card < snd_ecards_limit)
request_module("snd-card-%i", tid->card);
break;
default:
break;
}
}
#endif
/*
 * look for a master instance matching the slave id of the given slave.
 * when found, relink the slave's open_list entry to the master.
 *
 * call this with register_mutex down.
 */
static int snd_timer_check_slave(struct snd_timer_instance *slave)
{
struct snd_timer *timer;
struct snd_timer_instance *master;
/* FIXME: it's really dumb to look up all entries.. */
list_for_each_entry(timer, &snd_timer_list, device_list) {
list_for_each_entry(master, &timer->open_list_head, open_list) {
if (slave->slave_class == master->slave_class &&
slave->slave_id == master->slave_id) {
if (master->timer->num_instances >=
master->timer->max_instances)
return -EBUSY;
list_move_tail(&slave->open_list,
&master->slave_list_head);
master->timer->num_instances++;
spin_lock_irq(&slave_active_lock);
slave->master = master;
slave->timer = master->timer;
spin_unlock_irq(&slave_active_lock);
return 0;
}
}
}
return 0;
}
/*
 * look for slave instances matching the slave id of the given master.
 * when found, relink the open_list entries of the slaves to the master.
 *
 * call this with register_mutex down.
 */
static int snd_timer_check_master(struct snd_timer_instance *master)
{
struct snd_timer_instance *slave, *tmp;
/* check all pending slaves */
list_for_each_entry_safe(slave, tmp, &snd_timer_slave_list, open_list) {
if (slave->slave_class == master->slave_class &&
slave->slave_id == master->slave_id) {
if (master->timer->num_instances >=
master->timer->max_instances)
return -EBUSY;
list_move_tail(&slave->open_list, &master->slave_list_head);
master->timer->num_instances++;
spin_lock_irq(&slave_active_lock);
spin_lock(&master->timer->lock);
slave->master = master;
slave->timer = master->timer;
if (slave->flags & SNDRV_TIMER_IFLG_RUNNING)
list_add_tail(&slave->active_list,
&master->slave_active_head);
spin_unlock(&master->timer->lock);
spin_unlock_irq(&slave_active_lock);
}
}
return 0;
}
static int snd_timer_close_locked(struct snd_timer_instance *timeri,
struct device **card_devp_to_put);
/*
 * open a timer instance
 * when opening a master, the slave id must be given here.
 */
int snd_timer_open(struct snd_timer_instance **ti,
char *owner, struct snd_timer_id *tid,
unsigned int slave_id)
{
struct snd_timer *timer;
struct snd_timer_instance *timeri = NULL;
struct device *card_dev_to_put = NULL;
int err;
	mutex_lock(&register_mutex);
if (tid->dev_class == SNDRV_TIMER_CLASS_SLAVE) {
/* open a slave instance */
if (tid->dev_sclass <= SNDRV_TIMER_SCLASS_NONE ||
tid->dev_sclass > SNDRV_TIMER_SCLASS_OSS_SEQUENCER) {
pr_debug("ALSA: timer: invalid slave class %i\n",
tid->dev_sclass);
err = -EINVAL;
goto unlock;
}
timeri = snd_timer_instance_new(owner, NULL);
if (!timeri) {
err = -ENOMEM;
goto unlock;
}
timeri->slave_class = tid->dev_sclass;
timeri->slave_id = tid->device;
timeri->flags |= SNDRV_TIMER_IFLG_SLAVE;
list_add_tail(&timeri->open_list, &snd_timer_slave_list);
err = snd_timer_check_slave(timeri);
if (err < 0) {
snd_timer_close_locked(timeri, &card_dev_to_put);
timeri = NULL;
}
goto unlock;
}
/* open a master instance */
timer = snd_timer_find(tid);
#ifdef CONFIG_MODULES
if (!timer) {
		mutex_unlock(&register_mutex);
		snd_timer_request(tid);
		mutex_lock(&register_mutex);
timer = snd_timer_find(tid);
}
#endif
if (!timer) {
err = -ENODEV;
goto unlock;
}
if (!list_empty(&timer->open_list_head)) {
struct snd_timer_instance *t =
list_entry(timer->open_list_head.next,
struct snd_timer_instance, open_list);
if (t->flags & SNDRV_TIMER_IFLG_EXCLUSIVE) {
err = -EBUSY;
goto unlock;
}
}
if (timer->num_instances >= timer->max_instances) {
err = -EBUSY;
goto unlock;
}
timeri = snd_timer_instance_new(owner, timer);
if (!timeri) {
err = -ENOMEM;
goto unlock;
}
/* take a card refcount for safe disconnection */
if (timer->card)
get_device(&timer->card->card_dev);
timeri->slave_class = tid->dev_sclass;
timeri->slave_id = slave_id;
if (list_empty(&timer->open_list_head) && timer->hw.open) {
err = timer->hw.open(timer);
if (err) {
kfree(timeri->owner);
kfree(timeri);
timeri = NULL;
if (timer->card)
card_dev_to_put = &timer->card->card_dev;
module_put(timer->module);
goto unlock;
}
}
list_add_tail(&timeri->open_list, &timer->open_list_head);
timer->num_instances++;
err = snd_timer_check_master(timeri);
if (err < 0) {
snd_timer_close_locked(timeri, &card_dev_to_put);
timeri = NULL;
}
unlock:
	mutex_unlock(&register_mutex);
/* put_device() is called after unlock for avoiding deadlock */
if (card_dev_to_put)
put_device(card_dev_to_put);
*ti = timeri;
return err;
}
EXPORT_SYMBOL(snd_timer_open);
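/*
 * A hedged usage sketch (hypothetical kernel client, not from this
 * file): opening the system timer as a master instance and closing it
 * again. The owner string and slave_id follow the pattern used by
 * snd_timer_user_tselect() below.
 *
 *	struct snd_timer_instance *ti;
 *	struct snd_timer_id tid = {
 *		.dev_class = SNDRV_TIMER_CLASS_GLOBAL,
 *		.dev_sclass = SNDRV_TIMER_SCLASS_APPLICATION,
 *		.card = -1,
 *		.device = SNDRV_TIMER_GLOBAL_SYSTEM,
 *		.subdevice = 0,
 *	};
 *	int err = snd_timer_open(&ti, "sketch client", &tid, current->pid);
 *	if (err < 0)
 *		return err;
 *	...
 *	snd_timer_close(ti);
 */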
/*
* close a timer instance
* call this with register_mutex down.
*/
static int snd_timer_close_locked(struct snd_timer_instance *timeri,
struct device **card_devp_to_put)
{
struct snd_timer *timer = timeri->timer;
struct snd_timer_instance *slave, *tmp;
if (timer) {
spin_lock_irq(&timer->lock);
timeri->flags |= SNDRV_TIMER_IFLG_DEAD;
spin_unlock_irq(&timer->lock);
}
list_del(&timeri->open_list);
/* force to stop the timer */
snd_timer_stop(timeri);
if (timer) {
timer->num_instances--;
		/* wait until the active callback has finished */
spin_lock_irq(&timer->lock);
while (timeri->flags & SNDRV_TIMER_IFLG_CALLBACK) {
spin_unlock_irq(&timer->lock);
udelay(10);
spin_lock_irq(&timer->lock);
}
spin_unlock_irq(&timer->lock);
/* remove slave links */
spin_lock_irq(&slave_active_lock);
spin_lock(&timer->lock);
list_for_each_entry_safe(slave, tmp, &timeri->slave_list_head,
open_list) {
list_move_tail(&slave->open_list, &snd_timer_slave_list);
timer->num_instances--;
slave->master = NULL;
slave->timer = NULL;
list_del_init(&slave->ack_list);
list_del_init(&slave->active_list);
}
spin_unlock(&timer->lock);
spin_unlock_irq(&slave_active_lock);
/* slave doesn't need to release timer resources below */
if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
timer = NULL;
}
if (timeri->private_free)
timeri->private_free(timeri);
kfree(timeri->owner);
kfree(timeri);
if (timer) {
if (list_empty(&timer->open_list_head) && timer->hw.close)
timer->hw.close(timer);
/* release a card refcount for safe disconnection */
if (timer->card)
*card_devp_to_put = &timer->card->card_dev;
module_put(timer->module);
}
return 0;
}
/*
* close a timer instance
*/
int snd_timer_close(struct snd_timer_instance *timeri)
{
struct device *card_dev_to_put = NULL;
int err;
if (snd_BUG_ON(!timeri))
return -ENXIO;
	mutex_lock(&register_mutex);
	err = snd_timer_close_locked(timeri, &card_dev_to_put);
	mutex_unlock(&register_mutex);
/* put_device() is called after unlock for avoiding deadlock */
if (card_dev_to_put)
put_device(card_dev_to_put);
return err;
}
EXPORT_SYMBOL(snd_timer_close);
static unsigned long snd_timer_hw_resolution(struct snd_timer *timer)
{
if (timer->hw.c_resolution)
return timer->hw.c_resolution(timer);
else
return timer->hw.resolution;
}
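/*
 * Sketch of the two reporting styles handled above (hypothetical
 * driver code; the my_* names are assumptions, not from this file):
 *
 *	timer->hw.resolution = 1000000;		// fixed: 1 ms in ns
 *
 *	static unsigned long my_c_resolution(struct snd_timer *t)
 *	{
 *		return my_read_current_period_ns(t);	// varies at runtime
 *	}
 *	timer->hw.c_resolution = my_c_resolution;	// takes precedence
 */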
unsigned long snd_timer_resolution(struct snd_timer_instance *timeri)
{
struct snd_timer * timer;
unsigned long ret = 0;
unsigned long flags;
if (timeri == NULL)
return 0;
timer = timeri->timer;
if (timer) {
spin_lock_irqsave(&timer->lock, flags);
ret = snd_timer_hw_resolution(timer);
spin_unlock_irqrestore(&timer->lock, flags);
}
return ret;
}
EXPORT_SYMBOL(snd_timer_resolution);
static void snd_timer_notify1(struct snd_timer_instance *ti, int event)
{
struct snd_timer *timer = ti->timer;
unsigned long resolution = 0;
struct snd_timer_instance *ts;
struct timespec tstamp;
if (timer_tstamp_monotonic)
ktime_get_ts(&tstamp);
else
getnstimeofday(&tstamp);
if (snd_BUG_ON(event < SNDRV_TIMER_EVENT_START ||
event > SNDRV_TIMER_EVENT_PAUSE))
return;
if (timer &&
(event == SNDRV_TIMER_EVENT_START ||
event == SNDRV_TIMER_EVENT_CONTINUE))
resolution = snd_timer_hw_resolution(timer);
if (ti->ccallback)
ti->ccallback(ti, event, &tstamp, resolution);
if (ti->flags & SNDRV_TIMER_IFLG_SLAVE)
return;
if (timer == NULL)
return;
if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE)
return;
list_for_each_entry(ts, &ti->slave_active_head, active_list)
if (ts->ccallback)
ts->ccallback(ts, event + 100, &tstamp, resolution);
}
/* start/continue a master timer */
static int snd_timer_start1(struct snd_timer_instance *timeri,
bool start, unsigned long ticks)
{
struct snd_timer *timer;
int result;
unsigned long flags;
timer = timeri->timer;
if (!timer)
return -EINVAL;
spin_lock_irqsave(&timer->lock, flags);
if (timeri->flags & SNDRV_TIMER_IFLG_DEAD) {
result = -EINVAL;
goto unlock;
}
if (timer->card && timer->card->shutdown) {
result = -ENODEV;
goto unlock;
}
if (timeri->flags & (SNDRV_TIMER_IFLG_RUNNING |
SNDRV_TIMER_IFLG_START)) {
result = -EBUSY;
goto unlock;
}
if (start)
timeri->ticks = timeri->cticks = ticks;
else if (!timeri->cticks)
timeri->cticks = 1;
timeri->pticks = 0;
list_move_tail(&timeri->active_list, &timer->active_list_head);
if (timer->running) {
if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE)
goto __start_now;
timer->flags |= SNDRV_TIMER_FLG_RESCHED;
timeri->flags |= SNDRV_TIMER_IFLG_START;
result = 1; /* delayed start */
} else {
if (start)
timer->sticks = ticks;
timer->hw.start(timer);
__start_now:
timer->running++;
timeri->flags |= SNDRV_TIMER_IFLG_RUNNING;
result = 0;
}
snd_timer_notify1(timeri, start ? SNDRV_TIMER_EVENT_START :
SNDRV_TIMER_EVENT_CONTINUE);
unlock:
spin_unlock_irqrestore(&timer->lock, flags);
return result;
}
/* start/continue a slave timer */
static int snd_timer_start_slave(struct snd_timer_instance *timeri,
bool start)
{
unsigned long flags;
int err;
spin_lock_irqsave(&slave_active_lock, flags);
if (timeri->flags & SNDRV_TIMER_IFLG_DEAD) {
err = -EINVAL;
goto unlock;
}
if (timeri->flags & SNDRV_TIMER_IFLG_RUNNING) {
err = -EBUSY;
goto unlock;
}
timeri->flags |= SNDRV_TIMER_IFLG_RUNNING;
if (timeri->master && timeri->timer) {
spin_lock(&timeri->timer->lock);
list_add_tail(&timeri->active_list,
&timeri->master->slave_active_head);
snd_timer_notify1(timeri, start ? SNDRV_TIMER_EVENT_START :
SNDRV_TIMER_EVENT_CONTINUE);
spin_unlock(&timeri->timer->lock);
}
err = 1; /* delayed start */
unlock:
spin_unlock_irqrestore(&slave_active_lock, flags);
return err;
}
/* stop/pause a master timer */
static int snd_timer_stop1(struct snd_timer_instance *timeri, bool stop)
{
struct snd_timer *timer;
int result = 0;
unsigned long flags;
timer = timeri->timer;
if (!timer)
return -EINVAL;
spin_lock_irqsave(&timer->lock, flags);
if (!(timeri->flags & (SNDRV_TIMER_IFLG_RUNNING |
SNDRV_TIMER_IFLG_START))) {
result = -EBUSY;
goto unlock;
}
list_del_init(&timeri->ack_list);
list_del_init(&timeri->active_list);
if (timer->card && timer->card->shutdown)
goto unlock;
if (stop) {
timeri->cticks = timeri->ticks;
timeri->pticks = 0;
}
if ((timeri->flags & SNDRV_TIMER_IFLG_RUNNING) &&
!(--timer->running)) {
timer->hw.stop(timer);
if (timer->flags & SNDRV_TIMER_FLG_RESCHED) {
timer->flags &= ~SNDRV_TIMER_FLG_RESCHED;
snd_timer_reschedule(timer, 0);
if (timer->flags & SNDRV_TIMER_FLG_CHANGE) {
timer->flags &= ~SNDRV_TIMER_FLG_CHANGE;
timer->hw.start(timer);
}
}
}
timeri->flags &= ~(SNDRV_TIMER_IFLG_RUNNING | SNDRV_TIMER_IFLG_START);
if (stop)
timeri->flags &= ~SNDRV_TIMER_IFLG_PAUSED;
else
timeri->flags |= SNDRV_TIMER_IFLG_PAUSED;
snd_timer_notify1(timeri, stop ? SNDRV_TIMER_EVENT_STOP :
SNDRV_TIMER_EVENT_PAUSE);
unlock:
spin_unlock_irqrestore(&timer->lock, flags);
return result;
}
/* stop/pause a slave timer */
static int snd_timer_stop_slave(struct snd_timer_instance *timeri, bool stop)
{
unsigned long flags;
spin_lock_irqsave(&slave_active_lock, flags);
if (!(timeri->flags & SNDRV_TIMER_IFLG_RUNNING)) {
spin_unlock_irqrestore(&slave_active_lock, flags);
return -EBUSY;
}
timeri->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
if (timeri->timer) {
spin_lock(&timeri->timer->lock);
list_del_init(&timeri->ack_list);
list_del_init(&timeri->active_list);
snd_timer_notify1(timeri, stop ? SNDRV_TIMER_EVENT_STOP :
SNDRV_TIMER_EVENT_PAUSE);
spin_unlock(&timeri->timer->lock);
}
spin_unlock_irqrestore(&slave_active_lock, flags);
return 0;
}
/*
* start the timer instance
*/
int snd_timer_start(struct snd_timer_instance *timeri, unsigned int ticks)
{
if (timeri == NULL || ticks < 1)
return -EINVAL;
if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
return snd_timer_start_slave(timeri, true);
else
return snd_timer_start1(timeri, true, ticks);
}
EXPORT_SYMBOL(snd_timer_start);
/*
* stop the timer instance.
*
* do not call this from the timer callback!
*/
int snd_timer_stop(struct snd_timer_instance *timeri)
{
if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
return snd_timer_stop_slave(timeri, true);
else
return snd_timer_stop1(timeri, true);
}
EXPORT_SYMBOL(snd_timer_stop);
/*
 * start again after a pause; the remaining ticks are kept
 */
int snd_timer_continue(struct snd_timer_instance *timeri)
{
/* timer can continue only after pause */
if (!(timeri->flags & SNDRV_TIMER_IFLG_PAUSED))
return -EINVAL;
if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
return snd_timer_start_slave(timeri, false);
else
return snd_timer_start1(timeri, false, 0);
}
EXPORT_SYMBOL(snd_timer_continue);
/*
 * pause the timer; the ticks left are remembered
 */
int snd_timer_pause(struct snd_timer_instance * timeri)
{
if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
return snd_timer_stop_slave(timeri, false);
else
return snd_timer_stop1(timeri, false);
}
EXPORT_SYMBOL(snd_timer_pause);
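/*
 * A hedged lifecycle sketch tying the calls above together
 * (hypothetical client; the 10 ms target is an assumption):
 *
 *	unsigned long res = snd_timer_resolution(ti);	// ns per tick
 *	unsigned int ticks = res ? 10000000 / res : 1;	// ~10 ms period
 *	snd_timer_start(ti, ticks);	// returns 0, or 1 for delayed start
 *	...
 *	snd_timer_pause(ti);		// sets SNDRV_TIMER_IFLG_PAUSED
 *	snd_timer_continue(ti);		// only valid after a pause
 *	...
 *	snd_timer_stop(ti);
 */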
/*
* reschedule the timer
*
* start pending instances and check the scheduling ticks.
* when the scheduling ticks is changed set CHANGE flag to reprogram the timer.
*/
static void snd_timer_reschedule(struct snd_timer * timer, unsigned long ticks_left)
{
struct snd_timer_instance *ti;
unsigned long ticks = ~0UL;
list_for_each_entry(ti, &timer->active_list_head, active_list) {
if (ti->flags & SNDRV_TIMER_IFLG_START) {
ti->flags &= ~SNDRV_TIMER_IFLG_START;
ti->flags |= SNDRV_TIMER_IFLG_RUNNING;
timer->running++;
}
if (ti->flags & SNDRV_TIMER_IFLG_RUNNING) {
if (ticks > ti->cticks)
ticks = ti->cticks;
}
}
if (ticks == ~0UL) {
timer->flags &= ~SNDRV_TIMER_FLG_RESCHED;
return;
}
if (ticks > timer->hw.ticks)
ticks = timer->hw.ticks;
if (ticks_left != ticks)
timer->flags |= SNDRV_TIMER_FLG_CHANGE;
timer->sticks = ticks;
}
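/*
 * Worked example of the selection above (illustrative numbers): with
 * two running instances at cticks = 30 and cticks = 50 and
 * hw.ticks = 40, the minimum (30) fits under the hardware limit, so
 * sticks becomes 30; if ticks_left was not already 30, FLG_CHANGE is
 * set and the interrupt path reprograms the hardware.
 */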
/* call callbacks in timer ack list */
static void snd_timer_process_callbacks(struct snd_timer *timer,
struct list_head *head)
{
struct snd_timer_instance *ti;
unsigned long resolution, ticks;
while (!list_empty(head)) {
ti = list_first_entry(head, struct snd_timer_instance,
ack_list);
/* remove from ack_list and make empty */
list_del_init(&ti->ack_list);
if (!(ti->flags & SNDRV_TIMER_IFLG_DEAD)) {
ticks = ti->pticks;
ti->pticks = 0;
resolution = ti->resolution;
ti->flags |= SNDRV_TIMER_IFLG_CALLBACK;
spin_unlock(&timer->lock);
if (ti->callback)
ti->callback(ti, resolution, ticks);
spin_lock(&timer->lock);
ti->flags &= ~SNDRV_TIMER_IFLG_CALLBACK;
}
}
}
/* clear pending instances from ack list */
static void snd_timer_clear_callbacks(struct snd_timer *timer,
struct list_head *head)
{
unsigned long flags;
spin_lock_irqsave(&timer->lock, flags);
while (!list_empty(head))
list_del_init(head->next);
spin_unlock_irqrestore(&timer->lock, flags);
}
/*
 * timer tasklet
 */
static void snd_timer_tasklet(unsigned long arg)
{
struct snd_timer *timer = (struct snd_timer *) arg;
unsigned long flags;
if (timer->card && timer->card->shutdown) {
snd_timer_clear_callbacks(timer, &timer->sack_list_head);
return;
}
spin_lock_irqsave(&timer->lock, flags);
snd_timer_process_callbacks(timer, &timer->sack_list_head);
spin_unlock_irqrestore(&timer->lock, flags);
}
/*
 * timer interrupt
 *
 * ticks_left is usually equal to timer->sticks.
 */
void snd_timer_interrupt(struct snd_timer * timer, unsigned long ticks_left)
{
struct snd_timer_instance *ti, *ts, *tmp;
unsigned long resolution;
struct list_head *ack_list_head;
unsigned long flags;
int use_tasklet = 0;
if (timer == NULL)
return;
if (timer->card && timer->card->shutdown) {
snd_timer_clear_callbacks(timer, &timer->ack_list_head);
return;
}
spin_lock_irqsave(&timer->lock, flags);
/* remember the current resolution */
resolution = snd_timer_hw_resolution(timer);
	/* loop for all active instances
	 * Here we cannot use list_for_each_entry because the active_list of a
	 * processed instance is relinked to the ack list before the callback
	 * is called.
	 */
list_for_each_entry_safe(ti, tmp, &timer->active_list_head,
active_list) {
if (ti->flags & SNDRV_TIMER_IFLG_DEAD)
continue;
if (!(ti->flags & SNDRV_TIMER_IFLG_RUNNING))
continue;
ti->pticks += ticks_left;
ti->resolution = resolution;
if (ti->cticks < ticks_left)
ti->cticks = 0;
else
ti->cticks -= ticks_left;
if (ti->cticks) /* not expired */
continue;
if (ti->flags & SNDRV_TIMER_IFLG_AUTO) {
ti->cticks = ti->ticks;
} else {
ti->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
--timer->running;
list_del_init(&ti->active_list);
}
if ((timer->hw.flags & SNDRV_TIMER_HW_TASKLET) ||
(ti->flags & SNDRV_TIMER_IFLG_FAST))
ack_list_head = &timer->ack_list_head;
else
ack_list_head = &timer->sack_list_head;
if (list_empty(&ti->ack_list))
list_add_tail(&ti->ack_list, ack_list_head);
list_for_each_entry(ts, &ti->slave_active_head, active_list) {
ts->pticks = ti->pticks;
ts->resolution = resolution;
if (list_empty(&ts->ack_list))
list_add_tail(&ts->ack_list, ack_list_head);
}
}
if (timer->flags & SNDRV_TIMER_FLG_RESCHED)
snd_timer_reschedule(timer, timer->sticks);
if (timer->running) {
if (timer->hw.flags & SNDRV_TIMER_HW_STOP) {
timer->hw.stop(timer);
timer->flags |= SNDRV_TIMER_FLG_CHANGE;
}
if (!(timer->hw.flags & SNDRV_TIMER_HW_AUTO) ||
(timer->flags & SNDRV_TIMER_FLG_CHANGE)) {
/* restart timer */
timer->flags &= ~SNDRV_TIMER_FLG_CHANGE;
timer->hw.start(timer);
}
} else {
timer->hw.stop(timer);
}
/* now process all fast callbacks */
snd_timer_process_callbacks(timer, &timer->ack_list_head);
/* do we have any slow callbacks? */
use_tasklet = !list_empty(&timer->sack_list_head);
spin_unlock_irqrestore(&timer->lock, flags);
if (use_tasklet)
tasklet_schedule(&timer->task_queue);
}
EXPORT_SYMBOL(snd_timer_interrupt);
/*
 * create a new timer instance and, when a card is given, register it
 * as a sound device
 */
int snd_timer_new(struct snd_card *card, char *id, struct snd_timer_id *tid,
struct snd_timer **rtimer)
{
struct snd_timer *timer;
int err;
static struct snd_device_ops ops = {
.dev_free = snd_timer_dev_free,
.dev_register = snd_timer_dev_register,
.dev_disconnect = snd_timer_dev_disconnect,
};
if (snd_BUG_ON(!tid))
return -EINVAL;
if (tid->dev_class == SNDRV_TIMER_CLASS_CARD ||
tid->dev_class == SNDRV_TIMER_CLASS_PCM) {
if (WARN_ON(!card))
return -EINVAL;
}
if (rtimer)
*rtimer = NULL;
timer = kzalloc(sizeof(*timer), GFP_KERNEL);
if (!timer)
return -ENOMEM;
timer->tmr_class = tid->dev_class;
timer->card = card;
timer->tmr_device = tid->device;
timer->tmr_subdevice = tid->subdevice;
if (id)
strlcpy(timer->id, id, sizeof(timer->id));
timer->sticks = 1;
INIT_LIST_HEAD(&timer->device_list);
INIT_LIST_HEAD(&timer->open_list_head);
INIT_LIST_HEAD(&timer->active_list_head);
INIT_LIST_HEAD(&timer->ack_list_head);
INIT_LIST_HEAD(&timer->sack_list_head);
spin_lock_init(&timer->lock);
tasklet_init(&timer->task_queue, snd_timer_tasklet,
(unsigned long)timer);
timer->max_instances = 1000; /* default limit per timer */
if (card != NULL) {
timer->module = card->module;
err = snd_device_new(card, SNDRV_DEV_TIMER, timer, &ops);
if (err < 0) {
snd_timer_free(timer);
return err;
}
}
if (rtimer)
*rtimer = timer;
return 0;
}
EXPORT_SYMBOL(snd_timer_new);
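/*
 * A hedged sketch of how a card driver might create a timer with
 * snd_timer_new() (hypothetical driver; my_timer_hw is an assumption
 * and must provide start/stop callbacks and a resolution):
 *
 *	struct snd_timer *t;
 *	struct snd_timer_id tid = {
 *		.dev_class = SNDRV_TIMER_CLASS_CARD,
 *		.dev_sclass = SNDRV_TIMER_SCLASS_NONE,
 *		.card = card->number,
 *		.device = 0,
 *		.subdevice = 0,
 *	};
 *	int err = snd_timer_new(card, "my-timer", &tid, &t);
 *	if (err < 0)
 *		return err;
 *	strcpy(t->name, "my card timer");
 *	t->hw = my_timer_hw;
 */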
static int snd_timer_free(struct snd_timer *timer)
{
if (!timer)
return 0;
	mutex_lock(&register_mutex);
	if (!list_empty(&timer->open_list_head)) {
struct list_head *p, *n;
struct snd_timer_instance *ti;
pr_warn("ALSA: timer %p is busy?\n", timer);
list_for_each_safe(p, n, &timer->open_list_head) {
list_del_init(p);
ti = list_entry(p, struct snd_timer_instance, open_list);
ti->timer = NULL;
}
}
list_del(&timer->device_list);
	mutex_unlock(&register_mutex);
if (timer->private_free)
timer->private_free(timer);
kfree(timer);
return 0;
}
static int snd_timer_dev_free(struct snd_device *device)
{
struct snd_timer *timer = device->device_data;
return snd_timer_free(timer);
}
static int snd_timer_dev_register(struct snd_device *dev)
{
struct snd_timer *timer = dev->device_data;
struct snd_timer *timer1;
if (snd_BUG_ON(!timer || !timer->hw.start || !timer->hw.stop))
return -ENXIO;
if (!(timer->hw.flags & SNDRV_TIMER_HW_SLAVE) &&
!timer->hw.resolution && timer->hw.c_resolution == NULL)
return -EINVAL;
	mutex_lock(&register_mutex);
list_for_each_entry(timer1, &snd_timer_list, device_list) {
if (timer1->tmr_class > timer->tmr_class)
break;
if (timer1->tmr_class < timer->tmr_class)
continue;
if (timer1->card && timer->card) {
if (timer1->card->number > timer->card->number)
break;
if (timer1->card->number < timer->card->number)
continue;
}
if (timer1->tmr_device > timer->tmr_device)
break;
if (timer1->tmr_device < timer->tmr_device)
continue;
if (timer1->tmr_subdevice > timer->tmr_subdevice)
break;
if (timer1->tmr_subdevice < timer->tmr_subdevice)
continue;
/* conflicts.. */
		mutex_unlock(&register_mutex);
return -EBUSY;
}
list_add_tail(&timer->device_list, &timer1->device_list);
	mutex_unlock(&register_mutex);
return 0;
}
static int snd_timer_dev_disconnect(struct snd_device *device)
{
struct snd_timer *timer = device->device_data;
struct snd_timer_instance *ti;
	mutex_lock(&register_mutex);
list_del_init(&timer->device_list);
/* wake up pending sleepers */
list_for_each_entry(ti, &timer->open_list_head, open_list) {
if (ti->disconnect)
ti->disconnect(ti);
}
	mutex_unlock(&register_mutex);
return 0;
}
void snd_timer_notify(struct snd_timer *timer, int event, struct timespec *tstamp)
{
unsigned long flags;
unsigned long resolution = 0;
struct snd_timer_instance *ti, *ts;
if (timer->card && timer->card->shutdown)
return;
	if (!(timer->hw.flags & SNDRV_TIMER_HW_SLAVE))
return;
if (snd_BUG_ON(event < SNDRV_TIMER_EVENT_MSTART ||
event > SNDRV_TIMER_EVENT_MRESUME))
return;
spin_lock_irqsave(&timer->lock, flags);
if (event == SNDRV_TIMER_EVENT_MSTART ||
event == SNDRV_TIMER_EVENT_MCONTINUE ||
event == SNDRV_TIMER_EVENT_MRESUME)
resolution = snd_timer_hw_resolution(timer);
list_for_each_entry(ti, &timer->active_list_head, active_list) {
if (ti->ccallback)
ti->ccallback(ti, event, tstamp, resolution);
list_for_each_entry(ts, &ti->slave_active_head, active_list)
if (ts->ccallback)
ts->ccallback(ts, event, tstamp, resolution);
}
spin_unlock_irqrestore(&timer->lock, flags);
}
EXPORT_SYMBOL(snd_timer_notify);
/*
* exported functions for global timers
*/
int snd_timer_global_new(char *id, int device, struct snd_timer **rtimer)
{
struct snd_timer_id tid;
tid.dev_class = SNDRV_TIMER_CLASS_GLOBAL;
tid.dev_sclass = SNDRV_TIMER_SCLASS_NONE;
tid.card = -1;
tid.device = device;
tid.subdevice = 0;
return snd_timer_new(NULL, id, &tid, rtimer);
}
EXPORT_SYMBOL(snd_timer_global_new);
int snd_timer_global_free(struct snd_timer *timer)
{
return snd_timer_free(timer);
}
EXPORT_SYMBOL(snd_timer_global_free);
int snd_timer_global_register(struct snd_timer *timer)
{
struct snd_device dev;
memset(&dev, 0, sizeof(dev));
dev.device_data = timer;
return snd_timer_dev_register(&dev);
}
EXPORT_SYMBOL(snd_timer_global_register);
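/*
 * Sketch of the three global-timer helpers used together (hypothetical
 * module; my_device and my_hw are assumptions, see the real usage in
 * snd_timer_register_system() below):
 *
 *	struct snd_timer *t;
 *	int err = snd_timer_global_new("sketch", my_device, &t);
 *	if (err < 0)
 *		return err;
 *	t->hw = my_hw;				// start/stop/resolution needed
 *	err = snd_timer_global_register(t);
 *	if (err < 0)
 *		snd_timer_global_free(t);
 */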
/*
* System timer
*/
struct snd_timer_system_private {
struct timer_list tlist;
struct snd_timer *snd_timer;
unsigned long last_expires;
unsigned long last_jiffies;
unsigned long correction;
};
static void snd_timer_s_function(struct timer_list *t)
{
struct snd_timer_system_private *priv = from_timer(priv, t,
tlist);
struct snd_timer *timer = priv->snd_timer;
unsigned long jiff = jiffies;
if (time_after(jiff, priv->last_expires))
priv->correction += (long)jiff - (long)priv->last_expires;
snd_timer_interrupt(timer, (long)jiff - (long)priv->last_jiffies);
}
static int snd_timer_s_start(struct snd_timer * timer)
{
struct snd_timer_system_private *priv;
unsigned long njiff;
priv = (struct snd_timer_system_private *) timer->private_data;
njiff = (priv->last_jiffies = jiffies);
if (priv->correction > timer->sticks - 1) {
priv->correction -= timer->sticks - 1;
njiff++;
} else {
njiff += timer->sticks - priv->correction;
priv->correction = 0;
}
priv->last_expires = njiff;
mod_timer(&priv->tlist, njiff);
return 0;
}
static int snd_timer_s_stop(struct snd_timer * timer)
{
struct snd_timer_system_private *priv;
unsigned long jiff;
priv = (struct snd_timer_system_private *) timer->private_data;
del_timer(&priv->tlist);
jiff = jiffies;
if (time_before(jiff, priv->last_expires))
timer->sticks = priv->last_expires - jiff;
else
timer->sticks = 1;
priv->correction = 0;
return 0;
}
static int snd_timer_s_close(struct snd_timer *timer)
{
struct snd_timer_system_private *priv;
priv = (struct snd_timer_system_private *)timer->private_data;
del_timer_sync(&priv->tlist);
return 0;
}
static struct snd_timer_hardware snd_timer_system =
{
.flags = SNDRV_TIMER_HW_FIRST | SNDRV_TIMER_HW_TASKLET,
.resolution = 1000000000L / HZ,
.ticks = 10000000L,
.close = snd_timer_s_close,
.start = snd_timer_s_start,
.stop = snd_timer_s_stop
};
static void snd_timer_free_system(struct snd_timer *timer)
{
kfree(timer->private_data);
}
static int snd_timer_register_system(void)
{
struct snd_timer *timer;
struct snd_timer_system_private *priv;
int err;
err = snd_timer_global_new("system", SNDRV_TIMER_GLOBAL_SYSTEM, &timer);
if (err < 0)
return err;
strcpy(timer->name, "system timer");
timer->hw = snd_timer_system;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (priv == NULL) {
snd_timer_free(timer);
return -ENOMEM;
}
priv->snd_timer = timer;
timer_setup(&priv->tlist, snd_timer_s_function, 0);
timer->private_data = priv;
timer->private_free = snd_timer_free_system;
return snd_timer_global_register(timer);
}
#ifdef CONFIG_SND_PROC_FS
/*
* Info interface
*/
static void snd_timer_proc_read(struct snd_info_entry *entry,
struct snd_info_buffer *buffer)
{
struct snd_timer *timer;
struct snd_timer_instance *ti;
	mutex_lock(&register_mutex);
list_for_each_entry(timer, &snd_timer_list, device_list) {
if (timer->card && timer->card->shutdown)
continue;
switch (timer->tmr_class) {
case SNDRV_TIMER_CLASS_GLOBAL:
snd_iprintf(buffer, "G%i: ", timer->tmr_device);
break;
case SNDRV_TIMER_CLASS_CARD:
snd_iprintf(buffer, "C%i-%i: ",
timer->card->number, timer->tmr_device);
break;
case SNDRV_TIMER_CLASS_PCM:
snd_iprintf(buffer, "P%i-%i-%i: ", timer->card->number,
timer->tmr_device, timer->tmr_subdevice);
break;
default:
snd_iprintf(buffer, "?%i-%i-%i-%i: ", timer->tmr_class,
timer->card ? timer->card->number : -1,
timer->tmr_device, timer->tmr_subdevice);
}
snd_iprintf(buffer, "%s :", timer->name);
if (timer->hw.resolution)
snd_iprintf(buffer, " %lu.%03luus (%lu ticks)",
timer->hw.resolution / 1000,
timer->hw.resolution % 1000,
timer->hw.ticks);
if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE)
snd_iprintf(buffer, " SLAVE");
snd_iprintf(buffer, "\n");
list_for_each_entry(ti, &timer->open_list_head, open_list)
snd_iprintf(buffer, " Client %s : %s\n",
ti->owner ? ti->owner : "unknown",
ti->flags & (SNDRV_TIMER_IFLG_START |
SNDRV_TIMER_IFLG_RUNNING)
? "running" : "stopped");
}
	mutex_unlock(&register_mutex);
}
static struct snd_info_entry *snd_timer_proc_entry;
static void __init snd_timer_proc_init(void)
{
struct snd_info_entry *entry;
entry = snd_info_create_module_entry(THIS_MODULE, "timers", NULL);
if (entry != NULL) {
entry->c.text.read = snd_timer_proc_read;
if (snd_info_register(entry) < 0) {
snd_info_free_entry(entry);
entry = NULL;
}
}
snd_timer_proc_entry = entry;
}
static void __exit snd_timer_proc_done(void)
{
snd_info_free_entry(snd_timer_proc_entry);
}
#else /* !CONFIG_SND_PROC_FS */
#define snd_timer_proc_init()
#define snd_timer_proc_done()
#endif
/*
* USER SPACE interface
*/
static void snd_timer_user_interrupt(struct snd_timer_instance *timeri,
unsigned long resolution,
unsigned long ticks)
{
struct snd_timer_user *tu = timeri->callback_data;
struct snd_timer_read *r;
int prev;
spin_lock(&tu->qlock);
if (tu->qused > 0) {
prev = tu->qtail == 0 ? tu->queue_size - 1 : tu->qtail - 1;
r = &tu->queue[prev];
if (r->resolution == resolution) {
r->ticks += ticks;
goto __wake;
}
}
if (tu->qused >= tu->queue_size) {
tu->overrun++;
} else {
r = &tu->queue[tu->qtail++];
tu->qtail %= tu->queue_size;
r->resolution = resolution;
r->ticks = ticks;
tu->qused++;
}
__wake:
spin_unlock(&tu->qlock);
kill_fasync(&tu->fasync, SIGIO, POLL_IN);
wake_up(&tu->qchange_sleep);
}
static void snd_timer_user_append_to_tqueue(struct snd_timer_user *tu,
struct snd_timer_tread *tread)
{
if (tu->qused >= tu->queue_size) {
tu->overrun++;
} else {
memcpy(&tu->tqueue[tu->qtail++], tread, sizeof(*tread));
tu->qtail %= tu->queue_size;
tu->qused++;
}
}
static void snd_timer_user_ccallback(struct snd_timer_instance *timeri,
int event,
struct timespec *tstamp,
unsigned long resolution)
{
struct snd_timer_user *tu = timeri->callback_data;
struct snd_timer_tread r1;
unsigned long flags;
if (event >= SNDRV_TIMER_EVENT_START &&
event <= SNDRV_TIMER_EVENT_PAUSE)
tu->tstamp = *tstamp;
if ((tu->filter & (1 << event)) == 0 || !tu->tread)
return;
memset(&r1, 0, sizeof(r1));
r1.event = event;
r1.tstamp = *tstamp;
r1.val = resolution;
spin_lock_irqsave(&tu->qlock, flags);
snd_timer_user_append_to_tqueue(tu, &r1);
spin_unlock_irqrestore(&tu->qlock, flags);
kill_fasync(&tu->fasync, SIGIO, POLL_IN);
wake_up(&tu->qchange_sleep);
}
static void snd_timer_user_disconnect(struct snd_timer_instance *timeri)
{
struct snd_timer_user *tu = timeri->callback_data;
tu->disconnected = true;
wake_up(&tu->qchange_sleep);
}
static void snd_timer_user_tinterrupt(struct snd_timer_instance *timeri,
unsigned long resolution,
unsigned long ticks)
{
struct snd_timer_user *tu = timeri->callback_data;
struct snd_timer_tread *r, r1;
struct timespec tstamp;
int prev, append = 0;
memset(&r1, 0, sizeof(r1));
memset(&tstamp, 0, sizeof(tstamp));
spin_lock(&tu->qlock);
if ((tu->filter & ((1 << SNDRV_TIMER_EVENT_RESOLUTION) |
(1 << SNDRV_TIMER_EVENT_TICK))) == 0) {
spin_unlock(&tu->qlock);
return;
}
if (tu->last_resolution != resolution || ticks > 0) {
if (timer_tstamp_monotonic)
ktime_get_ts(&tstamp);
else
getnstimeofday(&tstamp);
}
if ((tu->filter & (1 << SNDRV_TIMER_EVENT_RESOLUTION)) &&
tu->last_resolution != resolution) {
r1.event = SNDRV_TIMER_EVENT_RESOLUTION;
r1.tstamp = tstamp;
r1.val = resolution;
snd_timer_user_append_to_tqueue(tu, &r1);
tu->last_resolution = resolution;
append++;
}
if ((tu->filter & (1 << SNDRV_TIMER_EVENT_TICK)) == 0)
goto __wake;
if (ticks == 0)
goto __wake;
if (tu->qused > 0) {
prev = tu->qtail == 0 ? tu->queue_size - 1 : tu->qtail - 1;
r = &tu->tqueue[prev];
if (r->event == SNDRV_TIMER_EVENT_TICK) {
r->tstamp = tstamp;
r->val += ticks;
append++;
goto __wake;
}
}
r1.event = SNDRV_TIMER_EVENT_TICK;
r1.tstamp = tstamp;
r1.val = ticks;
snd_timer_user_append_to_tqueue(tu, &r1);
append++;
__wake:
spin_unlock(&tu->qlock);
if (append == 0)
return;
kill_fasync(&tu->fasync, SIGIO, POLL_IN);
wake_up(&tu->qchange_sleep);
}
static int realloc_user_queue(struct snd_timer_user *tu, int size)
{
struct snd_timer_read *queue = NULL;
struct snd_timer_tread *tqueue = NULL;
if (tu->tread) {
tqueue = kcalloc(size, sizeof(*tqueue), GFP_KERNEL);
if (!tqueue)
return -ENOMEM;
} else {
queue = kcalloc(size, sizeof(*queue), GFP_KERNEL);
if (!queue)
return -ENOMEM;
}
spin_lock_irq(&tu->qlock);
kfree(tu->queue);
kfree(tu->tqueue);
tu->queue_size = size;
tu->queue = queue;
tu->tqueue = tqueue;
tu->qhead = tu->qtail = tu->qused = 0;
spin_unlock_irq(&tu->qlock);
return 0;
}
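/*
 * Design note on the ordering above: the new queue is allocated with
 * GFP_KERNEL before qlock is taken, since kcalloc() may sleep while a
 * spinlock is held. Swapping the pointers and kfree()ing the old
 * buffers under the lock is fine because kfree() never sleeps. A
 * condensed view of the pattern:
 *
 *	new = kcalloc(size, elem_size, GFP_KERNEL);	// may sleep: no lock held
 *	spin_lock_irq(&tu->qlock);
 *	kfree(old);			// atomic: safe under the lock
 *	tu->queue = new;
 *	spin_unlock_irq(&tu->qlock);
 */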
static int snd_timer_user_open(struct inode *inode, struct file *file)
{
struct snd_timer_user *tu;
int err;
err = stream_open(inode, file);
if (err < 0)
return err;
tu = kzalloc(sizeof(*tu), GFP_KERNEL);
if (tu == NULL)
return -ENOMEM;
spin_lock_init(&tu->qlock);
init_waitqueue_head(&tu->qchange_sleep);
mutex_init(&tu->ioctl_lock);
tu->ticks = 1;
if (realloc_user_queue(tu, 128) < 0) {
kfree(tu);
return -ENOMEM;
}
file->private_data = tu;
return 0;
}
static int snd_timer_user_release(struct inode *inode, struct file *file)
{
struct snd_timer_user *tu;
if (file->private_data) {
tu = file->private_data;
file->private_data = NULL;
mutex_lock(&tu->ioctl_lock);
if (tu->timeri)
snd_timer_close(tu->timeri);
mutex_unlock(&tu->ioctl_lock);
kfree(tu->queue);
kfree(tu->tqueue);
kfree(tu);
}
return 0;
}
static void snd_timer_user_zero_id(struct snd_timer_id *id)
{
id->dev_class = SNDRV_TIMER_CLASS_NONE;
id->dev_sclass = SNDRV_TIMER_SCLASS_NONE;
id->card = -1;
id->device = -1;
id->subdevice = -1;
}
static void snd_timer_user_copy_id(struct snd_timer_id *id, struct snd_timer *timer)
{
id->dev_class = timer->tmr_class;
id->dev_sclass = SNDRV_TIMER_SCLASS_NONE;
id->card = timer->card ? timer->card->number : -1;
id->device = timer->tmr_device;
id->subdevice = timer->tmr_subdevice;
}
static int snd_timer_user_next_device(struct snd_timer_id __user *_tid)
{
struct snd_timer_id id;
struct snd_timer *timer;
struct list_head *p;
if (copy_from_user(&id, _tid, sizeof(id)))
return -EFAULT;
	mutex_lock(&register_mutex);
if (id.dev_class < 0) { /* first item */
if (list_empty(&snd_timer_list))
snd_timer_user_zero_id(&id);
else {
timer = list_entry(snd_timer_list.next,
struct snd_timer, device_list);
snd_timer_user_copy_id(&id, timer);
}
} else {
switch (id.dev_class) {
case SNDRV_TIMER_CLASS_GLOBAL:
id.device = id.device < 0 ? 0 : id.device + 1;
list_for_each(p, &snd_timer_list) {
timer = list_entry(p, struct snd_timer, device_list);
if (timer->tmr_class > SNDRV_TIMER_CLASS_GLOBAL) {
snd_timer_user_copy_id(&id, timer);
break;
}
if (timer->tmr_device >= id.device) {
snd_timer_user_copy_id(&id, timer);
break;
}
}
if (p == &snd_timer_list)
snd_timer_user_zero_id(&id);
break;
case SNDRV_TIMER_CLASS_CARD:
case SNDRV_TIMER_CLASS_PCM:
if (id.card < 0) {
id.card = 0;
} else {
if (id.device < 0) {
id.device = 0;
} else {
if (id.subdevice < 0)
id.subdevice = 0;
else if (id.subdevice < INT_MAX)
id.subdevice++;
}
}
list_for_each(p, &snd_timer_list) {
timer = list_entry(p, struct snd_timer, device_list);
if (timer->tmr_class > id.dev_class) {
snd_timer_user_copy_id(&id, timer);
break;
}
if (timer->tmr_class < id.dev_class)
continue;
if (timer->card->number > id.card) {
snd_timer_user_copy_id(&id, timer);
break;
}
if (timer->card->number < id.card)
continue;
if (timer->tmr_device > id.device) {
snd_timer_user_copy_id(&id, timer);
break;
}
if (timer->tmr_device < id.device)
continue;
if (timer->tmr_subdevice > id.subdevice) {
snd_timer_user_copy_id(&id, timer);
break;
}
if (timer->tmr_subdevice < id.subdevice)
continue;
snd_timer_user_copy_id(&id, timer);
break;
}
if (p == &snd_timer_list)
snd_timer_user_zero_id(&id);
break;
default:
snd_timer_user_zero_id(&id);
}
}
	mutex_unlock(&register_mutex);
if (copy_to_user(_tid, &id, sizeof(*_tid)))
return -EFAULT;
return 0;
}
static int snd_timer_user_ginfo(struct file *file,
struct snd_timer_ginfo __user *_ginfo)
{
struct snd_timer_ginfo *ginfo;
struct snd_timer_id tid;
struct snd_timer *t;
struct list_head *p;
int err = 0;
ginfo = memdup_user(_ginfo, sizeof(*ginfo));
if (IS_ERR(ginfo))
return PTR_ERR(ginfo);
tid = ginfo->tid;
memset(ginfo, 0, sizeof(*ginfo));
ginfo->tid = tid;
	mutex_lock(&register_mutex);
t = snd_timer_find(&tid);
if (t != NULL) {
ginfo->card = t->card ? t->card->number : -1;
if (t->hw.flags & SNDRV_TIMER_HW_SLAVE)
ginfo->flags |= SNDRV_TIMER_FLG_SLAVE;
strlcpy(ginfo->id, t->id, sizeof(ginfo->id));
strlcpy(ginfo->name, t->name, sizeof(ginfo->name));
ginfo->resolution = t->hw.resolution;
if (t->hw.resolution_min > 0) {
ginfo->resolution_min = t->hw.resolution_min;
ginfo->resolution_max = t->hw.resolution_max;
}
list_for_each(p, &t->open_list_head) {
ginfo->clients++;
}
} else {
err = -ENODEV;
}
	mutex_unlock(&register_mutex);
if (err >= 0 && copy_to_user(_ginfo, ginfo, sizeof(*ginfo)))
err = -EFAULT;
kfree(ginfo);
return err;
}
static int timer_set_gparams(struct snd_timer_gparams *gparams)
{
struct snd_timer *t;
int err;
	mutex_lock(&register_mutex);
t = snd_timer_find(&gparams->tid);
if (!t) {
err = -ENODEV;
goto _error;
}
if (!list_empty(&t->open_list_head)) {
err = -EBUSY;
goto _error;
}
if (!t->hw.set_period) {
err = -ENOSYS;
goto _error;
}
err = t->hw.set_period(t, gparams->period_num, gparams->period_den);
_error:
	mutex_unlock(&register_mutex);
return err;
}
static int snd_timer_user_gparams(struct file *file,
struct snd_timer_gparams __user *_gparams)
{
struct snd_timer_gparams gparams;
if (copy_from_user(&gparams, _gparams, sizeof(gparams)))
return -EFAULT;
return timer_set_gparams(&gparams);
}
static int snd_timer_user_gstatus(struct file *file,
struct snd_timer_gstatus __user *_gstatus)
{
struct snd_timer_gstatus gstatus;
struct snd_timer_id tid;
struct snd_timer *t;
int err = 0;
if (copy_from_user(&gstatus, _gstatus, sizeof(gstatus)))
return -EFAULT;
tid = gstatus.tid;
memset(&gstatus, 0, sizeof(gstatus));
gstatus.tid = tid;
	mutex_lock(&register_mutex);
t = snd_timer_find(&tid);
if (t != NULL) {
spin_lock_irq(&t->lock);
gstatus.resolution = snd_timer_hw_resolution(t);
if (t->hw.precise_resolution) {
t->hw.precise_resolution(t, &gstatus.resolution_num,
&gstatus.resolution_den);
} else {
gstatus.resolution_num = gstatus.resolution;
gstatus.resolution_den = 1000000000uL;
}
spin_unlock_irq(&t->lock);
} else {
err = -ENODEV;
}
	mutex_unlock(&register_mutex);
if (err >= 0 && copy_to_user(_gstatus, &gstatus, sizeof(gstatus)))
err = -EFAULT;
return err;
}
static int snd_timer_user_tselect(struct file *file,
struct snd_timer_select __user *_tselect)
{
struct snd_timer_user *tu;
struct snd_timer_select tselect;
char str[32];
int err = 0;
tu = file->private_data;
if (tu->timeri) {
snd_timer_close(tu->timeri);
tu->timeri = NULL;
}
if (copy_from_user(&tselect, _tselect, sizeof(tselect))) {
err = -EFAULT;
goto __err;
}
sprintf(str, "application %i", current->pid);
if (tselect.id.dev_class != SNDRV_TIMER_CLASS_SLAVE)
tselect.id.dev_sclass = SNDRV_TIMER_SCLASS_APPLICATION;
err = snd_timer_open(&tu->timeri, str, &tselect.id, current->pid);
if (err < 0)
goto __err;
tu->timeri->flags |= SNDRV_TIMER_IFLG_FAST;
tu->timeri->callback = tu->tread
? snd_timer_user_tinterrupt : snd_timer_user_interrupt;
tu->timeri->ccallback = snd_timer_user_ccallback;
tu->timeri->callback_data = (void *)tu;
tu->timeri->disconnect = snd_timer_user_disconnect;
__err:
return err;
}
static int snd_timer_user_info(struct file *file,
struct snd_timer_info __user *_info)
{
struct snd_timer_user *tu;
struct snd_timer_info *info;
struct snd_timer *t;
int err = 0;
tu = file->private_data;
if (!tu->timeri)
return -EBADFD;
t = tu->timeri->timer;
if (!t)
return -EBADFD;
info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
return -ENOMEM;
info->card = t->card ? t->card->number : -1;
if (t->hw.flags & SNDRV_TIMER_HW_SLAVE)
info->flags |= SNDRV_TIMER_FLG_SLAVE;
strlcpy(info->id, t->id, sizeof(info->id));
strlcpy(info->name, t->name, sizeof(info->name));
info->resolution = t->hw.resolution;
if (copy_to_user(_info, info, sizeof(*_info)))
err = -EFAULT;
kfree(info);
return err;
}
static int snd_timer_user_params(struct file *file,
struct snd_timer_params __user *_params)
{
struct snd_timer_user *tu;
struct snd_timer_params params;
struct snd_timer *t;
int err;
tu = file->private_data;
if (!tu->timeri)
return -EBADFD;
t = tu->timeri->timer;
if (!t)
return -EBADFD;
	if (copy_from_user(&params, _params, sizeof(params)))
return -EFAULT;
if (!(t->hw.flags & SNDRV_TIMER_HW_SLAVE)) {
u64 resolution;
if (params.ticks < 1) {
err = -EINVAL;
goto _end;
}
/* Don't allow resolution less than 1ms */
resolution = snd_timer_resolution(tu->timeri);
resolution *= params.ticks;
if (resolution < 1000000) {
err = -EINVAL;
goto _end;
}
}
if (params.queue_size > 0 &&
(params.queue_size < 32 || params.queue_size > 1024)) {
err = -EINVAL;
goto _end;
}
if (params.filter & ~((1<<SNDRV_TIMER_EVENT_RESOLUTION)|
(1<<SNDRV_TIMER_EVENT_TICK)|
(1<<SNDRV_TIMER_EVENT_START)|
(1<<SNDRV_TIMER_EVENT_STOP)|
(1<<SNDRV_TIMER_EVENT_CONTINUE)|
(1<<SNDRV_TIMER_EVENT_PAUSE)|
(1<<SNDRV_TIMER_EVENT_SUSPEND)|
(1<<SNDRV_TIMER_EVENT_RESUME)|
(1<<SNDRV_TIMER_EVENT_MSTART)|
(1<<SNDRV_TIMER_EVENT_MSTOP)|
(1<<SNDRV_TIMER_EVENT_MCONTINUE)|
(1<<SNDRV_TIMER_EVENT_MPAUSE)|
(1<<SNDRV_TIMER_EVENT_MSUSPEND)|
(1<<SNDRV_TIMER_EVENT_MRESUME))) {
err = -EINVAL;
goto _end;
}
snd_timer_stop(tu->timeri);
spin_lock_irq(&t->lock);
tu->timeri->flags &= ~(SNDRV_TIMER_IFLG_AUTO|
SNDRV_TIMER_IFLG_EXCLUSIVE|
SNDRV_TIMER_IFLG_EARLY_EVENT);
if (params.flags & SNDRV_TIMER_PSFLG_AUTO)
tu->timeri->flags |= SNDRV_TIMER_IFLG_AUTO;
if (params.flags & SNDRV_TIMER_PSFLG_EXCLUSIVE)
tu->timeri->flags |= SNDRV_TIMER_IFLG_EXCLUSIVE;
if (params.flags & SNDRV_TIMER_PSFLG_EARLY_EVENT)
tu->timeri->flags |= SNDRV_TIMER_IFLG_EARLY_EVENT;
spin_unlock_irq(&t->lock);
if (params.queue_size > 0 &&
(unsigned int)tu->queue_size != params.queue_size) {
err = realloc_user_queue(tu, params.queue_size);
if (err < 0)
goto _end;
}
spin_lock_irq(&tu->qlock);
tu->qhead = tu->qtail = tu->qused = 0;
if (tu->timeri->flags & SNDRV_TIMER_IFLG_EARLY_EVENT) {
if (tu->tread) {
struct snd_timer_tread tread;
memset(&tread, 0, sizeof(tread));
tread.event = SNDRV_TIMER_EVENT_EARLY;
tread.tstamp.tv_sec = 0;
tread.tstamp.tv_nsec = 0;
tread.val = 0;
snd_timer_user_append_to_tqueue(tu, &tread);
} else {
struct snd_timer_read *r = &tu->queue[0];
r->resolution = 0;
r->ticks = 0;
tu->qused++;
tu->qtail++;
}
}
tu->filter = params.filter;
tu->ticks = params.ticks;
spin_unlock_irq(&tu->qlock);
err = 0;
_end:
	if (copy_to_user(_params, &params, sizeof(params)))
return -EFAULT;
return err;
}
static int snd_timer_user_status(struct file *file,
struct snd_timer_status __user *_status)
{
struct snd_timer_user *tu;
struct snd_timer_status status;
tu = file->private_data;
if (!tu->timeri)
return -EBADFD;
memset(&status, 0, sizeof(status));
status.tstamp = tu->tstamp;
status.resolution = snd_timer_resolution(tu->timeri);
status.lost = tu->timeri->lost;
status.overrun = tu->overrun;
spin_lock_irq(&tu->qlock);
status.queue = tu->qused;
spin_unlock_irq(&tu->qlock);
if (copy_to_user(_status, &status, sizeof(status)))
return -EFAULT;
return 0;
}
static int snd_timer_user_start(struct file *file)
{
int err;
struct snd_timer_user *tu;
tu = file->private_data;
if (!tu->timeri)
return -EBADFD;
snd_timer_stop(tu->timeri);
tu->timeri->lost = 0;
tu->last_resolution = 0;
err = snd_timer_start(tu->timeri, tu->ticks);
if (err < 0)
return err;
return 0;
}
static int snd_timer_user_stop(struct file *file)
{
int err;
struct snd_timer_user *tu;
tu = file->private_data;
if (!tu->timeri)
return -EBADFD;
err = snd_timer_stop(tu->timeri);
if (err < 0)
return err;
return 0;
}
static int snd_timer_user_continue(struct file *file)
{
int err;
struct snd_timer_user *tu;
tu = file->private_data;
if (!tu->timeri)
return -EBADFD;
	/* start the timer instead of continuing if it was not paused before */
if (!(tu->timeri->flags & SNDRV_TIMER_IFLG_PAUSED))
return snd_timer_user_start(file);
tu->timeri->lost = 0;
err = snd_timer_continue(tu->timeri);
if (err < 0)
return err;
return 0;
}
static int snd_timer_user_pause(struct file *file)
{
int err;
struct snd_timer_user *tu;
tu = file->private_data;
if (!tu->timeri)
return -EBADFD;
err = snd_timer_pause(tu->timeri);
if (err < 0)
return err;
return 0;
}
enum {
SNDRV_TIMER_IOCTL_START_OLD = _IO('T', 0x20),
SNDRV_TIMER_IOCTL_STOP_OLD = _IO('T', 0x21),
SNDRV_TIMER_IOCTL_CONTINUE_OLD = _IO('T', 0x22),
SNDRV_TIMER_IOCTL_PAUSE_OLD = _IO('T', 0x23),
};
static long __snd_timer_user_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct snd_timer_user *tu;
void __user *argp = (void __user *)arg;
int __user *p = argp;
tu = file->private_data;
switch (cmd) {
case SNDRV_TIMER_IOCTL_PVERSION:
return put_user(SNDRV_TIMER_VERSION, p) ? -EFAULT : 0;
case SNDRV_TIMER_IOCTL_NEXT_DEVICE:
return snd_timer_user_next_device(argp);
case SNDRV_TIMER_IOCTL_TREAD:
{
int xarg, old_tread;
if (tu->timeri) /* too late */
return -EBUSY;
if (get_user(xarg, p))
return -EFAULT;
old_tread = tu->tread;
tu->tread = xarg ? 1 : 0;
if (tu->tread != old_tread &&
realloc_user_queue(tu, tu->queue_size) < 0) {
tu->tread = old_tread;
return -ENOMEM;
}
return 0;
}
case SNDRV_TIMER_IOCTL_GINFO:
return snd_timer_user_ginfo(file, argp);
case SNDRV_TIMER_IOCTL_GPARAMS:
return snd_timer_user_gparams(file, argp);
case SNDRV_TIMER_IOCTL_GSTATUS:
return snd_timer_user_gstatus(file, argp);
case SNDRV_TIMER_IOCTL_SELECT:
return snd_timer_user_tselect(file, argp);
case SNDRV_TIMER_IOCTL_INFO:
return snd_timer_user_info(file, argp);
case SNDRV_TIMER_IOCTL_PARAMS:
return snd_timer_user_params(file, argp);
case SNDRV_TIMER_IOCTL_STATUS:
return snd_timer_user_status(file, argp);
case SNDRV_TIMER_IOCTL_START:
case SNDRV_TIMER_IOCTL_START_OLD:
return snd_timer_user_start(file);
case SNDRV_TIMER_IOCTL_STOP:
case SNDRV_TIMER_IOCTL_STOP_OLD:
return snd_timer_user_stop(file);
case SNDRV_TIMER_IOCTL_CONTINUE:
case SNDRV_TIMER_IOCTL_CONTINUE_OLD:
return snd_timer_user_continue(file);
case SNDRV_TIMER_IOCTL_PAUSE:
case SNDRV_TIMER_IOCTL_PAUSE_OLD:
return snd_timer_user_pause(file);
}
return -ENOTTY;
}
static long snd_timer_user_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct snd_timer_user *tu = file->private_data;
long ret;
mutex_lock(&tu->ioctl_lock);
ret = __snd_timer_user_ioctl(file, cmd, arg);
mutex_unlock(&tu->ioctl_lock);
return ret;
}
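/*
 * A hedged user-space sketch of the ioctl surface above (not kernel
 * code; error handling elided, values are assumptions). Selects the
 * system timer, programs auto-reload with a one-tick period, starts
 * it, and reads tick events:
 *
 *	int fd = open("/dev/snd/timer", O_RDONLY);
 *	struct snd_timer_select sel = { .id = {
 *		.dev_class = SNDRV_TIMER_CLASS_GLOBAL,
 *		.card = -1,
 *		.device = SNDRV_TIMER_GLOBAL_SYSTEM,
 *		.subdevice = 0,
 *	} };
 *	ioctl(fd, SNDRV_TIMER_IOCTL_SELECT, &sel);
 *	struct snd_timer_params p = {
 *		.flags = SNDRV_TIMER_PSFLG_AUTO,
 *		.ticks = 1,
 *	};
 *	ioctl(fd, SNDRV_TIMER_IOCTL_PARAMS, &p);
 *	ioctl(fd, SNDRV_TIMER_IOCTL_START);
 *	struct snd_timer_read ev;
 *	read(fd, &ev, sizeof(ev));	// blocks until the next tick batch
 */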
static int snd_timer_user_fasync(int fd, struct file * file, int on)
{
struct snd_timer_user *tu;
tu = file->private_data;
return fasync_helper(fd, file, on, &tu->fasync);
}
static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
size_t count, loff_t *offset)
{
struct snd_timer_user *tu;
long result = 0, unit;
int qhead;
int err = 0;
tu = file->private_data;
unit = tu->tread ? sizeof(struct snd_timer_tread) : sizeof(struct snd_timer_read);
mutex_lock(&tu->ioctl_lock);
spin_lock_irq(&tu->qlock);
while ((long)count - result >= unit) {
while (!tu->qused) {
wait_queue_entry_t wait;
if ((file->f_flags & O_NONBLOCK) != 0 || result > 0) {
err = -EAGAIN;
goto _error;
}
set_current_state(TASK_INTERRUPTIBLE);
init_waitqueue_entry(&wait, current);
add_wait_queue(&tu->qchange_sleep, &wait);
spin_unlock_irq(&tu->qlock);
mutex_unlock(&tu->ioctl_lock);
schedule();
mutex_lock(&tu->ioctl_lock);
spin_lock_irq(&tu->qlock);
remove_wait_queue(&tu->qchange_sleep, &wait);
if (tu->disconnected) {
err = -ENODEV;
goto _error;
}
if (signal_pending(current)) {
err = -ERESTARTSYS;
goto _error;
}
}
qhead = tu->qhead++;
tu->qhead %= tu->queue_size;
tu->qused--;
spin_unlock_irq(&tu->qlock);
if (tu->tread) {
if (copy_to_user(buffer, &tu->tqueue[qhead],
sizeof(struct snd_timer_tread)))
err = -EFAULT;
} else {
if (copy_to_user(buffer, &tu->queue[qhead],
sizeof(struct snd_timer_read)))
err = -EFAULT;
}
spin_lock_irq(&tu->qlock);
if (err < 0)
goto _error;
result += unit;
buffer += unit;
}
_error:
spin_unlock_irq(&tu->qlock);
mutex_unlock(&tu->ioctl_lock);
return result > 0 ? result : err;
}
static __poll_t snd_timer_user_poll(struct file *file, poll_table * wait)
{
__poll_t mask;
struct snd_timer_user *tu;
tu = file->private_data;
poll_wait(file, &tu->qchange_sleep, wait);
mask = 0;
spin_lock_irq(&tu->qlock);
if (tu->qused)
mask |= EPOLLIN | EPOLLRDNORM;
if (tu->disconnected)
mask |= EPOLLERR;
spin_unlock_irq(&tu->qlock);
return mask;
}
#ifdef CONFIG_COMPAT
#include "timer_compat.c"
#else
#define snd_timer_user_ioctl_compat NULL
#endif
static const struct file_operations snd_timer_f_ops =
{
.owner = THIS_MODULE,
.read = snd_timer_user_read,
.open = snd_timer_user_open,
.release = snd_timer_user_release,
.llseek = no_llseek,
.poll = snd_timer_user_poll,
.unlocked_ioctl = snd_timer_user_ioctl,
.compat_ioctl = snd_timer_user_ioctl_compat,
.fasync = snd_timer_user_fasync,
};
/* unregister the system timer */
static void snd_timer_free_all(void)
{
struct snd_timer *timer, *n;
list_for_each_entry_safe(timer, n, &snd_timer_list, device_list)
snd_timer_free(timer);
}
static struct device timer_dev;
/*
* ENTRY functions
*/
static int __init alsa_timer_init(void)
{
int err;
snd_device_initialize(&timer_dev, NULL);
dev_set_name(&timer_dev, "timer");
#ifdef SNDRV_OSS_INFO_DEV_TIMERS
snd_oss_info_register(SNDRV_OSS_INFO_DEV_TIMERS, SNDRV_CARDS - 1,
"system timer");
#endif
err = snd_timer_register_system();
if (err < 0) {
pr_err("ALSA: unable to register system timer (%i)\n", err);
goto put_timer;
}
err = snd_register_device(SNDRV_DEVICE_TYPE_TIMER, NULL, 0,
&snd_timer_f_ops, NULL, &timer_dev);
if (err < 0) {
pr_err("ALSA: unable to register timer device (%i)\n", err);
snd_timer_free_all();
goto put_timer;
}
snd_timer_proc_init();
return 0;
put_timer:
put_device(&timer_dev);
return err;
}
static void __exit alsa_timer_exit(void)
{
snd_unregister_device(&timer_dev);
snd_timer_free_all();
put_device(&timer_dev);
snd_timer_proc_done();
#ifdef SNDRV_OSS_INFO_DEV_TIMERS
snd_oss_info_unregister(SNDRV_OSS_INFO_DEV_TIMERS, SNDRV_CARDS - 1);
#endif
}
module_init(alsa_timer_init)
module_exit(alsa_timer_exit)
| ./CrossVul/dataset_final_sorted/CWE-416/c/good_1316_0 |
crossvul-cpp_data_good_2030_0 | /*
* Routines having to do with the 'struct sk_buff' memory handlers.
*
* Authors: Alan Cox <alan@lxorguk.ukuu.org.uk>
* Florian La Roche <rzsfl@rz.uni-sb.de>
*
* Fixes:
* Alan Cox : Fixed the worst of the load
* balancer bugs.
* Dave Platt : Interrupt stacking fix.
* Richard Kooijman : Timestamp fixes.
* Alan Cox : Changed buffer format.
* Alan Cox : destructor hook for AF_UNIX etc.
* Linus Torvalds : Better skb_clone.
* Alan Cox : Added skb_copy.
* Alan Cox : Added all the changed routines Linus
* only put in the headers
* Ray VanTassle : Fixed --skb->lock in free
* Alan Cox : skb_copy copy arp field
* Andi Kleen : slabified it.
* Robert Olsson : Removed skb_head_pool
*
* NOTE:
* The __skb_ routines should be called with interrupts
* disabled, or you better be *real* sure that the operation is atomic
* with respect to whatever list is being frobbed (e.g. via lock_sock()
* or via disabling bottom half handlers, etc).
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
/*
* The functions in this file will not compile correctly with gcc 2.4.x
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/splice.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/errqueue.h>
#include <linux/prefetch.h>
#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>
#include <asm/uaccess.h>
#include <trace/events/skb.h>
#include <linux/highmem.h>
struct kmem_cache *skbuff_head_cache __read_mostly;
static struct kmem_cache *skbuff_fclone_cache __read_mostly;
/**
* skb_panic - private function for out-of-line support
* @skb: buffer
* @sz: size
* @addr: address
* @msg: skb_over_panic or skb_under_panic
*
* Out-of-line support for skb_put() and skb_push().
* Called via the wrapper skb_over_panic() or skb_under_panic().
* Keep out of line to prevent kernel bloat.
* __builtin_return_address is not used because it is not always reliable.
*/
static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr,
const char msg[])
{
pr_emerg("%s: text:%p len:%d put:%d head:%p data:%p tail:%#lx end:%#lx dev:%s\n",
msg, addr, skb->len, sz, skb->head, skb->data,
(unsigned long)skb->tail, (unsigned long)skb->end,
skb->dev ? skb->dev->name : "<NULL>");
BUG();
}
static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
skb_panic(skb, sz, addr, __func__);
}
static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
skb_panic(skb, sz, addr, __func__);
}
/*
* kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells
* the caller if emergency pfmemalloc reserves are being used. If it is and
* the socket is later found to be SOCK_MEMALLOC then PFMEMALLOC reserves
* may be used. Otherwise, the packet data may be discarded until enough
* memory is free
*/
#define kmalloc_reserve(size, gfp, node, pfmemalloc) \
__kmalloc_reserve(size, gfp, node, _RET_IP_, pfmemalloc)
static void *__kmalloc_reserve(size_t size, gfp_t flags, int node,
unsigned long ip, bool *pfmemalloc)
{
void *obj;
bool ret_pfmemalloc = false;
/*
* Try a regular allocation, when that fails and we're not entitled
* to the reserves, fail.
*/
obj = kmalloc_node_track_caller(size,
flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
node);
if (obj || !(gfp_pfmemalloc_allowed(flags)))
goto out;
/* Try again but now we are using pfmemalloc reserves */
ret_pfmemalloc = true;
obj = kmalloc_node_track_caller(size, flags, node);
out:
if (pfmemalloc)
*pfmemalloc = ret_pfmemalloc;
return obj;
}
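/*
 * Illustrative call pattern for the helper above (mirrors its use in
 * __alloc_skb() below):
 *
 *	bool pfmemalloc;
 *	void *data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc);
 *	if (!data)
 *		return NULL;
 *
 * pfmemalloc == true reports that the emergency reserves were dipped
 * into, so the buffer should only serve SOCK_MEMALLOC sockets.
 */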
/* Allocate a new skbuff. We do this ourselves so we can fill in a few
* 'private' fields and also do memory statistics to find all the
* [BEEP] leaks.
*
*/
struct sk_buff *__alloc_skb_head(gfp_t gfp_mask, int node)
{
struct sk_buff *skb;
/* Get the HEAD */
skb = kmem_cache_alloc_node(skbuff_head_cache,
gfp_mask & ~__GFP_DMA, node);
if (!skb)
goto out;
/*
* Only clear those fields we need to clear, not those that we will
* actually initialise below. Hence, don't put any more fields after
* the tail pointer in struct sk_buff!
*/
memset(skb, 0, offsetof(struct sk_buff, tail));
skb->head = NULL;
skb->truesize = sizeof(struct sk_buff);
atomic_set(&skb->users, 1);
skb->mac_header = (typeof(skb->mac_header))~0U;
out:
return skb;
}
/**
* __alloc_skb - allocate a network buffer
* @size: size to allocate
* @gfp_mask: allocation mask
* @flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache
* instead of head cache and allocate a cloned (child) skb.
* If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
* allocations in case the data is required for writeback
* @node: numa node to allocate memory on
*
* Allocate a new &sk_buff. The returned buffer has no headroom and a
* tail room of at least size bytes. The object has a reference count
* of one. The return is the buffer. On a failure the return is %NULL.
*
* Buffers may only be allocated from interrupts using a @gfp_mask of
* %GFP_ATOMIC.
*/
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
int flags, int node)
{
struct kmem_cache *cache;
struct skb_shared_info *shinfo;
struct sk_buff *skb;
u8 *data;
bool pfmemalloc;
cache = (flags & SKB_ALLOC_FCLONE)
? skbuff_fclone_cache : skbuff_head_cache;
if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
gfp_mask |= __GFP_MEMALLOC;
/* Get the HEAD */
skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
if (!skb)
goto out;
prefetchw(skb);
/* We do our best to align skb_shared_info on a separate cache
* line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
* aligned memory blocks, unless SLUB/SLAB debug is enabled.
* Both skb->head and skb_shared_info are cache line aligned.
*/
size = SKB_DATA_ALIGN(size);
size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc);
if (!data)
goto nodata;
/* kmalloc(size) might give us more room than requested.
* Put skb_shared_info exactly at the end of allocated zone,
* to allow max possible filling before reallocation.
*/
size = SKB_WITH_OVERHEAD(ksize(data));
prefetchw(data + size);
/*
* Only clear those fields we need to clear, not those that we will
* actually initialise below. Hence, don't put any more fields after
* the tail pointer in struct sk_buff!
*/
memset(skb, 0, offsetof(struct sk_buff, tail));
/* Account for allocated memory : skb + skb->head */
skb->truesize = SKB_TRUESIZE(size);
skb->pfmemalloc = pfmemalloc;
atomic_set(&skb->users, 1);
skb->head = data;
skb->data = data;
skb_reset_tail_pointer(skb);
skb->end = skb->tail + size;
skb->mac_header = (typeof(skb->mac_header))~0U;
skb->transport_header = (typeof(skb->transport_header))~0U;
/* make sure we initialize shinfo sequentially */
shinfo = skb_shinfo(skb);
memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
atomic_set(&shinfo->dataref, 1);
kmemcheck_annotate_variable(shinfo->destructor_arg);
if (flags & SKB_ALLOC_FCLONE) {
struct sk_buff *child = skb + 1;
atomic_t *fclone_ref = (atomic_t *) (child + 1);
kmemcheck_annotate_bitfield(child, flags1);
kmemcheck_annotate_bitfield(child, flags2);
skb->fclone = SKB_FCLONE_ORIG;
atomic_set(fclone_ref, 1);
child->fclone = SKB_FCLONE_UNAVAILABLE;
child->pfmemalloc = pfmemalloc;
}
out:
return skb;
nodata:
kmem_cache_free(cache, skb);
skb = NULL;
goto out;
}
EXPORT_SYMBOL(__alloc_skb);
/**
* build_skb - build a network buffer
* @data: data buffer provided by caller
* @frag_size: size of fragment, or 0 if head was kmalloced
*
* Allocate a new &sk_buff. Caller provides space holding head and
* skb_shared_info. @data must have been allocated by kmalloc() only if
* @frag_size is 0, otherwise data should come from the page allocator.
* The return is the new skb buffer.
* On a failure the return is %NULL, and @data is not freed.
 * Notes :
 * Before IO, the driver allocates only the data buffer where the NIC
 * puts the incoming frame. The driver should add room at the head
 * (NET_SKB_PAD) and MUST add room at the tail
 * (SKB_DATA_ALIGN(skb_shared_info)).
 * After IO, the driver calls build_skb() to allocate the sk_buff and
 * populate it before giving the packet to the stack.
 * RX rings only contain data buffers, not full skbs.
*/
struct sk_buff *build_skb(void *data, unsigned int frag_size)
{
struct skb_shared_info *shinfo;
struct sk_buff *skb;
unsigned int size = frag_size ? : ksize(data);
skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
if (!skb)
return NULL;
size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
memset(skb, 0, offsetof(struct sk_buff, tail));
skb->truesize = SKB_TRUESIZE(size);
skb->head_frag = frag_size != 0;
atomic_set(&skb->users, 1);
skb->head = data;
skb->data = data;
skb_reset_tail_pointer(skb);
skb->end = skb->tail + size;
skb->mac_header = (typeof(skb->mac_header))~0U;
skb->transport_header = (typeof(skb->transport_header))~0U;
/* make sure we initialize shinfo sequentially */
shinfo = skb_shinfo(skb);
memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
atomic_set(&shinfo->dataref, 1);
kmemcheck_annotate_variable(shinfo->destructor_arg);
return skb;
}
EXPORT_SYMBOL(build_skb);
struct netdev_alloc_cache {
struct page_frag frag;
	/* we maintain a pagecount bias, so that we don't dirty the cache
	 * line containing page->_count every time we allocate a fragment.
	 */
unsigned int pagecnt_bias;
};
static DEFINE_PER_CPU(struct netdev_alloc_cache, netdev_alloc_cache);
static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
{
struct netdev_alloc_cache *nc;
void *data = NULL;
int order;
unsigned long flags;
local_irq_save(flags);
nc = &__get_cpu_var(netdev_alloc_cache);
if (unlikely(!nc->frag.page)) {
refill:
for (order = NETDEV_FRAG_PAGE_MAX_ORDER; ;) {
gfp_t gfp = gfp_mask;
if (order)
gfp |= __GFP_COMP | __GFP_NOWARN;
nc->frag.page = alloc_pages(gfp, order);
if (likely(nc->frag.page))
break;
if (--order < 0)
goto end;
}
nc->frag.size = PAGE_SIZE << order;
recycle:
atomic_set(&nc->frag.page->_count, NETDEV_PAGECNT_MAX_BIAS);
nc->pagecnt_bias = NETDEV_PAGECNT_MAX_BIAS;
nc->frag.offset = 0;
}
if (nc->frag.offset + fragsz > nc->frag.size) {
/* avoid unnecessary locked operations if possible */
if ((atomic_read(&nc->frag.page->_count) == nc->pagecnt_bias) ||
atomic_sub_and_test(nc->pagecnt_bias, &nc->frag.page->_count))
goto recycle;
goto refill;
}
data = page_address(nc->frag.page) + nc->frag.offset;
nc->frag.offset += fragsz;
nc->pagecnt_bias--;
end:
local_irq_restore(flags);
return data;
}
/**
* netdev_alloc_frag - allocate a page fragment
* @fragsz: fragment size
*
* Allocates a frag from a page for receive buffer.
* Uses GFP_ATOMIC allocations.
*/
void *netdev_alloc_frag(unsigned int fragsz)
{
return __netdev_alloc_frag(fragsz, GFP_ATOMIC | __GFP_COLD);
}
EXPORT_SYMBOL(netdev_alloc_frag);
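/*
 * Usage sketch (editor's illustration, not part of the original file):
 * how a driver might pair netdev_alloc_frag() with build_skb() above.
 * The function name and 'data_len' parameter are hypothetical.
 */
#ifdef SKBUFF_USAGE_SKETCHES
static struct sk_buff *sketch_rx_build_skb(unsigned int data_len)
{
	/* Reserve room for skb_shared_info at the tail, as the
	 * build_skb() notes above require.
	 */
	unsigned int fragsz = SKB_DATA_ALIGN(data_len) +
			      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	struct sk_buff *skb;
	void *data;

	data = netdev_alloc_frag(fragsz);
	if (!data)
		return NULL;
	/* DMA/copy of the received frame into 'data' would happen here. */
	skb = build_skb(data, fragsz);
	if (!skb) {
		put_page(virt_to_head_page(data));
		return NULL;
	}
	return skb;
}
#endif /* SKBUFF_USAGE_SKETCHES */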
/**
* __netdev_alloc_skb - allocate an skbuff for rx on a specific device
* @dev: network device to receive on
* @length: length to allocate
* @gfp_mask: get_free_pages mask, passed to alloc_skb
*
* Allocate a new &sk_buff and assign it a usage count of one. The
* buffer has unspecified headroom built in. Users should allocate
* the headroom they think they need without accounting for the
* built in space. The built in space is used for optimisations.
*
* %NULL is returned if there is no free memory.
*/
struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
unsigned int length, gfp_t gfp_mask)
{
struct sk_buff *skb = NULL;
unsigned int fragsz = SKB_DATA_ALIGN(length + NET_SKB_PAD) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
if (fragsz <= PAGE_SIZE && !(gfp_mask & (__GFP_WAIT | GFP_DMA))) {
void *data;
if (sk_memalloc_socks())
gfp_mask |= __GFP_MEMALLOC;
data = __netdev_alloc_frag(fragsz, gfp_mask);
if (likely(data)) {
skb = build_skb(data, fragsz);
if (unlikely(!skb))
put_page(virt_to_head_page(data));
}
} else {
skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask,
SKB_ALLOC_RX, NUMA_NO_NODE);
}
if (likely(skb)) {
skb_reserve(skb, NET_SKB_PAD);
skb->dev = dev;
}
return skb;
}
EXPORT_SYMBOL(__netdev_alloc_skb);
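/*
 * Usage sketch (editor's illustration, not part of the original file):
 * a typical RX allocation in a hypothetical driver, with the usual
 * eth_type_trans()/netif_receive_skb() follow-up. 'frame' and 'len' are
 * what the hardware reported.
 */
#ifdef SKBUFF_USAGE_SKETCHES
static int sketch_rx_deliver(struct net_device *dev, const void *frame,
			     unsigned int len)
{
	struct sk_buff *skb;

	skb = __netdev_alloc_skb(dev, len + NET_IP_ALIGN, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;
	skb_reserve(skb, NET_IP_ALIGN);		/* align the IP header */
	memcpy(skb_put(skb, len), frame, len);	/* copy the whole frame */
	skb->protocol = eth_type_trans(skb, dev);
	return netif_receive_skb(skb);
}
#endif /* SKBUFF_USAGE_SKETCHES */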
void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
int size, unsigned int truesize)
{
skb_fill_page_desc(skb, i, page, off, size);
skb->len += size;
skb->data_len += size;
skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_add_rx_frag);
void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
unsigned int truesize)
{
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
skb_frag_size_add(frag, size);
skb->len += size;
skb->data_len += size;
skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_coalesce_rx_frag);
static void skb_drop_list(struct sk_buff **listp)
{
kfree_skb_list(*listp);
*listp = NULL;
}
static inline void skb_drop_fraglist(struct sk_buff *skb)
{
skb_drop_list(&skb_shinfo(skb)->frag_list);
}
static void skb_clone_fraglist(struct sk_buff *skb)
{
struct sk_buff *list;
skb_walk_frags(skb, list)
skb_get(list);
}
static void skb_free_head(struct sk_buff *skb)
{
if (skb->head_frag)
put_page(virt_to_head_page(skb->head));
else
kfree(skb->head);
}
static void skb_release_data(struct sk_buff *skb)
{
if (!skb->cloned ||
!atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
&skb_shinfo(skb)->dataref)) {
if (skb_shinfo(skb)->nr_frags) {
int i;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
skb_frag_unref(skb, i);
}
/*
* If skb buf is from userspace, we need to notify the caller
* the lower device DMA has done;
*/
if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
struct ubuf_info *uarg;
uarg = skb_shinfo(skb)->destructor_arg;
if (uarg->callback)
uarg->callback(uarg, true);
}
if (skb_has_frag_list(skb))
skb_drop_fraglist(skb);
skb_free_head(skb);
}
}
/*
 * Free the memory of an skbuff without cleaning its state.
*/
static void kfree_skbmem(struct sk_buff *skb)
{
struct sk_buff *other;
atomic_t *fclone_ref;
switch (skb->fclone) {
case SKB_FCLONE_UNAVAILABLE:
kmem_cache_free(skbuff_head_cache, skb);
break;
case SKB_FCLONE_ORIG:
fclone_ref = (atomic_t *) (skb + 2);
if (atomic_dec_and_test(fclone_ref))
kmem_cache_free(skbuff_fclone_cache, skb);
break;
case SKB_FCLONE_CLONE:
fclone_ref = (atomic_t *) (skb + 1);
other = skb - 1;
/* The clone portion is available for
* fast-cloning again.
*/
skb->fclone = SKB_FCLONE_UNAVAILABLE;
if (atomic_dec_and_test(fclone_ref))
kmem_cache_free(skbuff_fclone_cache, other);
break;
}
}
static void skb_release_head_state(struct sk_buff *skb)
{
skb_dst_drop(skb);
#ifdef CONFIG_XFRM
secpath_put(skb->sp);
#endif
if (skb->destructor) {
WARN_ON(in_irq());
skb->destructor(skb);
}
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
nf_conntrack_put(skb->nfct);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
nf_bridge_put(skb->nf_bridge);
#endif
/* XXX: IS this still necessary? - JHS */
#ifdef CONFIG_NET_SCHED
skb->tc_index = 0;
#ifdef CONFIG_NET_CLS_ACT
skb->tc_verd = 0;
#endif
#endif
}
/* Free everything but the sk_buff shell. */
static void skb_release_all(struct sk_buff *skb)
{
skb_release_head_state(skb);
if (likely(skb->head))
skb_release_data(skb);
}
/**
* __kfree_skb - private function
* @skb: buffer
*
* Free an sk_buff. Release anything attached to the buffer.
 * Clean the state. This is an internal helper function. Users should
 * always call kfree_skb().
*/
void __kfree_skb(struct sk_buff *skb)
{
skb_release_all(skb);
kfree_skbmem(skb);
}
EXPORT_SYMBOL(__kfree_skb);
/**
* kfree_skb - free an sk_buff
* @skb: buffer to free
*
* Drop a reference to the buffer and free it if the usage count has
* hit zero.
*/
void kfree_skb(struct sk_buff *skb)
{
if (unlikely(!skb))
return;
if (likely(atomic_read(&skb->users) == 1))
smp_rmb();
else if (likely(!atomic_dec_and_test(&skb->users)))
return;
trace_kfree_skb(skb, __builtin_return_address(0));
__kfree_skb(skb);
}
EXPORT_SYMBOL(kfree_skb);
void kfree_skb_list(struct sk_buff *segs)
{
while (segs) {
struct sk_buff *next = segs->next;
kfree_skb(segs);
segs = next;
}
}
EXPORT_SYMBOL(kfree_skb_list);
/**
* skb_tx_error - report an sk_buff xmit error
* @skb: buffer that triggered an error
*
* Report xmit error if a device callback is tracking this skb.
* skb must be freed afterwards.
*/
void skb_tx_error(struct sk_buff *skb)
{
if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
struct ubuf_info *uarg;
uarg = skb_shinfo(skb)->destructor_arg;
if (uarg->callback)
uarg->callback(uarg, false);
skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
}
}
EXPORT_SYMBOL(skb_tx_error);
/**
* consume_skb - free an skbuff
* @skb: buffer to free
*
 * Drop a reference to the buffer and free it if the usage count has hit
 * zero. Functions identically to kfree_skb(), but kfree_skb() assumes
 * that the frame is being dropped after a failure and notes that via a
 * different tracepoint.
*/
void consume_skb(struct sk_buff *skb)
{
if (unlikely(!skb))
return;
if (likely(atomic_read(&skb->users) == 1))
smp_rmb();
else if (likely(!atomic_dec_and_test(&skb->users)))
return;
trace_consume_skb(skb);
__kfree_skb(skb);
}
EXPORT_SYMBOL(consume_skb);
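/*
 * Usage sketch (editor's illustration, not part of the original file):
 * choosing between kfree_skb() and consume_skb(). The two free paths
 * are identical; only the tracepoint differs, and that distinction is
 * what drop monitors key off.
 */
#ifdef SKBUFF_USAGE_SKETCHES
static void sketch_tx_done(struct sk_buff *skb, bool transmitted_ok)
{
	if (transmitted_ok)
		consume_skb(skb);	/* normal end of life: not a drop */
	else
		kfree_skb(skb);		/* error path: traced as a drop */
}
#endif /* SKBUFF_USAGE_SKETCHES */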
static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
new->tstamp = old->tstamp;
new->dev = old->dev;
new->transport_header = old->transport_header;
new->network_header = old->network_header;
new->mac_header = old->mac_header;
new->inner_protocol = old->inner_protocol;
new->inner_transport_header = old->inner_transport_header;
new->inner_network_header = old->inner_network_header;
new->inner_mac_header = old->inner_mac_header;
skb_dst_copy(new, old);
skb_copy_hash(new, old);
new->ooo_okay = old->ooo_okay;
new->no_fcs = old->no_fcs;
new->encapsulation = old->encapsulation;
#ifdef CONFIG_XFRM
new->sp = secpath_get(old->sp);
#endif
memcpy(new->cb, old->cb, sizeof(old->cb));
new->csum = old->csum;
new->local_df = old->local_df;
new->pkt_type = old->pkt_type;
new->ip_summed = old->ip_summed;
skb_copy_queue_mapping(new, old);
new->priority = old->priority;
#if IS_ENABLED(CONFIG_IP_VS)
new->ipvs_property = old->ipvs_property;
#endif
new->pfmemalloc = old->pfmemalloc;
new->protocol = old->protocol;
new->mark = old->mark;
new->skb_iif = old->skb_iif;
__nf_copy(new, old);
#ifdef CONFIG_NET_SCHED
new->tc_index = old->tc_index;
#ifdef CONFIG_NET_CLS_ACT
new->tc_verd = old->tc_verd;
#endif
#endif
new->vlan_proto = old->vlan_proto;
new->vlan_tci = old->vlan_tci;
skb_copy_secmark(new, old);
#ifdef CONFIG_NET_RX_BUSY_POLL
new->napi_id = old->napi_id;
#endif
}
/*
* You should not add any new code to this function. Add it to
* __copy_skb_header above instead.
*/
static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
{
#define C(x) n->x = skb->x
n->next = n->prev = NULL;
n->sk = NULL;
__copy_skb_header(n, skb);
C(len);
C(data_len);
C(mac_len);
n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
n->cloned = 1;
n->nohdr = 0;
n->destructor = NULL;
C(tail);
C(end);
C(head);
C(head_frag);
C(data);
C(truesize);
atomic_set(&n->users, 1);
atomic_inc(&(skb_shinfo(skb)->dataref));
skb->cloned = 1;
return n;
#undef C
}
/**
* skb_morph - morph one skb into another
* @dst: the skb to receive the contents
* @src: the skb to supply the contents
*
* This is identical to skb_clone except that the target skb is
* supplied by the user.
*
* The target skb is returned upon exit.
*/
struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
{
skb_release_all(dst);
return __skb_clone(dst, src);
}
EXPORT_SYMBOL_GPL(skb_morph);
/**
* skb_copy_ubufs - copy userspace skb frags buffers to kernel
* @skb: the skb to modify
* @gfp_mask: allocation priority
*
 * This must be called on an SKBTX_DEV_ZEROCOPY skb.
 * It will copy all frags into kernel memory and drop the reference
 * to the userspace pages.
 *
 * If this function is called from an interrupt, @gfp_mask must be
 * %GFP_ATOMIC.
*
* Returns 0 on success or a negative error code on failure
* to allocate kernel memory to copy to.
*/
int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
{
int i;
int num_frags = skb_shinfo(skb)->nr_frags;
struct page *page, *head = NULL;
struct ubuf_info *uarg = skb_shinfo(skb)->destructor_arg;
for (i = 0; i < num_frags; i++) {
u8 *vaddr;
skb_frag_t *f = &skb_shinfo(skb)->frags[i];
page = alloc_page(gfp_mask);
if (!page) {
while (head) {
struct page *next = (struct page *)page_private(head);
put_page(head);
head = next;
}
return -ENOMEM;
}
vaddr = kmap_atomic(skb_frag_page(f));
memcpy(page_address(page),
vaddr + f->page_offset, skb_frag_size(f));
kunmap_atomic(vaddr);
set_page_private(page, (unsigned long)head);
head = page;
}
/* skb frags release userspace buffers */
for (i = 0; i < num_frags; i++)
skb_frag_unref(skb, i);
uarg->callback(uarg, false);
/* skb frags point to kernel buffers */
for (i = num_frags - 1; i >= 0; i--) {
__skb_fill_page_desc(skb, i, head, 0,
skb_shinfo(skb)->frags[i].size);
head = (struct page *)page_private(head);
}
skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
return 0;
}
EXPORT_SYMBOL_GPL(skb_copy_ubufs);
/**
* skb_clone - duplicate an sk_buff
* @skb: buffer to clone
* @gfp_mask: allocation priority
*
* Duplicate an &sk_buff. The new one is not owned by a socket. Both
* copies share the same packet data but not structure. The new
* buffer has a reference count of 1. If the allocation fails the
* function returns %NULL otherwise the new buffer is returned.
*
 * If this function is called from an interrupt, @gfp_mask must be
 * %GFP_ATOMIC.
*/
struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
{
struct sk_buff *n;
if (skb_orphan_frags(skb, gfp_mask))
return NULL;
n = skb + 1;
if (skb->fclone == SKB_FCLONE_ORIG &&
n->fclone == SKB_FCLONE_UNAVAILABLE) {
atomic_t *fclone_ref = (atomic_t *) (n + 1);
n->fclone = SKB_FCLONE_CLONE;
atomic_inc(fclone_ref);
} else {
if (skb_pfmemalloc(skb))
gfp_mask |= __GFP_MEMALLOC;
n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
if (!n)
return NULL;
kmemcheck_annotate_bitfield(n, flags1);
kmemcheck_annotate_bitfield(n, flags2);
n->fclone = SKB_FCLONE_UNAVAILABLE;
}
return __skb_clone(n, skb);
}
EXPORT_SYMBOL(skb_clone);
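/*
 * Usage sketch (editor's illustration, not part of the original file):
 * handing a second consumer its own reference to a packet. The clone
 * shares the packet data, so neither copy may modify it; a private copy
 * (skb_copy()/pskb_copy() below) is needed for that. 'deliver' is a
 * hypothetical callback that takes ownership of the clone.
 */
#ifdef SKBUFF_USAGE_SKETCHES
static int sketch_mirror_packet(struct sk_buff *skb,
				int (*deliver)(struct sk_buff *))
{
	struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);

	if (!clone)
		return -ENOMEM;
	return deliver(clone);	/* consumer owns the clone's reference */
}
#endif /* SKBUFF_USAGE_SKETCHES */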
static void skb_headers_offset_update(struct sk_buff *skb, int off)
{
/* Only adjust this if it actually is csum_start rather than csum */
if (skb->ip_summed == CHECKSUM_PARTIAL)
skb->csum_start += off;
/* {transport,network,mac}_header and tail are relative to skb->head */
skb->transport_header += off;
skb->network_header += off;
if (skb_mac_header_was_set(skb))
skb->mac_header += off;
skb->inner_transport_header += off;
skb->inner_network_header += off;
skb->inner_mac_header += off;
}
static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
__copy_skb_header(new, old);
skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
}
static inline int skb_alloc_rx_flag(const struct sk_buff *skb)
{
if (skb_pfmemalloc(skb))
return SKB_ALLOC_RX;
return 0;
}
/**
* skb_copy - create private copy of an sk_buff
* @skb: buffer to copy
* @gfp_mask: allocation priority
*
* Make a copy of both an &sk_buff and its data. This is used when the
* caller wishes to modify the data and needs a private copy of the
* data to alter. Returns %NULL on failure or the pointer to the buffer
* on success. The returned buffer has a reference count of 1.
*
 * As a by-product this function converts a non-linear &sk_buff to a
 * linear one, so that the &sk_buff becomes completely private and the
 * caller is allowed to modify all the data of the returned buffer. This
 * means that this function is not recommended for use in circumstances
 * where only the header is going to be modified. Use pskb_copy() instead.
*/
struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
{
int headerlen = skb_headroom(skb);
unsigned int size = skb_end_offset(skb) + skb->data_len;
struct sk_buff *n = __alloc_skb(size, gfp_mask,
skb_alloc_rx_flag(skb), NUMA_NO_NODE);
if (!n)
return NULL;
/* Set the data pointer */
skb_reserve(n, headerlen);
/* Set the tail pointer and length */
skb_put(n, skb->len);
if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len))
BUG();
copy_skb_header(n, skb);
return n;
}
EXPORT_SYMBOL(skb_copy);
/**
* __pskb_copy - create copy of an sk_buff with private head.
* @skb: buffer to copy
* @headroom: headroom of new skb
* @gfp_mask: allocation priority
*
 * Make a copy of both an &sk_buff and part of its data, located
 * in the header. Fragmented data remain shared. This is used when
 * the caller wishes to modify only the header of the &sk_buff and
 * needs a private copy of the header to alter. Returns %NULL on
 * failure or the pointer to the buffer on success.
 * The returned buffer has a reference count of 1.
*/
struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, gfp_t gfp_mask)
{
unsigned int size = skb_headlen(skb) + headroom;
struct sk_buff *n = __alloc_skb(size, gfp_mask,
skb_alloc_rx_flag(skb), NUMA_NO_NODE);
if (!n)
goto out;
/* Set the data pointer */
skb_reserve(n, headroom);
/* Set the tail pointer and length */
skb_put(n, skb_headlen(skb));
/* Copy the bytes */
skb_copy_from_linear_data(skb, n->data, n->len);
n->truesize += skb->data_len;
n->data_len = skb->data_len;
n->len = skb->len;
if (skb_shinfo(skb)->nr_frags) {
int i;
if (skb_orphan_frags(skb, gfp_mask)) {
kfree_skb(n);
n = NULL;
goto out;
}
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
skb_frag_ref(skb, i);
}
skb_shinfo(n)->nr_frags = i;
}
if (skb_has_frag_list(skb)) {
skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
skb_clone_fraglist(n);
}
copy_skb_header(n, skb);
out:
return n;
}
EXPORT_SYMBOL(__pskb_copy);
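/*
 * Usage sketch (editor's illustration, not part of the original file):
 * taking a private header before rewriting it, while leaving the paged
 * payload shared. pskb_copy() is the wrapper around __pskb_copy() that
 * preserves the existing headroom. The sketch assumes the network header
 * offset is already set on @skb.
 */
#ifdef SKBUFF_USAGE_SKETCHES
static struct sk_buff *sketch_private_header(struct sk_buff *skb)
{
	struct sk_buff *n = pskb_copy(skb, GFP_ATOMIC);

	if (n) {
		struct iphdr *iph = ip_hdr(n);

		iph->ttl--;	/* safe: the linear header is now private */
		/* ip_send_check(iph) would follow in real code */
	}
	return n;
}
#endif /* SKBUFF_USAGE_SKETCHES */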
/**
* pskb_expand_head - reallocate header of &sk_buff
* @skb: buffer to reallocate
* @nhead: room to add at head
* @ntail: room to add at tail
* @gfp_mask: allocation priority
*
 * Expands (or creates an identical copy, if @nhead and @ntail are zero)
 * the header of @skb. The &sk_buff itself is not changed. The &sk_buff
 * MUST have a reference count of 1. Returns zero on success or a
 * negative error code if expansion failed; in the latter case the
 * &sk_buff is not changed.
*
* All the pointers pointing into skb header may change and must be
* reloaded after call to this function.
*/
int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
gfp_t gfp_mask)
{
int i;
u8 *data;
int size = nhead + skb_end_offset(skb) + ntail;
long off;
BUG_ON(nhead < 0);
if (skb_shared(skb))
BUG();
size = SKB_DATA_ALIGN(size);
if (skb_pfmemalloc(skb))
gfp_mask |= __GFP_MEMALLOC;
data = kmalloc_reserve(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
gfp_mask, NUMA_NO_NODE, NULL);
if (!data)
goto nodata;
size = SKB_WITH_OVERHEAD(ksize(data));
/* Copy only real data... and, alas, header. This should be
* optimized for the cases when header is void.
*/
memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head);
memcpy((struct skb_shared_info *)(data + size),
skb_shinfo(skb),
offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));
/*
* if shinfo is shared we must drop the old head gracefully, but if it
* is not we can just drop the old head and let the existing refcount
* be since all we did is relocate the values
*/
if (skb_cloned(skb)) {
		/* copy the frags of this zero-copy skb */
if (skb_orphan_frags(skb, gfp_mask))
goto nofrags;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
skb_frag_ref(skb, i);
if (skb_has_frag_list(skb))
skb_clone_fraglist(skb);
skb_release_data(skb);
} else {
skb_free_head(skb);
}
off = (data + nhead) - skb->head;
skb->head = data;
skb->head_frag = 0;
skb->data += off;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
skb->end = size;
off = nhead;
#else
skb->end = skb->head + size;
#endif
skb->tail += off;
skb_headers_offset_update(skb, nhead);
skb->cloned = 0;
skb->hdr_len = 0;
skb->nohdr = 0;
atomic_set(&skb_shinfo(skb)->dataref, 1);
return 0;
nofrags:
kfree(data);
nodata:
return -ENOMEM;
}
EXPORT_SYMBOL(pskb_expand_head);
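/*
 * Usage sketch (editor's illustration, not part of the original file):
 * guaranteeing headroom before an skb_push(), reallocating only when
 * needed. This mirrors what the skb_cow()/skb_cow_head() helpers do.
 */
#ifdef SKBUFF_USAGE_SKETCHES
static int sketch_ensure_headroom(struct sk_buff *skb, unsigned int needed)
{
	int delta = needed - skb_headroom(skb);

	if (delta <= 0 && !skb_cloned(skb))
		return 0;
	/* Reallocates the head; all pointers into it become stale. */
	return pskb_expand_head(skb, delta > 0 ? SKB_DATA_ALIGN(delta) : 0,
				0, GFP_ATOMIC);
}
#endif /* SKBUFF_USAGE_SKETCHES */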
/* Make private copy of skb with writable head and some headroom */
struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
{
struct sk_buff *skb2;
int delta = headroom - skb_headroom(skb);
if (delta <= 0)
skb2 = pskb_copy(skb, GFP_ATOMIC);
else {
skb2 = skb_clone(skb, GFP_ATOMIC);
if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
GFP_ATOMIC)) {
kfree_skb(skb2);
skb2 = NULL;
}
}
return skb2;
}
EXPORT_SYMBOL(skb_realloc_headroom);
/**
* skb_copy_expand - copy and expand sk_buff
* @skb: buffer to copy
* @newheadroom: new free bytes at head
* @newtailroom: new free bytes at tail
* @gfp_mask: allocation priority
*
* Make a copy of both an &sk_buff and its data and while doing so
* allocate additional space.
*
* This is used when the caller wishes to modify the data and needs a
* private copy of the data to alter as well as more space for new fields.
* Returns %NULL on failure or the pointer to the buffer
* on success. The returned buffer has a reference count of 1.
*
* You must pass %GFP_ATOMIC as the allocation priority if this function
* is called from an interrupt.
*/
struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
int newheadroom, int newtailroom,
gfp_t gfp_mask)
{
/*
* Allocate the copy buffer
*/
struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom,
gfp_mask, skb_alloc_rx_flag(skb),
NUMA_NO_NODE);
int oldheadroom = skb_headroom(skb);
int head_copy_len, head_copy_off;
if (!n)
return NULL;
skb_reserve(n, newheadroom);
/* Set the tail pointer and length */
skb_put(n, skb->len);
head_copy_len = oldheadroom;
head_copy_off = 0;
if (newheadroom <= head_copy_len)
head_copy_len = newheadroom;
else
head_copy_off = newheadroom - head_copy_len;
/* Copy the linear header and data. */
if (skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
skb->len + head_copy_len))
BUG();
copy_skb_header(n, skb);
skb_headers_offset_update(n, newheadroom - oldheadroom);
return n;
}
EXPORT_SYMBOL(skb_copy_expand);
/**
* skb_pad - zero pad the tail of an skb
* @skb: buffer to pad
* @pad: space to pad
*
* Ensure that a buffer is followed by a padding area that is zero
* filled. Used by network drivers which may DMA or transfer data
* beyond the buffer end onto the wire.
*
* May return error in out of memory cases. The skb is freed on error.
*/
int skb_pad(struct sk_buff *skb, int pad)
{
int err;
int ntail;
	/* If the skbuff is non-linear, tailroom is always zero. */
if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
memset(skb->data+skb->len, 0, pad);
return 0;
}
ntail = skb->data_len + pad - (skb->end - skb->tail);
if (likely(skb_cloned(skb) || ntail > 0)) {
err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
if (unlikely(err))
goto free_skb;
}
/* FIXME: The use of this function with non-linear skb's really needs
* to be audited.
*/
err = skb_linearize(skb);
if (unlikely(err))
goto free_skb;
memset(skb->data + skb->len, 0, pad);
return 0;
free_skb:
kfree_skb(skb);
return err;
}
EXPORT_SYMBOL(skb_pad);
/**
* pskb_put - add data to the tail of a potentially fragmented buffer
* @skb: start of the buffer to use
* @tail: tail fragment of the buffer to use
* @len: amount of data to add
*
* This function extends the used data area of the potentially
* fragmented buffer. @tail must be the last fragment of @skb -- or
* @skb itself. If this would exceed the total buffer size the kernel
* will panic. A pointer to the first byte of the extra data is
* returned.
*/
unsigned char *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len)
{
if (tail != skb) {
skb->data_len += len;
skb->len += len;
}
return skb_put(tail, len);
}
EXPORT_SYMBOL_GPL(pskb_put);
/**
* skb_put - add data to a buffer
* @skb: buffer to use
* @len: amount of data to add
*
* This function extends the used data area of the buffer. If this would
* exceed the total buffer size the kernel will panic. A pointer to the
* first byte of the extra data is returned.
*/
unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
{
unsigned char *tmp = skb_tail_pointer(skb);
SKB_LINEAR_ASSERT(skb);
skb->tail += len;
skb->len += len;
if (unlikely(skb->tail > skb->end))
skb_over_panic(skb, len, __builtin_return_address(0));
return tmp;
}
EXPORT_SYMBOL(skb_put);
/**
* skb_push - add data to the start of a buffer
* @skb: buffer to use
* @len: amount of data to add
*
* This function extends the used data area of the buffer at the buffer
* start. If this would exceed the total buffer headroom the kernel will
* panic. A pointer to the first byte of the extra data is returned.
*/
unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
{
skb->data -= len;
skb->len += len;
	if (unlikely(skb->data < skb->head))
skb_under_panic(skb, len, __builtin_return_address(0));
return skb->data;
}
EXPORT_SYMBOL(skb_push);
/**
* skb_pull - remove data from the start of a buffer
* @skb: buffer to use
* @len: amount of data to remove
*
* This function removes data from the start of a buffer, returning
* the memory to the headroom. A pointer to the next data in the buffer
* is returned. Once the data has been pulled future pushes will overwrite
* the old data.
*/
unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
{
return skb_pull_inline(skb, len);
}
EXPORT_SYMBOL(skb_pull);
/**
* skb_trim - remove end from a buffer
* @skb: buffer to alter
* @len: new length
*
* Cut the length of a buffer down by removing data from the tail. If
* the buffer is already under the length specified it is not modified.
* The skb must be linear.
*/
void skb_trim(struct sk_buff *skb, unsigned int len)
{
if (skb->len > len)
__skb_trim(skb, len);
}
EXPORT_SYMBOL(skb_trim);
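/*
 * Usage sketch (editor's illustration, not part of the original file):
 * the data-pointer operations above exercised on a freshly allocated
 * linear skb. All sizes are arbitrary for the example.
 */
#ifdef SKBUFF_USAGE_SKETCHES
static struct sk_buff *sketch_pointer_ops(void)
{
	struct sk_buff *skb = alloc_skb(128, GFP_ATOMIC);

	if (!skb)
		return NULL;
	skb_reserve(skb, 32);			/* headroom for pushes */
	memset(skb_put(skb, 64), 0, 64);	/* append 64 bytes */
	skb_push(skb, 14);			/* prepend a 14-byte header */
	skb_pull(skb, 14);			/* ...and strip it again */
	skb_trim(skb, 32);			/* keep the first 32 bytes */
	return skb;
}
#endif /* SKBUFF_USAGE_SKETCHES */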
/* Trims skb to length len. It can change skb pointers.
*/
int ___pskb_trim(struct sk_buff *skb, unsigned int len)
{
struct sk_buff **fragp;
struct sk_buff *frag;
int offset = skb_headlen(skb);
int nfrags = skb_shinfo(skb)->nr_frags;
int i;
int err;
if (skb_cloned(skb) &&
unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
return err;
i = 0;
if (offset >= len)
goto drop_pages;
for (; i < nfrags; i++) {
int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]);
if (end < len) {
offset = end;
continue;
}
skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset);
drop_pages:
skb_shinfo(skb)->nr_frags = i;
for (; i < nfrags; i++)
skb_frag_unref(skb, i);
if (skb_has_frag_list(skb))
skb_drop_fraglist(skb);
goto done;
}
for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
fragp = &frag->next) {
int end = offset + frag->len;
if (skb_shared(frag)) {
struct sk_buff *nfrag;
nfrag = skb_clone(frag, GFP_ATOMIC);
if (unlikely(!nfrag))
return -ENOMEM;
nfrag->next = frag->next;
consume_skb(frag);
frag = nfrag;
*fragp = frag;
}
if (end < len) {
offset = end;
continue;
}
if (end > len &&
unlikely((err = pskb_trim(frag, len - offset))))
return err;
if (frag->next)
skb_drop_list(&frag->next);
break;
}
done:
if (len > skb_headlen(skb)) {
skb->data_len -= skb->len - len;
skb->len = len;
} else {
skb->len = len;
skb->data_len = 0;
skb_set_tail_pointer(skb, len);
}
return 0;
}
EXPORT_SYMBOL(___pskb_trim);
/**
* __pskb_pull_tail - advance tail of skb header
* @skb: buffer to reallocate
* @delta: number of bytes to advance tail
*
 * This function makes sense only on a fragmented &sk_buff:
 * it expands the header, moving its tail forward and copying the
 * necessary data from the fragmented part.
*
* &sk_buff MUST have reference count of 1.
*
 * Returns %NULL (and the &sk_buff does not change) if the pull failed,
 * or the value of the new tail of the skb in the case of success.
*
* All the pointers pointing into skb header may change and must be
* reloaded after call to this function.
*/
/* Moves tail of skb head forward, copying data from fragmented part,
* when it is necessary.
* 1. It may fail due to malloc failure.
* 2. It may change skb pointers.
*
* It is pretty complicated. Luckily, it is called only in exceptional cases.
*/
unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
{
	/* If skb does not have enough free space at the tail, get a new
	 * one plus 128 bytes for future expansions. If we have enough
	 * room at the tail, reallocate without expansion only if skb is
	 * cloned.
	 */
int i, k, eat = (skb->tail + delta) - skb->end;
if (eat > 0 || skb_cloned(skb)) {
if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
GFP_ATOMIC))
return NULL;
}
if (skb_copy_bits(skb, skb_headlen(skb), skb_tail_pointer(skb), delta))
BUG();
	/* Optimization: no fragments, no reason to pre-estimate the
	 * size of pulled pages. Superb.
	 */
if (!skb_has_frag_list(skb))
goto pull_pages;
/* Estimate size of pulled pages. */
eat = delta;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
if (size >= eat)
goto pull_pages;
eat -= size;
}
	/* If we need to update the frag list, we are in trouble.
	 * Certainly, it is possible to add an offset to the skb data,
	 * but taking into account that pulling is expected to be a
	 * very rare operation, it is worth fighting against further
	 * bloating of the skb head and crucifying ourselves here instead.
	 * Pure masochism, indeed. 8)8)
	 */
if (eat) {
struct sk_buff *list = skb_shinfo(skb)->frag_list;
struct sk_buff *clone = NULL;
struct sk_buff *insp = NULL;
do {
BUG_ON(!list);
if (list->len <= eat) {
/* Eaten as whole. */
eat -= list->len;
list = list->next;
insp = list;
} else {
/* Eaten partially. */
if (skb_shared(list)) {
/* Sucks! We need to fork list. :-( */
clone = skb_clone(list, GFP_ATOMIC);
if (!clone)
return NULL;
insp = list->next;
list = clone;
} else {
/* This may be pulled without
* problems. */
insp = list;
}
if (!pskb_pull(list, eat)) {
kfree_skb(clone);
return NULL;
}
break;
}
} while (eat);
/* Free pulled out fragments. */
while ((list = skb_shinfo(skb)->frag_list) != insp) {
skb_shinfo(skb)->frag_list = list->next;
kfree_skb(list);
}
/* And insert new clone at head. */
if (clone) {
clone->next = list;
skb_shinfo(skb)->frag_list = clone;
}
}
/* Success! Now we may commit changes to skb data. */
pull_pages:
eat = delta;
k = 0;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
if (size <= eat) {
skb_frag_unref(skb, i);
eat -= size;
} else {
skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
if (eat) {
skb_shinfo(skb)->frags[k].page_offset += eat;
skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat);
eat = 0;
}
k++;
}
}
skb_shinfo(skb)->nr_frags = k;
skb->tail += delta;
skb->data_len -= delta;
return skb_tail_pointer(skb);
}
EXPORT_SYMBOL(__pskb_pull_tail);
/**
* skb_copy_bits - copy bits from skb to kernel buffer
* @skb: source skb
* @offset: offset in source
* @to: destination buffer
* @len: number of bytes to copy
*
* Copy the specified number of bytes from the source skb to the
* destination buffer.
*
* CAUTION ! :
* If its prototype is ever changed,
* check arch/{*}/net/{*}.S files,
* since it is called from BPF assembly code.
*/
int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
{
int start = skb_headlen(skb);
struct sk_buff *frag_iter;
int i, copy;
if (offset > (int)skb->len - len)
goto fault;
/* Copy header. */
if ((copy = start - offset) > 0) {
if (copy > len)
copy = len;
skb_copy_from_linear_data_offset(skb, offset, to, copy);
if ((len -= copy) == 0)
return 0;
offset += copy;
to += copy;
}
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
int end;
skb_frag_t *f = &skb_shinfo(skb)->frags[i];
WARN_ON(start > offset + len);
end = start + skb_frag_size(f);
if ((copy = end - offset) > 0) {
u8 *vaddr;
if (copy > len)
copy = len;
vaddr = kmap_atomic(skb_frag_page(f));
memcpy(to,
vaddr + f->page_offset + offset - start,
copy);
kunmap_atomic(vaddr);
if ((len -= copy) == 0)
return 0;
offset += copy;
to += copy;
}
start = end;
}
skb_walk_frags(skb, frag_iter) {
int end;
WARN_ON(start > offset + len);
end = start + frag_iter->len;
if ((copy = end - offset) > 0) {
if (copy > len)
copy = len;
if (skb_copy_bits(frag_iter, offset - start, to, copy))
goto fault;
if ((len -= copy) == 0)
return 0;
offset += copy;
to += copy;
}
start = end;
}
if (!len)
return 0;
fault:
return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_bits);
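/*
 * Usage sketch (editor's illustration, not part of the original file):
 * pulling a fixed-size header out of a possibly non-linear skb into a
 * caller-supplied buffer, which is what makes skb_copy_bits() preferable
 * to dereferencing skb->data directly.
 */
#ifdef SKBUFF_USAGE_SKETCHES
static int sketch_peek_header(const struct sk_buff *skb, void *hdr,
			      unsigned int hdrlen)
{
	if (skb->len < hdrlen)
		return -EINVAL;
	return skb_copy_bits(skb, 0, hdr, hdrlen);	/* 0 or -EFAULT */
}
#endif /* SKBUFF_USAGE_SKETCHES */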
/*
* Callback from splice_to_pipe(), if we need to release some pages
* at the end of the spd in case we error'ed out in filling the pipe.
*/
static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
{
put_page(spd->pages[i]);
}
static struct page *linear_to_page(struct page *page, unsigned int *len,
unsigned int *offset,
struct sock *sk)
{
struct page_frag *pfrag = sk_page_frag(sk);
if (!sk_page_frag_refill(sk, pfrag))
return NULL;
*len = min_t(unsigned int, *len, pfrag->size - pfrag->offset);
memcpy(page_address(pfrag->page) + pfrag->offset,
page_address(page) + *offset, *len);
*offset = pfrag->offset;
pfrag->offset += *len;
return pfrag->page;
}
static bool spd_can_coalesce(const struct splice_pipe_desc *spd,
struct page *page,
unsigned int offset)
{
return spd->nr_pages &&
spd->pages[spd->nr_pages - 1] == page &&
(spd->partial[spd->nr_pages - 1].offset +
spd->partial[spd->nr_pages - 1].len == offset);
}
/*
* Fill page/offset/length into spd, if it can hold more pages.
*/
static bool spd_fill_page(struct splice_pipe_desc *spd,
struct pipe_inode_info *pipe, struct page *page,
unsigned int *len, unsigned int offset,
bool linear,
struct sock *sk)
{
if (unlikely(spd->nr_pages == MAX_SKB_FRAGS))
return true;
if (linear) {
page = linear_to_page(page, len, &offset, sk);
if (!page)
return true;
}
if (spd_can_coalesce(spd, page, offset)) {
spd->partial[spd->nr_pages - 1].len += *len;
return false;
}
get_page(page);
spd->pages[spd->nr_pages] = page;
spd->partial[spd->nr_pages].len = *len;
spd->partial[spd->nr_pages].offset = offset;
spd->nr_pages++;
return false;
}
static bool __splice_segment(struct page *page, unsigned int poff,
unsigned int plen, unsigned int *off,
unsigned int *len,
struct splice_pipe_desc *spd, bool linear,
struct sock *sk,
struct pipe_inode_info *pipe)
{
if (!*len)
return true;
/* skip this segment if already processed */
if (*off >= plen) {
*off -= plen;
return false;
}
/* ignore any bits we already processed */
poff += *off;
plen -= *off;
*off = 0;
do {
unsigned int flen = min(*len, plen);
if (spd_fill_page(spd, pipe, page, &flen, poff,
linear, sk))
return true;
poff += flen;
plen -= flen;
*len -= flen;
} while (*len && plen);
return false;
}
/*
* Map linear and fragment data from the skb to spd. It reports true if the
* pipe is full or if we already spliced the requested length.
*/
static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
unsigned int *offset, unsigned int *len,
struct splice_pipe_desc *spd, struct sock *sk)
{
int seg;
/* map the linear part :
* If skb->head_frag is set, this 'linear' part is backed by a
* fragment, and if the head is not shared with any clones then
* we can avoid a copy since we own the head portion of this page.
*/
if (__splice_segment(virt_to_page(skb->data),
(unsigned long) skb->data & (PAGE_SIZE - 1),
skb_headlen(skb),
offset, len, spd,
skb_head_is_locked(skb),
sk, pipe))
return true;
/*
* then map the fragments
*/
for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
if (__splice_segment(skb_frag_page(f),
f->page_offset, skb_frag_size(f),
offset, len, spd, false, sk, pipe))
return true;
}
return false;
}
/*
* Map data from the skb to a pipe. Should handle both the linear part,
* the fragments, and the frag list. It does NOT handle frag lists within
* the frag list, if such a thing exists. We'd probably need to recurse to
* handle that cleanly.
*/
int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
struct pipe_inode_info *pipe, unsigned int tlen,
unsigned int flags)
{
struct partial_page partial[MAX_SKB_FRAGS];
struct page *pages[MAX_SKB_FRAGS];
struct splice_pipe_desc spd = {
.pages = pages,
.partial = partial,
.nr_pages_max = MAX_SKB_FRAGS,
.flags = flags,
.ops = &nosteal_pipe_buf_ops,
.spd_release = sock_spd_release,
};
struct sk_buff *frag_iter;
struct sock *sk = skb->sk;
int ret = 0;
/*
* __skb_splice_bits() only fails if the output has no room left,
* so no point in going over the frag_list for the error case.
*/
if (__skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk))
goto done;
else if (!tlen)
goto done;
/*
* now see if we have a frag_list to map
*/
skb_walk_frags(skb, frag_iter) {
if (!tlen)
break;
if (__skb_splice_bits(frag_iter, pipe, &offset, &tlen, &spd, sk))
break;
}
done:
if (spd.nr_pages) {
/*
* Drop the socket lock, otherwise we have reverse
* locking dependencies between sk_lock and i_mutex
* here as compared to sendfile(). We enter here
* with the socket lock held, and splice_to_pipe() will
* grab the pipe inode lock. For sendfile() emulation,
* we call into ->sendpage() with the i_mutex lock held
* and networking will grab the socket lock.
*/
release_sock(sk);
ret = splice_to_pipe(pipe, &spd);
lock_sock(sk);
}
return ret;
}
/**
* skb_store_bits - store bits from kernel buffer to skb
* @skb: destination buffer
* @offset: offset in destination
* @from: source buffer
* @len: number of bytes to copy
*
* Copy the specified number of bytes from the source buffer to the
* destination skb. This function handles all the messy bits of
* traversing fragment lists and such.
*/
int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
{
int start = skb_headlen(skb);
struct sk_buff *frag_iter;
int i, copy;
if (offset > (int)skb->len - len)
goto fault;
if ((copy = start - offset) > 0) {
if (copy > len)
copy = len;
skb_copy_to_linear_data_offset(skb, offset, from, copy);
if ((len -= copy) == 0)
return 0;
offset += copy;
from += copy;
}
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
int end;
WARN_ON(start > offset + len);
end = start + skb_frag_size(frag);
if ((copy = end - offset) > 0) {
u8 *vaddr;
if (copy > len)
copy = len;
vaddr = kmap_atomic(skb_frag_page(frag));
memcpy(vaddr + frag->page_offset + offset - start,
from, copy);
kunmap_atomic(vaddr);
if ((len -= copy) == 0)
return 0;
offset += copy;
from += copy;
}
start = end;
}
skb_walk_frags(skb, frag_iter) {
int end;
WARN_ON(start > offset + len);
end = start + frag_iter->len;
if ((copy = end - offset) > 0) {
if (copy > len)
copy = len;
if (skb_store_bits(frag_iter, offset - start,
from, copy))
goto fault;
if ((len -= copy) == 0)
return 0;
offset += copy;
from += copy;
}
start = end;
}
if (!len)
return 0;
fault:
return -EFAULT;
}
EXPORT_SYMBOL(skb_store_bits);
/* Checksum skb data. */
__wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
__wsum csum, const struct skb_checksum_ops *ops)
{
int start = skb_headlen(skb);
int i, copy = start - offset;
struct sk_buff *frag_iter;
int pos = 0;
/* Checksum header. */
if (copy > 0) {
if (copy > len)
copy = len;
csum = ops->update(skb->data + offset, copy, csum);
if ((len -= copy) == 0)
return csum;
offset += copy;
pos = copy;
}
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
int end;
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
WARN_ON(start > offset + len);
end = start + skb_frag_size(frag);
if ((copy = end - offset) > 0) {
__wsum csum2;
u8 *vaddr;
if (copy > len)
copy = len;
vaddr = kmap_atomic(skb_frag_page(frag));
csum2 = ops->update(vaddr + frag->page_offset +
offset - start, copy, 0);
kunmap_atomic(vaddr);
csum = ops->combine(csum, csum2, pos, copy);
if (!(len -= copy))
return csum;
offset += copy;
pos += copy;
}
start = end;
}
skb_walk_frags(skb, frag_iter) {
int end;
WARN_ON(start > offset + len);
end = start + frag_iter->len;
if ((copy = end - offset) > 0) {
__wsum csum2;
if (copy > len)
copy = len;
csum2 = __skb_checksum(frag_iter, offset - start,
copy, 0, ops);
csum = ops->combine(csum, csum2, pos, copy);
if ((len -= copy) == 0)
return csum;
offset += copy;
pos += copy;
}
start = end;
}
BUG_ON(len);
return csum;
}
EXPORT_SYMBOL(__skb_checksum);
__wsum skb_checksum(const struct sk_buff *skb, int offset,
int len, __wsum csum)
{
const struct skb_checksum_ops ops = {
.update = csum_partial_ext,
.combine = csum_block_add_ext,
};
return __skb_checksum(skb, offset, len, csum, &ops);
}
EXPORT_SYMBOL(skb_checksum);
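/*
 * Usage sketch (editor's illustration, not part of the original file):
 * software validation of a whole-packet ones-complement checksum, in the
 * spirit of __skb_checksum_complete(). A valid sum folds to zero.
 */
#ifdef SKBUFF_USAGE_SKETCHES
static bool sketch_csum_ok(const struct sk_buff *skb)
{
	__wsum csum = skb_checksum(skb, 0, skb->len, 0);

	return !csum_fold(csum);	/* nonzero fold means corruption */
}
#endif /* SKBUFF_USAGE_SKETCHES */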
/* Both of above in one bottle. */
__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
u8 *to, int len, __wsum csum)
{
int start = skb_headlen(skb);
int i, copy = start - offset;
struct sk_buff *frag_iter;
int pos = 0;
/* Copy header. */
if (copy > 0) {
if (copy > len)
copy = len;
csum = csum_partial_copy_nocheck(skb->data + offset, to,
copy, csum);
if ((len -= copy) == 0)
return csum;
offset += copy;
to += copy;
pos = copy;
}
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
int end;
WARN_ON(start > offset + len);
end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
if ((copy = end - offset) > 0) {
__wsum csum2;
u8 *vaddr;
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
if (copy > len)
copy = len;
vaddr = kmap_atomic(skb_frag_page(frag));
csum2 = csum_partial_copy_nocheck(vaddr +
frag->page_offset +
offset - start, to,
copy, 0);
kunmap_atomic(vaddr);
csum = csum_block_add(csum, csum2, pos);
if (!(len -= copy))
return csum;
offset += copy;
to += copy;
pos += copy;
}
start = end;
}
skb_walk_frags(skb, frag_iter) {
__wsum csum2;
int end;
WARN_ON(start > offset + len);
end = start + frag_iter->len;
if ((copy = end - offset) > 0) {
if (copy > len)
copy = len;
csum2 = skb_copy_and_csum_bits(frag_iter,
offset - start,
to, copy, 0);
csum = csum_block_add(csum, csum2, pos);
if ((len -= copy) == 0)
return csum;
offset += copy;
to += copy;
pos += copy;
}
start = end;
}
BUG_ON(len);
return csum;
}
EXPORT_SYMBOL(skb_copy_and_csum_bits);
/**
* skb_zerocopy_headlen - Calculate headroom needed for skb_zerocopy()
* @from: source buffer
*
* Calculates the amount of linear headroom needed in the 'to' skb passed
* into skb_zerocopy().
*/
unsigned int
skb_zerocopy_headlen(const struct sk_buff *from)
{
unsigned int hlen = 0;
if (!from->head_frag ||
skb_headlen(from) < L1_CACHE_BYTES ||
skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS)
hlen = skb_headlen(from);
if (skb_has_frag_list(from))
hlen = from->len;
return hlen;
}
EXPORT_SYMBOL_GPL(skb_zerocopy_headlen);
/**
* skb_zerocopy - Zero copy skb to skb
* @to: destination buffer
* @from: source buffer
* @len: number of bytes to copy from source buffer
* @hlen: size of linear headroom in destination buffer
*
* Copies up to `len` bytes from `from` to `to` by creating references
* to the frags in the source buffer.
*
* The `hlen` as calculated by skb_zerocopy_headlen() specifies the
* headroom in the `to` buffer.
*/
void
skb_zerocopy(struct sk_buff *to, const struct sk_buff *from, int len, int hlen)
{
int i, j = 0;
int plen = 0; /* length of skb->head fragment */
struct page *page;
unsigned int offset;
BUG_ON(!from->head_frag && !hlen);
	/* don't bother with small payloads */
if (len <= skb_tailroom(to)) {
skb_copy_bits(from, 0, skb_put(to, len), len);
return;
}
if (hlen) {
skb_copy_bits(from, 0, skb_put(to, hlen), hlen);
len -= hlen;
} else {
plen = min_t(int, skb_headlen(from), len);
if (plen) {
page = virt_to_head_page(from->head);
offset = from->data - (unsigned char *)page_address(page);
__skb_fill_page_desc(to, 0, page, offset, plen);
get_page(page);
j = 1;
len -= plen;
}
}
to->truesize += len + plen;
to->len += len + plen;
to->data_len += len + plen;
for (i = 0; i < skb_shinfo(from)->nr_frags; i++) {
if (!len)
break;
skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i];
skb_shinfo(to)->frags[j].size = min_t(int, skb_shinfo(to)->frags[j].size, len);
len -= skb_shinfo(to)->frags[j].size;
skb_frag_ref(to, j);
j++;
}
skb_shinfo(to)->nr_frags = j;
}
EXPORT_SYMBOL_GPL(skb_zerocopy);
void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
{
__wsum csum;
long csstart;
if (skb->ip_summed == CHECKSUM_PARTIAL)
csstart = skb_checksum_start_offset(skb);
else
csstart = skb_headlen(skb);
BUG_ON(csstart > skb_headlen(skb));
skb_copy_from_linear_data(skb, to, csstart);
csum = 0;
if (csstart != skb->len)
csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
skb->len - csstart, 0);
if (skb->ip_summed == CHECKSUM_PARTIAL) {
long csstuff = csstart + skb->csum_offset;
*((__sum16 *)(to + csstuff)) = csum_fold(csum);
}
}
EXPORT_SYMBOL(skb_copy_and_csum_dev);
/**
* skb_dequeue - remove from the head of the queue
* @list: list to dequeue from
*
* Remove the head of the list. The list lock is taken so the function
* may be used safely with other locking list functions. The head item is
* returned or %NULL if the list is empty.
*/
struct sk_buff *skb_dequeue(struct sk_buff_head *list)
{
unsigned long flags;
struct sk_buff *result;
spin_lock_irqsave(&list->lock, flags);
result = __skb_dequeue(list);
spin_unlock_irqrestore(&list->lock, flags);
return result;
}
EXPORT_SYMBOL(skb_dequeue);
/**
* skb_dequeue_tail - remove from the tail of the queue
* @list: list to dequeue from
*
* Remove the tail of the list. The list lock is taken so the function
* may be used safely with other locking list functions. The tail item is
* returned or %NULL if the list is empty.
*/
struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
{
unsigned long flags;
struct sk_buff *result;
spin_lock_irqsave(&list->lock, flags);
result = __skb_dequeue_tail(list);
spin_unlock_irqrestore(&list->lock, flags);
return result;
}
EXPORT_SYMBOL(skb_dequeue_tail);
/**
* skb_queue_purge - empty a list
* @list: list to empty
*
* Delete all buffers on an &sk_buff list. Each buffer is removed from
* the list and one reference dropped. This function takes the list
* lock and is atomic with respect to other list locking functions.
*/
void skb_queue_purge(struct sk_buff_head *list)
{
struct sk_buff *skb;
while ((skb = skb_dequeue(list)) != NULL)
kfree_skb(skb);
}
EXPORT_SYMBOL(skb_queue_purge);
/**
* skb_queue_head - queue a buffer at the list head
* @list: list to use
* @newsk: buffer to queue
*
* Queue a buffer at the start of the list. This function takes the
 * list lock and can be used safely with other locking &sk_buff
 * functions.
*
* A buffer cannot be placed on two lists at the same time.
*/
void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
{
unsigned long flags;
spin_lock_irqsave(&list->lock, flags);
__skb_queue_head(list, newsk);
spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_queue_head);
/**
* skb_queue_tail - queue a buffer at the list tail
* @list: list to use
* @newsk: buffer to queue
*
* Queue a buffer at the tail of the list. This function takes the
 * list lock and can be used safely with other locking &sk_buff
 * functions.
*
* A buffer cannot be placed on two lists at the same time.
*/
void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
{
unsigned long flags;
spin_lock_irqsave(&list->lock, flags);
__skb_queue_tail(list, newsk);
spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_queue_tail);
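/*
 * Usage sketch (editor's illustration, not part of the original file):
 * a producer/consumer backlog built from the locked queue helpers above.
 * @q is assumed to have been initialized with skb_queue_head_init().
 */
#ifdef SKBUFF_USAGE_SKETCHES
static void sketch_backlog(struct sk_buff_head *q, struct sk_buff *skb)
{
	struct sk_buff *next;

	skb_queue_tail(q, skb);			/* producer side */
	while ((next = skb_dequeue(q)) != NULL)	/* consumer side */
		consume_skb(next);
}
#endif /* SKBUFF_USAGE_SKETCHES */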
/**
* skb_unlink - remove a buffer from a list
* @skb: buffer to remove
* @list: list to use
*
* Remove a packet from a list. The list locks are taken and this
 * function is atomic with respect to other list locked calls.
*
* You must know what list the SKB is on.
*/
void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
unsigned long flags;
spin_lock_irqsave(&list->lock, flags);
__skb_unlink(skb, list);
spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_unlink);
/**
* skb_append - append a buffer
* @old: buffer to insert after
* @newsk: buffer to insert
* @list: list to use
*
* Place a packet after a given packet in a list. The list locks are taken
* and this function is atomic with respect to other list locked calls.
* A buffer cannot be placed on two lists at the same time.
*/
void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
{
unsigned long flags;
spin_lock_irqsave(&list->lock, flags);
__skb_queue_after(list, old, newsk);
spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_append);
/**
* skb_insert - insert a buffer
* @old: buffer to insert before
* @newsk: buffer to insert
* @list: list to use
*
* Place a packet before a given packet in a list. The list locks are
* taken and this function is atomic with respect to other list locked
* calls.
*
* A buffer cannot be placed on two lists at the same time.
*/
void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
{
unsigned long flags;
spin_lock_irqsave(&list->lock, flags);
__skb_insert(newsk, old->prev, old, list);
spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_insert);
static inline void skb_split_inside_header(struct sk_buff *skb,
struct sk_buff* skb1,
const u32 len, const int pos)
{
int i;
skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len),
pos - len);
/* And move data appendix as is. */
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];
skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
skb_shinfo(skb)->nr_frags = 0;
skb1->data_len = skb->data_len;
skb1->len += skb1->data_len;
skb->data_len = 0;
skb->len = len;
skb_set_tail_pointer(skb, len);
}
static inline void skb_split_no_header(struct sk_buff *skb,
struct sk_buff* skb1,
const u32 len, int pos)
{
int i, k = 0;
const int nfrags = skb_shinfo(skb)->nr_frags;
skb_shinfo(skb)->nr_frags = 0;
skb1->len = skb1->data_len = skb->len - len;
skb->len = len;
skb->data_len = len - pos;
for (i = 0; i < nfrags; i++) {
int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
if (pos + size > len) {
skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];
if (pos < len) {
			/* Split frag.
			 * We have two variants in this case:
			 * 1. Move all the frag to the second
			 *    part, if it is possible. F.e.
			 *    this approach is mandatory for TUX,
			 *    where splitting is expensive.
			 * 2. Split accurately. This is what we
			 *    do here.
			 */
skb_frag_ref(skb, i);
skb_shinfo(skb1)->frags[0].page_offset += len - pos;
skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos);
skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos);
skb_shinfo(skb)->nr_frags++;
}
k++;
} else
skb_shinfo(skb)->nr_frags++;
pos += size;
}
skb_shinfo(skb1)->nr_frags = k;
}
/**
* skb_split - Split fragmented skb to two parts at length len.
* @skb: the buffer to split
* @skb1: the buffer to receive the second part
* @len: new length for skb
*/
void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
{
int pos = skb_headlen(skb);
skb_shinfo(skb1)->tx_flags = skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG;
if (len < pos) /* Split line is inside header. */
skb_split_inside_header(skb, skb1, len, pos);
else /* Second chunk has no header, nothing to copy. */
skb_split_no_header(skb, skb1, len, pos);
}
EXPORT_SYMBOL(skb_split);
/* Shifting from/to a cloned skb is a no-go.
*
* Caller cannot keep skb_shinfo related pointers past calling here!
*/
static int skb_prepare_for_shift(struct sk_buff *skb)
{
return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
}
/**
* skb_shift - Shifts paged data partially from skb to another
* @tgt: buffer into which tail data gets added
* @skb: buffer from which the paged data comes from
* @shiftlen: shift up to this many bytes
*
 * Attempts to shift up to shiftlen worth of bytes, which may be less than
 * the length of the skb, from skb to tgt. Returns the number of bytes
 * shifted. It's up to the caller to free skb if everything was shifted.
*
* If @tgt runs out of frags, the whole operation is aborted.
*
* Skb cannot include anything else but paged data while tgt is allowed
* to have non-paged data as well.
*
* TODO: full sized shift could be optimized but that would need
* specialized skb free'er to handle frags without up-to-date nr_frags.
*/
int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
{
int from, to, merge, todo;
struct skb_frag_struct *fragfrom, *fragto;
BUG_ON(shiftlen > skb->len);
BUG_ON(skb_headlen(skb)); /* Would corrupt stream */
todo = shiftlen;
from = 0;
to = skb_shinfo(tgt)->nr_frags;
fragfrom = &skb_shinfo(skb)->frags[from];
/* Actual merge is delayed until the point when we know we can
* commit all, so that we don't have to undo partial changes
*/
if (!to ||
!skb_can_coalesce(tgt, to, skb_frag_page(fragfrom),
fragfrom->page_offset)) {
merge = -1;
} else {
merge = to - 1;
todo -= skb_frag_size(fragfrom);
if (todo < 0) {
if (skb_prepare_for_shift(skb) ||
skb_prepare_for_shift(tgt))
return 0;
/* All previous frag pointers might be stale! */
fragfrom = &skb_shinfo(skb)->frags[from];
fragto = &skb_shinfo(tgt)->frags[merge];
skb_frag_size_add(fragto, shiftlen);
skb_frag_size_sub(fragfrom, shiftlen);
fragfrom->page_offset += shiftlen;
goto onlymerged;
}
from++;
}
/* Skip full, not-fitting skb to avoid expensive operations */
if ((shiftlen == skb->len) &&
(skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to))
return 0;
if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt))
return 0;
while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) {
if (to == MAX_SKB_FRAGS)
return 0;
fragfrom = &skb_shinfo(skb)->frags[from];
fragto = &skb_shinfo(tgt)->frags[to];
if (todo >= skb_frag_size(fragfrom)) {
*fragto = *fragfrom;
todo -= skb_frag_size(fragfrom);
from++;
to++;
} else {
__skb_frag_ref(fragfrom);
fragto->page = fragfrom->page;
fragto->page_offset = fragfrom->page_offset;
skb_frag_size_set(fragto, todo);
fragfrom->page_offset += todo;
skb_frag_size_sub(fragfrom, todo);
todo = 0;
to++;
break;
}
}
/* Ready to "commit" this state change to tgt */
skb_shinfo(tgt)->nr_frags = to;
if (merge >= 0) {
fragfrom = &skb_shinfo(skb)->frags[0];
fragto = &skb_shinfo(tgt)->frags[merge];
skb_frag_size_add(fragto, skb_frag_size(fragfrom));
__skb_frag_unref(fragfrom);
}
/* Reposition in the original skb */
to = 0;
while (from < skb_shinfo(skb)->nr_frags)
skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++];
skb_shinfo(skb)->nr_frags = to;
BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags);
onlymerged:
/* Most likely the tgt won't ever need its checksum anymore, skb on
* the other hand might need it if it needs to be resent
*/
tgt->ip_summed = CHECKSUM_PARTIAL;
skb->ip_summed = CHECKSUM_PARTIAL;
/* Yak, is it really working this way? Some helper please? */
skb->len -= shiftlen;
skb->data_len -= shiftlen;
skb->truesize -= shiftlen;
tgt->len += shiftlen;
tgt->data_len += shiftlen;
tgt->truesize += shiftlen;
return shiftlen;
}
/**
* skb_prepare_seq_read - Prepare a sequential read of skb data
* @skb: the buffer to read
* @from: lower offset of data to be read
* @to: upper offset of data to be read
* @st: state variable
*
* Initializes the specified state variable. Must be called before
* invoking skb_seq_read() for the first time.
*/
void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
unsigned int to, struct skb_seq_state *st)
{
st->lower_offset = from;
st->upper_offset = to;
st->root_skb = st->cur_skb = skb;
st->frag_idx = st->stepped_offset = 0;
st->frag_data = NULL;
}
EXPORT_SYMBOL(skb_prepare_seq_read);
/**
* skb_seq_read - Sequentially read skb data
* @consumed: number of bytes consumed by the caller so far
* @data: destination pointer for data to be returned
* @st: state variable
*
* Reads a block of skb data at @consumed relative to the
* lower offset specified to skb_prepare_seq_read(). Assigns
* the head of the data block to @data and returns the length
* of the block or 0 if the end of the skb data or the upper
* offset has been reached.
*
* The caller is not required to consume all of the data
* returned, i.e. @consumed is typically set to the number
* of bytes already consumed and the next call to
* skb_seq_read() will return the remaining part of the block.
*
 * Note 1: The size of each block of data returned can be arbitrary;
 * this limitation is the cost for zerocopy sequential
 * reads of potentially non-linear data.
*
* Note 2: Fragment lists within fragments are not implemented
* at the moment, state->root_skb could be replaced with
* a stack for this purpose.
*/
unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
struct skb_seq_state *st)
{
unsigned int block_limit, abs_offset = consumed + st->lower_offset;
skb_frag_t *frag;
if (unlikely(abs_offset >= st->upper_offset)) {
if (st->frag_data) {
kunmap_atomic(st->frag_data);
st->frag_data = NULL;
}
return 0;
}
next_skb:
block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;
if (abs_offset < block_limit && !st->frag_data) {
*data = st->cur_skb->data + (abs_offset - st->stepped_offset);
return block_limit - abs_offset;
}
if (st->frag_idx == 0 && !st->frag_data)
st->stepped_offset += skb_headlen(st->cur_skb);
while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) {
frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx];
block_limit = skb_frag_size(frag) + st->stepped_offset;
if (abs_offset < block_limit) {
if (!st->frag_data)
st->frag_data = kmap_atomic(skb_frag_page(frag));
*data = (u8 *) st->frag_data + frag->page_offset +
(abs_offset - st->stepped_offset);
return block_limit - abs_offset;
}
if (st->frag_data) {
kunmap_atomic(st->frag_data);
st->frag_data = NULL;
}
st->frag_idx++;
st->stepped_offset += skb_frag_size(frag);
}
if (st->frag_data) {
kunmap_atomic(st->frag_data);
st->frag_data = NULL;
}
if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) {
st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
st->frag_idx = 0;
goto next_skb;
} else if (st->cur_skb->next) {
st->cur_skb = st->cur_skb->next;
st->frag_idx = 0;
goto next_skb;
}
return 0;
}
EXPORT_SYMBOL(skb_seq_read);
/**
* skb_abort_seq_read - Abort a sequential read of skb data
* @st: state variable
*
* Must be called if skb_seq_read() was not called until it
* returned 0.
*/
void skb_abort_seq_read(struct skb_seq_state *st)
{
if (st->frag_data)
kunmap_atomic(st->frag_data);
}
EXPORT_SYMBOL(skb_abort_seq_read);
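/*
 * Editorial sketch (not part of the original file): a minimal consumer of
 * the sequential-read API above, walking every byte of an skb window
 * without linearizing it. The function name and the XOR accumulator are
 * hypothetical; the API calls match the definitions in this file.
 */
static inline u32 skb_seq_xor_example(struct sk_buff *skb,
				      unsigned int from, unsigned int to)
{
	struct skb_seq_state st;
	const u8 *data;
	unsigned int len, consumed = 0, i;
	u32 acc = 0;

	skb_prepare_seq_read(skb, from, to, &st);
	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
		for (i = 0; i < len; i++)
			acc ^= data[i];
		consumed += len;
	}
	/* skb_seq_read() was called until it returned 0, so no
	 * skb_abort_seq_read() is needed; it would be required had we
	 * stopped the loop early. */
	return acc;
}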
#define TS_SKB_CB(state) ((struct skb_seq_state *) &((state)->cb))
static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text,
struct ts_config *conf,
struct ts_state *state)
{
return skb_seq_read(offset, text, TS_SKB_CB(state));
}
static void skb_ts_finish(struct ts_config *conf, struct ts_state *state)
{
skb_abort_seq_read(TS_SKB_CB(state));
}
/**
* skb_find_text - Find a text pattern in skb data
* @skb: the buffer to look in
* @from: search offset
* @to: search limit
* @config: textsearch configuration
* @state: uninitialized textsearch state variable
*
* Finds a pattern in the skb data according to the specified
* textsearch configuration. Use textsearch_next() to retrieve
* subsequent occurrences of the pattern. Returns the offset
* to the first occurrence or UINT_MAX if no match was found.
*/
unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
unsigned int to, struct ts_config *config,
struct ts_state *state)
{
unsigned int ret;
config->get_next_block = skb_ts_get_next_block;
config->finish = skb_ts_finish;
skb_prepare_seq_read(skb, from, to, TS_SKB_CB(state));
ret = textsearch_find(config, state);
return (ret <= to - from ? ret : UINT_MAX);
}
EXPORT_SYMBOL(skb_find_text);
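/*
 * Editorial sketch: typical use of skb_find_text(). The "kmp" algorithm
 * name and the "needle" pattern are illustrative; textsearch_prepare()
 * and textsearch_destroy() are the standard lib/textsearch entry points.
 */
static inline unsigned int skb_find_needle_example(struct sk_buff *skb)
{
	struct ts_config *conf;
	struct ts_state state;
	unsigned int pos;

	conf = textsearch_prepare("kmp", "needle", 6, GFP_KERNEL,
				  TS_AUTOLOAD);
	if (IS_ERR(conf))
		return UINT_MAX;
	pos = skb_find_text(skb, 0, skb->len, conf, &state);
	textsearch_destroy(conf);
	return pos;	/* offset of first match, or UINT_MAX */
}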
/**
* skb_append_datato_frags - append the user data to a skb
* @sk: sock structure
 * @skb: skb structure to be appended with user data.
* @getfrag: call back function to be used for getting the user data
* @from: pointer to user message iov
* @length: length of the iov message
*
 * Description: This procedure appends the user data to the fragment part
 * of the skb. If any page allocation fails, this procedure returns -ENOMEM.
*/
int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
int (*getfrag)(void *from, char *to, int offset,
int len, int odd, struct sk_buff *skb),
void *from, int length)
{
int frg_cnt = skb_shinfo(skb)->nr_frags;
int copy;
int offset = 0;
int ret;
	struct page_frag *pfrag = &current->task_frag;
do {
/* Return error if we don't have space for new frag */
if (frg_cnt >= MAX_SKB_FRAGS)
return -EMSGSIZE;
if (!sk_page_frag_refill(sk, pfrag))
return -ENOMEM;
/* copy the user data to page */
copy = min_t(int, length, pfrag->size - pfrag->offset);
ret = getfrag(from, page_address(pfrag->page) + pfrag->offset,
offset, copy, 0, skb);
if (ret < 0)
return -EFAULT;
/* copy was successful so update the size parameters */
skb_fill_page_desc(skb, frg_cnt, pfrag->page, pfrag->offset,
copy);
frg_cnt++;
pfrag->offset += copy;
get_page(pfrag->page);
skb->truesize += copy;
atomic_add(copy, &sk->sk_wmem_alloc);
skb->len += copy;
skb->data_len += copy;
offset += copy;
length -= copy;
} while (length > 0);
return 0;
}
EXPORT_SYMBOL(skb_append_datato_frags);
/**
* skb_pull_rcsum - pull skb and update receive checksum
* @skb: buffer to update
* @len: length of data pulled
*
* This function performs an skb_pull on the packet and updates
* the CHECKSUM_COMPLETE checksum. It should be used on
* receive path processing instead of skb_pull unless you know
* that the checksum difference is zero (e.g., a valid IP header)
* or you are setting ip_summed to CHECKSUM_NONE.
*/
unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
{
BUG_ON(len > skb->len);
skb->len -= len;
BUG_ON(skb->len < skb->data_len);
skb_postpull_rcsum(skb, skb->data, len);
return skb->data += len;
}
EXPORT_SYMBOL_GPL(skb_pull_rcsum);
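/*
 * Editorial sketch: stripping a hypothetical 4-byte tag on the receive
 * path with skb_pull_rcsum(), which keeps CHECKSUM_COMPLETE consistent
 * where a bare skb_pull() would silently corrupt it. EXAMPLE_TAG_LEN and
 * the function name are placeholders.
 */
#define EXAMPLE_TAG_LEN 4
static inline int skb_strip_tag_example(struct sk_buff *skb)
{
	if (!pskb_may_pull(skb, EXAMPLE_TAG_LEN))
		return -EINVAL;
	skb_pull_rcsum(skb, EXAMPLE_TAG_LEN);
	return 0;
}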
/**
* skb_segment - Perform protocol segmentation on skb.
* @head_skb: buffer to segment
* @features: features for the output path (see dev->features)
*
* This function performs segmentation on the given skb. It returns
* a pointer to the first in a list of new skbs for the segments.
* In case of error it returns ERR_PTR(err).
*/
struct sk_buff *skb_segment(struct sk_buff *head_skb,
netdev_features_t features)
{
struct sk_buff *segs = NULL;
struct sk_buff *tail = NULL;
struct sk_buff *list_skb = skb_shinfo(head_skb)->frag_list;
skb_frag_t *frag = skb_shinfo(head_skb)->frags;
unsigned int mss = skb_shinfo(head_skb)->gso_size;
unsigned int doffset = head_skb->data - skb_mac_header(head_skb);
struct sk_buff *frag_skb = head_skb;
unsigned int offset = doffset;
unsigned int tnl_hlen = skb_tnl_header_len(head_skb);
unsigned int headroom;
unsigned int len;
__be16 proto;
bool csum;
int sg = !!(features & NETIF_F_SG);
int nfrags = skb_shinfo(head_skb)->nr_frags;
int err = -ENOMEM;
int i = 0;
int pos;
proto = skb_network_protocol(head_skb);
if (unlikely(!proto))
return ERR_PTR(-EINVAL);
csum = !!can_checksum_protocol(features, proto);
__skb_push(head_skb, doffset);
headroom = skb_headroom(head_skb);
pos = skb_headlen(head_skb);
do {
struct sk_buff *nskb;
skb_frag_t *nskb_frag;
int hsize;
int size;
len = head_skb->len - offset;
if (len > mss)
len = mss;
hsize = skb_headlen(head_skb) - offset;
if (hsize < 0)
hsize = 0;
if (hsize > len || !sg)
hsize = len;
if (!hsize && i >= nfrags && skb_headlen(list_skb) &&
(skb_headlen(list_skb) == len || sg)) {
BUG_ON(skb_headlen(list_skb) > len);
i = 0;
nfrags = skb_shinfo(list_skb)->nr_frags;
frag = skb_shinfo(list_skb)->frags;
frag_skb = list_skb;
pos += skb_headlen(list_skb);
while (pos < offset + len) {
BUG_ON(i >= nfrags);
size = skb_frag_size(frag);
if (pos + size > offset + len)
break;
i++;
pos += size;
frag++;
}
nskb = skb_clone(list_skb, GFP_ATOMIC);
list_skb = list_skb->next;
if (unlikely(!nskb))
goto err;
if (unlikely(pskb_trim(nskb, len))) {
kfree_skb(nskb);
goto err;
}
hsize = skb_end_offset(nskb);
if (skb_cow_head(nskb, doffset + headroom)) {
kfree_skb(nskb);
goto err;
}
nskb->truesize += skb_end_offset(nskb) - hsize;
skb_release_head_state(nskb);
__skb_push(nskb, doffset);
} else {
nskb = __alloc_skb(hsize + doffset + headroom,
GFP_ATOMIC, skb_alloc_rx_flag(head_skb),
NUMA_NO_NODE);
if (unlikely(!nskb))
goto err;
skb_reserve(nskb, headroom);
__skb_put(nskb, doffset);
}
if (segs)
tail->next = nskb;
else
segs = nskb;
tail = nskb;
__copy_skb_header(nskb, head_skb);
nskb->mac_len = head_skb->mac_len;
skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom);
skb_copy_from_linear_data_offset(head_skb, -tnl_hlen,
nskb->data - tnl_hlen,
doffset + tnl_hlen);
if (nskb->len == len + doffset)
goto perform_csum_check;
if (!sg) {
nskb->ip_summed = CHECKSUM_NONE;
nskb->csum = skb_copy_and_csum_bits(head_skb, offset,
skb_put(nskb, len),
len, 0);
continue;
}
nskb_frag = skb_shinfo(nskb)->frags;
skb_copy_from_linear_data_offset(head_skb, offset,
skb_put(nskb, hsize), hsize);
skb_shinfo(nskb)->tx_flags = skb_shinfo(head_skb)->tx_flags &
SKBTX_SHARED_FRAG;
while (pos < offset + len) {
if (i >= nfrags) {
BUG_ON(skb_headlen(list_skb));
i = 0;
nfrags = skb_shinfo(list_skb)->nr_frags;
frag = skb_shinfo(list_skb)->frags;
frag_skb = list_skb;
BUG_ON(!nfrags);
list_skb = list_skb->next;
}
if (unlikely(skb_shinfo(nskb)->nr_frags >=
MAX_SKB_FRAGS)) {
net_warn_ratelimited(
"skb_segment: too many frags: %u %u\n",
pos, mss);
goto err;
}
if (unlikely(skb_orphan_frags(frag_skb, GFP_ATOMIC)))
goto err;
*nskb_frag = *frag;
__skb_frag_ref(nskb_frag);
size = skb_frag_size(nskb_frag);
if (pos < offset) {
nskb_frag->page_offset += offset - pos;
skb_frag_size_sub(nskb_frag, offset - pos);
}
skb_shinfo(nskb)->nr_frags++;
if (pos + size <= offset + len) {
i++;
frag++;
pos += size;
} else {
skb_frag_size_sub(nskb_frag, pos + size - (offset + len));
goto skip_fraglist;
}
nskb_frag++;
}
skip_fraglist:
nskb->data_len = len - hsize;
nskb->len += nskb->data_len;
nskb->truesize += nskb->data_len;
perform_csum_check:
if (!csum) {
nskb->csum = skb_checksum(nskb, doffset,
nskb->len - doffset, 0);
nskb->ip_summed = CHECKSUM_NONE;
}
} while ((offset += len) < head_skb->len);
return segs;
err:
kfree_skb_list(segs);
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(skb_segment);
int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb);
unsigned int offset = skb_gro_offset(skb);
unsigned int headlen = skb_headlen(skb);
struct sk_buff *nskb, *lp, *p = *head;
unsigned int len = skb_gro_len(skb);
unsigned int delta_truesize;
unsigned int headroom;
if (unlikely(p->len + len >= 65536))
return -E2BIG;
lp = NAPI_GRO_CB(p)->last ?: p;
pinfo = skb_shinfo(lp);
if (headlen <= offset) {
skb_frag_t *frag;
skb_frag_t *frag2;
int i = skbinfo->nr_frags;
int nr_frags = pinfo->nr_frags + i;
if (nr_frags > MAX_SKB_FRAGS)
goto merge;
offset -= headlen;
pinfo->nr_frags = nr_frags;
skbinfo->nr_frags = 0;
frag = pinfo->frags + nr_frags;
frag2 = skbinfo->frags + i;
do {
*--frag = *--frag2;
} while (--i);
frag->page_offset += offset;
skb_frag_size_sub(frag, offset);
/* all fragments truesize : remove (head size + sk_buff) */
delta_truesize = skb->truesize -
SKB_TRUESIZE(skb_end_offset(skb));
skb->truesize -= skb->data_len;
skb->len -= skb->data_len;
skb->data_len = 0;
NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE;
goto done;
} else if (skb->head_frag) {
int nr_frags = pinfo->nr_frags;
skb_frag_t *frag = pinfo->frags + nr_frags;
struct page *page = virt_to_head_page(skb->head);
unsigned int first_size = headlen - offset;
unsigned int first_offset;
if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS)
goto merge;
first_offset = skb->data -
(unsigned char *)page_address(page) +
offset;
pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags;
frag->page.p = page;
frag->page_offset = first_offset;
skb_frag_size_set(frag, first_size);
memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags);
		/* We don't need to clear skbinfo->nr_frags here */
delta_truesize = skb->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));
NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
goto done;
}
if (pinfo->frag_list)
goto merge;
if (skb_gro_len(p) != pinfo->gso_size)
return -E2BIG;
headroom = skb_headroom(p);
nskb = alloc_skb(headroom + skb_gro_offset(p), GFP_ATOMIC);
if (unlikely(!nskb))
return -ENOMEM;
__copy_skb_header(nskb, p);
nskb->mac_len = p->mac_len;
skb_reserve(nskb, headroom);
__skb_put(nskb, skb_gro_offset(p));
skb_set_mac_header(nskb, skb_mac_header(p) - p->data);
skb_set_network_header(nskb, skb_network_offset(p));
skb_set_transport_header(nskb, skb_transport_offset(p));
__skb_pull(p, skb_gro_offset(p));
memcpy(skb_mac_header(nskb), skb_mac_header(p),
p->data - skb_mac_header(p));
skb_shinfo(nskb)->frag_list = p;
skb_shinfo(nskb)->gso_size = pinfo->gso_size;
pinfo->gso_size = 0;
skb_header_release(p);
NAPI_GRO_CB(nskb)->last = p;
nskb->data_len += p->len;
nskb->truesize += p->truesize;
nskb->len += p->len;
*head = nskb;
nskb->next = p->next;
p->next = NULL;
p = nskb;
merge:
delta_truesize = skb->truesize;
if (offset > headlen) {
unsigned int eat = offset - headlen;
skbinfo->frags[0].page_offset += eat;
skb_frag_size_sub(&skbinfo->frags[0], eat);
skb->data_len -= eat;
skb->len -= eat;
offset = headlen;
}
__skb_pull(skb, offset);
if (!NAPI_GRO_CB(p)->last)
skb_shinfo(p)->frag_list = skb;
else
NAPI_GRO_CB(p)->last->next = skb;
NAPI_GRO_CB(p)->last = skb;
skb_header_release(skb);
lp = p;
done:
NAPI_GRO_CB(p)->count++;
p->data_len += len;
p->truesize += delta_truesize;
p->len += len;
if (lp != p) {
lp->data_len += len;
lp->truesize += delta_truesize;
lp->len += len;
}
NAPI_GRO_CB(skb)->same_flow = 1;
return 0;
}
EXPORT_SYMBOL_GPL(skb_gro_receive);
void __init skb_init(void)
{
skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
sizeof(struct sk_buff),
0,
SLAB_HWCACHE_ALIGN|SLAB_PANIC,
NULL);
skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
(2*sizeof(struct sk_buff)) +
sizeof(atomic_t),
0,
SLAB_HWCACHE_ALIGN|SLAB_PANIC,
NULL);
}
/**
* skb_to_sgvec - Fill a scatter-gather list from a socket buffer
* @skb: Socket buffer containing the buffers to be mapped
* @sg: The scatter-gather list to map into
* @offset: The offset into the buffer's contents to start mapping
* @len: Length of buffer space to be mapped
*
* Fill the specified scatter-gather list with mappings/pointers into a
* region of the buffer space attached to a socket buffer.
*/
static int
__skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
{
int start = skb_headlen(skb);
int i, copy = start - offset;
struct sk_buff *frag_iter;
int elt = 0;
if (copy > 0) {
if (copy > len)
copy = len;
sg_set_buf(sg, skb->data + offset, copy);
elt++;
if ((len -= copy) == 0)
return elt;
offset += copy;
}
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
int end;
WARN_ON(start > offset + len);
end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
if ((copy = end - offset) > 0) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
if (copy > len)
copy = len;
sg_set_page(&sg[elt], skb_frag_page(frag), copy,
frag->page_offset+offset-start);
elt++;
if (!(len -= copy))
return elt;
offset += copy;
}
start = end;
}
skb_walk_frags(skb, frag_iter) {
int end;
WARN_ON(start > offset + len);
end = start + frag_iter->len;
if ((copy = end - offset) > 0) {
if (copy > len)
copy = len;
elt += __skb_to_sgvec(frag_iter, sg+elt, offset - start,
copy);
if ((len -= copy) == 0)
return elt;
offset += copy;
}
start = end;
}
BUG_ON(len);
return elt;
}
int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
{
int nsg = __skb_to_sgvec(skb, sg, offset, len);
sg_mark_end(&sg[nsg - 1]);
return nsg;
}
EXPORT_SYMBOL_GPL(skb_to_sgvec);
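/*
 * Editorial sketch: mapping the whole payload of an skb into a
 * caller-provided scatterlist, e.g. before handing it to the crypto API.
 * MAX_EXAMPLE_SG is a hypothetical bound chosen by the caller.
 */
#define MAX_EXAMPLE_SG 16
static inline int skb_map_example(struct sk_buff *skb)
{
	struct scatterlist sg[MAX_EXAMPLE_SG];
	int nsg;

	sg_init_table(sg, MAX_EXAMPLE_SG);
	nsg = skb_to_sgvec(skb, sg, 0, skb->len);
	/* sg[0..nsg-1] now describe skb's head, frags and frag list */
	return nsg;
}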
/**
* skb_cow_data - Check that a socket buffer's data buffers are writable
* @skb: The socket buffer to check.
* @tailbits: Amount of trailing space to be added
* @trailer: Returned pointer to the skb where the @tailbits space begins
*
* Make sure that the data buffers attached to a socket buffer are
* writable. If they are not, private copies are made of the data buffers
* and the socket buffer is set to use these instead.
*
* If @tailbits is given, make sure that there is space to write @tailbits
* bytes of data beyond current end of socket buffer. @trailer will be
* set to point to the skb in which this space begins.
*
* The number of scatterlist elements required to completely map the
* COW'd and extended socket buffer will be returned.
*/
int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
{
int copyflag;
int elt;
struct sk_buff *skb1, **skb_p;
/* If skb is cloned or its head is paged, reallocate
* head pulling out all the pages (pages are considered not writable
* at the moment even if they are anonymous).
*/
if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
__pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL)
return -ENOMEM;
/* Easy case. Most of packets will go this way. */
if (!skb_has_frag_list(skb)) {
		/* A bit of trouble: not enough space for the trailer.
		 * This should not happen when the stack is tuned to generate
		 * good frames. On a miss we reallocate and reserve even more
		 * space; 128 bytes is fair. */
if (skb_tailroom(skb) < tailbits &&
pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
return -ENOMEM;
/* Voila! */
*trailer = skb;
return 1;
}
	/* Misery. We are in trouble; going to mince fragments... */
elt = 1;
skb_p = &skb_shinfo(skb)->frag_list;
copyflag = 0;
while ((skb1 = *skb_p) != NULL) {
int ntail = 0;
/* The fragment is partially pulled by someone,
* this can happen on input. Copy it and everything
* after it. */
if (skb_shared(skb1))
copyflag = 1;
/* If the skb is the last, worry about trailer. */
if (skb1->next == NULL && tailbits) {
if (skb_shinfo(skb1)->nr_frags ||
skb_has_frag_list(skb1) ||
skb_tailroom(skb1) < tailbits)
ntail = tailbits + 128;
}
if (copyflag ||
skb_cloned(skb1) ||
ntail ||
skb_shinfo(skb1)->nr_frags ||
skb_has_frag_list(skb1)) {
struct sk_buff *skb2;
/* Fuck, we are miserable poor guys... */
if (ntail == 0)
skb2 = skb_copy(skb1, GFP_ATOMIC);
else
skb2 = skb_copy_expand(skb1,
skb_headroom(skb1),
ntail,
GFP_ATOMIC);
if (unlikely(skb2 == NULL))
return -ENOMEM;
if (skb1->sk)
skb_set_owner_w(skb2, skb1->sk);
/* Looking around. Are we still alive?
* OK, link new skb, drop old one */
skb2->next = skb1->next;
*skb_p = skb2;
kfree_skb(skb1);
skb1 = skb2;
}
elt++;
*trailer = skb1;
skb_p = &skb1->next;
}
return elt;
}
EXPORT_SYMBOL_GPL(skb_cow_data);
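/*
 * Editorial sketch: the usual IPsec-style pairing of skb_cow_data() and
 * skb_to_sgvec(). The zero trailer length is illustrative; the point is
 * that skb_cow_data() returns an upper bound on the number of scatterlist
 * elements the COW'd buffer will need.
 */
static inline int skb_cow_map_example(struct sk_buff *skb,
				      struct scatterlist *sg, int max_sg)
{
	struct sk_buff *trailer;
	int nsg;

	nsg = skb_cow_data(skb, 0, &trailer);
	if (nsg < 0)
		return nsg;
	if (nsg > max_sg)
		return -EMSGSIZE;
	sg_init_table(sg, nsg);
	return skb_to_sgvec(skb, sg, 0, skb->len);
}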
static void sock_rmem_free(struct sk_buff *skb)
{
struct sock *sk = skb->sk;
atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
}
/*
 * Note: we don't memory-charge error packets (no sk_forward_alloc changes)
 */
int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
{
int len = skb->len;
if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
(unsigned int)sk->sk_rcvbuf)
return -ENOMEM;
skb_orphan(skb);
skb->sk = sk;
skb->destructor = sock_rmem_free;
atomic_add(skb->truesize, &sk->sk_rmem_alloc);
/* before exiting rcu section, make sure dst is refcounted */
skb_dst_force(skb);
skb_queue_tail(&sk->sk_error_queue, skb);
if (!sock_flag(sk, SOCK_DEAD))
sk->sk_data_ready(sk, len);
return 0;
}
EXPORT_SYMBOL(sock_queue_err_skb);
void skb_tstamp_tx(struct sk_buff *orig_skb,
struct skb_shared_hwtstamps *hwtstamps)
{
struct sock *sk = orig_skb->sk;
struct sock_exterr_skb *serr;
struct sk_buff *skb;
int err;
if (!sk)
return;
if (hwtstamps) {
*skb_hwtstamps(orig_skb) =
*hwtstamps;
} else {
/*
* no hardware time stamps available,
* so keep the shared tx_flags and only
* store software time stamp
*/
orig_skb->tstamp = ktime_get_real();
}
skb = skb_clone(orig_skb, GFP_ATOMIC);
if (!skb)
return;
serr = SKB_EXT_ERR(skb);
memset(serr, 0, sizeof(*serr));
serr->ee.ee_errno = ENOMSG;
serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
err = sock_queue_err_skb(sk, skb);
if (err)
kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(skb_tstamp_tx);
void skb_complete_wifi_ack(struct sk_buff *skb, bool acked)
{
struct sock *sk = skb->sk;
struct sock_exterr_skb *serr;
int err;
skb->wifi_acked_valid = 1;
skb->wifi_acked = acked;
serr = SKB_EXT_ERR(skb);
memset(serr, 0, sizeof(*serr));
serr->ee.ee_errno = ENOMSG;
serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS;
err = sock_queue_err_skb(sk, skb);
if (err)
kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(skb_complete_wifi_ack);
/**
* skb_partial_csum_set - set up and verify partial csum values for packet
* @skb: the skb to set
* @start: the number of bytes after skb->data to start checksumming.
* @off: the offset from start to place the checksum.
*
* For untrusted partially-checksummed packets, we need to make sure the values
* for skb->csum_start and skb->csum_offset are valid so we don't oops.
*
* This function checks and sets those values and skb->ip_summed: if this
* returns false you should drop the packet.
*/
bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
{
if (unlikely(start > skb_headlen(skb)) ||
unlikely((int)start + off > skb_headlen(skb) - 2)) {
net_warn_ratelimited("bad partial csum: csum=%u/%u len=%u\n",
start, off, skb_headlen(skb));
return false;
}
skb->ip_summed = CHECKSUM_PARTIAL;
skb->csum_start = skb_headroom(skb) + start;
skb->csum_offset = off;
skb_set_transport_header(skb, start);
return true;
}
EXPORT_SYMBOL_GPL(skb_partial_csum_set);
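/*
 * Editorial sketch: applying device-provided checksum metadata (as a
 * virtio-net-style driver might) and honouring the drop-on-false contract
 * documented above. The wrapper name and parameters are hypothetical.
 */
static inline int skb_apply_csum_hint_example(struct sk_buff *skb,
					      u16 csum_start, u16 csum_off)
{
	if (!skb_partial_csum_set(skb, csum_start, csum_off))
		return -EINVAL;	/* caller should drop the packet */
	return 0;
}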
static int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len,
unsigned int max)
{
if (skb_headlen(skb) >= len)
return 0;
/* If we need to pullup then pullup to the max, so we
* won't need to do it again.
*/
if (max > skb->len)
max = skb->len;
if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL)
return -ENOMEM;
if (skb_headlen(skb) < len)
return -EPROTO;
return 0;
}
/* This value should be large enough to cover a tagged ethernet header plus
* maximally sized IP and TCP or UDP headers.
*/
#define MAX_IP_HDR_LEN 128
static int skb_checksum_setup_ip(struct sk_buff *skb, bool recalculate)
{
unsigned int off;
bool fragment;
int err;
fragment = false;
err = skb_maybe_pull_tail(skb,
sizeof(struct iphdr),
MAX_IP_HDR_LEN);
if (err < 0)
goto out;
if (ip_hdr(skb)->frag_off & htons(IP_OFFSET | IP_MF))
fragment = true;
off = ip_hdrlen(skb);
err = -EPROTO;
if (fragment)
goto out;
switch (ip_hdr(skb)->protocol) {
case IPPROTO_TCP:
err = skb_maybe_pull_tail(skb,
off + sizeof(struct tcphdr),
MAX_IP_HDR_LEN);
if (err < 0)
goto out;
if (!skb_partial_csum_set(skb, off,
offsetof(struct tcphdr, check))) {
err = -EPROTO;
goto out;
}
if (recalculate)
tcp_hdr(skb)->check =
~csum_tcpudp_magic(ip_hdr(skb)->saddr,
ip_hdr(skb)->daddr,
skb->len - off,
IPPROTO_TCP, 0);
break;
case IPPROTO_UDP:
err = skb_maybe_pull_tail(skb,
off + sizeof(struct udphdr),
MAX_IP_HDR_LEN);
if (err < 0)
goto out;
if (!skb_partial_csum_set(skb, off,
offsetof(struct udphdr, check))) {
err = -EPROTO;
goto out;
}
if (recalculate)
udp_hdr(skb)->check =
~csum_tcpudp_magic(ip_hdr(skb)->saddr,
ip_hdr(skb)->daddr,
skb->len - off,
IPPROTO_UDP, 0);
break;
default:
goto out;
}
err = 0;
out:
return err;
}
/* This value should be large enough to cover a tagged ethernet header plus
* an IPv6 header, all options, and a maximal TCP or UDP header.
*/
#define MAX_IPV6_HDR_LEN 256
#define OPT_HDR(type, skb, off) \
(type *)(skb_network_header(skb) + (off))
static int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate)
{
int err;
u8 nexthdr;
unsigned int off;
unsigned int len;
bool fragment;
bool done;
fragment = false;
done = false;
off = sizeof(struct ipv6hdr);
err = skb_maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN);
if (err < 0)
goto out;
nexthdr = ipv6_hdr(skb)->nexthdr;
len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len);
while (off <= len && !done) {
switch (nexthdr) {
case IPPROTO_DSTOPTS:
case IPPROTO_HOPOPTS:
case IPPROTO_ROUTING: {
struct ipv6_opt_hdr *hp;
err = skb_maybe_pull_tail(skb,
off +
sizeof(struct ipv6_opt_hdr),
MAX_IPV6_HDR_LEN);
if (err < 0)
goto out;
hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
nexthdr = hp->nexthdr;
off += ipv6_optlen(hp);
break;
}
case IPPROTO_AH: {
struct ip_auth_hdr *hp;
err = skb_maybe_pull_tail(skb,
off +
sizeof(struct ip_auth_hdr),
MAX_IPV6_HDR_LEN);
if (err < 0)
goto out;
hp = OPT_HDR(struct ip_auth_hdr, skb, off);
nexthdr = hp->nexthdr;
off += ipv6_authlen(hp);
break;
}
case IPPROTO_FRAGMENT: {
struct frag_hdr *hp;
err = skb_maybe_pull_tail(skb,
off +
sizeof(struct frag_hdr),
MAX_IPV6_HDR_LEN);
if (err < 0)
goto out;
hp = OPT_HDR(struct frag_hdr, skb, off);
if (hp->frag_off & htons(IP6_OFFSET | IP6_MF))
fragment = true;
nexthdr = hp->nexthdr;
off += sizeof(struct frag_hdr);
break;
}
default:
done = true;
break;
}
}
err = -EPROTO;
if (!done || fragment)
goto out;
switch (nexthdr) {
case IPPROTO_TCP:
err = skb_maybe_pull_tail(skb,
off + sizeof(struct tcphdr),
MAX_IPV6_HDR_LEN);
if (err < 0)
goto out;
if (!skb_partial_csum_set(skb, off,
offsetof(struct tcphdr, check))) {
err = -EPROTO;
goto out;
}
if (recalculate)
tcp_hdr(skb)->check =
~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr,
skb->len - off,
IPPROTO_TCP, 0);
break;
case IPPROTO_UDP:
err = skb_maybe_pull_tail(skb,
off + sizeof(struct udphdr),
MAX_IPV6_HDR_LEN);
if (err < 0)
goto out;
if (!skb_partial_csum_set(skb, off,
offsetof(struct udphdr, check))) {
err = -EPROTO;
goto out;
}
if (recalculate)
udp_hdr(skb)->check =
~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr,
skb->len - off,
IPPROTO_UDP, 0);
break;
default:
goto out;
}
err = 0;
out:
return err;
}
/**
* skb_checksum_setup - set up partial checksum offset
* @skb: the skb to set up
* @recalculate: if true the pseudo-header checksum will be recalculated
*/
int skb_checksum_setup(struct sk_buff *skb, bool recalculate)
{
int err;
switch (skb->protocol) {
case htons(ETH_P_IP):
err = skb_checksum_setup_ip(skb, recalculate);
break;
case htons(ETH_P_IPV6):
err = skb_checksum_setup_ipv6(skb, recalculate);
break;
default:
err = -EPROTO;
break;
}
return err;
}
EXPORT_SYMBOL(skb_checksum_setup);
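/*
 * Editorial sketch: a guest-facing receive path (in the style of a Xen or
 * virtio backend) validating untrusted checksum metadata before injecting
 * the packet into the stack. Error handling beyond dropping is omitted.
 */
static inline int rx_fixup_csum_example(struct sk_buff *skb)
{
	int err = skb_checksum_setup(skb, true);

	if (err) {
		kfree_skb(skb);
		return err;
	}
	netif_rx(skb);
	return 0;
}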
void __skb_warn_lro_forwarding(const struct sk_buff *skb)
{
net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n",
skb->dev->name);
}
EXPORT_SYMBOL(__skb_warn_lro_forwarding);
void kfree_skb_partial(struct sk_buff *skb, bool head_stolen)
{
if (head_stolen) {
skb_release_head_state(skb);
kmem_cache_free(skbuff_head_cache, skb);
} else {
__kfree_skb(skb);
}
}
EXPORT_SYMBOL(kfree_skb_partial);
/**
* skb_try_coalesce - try to merge skb to prior one
* @to: prior buffer
* @from: buffer to add
* @fragstolen: pointer to boolean
* @delta_truesize: how much more was allocated than was requested
*/
bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
bool *fragstolen, int *delta_truesize)
{
int i, delta, len = from->len;
*fragstolen = false;
if (skb_cloned(to))
return false;
if (len <= skb_tailroom(to)) {
BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len));
*delta_truesize = 0;
return true;
}
if (skb_has_frag_list(to) || skb_has_frag_list(from))
return false;
if (skb_headlen(from) != 0) {
struct page *page;
unsigned int offset;
if (skb_shinfo(to)->nr_frags +
skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS)
return false;
if (skb_head_is_locked(from))
return false;
delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));
page = virt_to_head_page(from->head);
offset = from->data - (unsigned char *)page_address(page);
skb_fill_page_desc(to, skb_shinfo(to)->nr_frags,
page, offset, skb_headlen(from));
*fragstolen = true;
} else {
if (skb_shinfo(to)->nr_frags +
skb_shinfo(from)->nr_frags > MAX_SKB_FRAGS)
return false;
delta = from->truesize - SKB_TRUESIZE(skb_end_offset(from));
}
WARN_ON_ONCE(delta < len);
memcpy(skb_shinfo(to)->frags + skb_shinfo(to)->nr_frags,
skb_shinfo(from)->frags,
skb_shinfo(from)->nr_frags * sizeof(skb_frag_t));
skb_shinfo(to)->nr_frags += skb_shinfo(from)->nr_frags;
if (!skb_cloned(from))
skb_shinfo(from)->nr_frags = 0;
/* if the skb is not cloned this does nothing
* since we set nr_frags to 0.
*/
for (i = 0; i < skb_shinfo(from)->nr_frags; i++)
skb_frag_ref(from, i);
to->truesize += delta;
to->len += len;
to->data_len += len;
*delta_truesize = delta;
return true;
}
EXPORT_SYMBOL(skb_try_coalesce);
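/*
 * Editorial sketch: the consume-or-queue pattern built around
 * skb_try_coalesce(), as used by receive queues. kfree_skb_partial() is
 * defined above; the queueing of the non-coalesced case is schematic.
 */
static inline void rx_coalesce_example(struct sk_buff *tail,
				       struct sk_buff *skb)
{
	bool fragstolen;
	int delta;

	if (tail && skb_try_coalesce(tail, skb, &fragstolen, &delta))
		kfree_skb_partial(skb, fragstolen);
	/* else: queue skb as a separate buffer (not shown) */
}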
/**
* skb_scrub_packet - scrub an skb
*
* @skb: buffer to clean
* @xnet: packet is crossing netns
*
 * skb_scrub_packet can be used after encapsulating or decapsulating a packet
 * into/from a tunnel. Some information has to be cleared during these
 * operations.
* skb_scrub_packet can also be used to clean a skb before injecting it in
* another namespace (@xnet == true). We have to clear all information in the
* skb that could impact namespace isolation.
*/
void skb_scrub_packet(struct sk_buff *skb, bool xnet)
{
if (xnet)
skb_orphan(skb);
skb->tstamp.tv64 = 0;
skb->pkt_type = PACKET_HOST;
skb->skb_iif = 0;
skb->local_df = 0;
skb_dst_drop(skb);
skb->mark = 0;
secpath_reset(skb);
nf_reset(skb);
nf_reset_trace(skb);
}
EXPORT_SYMBOL_GPL(skb_scrub_packet);
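/*
 * Editorial sketch: scrubbing after tunnel decapsulation. The @xnet
 * decision compares the namespaces of the ingress and egress devices;
 * net_eq() and dev_net() are the standard accessors.
 */
static inline void tunnel_decap_scrub_example(struct sk_buff *skb,
					      struct net_device *in,
					      struct net_device *out)
{
	skb_scrub_packet(skb, !net_eq(dev_net(in), dev_net(out)));
}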
/**
* skb_gso_transport_seglen - Return length of individual segments of a gso packet
*
* @skb: GSO skb
*
* skb_gso_transport_seglen is used to determine the real size of the
 * individual segments, including Layer 4 headers (TCP/UDP).
*
* The MAC/L2 or network (IP, IPv6) headers are not accounted for.
*/
unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
{
const struct skb_shared_info *shinfo = skb_shinfo(skb);
unsigned int hdr_len;
if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
hdr_len = tcp_hdrlen(skb);
else
hdr_len = sizeof(struct udphdr);
return hdr_len + shinfo->gso_size;
}
EXPORT_SYMBOL_GPL(skb_gso_transport_seglen);
| ./CrossVul/dataset_final_sorted/CWE-416/c/good_2030_0 |
crossvul-cpp_data_good_5516_1 | /*
+----------------------------------------------------------------------+
| PHP Version 7 |
+----------------------------------------------------------------------+
| Copyright (c) 1997-2016 The PHP Group |
+----------------------------------------------------------------------+
| This source file is subject to version 3.01 of the PHP license, |
| that is bundled with this package in the file LICENSE, and is |
| available through the world-wide-web at the following url: |
| http://www.php.net/license/3_01.txt |
| If you did not receive a copy of the PHP license and are unable to |
| obtain it through the world-wide-web, please send a note to |
| license@php.net so we can mail you a copy immediately. |
+----------------------------------------------------------------------+
 | Authors: Jani Lehtimäki <jkl@njet.net> |
| Thies C. Arntzen <thies@thieso.net> |
| Sascha Schumann <sascha@schumann.cx> |
+----------------------------------------------------------------------+
*/
/* $Id$ */
/* {{{ includes
*/
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include "php.h"
#include "php_string.h"
#include "php_var.h"
#include "zend_smart_str.h"
#include "basic_functions.h"
#include "php_incomplete_class.h"
#define COMMON (is_ref ? "&" : "")
/* }}} */
static void php_array_element_dump(zval *zv, zend_ulong index, zend_string *key, int level) /* {{{ */
{
if (key == NULL) { /* numeric key */
php_printf("%*c[" ZEND_LONG_FMT "]=>\n", level + 1, ' ', index);
} else { /* string key */
php_printf("%*c[\"", level + 1, ' ');
PHPWRITE(ZSTR_VAL(key), ZSTR_LEN(key));
php_printf("\"]=>\n");
}
php_var_dump(zv, level + 2);
}
/* }}} */
static void php_object_property_dump(zval *zv, zend_ulong index, zend_string *key, int level) /* {{{ */
{
const char *prop_name, *class_name;
if (key == NULL) { /* numeric key */
php_printf("%*c[" ZEND_LONG_FMT "]=>\n", level + 1, ' ', index);
} else { /* string key */
int unmangle = zend_unmangle_property_name(key, &class_name, &prop_name);
php_printf("%*c[", level + 1, ' ');
if (class_name && unmangle == SUCCESS) {
if (class_name[0] == '*') {
php_printf("\"%s\":protected", prop_name);
} else {
php_printf("\"%s\":\"%s\":private", prop_name, class_name);
}
} else {
php_printf("\"");
PHPWRITE(ZSTR_VAL(key), ZSTR_LEN(key));
php_printf("\"");
}
ZEND_PUTS("]=>\n");
}
php_var_dump(zv, level + 2);
}
/* }}} */
PHPAPI void php_var_dump(zval *struc, int level) /* {{{ */
{
HashTable *myht;
zend_string *class_name;
int is_temp;
int is_ref = 0;
zend_ulong num;
zend_string *key;
zval *val;
uint32_t count;
if (level > 1) {
php_printf("%*c", level - 1, ' ');
}
again:
switch (Z_TYPE_P(struc)) {
case IS_FALSE:
php_printf("%sbool(false)\n", COMMON);
break;
case IS_TRUE:
php_printf("%sbool(true)\n", COMMON);
break;
case IS_NULL:
php_printf("%sNULL\n", COMMON);
break;
case IS_LONG:
php_printf("%sint(" ZEND_LONG_FMT ")\n", COMMON, Z_LVAL_P(struc));
break;
case IS_DOUBLE:
php_printf("%sfloat(%.*G)\n", COMMON, (int) EG(precision), Z_DVAL_P(struc));
break;
case IS_STRING:
php_printf("%sstring(%zd) \"", COMMON, Z_STRLEN_P(struc));
PHPWRITE(Z_STRVAL_P(struc), Z_STRLEN_P(struc));
PUTS("\"\n");
break;
case IS_ARRAY:
myht = Z_ARRVAL_P(struc);
if (level > 1 && ZEND_HASH_APPLY_PROTECTION(myht) && ++myht->u.v.nApplyCount > 1) {
PUTS("*RECURSION*\n");
--myht->u.v.nApplyCount;
return;
}
count = zend_array_count(myht);
php_printf("%sarray(%d) {\n", COMMON, count);
is_temp = 0;
ZEND_HASH_FOREACH_KEY_VAL_IND(myht, num, key, val) {
php_array_element_dump(val, num, key, level);
} ZEND_HASH_FOREACH_END();
if (level > 1 && ZEND_HASH_APPLY_PROTECTION(myht)) {
--myht->u.v.nApplyCount;
}
if (is_temp) {
zend_hash_destroy(myht);
efree(myht);
}
if (level > 1) {
php_printf("%*c", level-1, ' ');
}
PUTS("}\n");
break;
case IS_OBJECT:
if (Z_OBJ_APPLY_COUNT_P(struc) > 0) {
PUTS("*RECURSION*\n");
return;
}
Z_OBJ_INC_APPLY_COUNT_P(struc);
myht = Z_OBJDEBUG_P(struc, is_temp);
class_name = Z_OBJ_HANDLER_P(struc, get_class_name)(Z_OBJ_P(struc));
php_printf("%sobject(%s)#%d (%d) {\n", COMMON, ZSTR_VAL(class_name), Z_OBJ_HANDLE_P(struc), myht ? zend_array_count(myht) : 0);
zend_string_release(class_name);
if (myht) {
zend_ulong num;
zend_string *key;
zval *val;
ZEND_HASH_FOREACH_KEY_VAL_IND(myht, num, key, val) {
php_object_property_dump(val, num, key, level);
} ZEND_HASH_FOREACH_END();
if (is_temp) {
zend_hash_destroy(myht);
efree(myht);
}
}
if (level > 1) {
php_printf("%*c", level-1, ' ');
}
PUTS("}\n");
Z_OBJ_DEC_APPLY_COUNT_P(struc);
break;
case IS_RESOURCE: {
const char *type_name = zend_rsrc_list_get_rsrc_type(Z_RES_P(struc));
php_printf("%sresource(%pd) of type (%s)\n", COMMON, Z_RES_P(struc)->handle, type_name ? type_name : "Unknown");
break;
}
case IS_REFERENCE:
//??? hide references with refcount==1 (for compatibility)
if (Z_REFCOUNT_P(struc) > 1) {
is_ref = 1;
}
struc = Z_REFVAL_P(struc);
goto again;
break;
default:
php_printf("%sUNKNOWN:0\n", COMMON);
break;
}
}
/* }}} */
/* {{{ proto void var_dump(mixed var)
Dumps a string representation of variable to output */
PHP_FUNCTION(var_dump)
{
zval *args;
int argc;
int i;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "+", &args, &argc) == FAILURE) {
return;
}
for (i = 0; i < argc; i++) {
php_var_dump(&args[i], 1);
}
}
/* }}} */
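/* {{{ Editorial sketch (not part of the original file): how an extension
 function might reuse php_var_dump() directly. "my_dump" is a hypothetical
 function name; the parameter spec matches a single mixed argument. */
PHP_FUNCTION(my_dump)
{
	zval *val;

	if (zend_parse_parameters(ZEND_NUM_ARGS(), "z", &val) == FAILURE) {
		return;
	}
	php_var_dump(val, 1);
}
/* }}} */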
static void zval_array_element_dump(zval *zv, zend_ulong index, zend_string *key, int level) /* {{{ */
{
if (key == NULL) { /* numeric key */
php_printf("%*c[" ZEND_LONG_FMT "]=>\n", level + 1, ' ', index);
} else { /* string key */
php_printf("%*c[\"", level + 1, ' ');
PHPWRITE(ZSTR_VAL(key), ZSTR_LEN(key));
php_printf("\"]=>\n");
}
php_debug_zval_dump(zv, level + 2);
}
/* }}} */
static void zval_object_property_dump(zval *zv, zend_ulong index, zend_string *key, int level) /* {{{ */
{
const char *prop_name, *class_name;
if (key == NULL) { /* numeric key */
php_printf("%*c[" ZEND_LONG_FMT "]=>\n", level + 1, ' ', index);
} else { /* string key */
zend_unmangle_property_name(key, &class_name, &prop_name);
php_printf("%*c[", level + 1, ' ');
if (class_name) {
if (class_name[0] == '*') {
php_printf("\"%s\":protected", prop_name);
} else {
php_printf("\"%s\":\"%s\":private", prop_name, class_name);
}
} else {
php_printf("\"%s\"", prop_name);
}
ZEND_PUTS("]=>\n");
}
php_debug_zval_dump(zv, level + 2);
}
/* }}} */
PHPAPI void php_debug_zval_dump(zval *struc, int level) /* {{{ */
{
HashTable *myht = NULL;
zend_string *class_name;
int is_temp = 0;
int is_ref = 0;
zend_ulong index;
zend_string *key;
zval *val;
uint32_t count;
if (level > 1) {
php_printf("%*c", level - 1, ' ');
}
again:
switch (Z_TYPE_P(struc)) {
case IS_FALSE:
php_printf("%sbool(false)\n", COMMON);
break;
case IS_TRUE:
php_printf("%sbool(true)\n", COMMON);
break;
case IS_NULL:
php_printf("%sNULL\n", COMMON);
break;
case IS_LONG:
php_printf("%sint(" ZEND_LONG_FMT ")\n", COMMON, Z_LVAL_P(struc));
break;
case IS_DOUBLE:
php_printf("%sfloat(%.*G)\n", COMMON, (int) EG(precision), Z_DVAL_P(struc));
break;
case IS_STRING:
php_printf("%sstring(%zd) \"", COMMON, Z_STRLEN_P(struc));
PHPWRITE(Z_STRVAL_P(struc), Z_STRLEN_P(struc));
php_printf("\" refcount(%u)\n", Z_REFCOUNTED_P(struc) ? Z_REFCOUNT_P(struc) : 1);
break;
case IS_ARRAY:
myht = Z_ARRVAL_P(struc);
if (level > 1 && ZEND_HASH_APPLY_PROTECTION(myht) && myht->u.v.nApplyCount++ > 1) {
myht->u.v.nApplyCount--;
PUTS("*RECURSION*\n");
return;
}
count = zend_array_count(myht);
php_printf("%sarray(%d) refcount(%u){\n", COMMON, count, Z_REFCOUNTED_P(struc) ? Z_REFCOUNT_P(struc) : 1);
ZEND_HASH_FOREACH_KEY_VAL_IND(myht, index, key, val) {
zval_array_element_dump(val, index, key, level);
} ZEND_HASH_FOREACH_END();
if (level > 1 && ZEND_HASH_APPLY_PROTECTION(myht)) {
myht->u.v.nApplyCount--;
}
if (is_temp) {
zend_hash_destroy(myht);
efree(myht);
}
if (level > 1) {
php_printf("%*c", level - 1, ' ');
}
PUTS("}\n");
break;
case IS_OBJECT:
myht = Z_OBJDEBUG_P(struc, is_temp);
if (myht) {
if (myht->u.v.nApplyCount > 1) {
PUTS("*RECURSION*\n");
return;
} else {
myht->u.v.nApplyCount++;
}
}
class_name = Z_OBJ_HANDLER_P(struc, get_class_name)(Z_OBJ_P(struc));
php_printf("%sobject(%s)#%d (%d) refcount(%u){\n", COMMON, ZSTR_VAL(class_name), Z_OBJ_HANDLE_P(struc), myht ? zend_array_count(myht) : 0, Z_REFCOUNT_P(struc));
zend_string_release(class_name);
if (myht) {
ZEND_HASH_FOREACH_KEY_VAL_IND(myht, index, key, val) {
zval_object_property_dump(val, index, key, level);
} ZEND_HASH_FOREACH_END();
myht->u.v.nApplyCount--;
if (is_temp) {
zend_hash_destroy(myht);
efree(myht);
}
}
if (level > 1) {
php_printf("%*c", level - 1, ' ');
}
PUTS("}\n");
break;
case IS_RESOURCE: {
const char *type_name = zend_rsrc_list_get_rsrc_type(Z_RES_P(struc));
php_printf("%sresource(%d) of type (%s) refcount(%u)\n", COMMON, Z_RES_P(struc)->handle, type_name ? type_name : "Unknown", Z_REFCOUNT_P(struc));
break;
}
case IS_REFERENCE:
//??? hide references with refcount==1 (for compatibility)
if (Z_REFCOUNT_P(struc) > 1) {
is_ref = 1;
}
struc = Z_REFVAL_P(struc);
goto again;
default:
php_printf("%sUNKNOWN:0\n", COMMON);
break;
}
}
/* }}} */
/* {{{ proto void debug_zval_dump(mixed var)
Dumps a string representation of an internal zend value to output. */
PHP_FUNCTION(debug_zval_dump)
{
zval *args;
int argc;
int i;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "+", &args, &argc) == FAILURE) {
return;
}
for (i = 0; i < argc; i++) {
php_debug_zval_dump(&args[i], 1);
}
}
/* }}} */
#define buffer_append_spaces(buf, num_spaces) \
do { \
char *tmp_spaces; \
size_t tmp_spaces_len; \
tmp_spaces_len = spprintf(&tmp_spaces, 0,"%*c", num_spaces, ' '); \
smart_str_appendl(buf, tmp_spaces, tmp_spaces_len); \
efree(tmp_spaces); \
} while(0);
static void php_array_element_export(zval *zv, zend_ulong index, zend_string *key, int level, smart_str *buf) /* {{{ */
{
if (key == NULL) { /* numeric key */
buffer_append_spaces(buf, level+1);
smart_str_append_long(buf, (zend_long) index);
smart_str_appendl(buf, " => ", 4);
} else { /* string key */
zend_string *tmp_str;
zend_string *ckey = php_addcslashes(key, 0, "'\\", 2);
tmp_str = php_str_to_str(ZSTR_VAL(ckey), ZSTR_LEN(ckey), "\0", 1, "' . \"\\0\" . '", 12);
buffer_append_spaces(buf, level + 1);
smart_str_appendc(buf, '\'');
smart_str_append(buf, tmp_str);
smart_str_appendl(buf, "' => ", 5);
zend_string_free(ckey);
zend_string_free(tmp_str);
}
php_var_export_ex(zv, level + 2, buf);
smart_str_appendc(buf, ',');
smart_str_appendc(buf, '\n');
}
/* }}} */
static void php_object_element_export(zval *zv, zend_ulong index, zend_string *key, int level, smart_str *buf) /* {{{ */
{
buffer_append_spaces(buf, level + 2);
if (key != NULL) {
const char *class_name, *prop_name;
size_t prop_name_len;
zend_string *pname_esc;
zend_unmangle_property_name_ex(key, &class_name, &prop_name, &prop_name_len);
pname_esc = php_addcslashes(zend_string_init(prop_name, prop_name_len, 0), 1, "'\\", 2);
smart_str_appendc(buf, '\'');
smart_str_append(buf, pname_esc);
smart_str_appendc(buf, '\'');
zend_string_release(pname_esc);
} else {
smart_str_append_long(buf, (zend_long) index);
}
smart_str_appendl(buf, " => ", 4);
php_var_export_ex(zv, level + 2, buf);
smart_str_appendc(buf, ',');
smart_str_appendc(buf, '\n');
}
/* }}} */
PHPAPI void php_var_export_ex(zval *struc, int level, smart_str *buf) /* {{{ */
{
HashTable *myht;
char *tmp_str;
size_t tmp_len;
zend_string *ztmp, *ztmp2;
zend_ulong index;
zend_string *key;
zval *val;
again:
switch (Z_TYPE_P(struc)) {
case IS_FALSE:
smart_str_appendl(buf, "false", 5);
break;
case IS_TRUE:
smart_str_appendl(buf, "true", 4);
break;
case IS_NULL:
smart_str_appendl(buf, "NULL", 4);
break;
case IS_LONG:
smart_str_append_long(buf, Z_LVAL_P(struc));
break;
case IS_DOUBLE:
tmp_len = spprintf(&tmp_str, 0,"%.*H", PG(serialize_precision), Z_DVAL_P(struc));
smart_str_appendl(buf, tmp_str, tmp_len);
/* Without a decimal point, PHP treats a number literal as an int.
* This check even works for scientific notation, because the
* mantissa always contains a decimal point.
* We need to check for finiteness, because INF, -INF and NAN
* must not have a decimal point added.
*/
if (zend_finite(Z_DVAL_P(struc)) && NULL == strchr(tmp_str, '.')) {
smart_str_appendl(buf, ".0", 2);
}
efree(tmp_str);
break;
case IS_STRING:
ztmp = php_addcslashes(Z_STR_P(struc), 0, "'\\", 2);
ztmp2 = php_str_to_str(ZSTR_VAL(ztmp), ZSTR_LEN(ztmp), "\0", 1, "' . \"\\0\" . '", 12);
smart_str_appendc(buf, '\'');
smart_str_append(buf, ztmp2);
smart_str_appendc(buf, '\'');
zend_string_free(ztmp);
zend_string_free(ztmp2);
break;
case IS_ARRAY:
myht = Z_ARRVAL_P(struc);
if (ZEND_HASH_APPLY_PROTECTION(myht) && myht->u.v.nApplyCount++ > 0) {
myht->u.v.nApplyCount--;
smart_str_appendl(buf, "NULL", 4);
zend_error(E_WARNING, "var_export does not handle circular references");
return;
}
if (level > 1) {
smart_str_appendc(buf, '\n');
buffer_append_spaces(buf, level - 1);
}
smart_str_appendl(buf, "array (\n", 8);
ZEND_HASH_FOREACH_KEY_VAL_IND(myht, index, key, val) {
php_array_element_export(val, index, key, level, buf);
} ZEND_HASH_FOREACH_END();
if (ZEND_HASH_APPLY_PROTECTION(myht)) {
myht->u.v.nApplyCount--;
}
if (level > 1) {
buffer_append_spaces(buf, level - 1);
}
smart_str_appendc(buf, ')');
break;
case IS_OBJECT:
myht = Z_OBJPROP_P(struc);
if (myht) {
if (myht->u.v.nApplyCount > 0) {
smart_str_appendl(buf, "NULL", 4);
zend_error(E_WARNING, "var_export does not handle circular references");
return;
} else {
myht->u.v.nApplyCount++;
}
}
if (level > 1) {
smart_str_appendc(buf, '\n');
buffer_append_spaces(buf, level - 1);
}
smart_str_append(buf, Z_OBJCE_P(struc)->name);
smart_str_appendl(buf, "::__set_state(array(\n", 21);
if (myht) {
ZEND_HASH_FOREACH_KEY_VAL_IND(myht, index, key, val) {
php_object_element_export(val, index, key, level, buf);
} ZEND_HASH_FOREACH_END();
myht->u.v.nApplyCount--;
}
if (level > 1) {
buffer_append_spaces(buf, level - 1);
}
smart_str_appendl(buf, "))", 2);
break;
case IS_REFERENCE:
struc = Z_REFVAL_P(struc);
goto again;
break;
default:
smart_str_appendl(buf, "NULL", 4);
break;
}
}
/* }}} */
/* For BC reasons, this always builds the output and then prints it */
PHPAPI void php_var_export(zval *struc, int level) /* {{{ */
{
smart_str buf = {0};
php_var_export_ex(struc, level, &buf);
smart_str_0(&buf);
PHPWRITE(ZSTR_VAL(buf.s), ZSTR_LEN(buf.s));
smart_str_free(&buf);
}
/* }}} */
/* {{{ proto mixed var_export(mixed var [, bool return])
Outputs or returns a string representation of a variable */
PHP_FUNCTION(var_export)
{
zval *var;
zend_bool return_output = 0;
smart_str buf = {0};
if (zend_parse_parameters(ZEND_NUM_ARGS(), "z|b", &var, &return_output) == FAILURE) {
return;
}
php_var_export_ex(var, 1, &buf);
smart_str_0 (&buf);
if (return_output) {
RETURN_NEW_STR(buf.s);
} else {
PHPWRITE(ZSTR_VAL(buf.s), ZSTR_LEN(buf.s));
smart_str_free(&buf);
}
}
/* }}} */
static void php_var_serialize_intern(smart_str *buf, zval *struc, php_serialize_data_t var_hash);
static inline zend_long php_add_var_hash(php_serialize_data_t data, zval *var) /* {{{ */
{
zval *zv;
zend_ulong key;
zend_bool is_ref = Z_ISREF_P(var);
data->n += 1;
if (!is_ref && Z_TYPE_P(var) != IS_OBJECT) {
return 0;
}
/* References to objects are treated as if the reference didn't exist */
if (is_ref && Z_TYPE_P(Z_REFVAL_P(var)) == IS_OBJECT) {
var = Z_REFVAL_P(var);
}
/* Index for the variable is stored using the numeric value of the pointer to
* the zend_refcounted struct */
key = (zend_ulong) (zend_uintptr_t) Z_COUNTED_P(var);
zv = zend_hash_index_find(&data->ht, key);
if (zv) {
/* References are only counted once, undo the data->n increment above */
if (is_ref) {
data->n -= 1;
}
return Z_LVAL_P(zv);
} else {
zval zv_n;
ZVAL_LONG(&zv_n, data->n);
zend_hash_index_add_new(&data->ht, key, &zv_n);
		/* In addition to the index, we also store the variable, to ensure that
		 * it is not destroyed during serialization and its pointer reused. The
		 * variable is stored at the numeric value of the pointer + 1, which
		 * cannot be the location of another zend_refcounted structure. */
zend_hash_index_add_new(&data->ht, key + 1, var);
Z_ADDREF_P(var);
return 0;
}
}
/* }}} */
static inline void php_var_serialize_long(smart_str *buf, zend_long val) /* {{{ */
{
smart_str_appendl(buf, "i:", 2);
smart_str_append_long(buf, val);
smart_str_appendc(buf, ';');
}
/* }}} */
static inline void php_var_serialize_string(smart_str *buf, char *str, size_t len) /* {{{ */
{
smart_str_appendl(buf, "s:", 2);
smart_str_append_unsigned(buf, len);
smart_str_appendl(buf, ":\"", 2);
smart_str_appendl(buf, str, len);
smart_str_appendl(buf, "\";", 2);
}
/* }}} */
static inline zend_bool php_var_serialize_class_name(smart_str *buf, zval *struc) /* {{{ */
{
PHP_CLASS_ATTRIBUTES;
PHP_SET_CLASS_ATTRIBUTES(struc);
smart_str_appendl(buf, "O:", 2);
smart_str_append_unsigned(buf, ZSTR_LEN(class_name));
smart_str_appendl(buf, ":\"", 2);
smart_str_append(buf, class_name);
smart_str_appendl(buf, "\":", 2);
PHP_CLEANUP_CLASS_ATTRIBUTES();
return incomplete_class;
}
/* }}} */
static HashTable *php_var_serialize_collect_names(HashTable *src, uint32_t count, zend_bool incomplete) /* {{{ */ {
zval *val;
HashTable *ht;
zend_string *key, *name;
ALLOC_HASHTABLE(ht);
zend_hash_init(ht, count, NULL, NULL, 0);
ZEND_HASH_FOREACH_STR_KEY_VAL(src, key, val) {
if (incomplete && strcmp(ZSTR_VAL(key), MAGIC_MEMBER) == 0) {
continue;
}
if (Z_TYPE_P(val) != IS_STRING) {
php_error_docref(NULL, E_NOTICE,
"__sleep should return an array only containing the names of instance-variables to serialize.");
}
name = zval_get_string(val);
if (zend_hash_exists(ht, name)) {
php_error_docref(NULL, E_NOTICE,
"\"%s\" is returned from __sleep multiple times", ZSTR_VAL(name));
zend_string_release(name);
continue;
}
zend_hash_add_empty_element(ht, name);
zend_string_release(name);
} ZEND_HASH_FOREACH_END();
return ht;
}
/* }}} */
static void php_var_serialize_class(smart_str *buf, zval *struc, zval *retval_ptr, php_serialize_data_t var_hash) /* {{{ */
{
uint32_t count;
zend_bool incomplete_class;
HashTable *ht;
incomplete_class = php_var_serialize_class_name(buf, struc);
/* count after serializing name, since php_var_serialize_class_name
	 * changes the count if the variable is an incomplete class */
if (Z_TYPE_P(retval_ptr) == IS_ARRAY) {
ht = Z_ARRVAL_P(retval_ptr);
count = zend_array_count(ht);
} else if (Z_TYPE_P(retval_ptr) == IS_OBJECT) {
ht = Z_OBJPROP_P(retval_ptr);
count = zend_array_count(ht);
if (incomplete_class) {
--count;
}
} else {
count = 0;
ht = NULL;
}
if (count > 0) {
zval *d;
zval nval, *nvalp;
zend_string *name;
HashTable *names, *propers;
names = php_var_serialize_collect_names(ht, count, incomplete_class);
smart_str_append_unsigned(buf, zend_hash_num_elements(names));
smart_str_appendl(buf, ":{", 2);
ZVAL_NULL(&nval);
nvalp = &nval;
propers = Z_OBJPROP_P(struc);
ZEND_HASH_FOREACH_STR_KEY(names, name) {
if ((d = zend_hash_find(propers, name)) != NULL) {
if (Z_TYPE_P(d) == IS_INDIRECT) {
d = Z_INDIRECT_P(d);
if (Z_TYPE_P(d) == IS_UNDEF) {
continue;
}
}
php_var_serialize_string(buf, ZSTR_VAL(name), ZSTR_LEN(name));
php_var_serialize_intern(buf, d, var_hash);
} else {
zend_class_entry *ce = Z_OBJ_P(struc)->ce;
if (ce) {
zend_string *prot_name, *priv_name;
do {
priv_name = zend_mangle_property_name(
ZSTR_VAL(ce->name), ZSTR_LEN(ce->name), ZSTR_VAL(name), ZSTR_LEN(name), ce->type & ZEND_INTERNAL_CLASS);
if ((d = zend_hash_find(propers, priv_name)) != NULL) {
if (Z_TYPE_P(d) == IS_INDIRECT) {
d = Z_INDIRECT_P(d);
if (Z_ISUNDEF_P(d)) {
break;
}
}
php_var_serialize_string(buf, ZSTR_VAL(priv_name), ZSTR_LEN(priv_name));
zend_string_free(priv_name);
php_var_serialize_intern(buf, d, var_hash);
break;
}
zend_string_free(priv_name);
prot_name = zend_mangle_property_name(
"*", 1, ZSTR_VAL(name), ZSTR_LEN(name), ce->type & ZEND_INTERNAL_CLASS);
if ((d = zend_hash_find(propers, prot_name)) != NULL) {
if (Z_TYPE_P(d) == IS_INDIRECT) {
d = Z_INDIRECT_P(d);
if (Z_TYPE_P(d) == IS_UNDEF) {
zend_string_free(prot_name);
break;
}
}
php_var_serialize_string(buf, ZSTR_VAL(prot_name), ZSTR_LEN(prot_name));
zend_string_free(prot_name);
php_var_serialize_intern(buf, d, var_hash);
break;
}
zend_string_free(prot_name);
php_var_serialize_string(buf, ZSTR_VAL(name), ZSTR_LEN(name));
php_var_serialize_intern(buf, nvalp, var_hash);
php_error_docref(NULL, E_NOTICE,
"\"%s\" returned as member variable from __sleep() but does not exist", ZSTR_VAL(name));
} while (0);
} else {
php_var_serialize_string(buf, ZSTR_VAL(name), ZSTR_LEN(name));
php_var_serialize_intern(buf, nvalp, var_hash);
}
}
} ZEND_HASH_FOREACH_END();
smart_str_appendc(buf, '}');
zend_hash_destroy(names);
FREE_HASHTABLE(names);
} else {
smart_str_appendl(buf, "0:{}", 4);
}
}
/* }}} */
static void php_var_serialize_intern(smart_str *buf, zval *struc, php_serialize_data_t var_hash) /* {{{ */
{
zend_long var_already;
HashTable *myht;
if (EG(exception)) {
return;
}
if (var_hash && (var_already = php_add_var_hash(var_hash, struc))) {
if (Z_ISREF_P(struc)) {
smart_str_appendl(buf, "R:", 2);
smart_str_append_long(buf, var_already);
smart_str_appendc(buf, ';');
return;
} else if (Z_TYPE_P(struc) == IS_OBJECT) {
smart_str_appendl(buf, "r:", 2);
smart_str_append_long(buf, var_already);
smart_str_appendc(buf, ';');
return;
}
}
again:
switch (Z_TYPE_P(struc)) {
case IS_FALSE:
smart_str_appendl(buf, "b:0;", 4);
return;
case IS_TRUE:
smart_str_appendl(buf, "b:1;", 4);
return;
case IS_NULL:
smart_str_appendl(buf, "N;", 2);
return;
case IS_LONG:
php_var_serialize_long(buf, Z_LVAL_P(struc));
return;
case IS_DOUBLE: {
char *s;
smart_str_appendl(buf, "d:", 2);
s = (char *) safe_emalloc(PG(serialize_precision), 1, MAX_LENGTH_OF_DOUBLE + 1);
php_gcvt(Z_DVAL_P(struc), (int)PG(serialize_precision), '.', 'E', s);
smart_str_appends(buf, s);
smart_str_appendc(buf, ';');
efree(s);
return;
}
case IS_STRING:
php_var_serialize_string(buf, Z_STRVAL_P(struc), Z_STRLEN_P(struc));
return;
case IS_OBJECT: {
zval retval;
zval fname;
int res;
zend_class_entry *ce = Z_OBJCE_P(struc);
if (ce->serialize != NULL) {
/* has custom handler */
unsigned char *serialized_data = NULL;
size_t serialized_length;
if (ce->serialize(struc, &serialized_data, &serialized_length, (zend_serialize_data *)var_hash) == SUCCESS) {
smart_str_appendl(buf, "C:", 2);
smart_str_append_unsigned(buf, ZSTR_LEN(Z_OBJCE_P(struc)->name));
smart_str_appendl(buf, ":\"", 2);
smart_str_append(buf, Z_OBJCE_P(struc)->name);
smart_str_appendl(buf, "\":", 2);
smart_str_append_unsigned(buf, serialized_length);
smart_str_appendl(buf, ":{", 2);
smart_str_appendl(buf, (char *) serialized_data, serialized_length);
smart_str_appendc(buf, '}');
} else {
smart_str_appendl(buf, "N;", 2);
}
if (serialized_data) {
efree(serialized_data);
}
return;
}
if (ce != PHP_IC_ENTRY && zend_hash_str_exists(&ce->function_table, "__sleep", sizeof("__sleep")-1)) {
ZVAL_STRINGL(&fname, "__sleep", sizeof("__sleep") - 1);
BG(serialize_lock)++;
res = call_user_function_ex(CG(function_table), struc, &fname, &retval, 0, 0, 1, NULL);
BG(serialize_lock)--;
zval_dtor(&fname);
if (EG(exception)) {
zval_ptr_dtor(&retval);
return;
}
if (res == SUCCESS) {
if (Z_TYPE(retval) != IS_UNDEF) {
if (HASH_OF(&retval)) {
php_var_serialize_class(buf, struc, &retval, var_hash);
} else {
php_error_docref(NULL, E_NOTICE, "__sleep should return an array only containing the names of instance-variables to serialize");
/* we should still add element even if it's not OK,
* since we already wrote the length of the array before */
smart_str_appendl(buf,"N;", 2);
}
zval_ptr_dtor(&retval);
}
return;
}
zval_ptr_dtor(&retval);
}
/* fall-through */
}
case IS_ARRAY: {
uint32_t i;
zend_bool incomplete_class = 0;
if (Z_TYPE_P(struc) == IS_ARRAY) {
smart_str_appendl(buf, "a:", 2);
myht = Z_ARRVAL_P(struc);
i = zend_array_count(myht);
} else {
incomplete_class = php_var_serialize_class_name(buf, struc);
myht = Z_OBJPROP_P(struc);
/* count after serializing name, since php_var_serialize_class_name
			 * changes the count if the variable is an incomplete class */
i = zend_array_count(myht);
if (i > 0 && incomplete_class) {
--i;
}
}
smart_str_append_unsigned(buf, i);
smart_str_appendl(buf, ":{", 2);
if (i > 0) {
zend_string *key;
zval *data;
zend_ulong index;
ZEND_HASH_FOREACH_KEY_VAL_IND(myht, index, key, data) {
if (incomplete_class && strcmp(ZSTR_VAL(key), MAGIC_MEMBER) == 0) {
continue;
}
if (!key) {
php_var_serialize_long(buf, index);
} else {
php_var_serialize_string(buf, ZSTR_VAL(key), ZSTR_LEN(key));
}
if (Z_ISREF_P(data) && Z_REFCOUNT_P(data) == 1) {
data = Z_REFVAL_P(data);
}
/* we should still add element even if it's not OK,
* since we already wrote the length of the array before */
if ((Z_TYPE_P(data) == IS_ARRAY && Z_TYPE_P(struc) == IS_ARRAY && Z_ARR_P(data) == Z_ARR_P(struc))
|| (Z_TYPE_P(data) == IS_ARRAY && Z_ARRVAL_P(data)->u.v.nApplyCount > 1)
) {
smart_str_appendl(buf, "N;", 2);
} else {
if (Z_TYPE_P(data) == IS_ARRAY && ZEND_HASH_APPLY_PROTECTION(Z_ARRVAL_P(data))) {
Z_ARRVAL_P(data)->u.v.nApplyCount++;
}
php_var_serialize_intern(buf, data, var_hash);
if (Z_TYPE_P(data) == IS_ARRAY && ZEND_HASH_APPLY_PROTECTION(Z_ARRVAL_P(data))) {
Z_ARRVAL_P(data)->u.v.nApplyCount--;
}
}
} ZEND_HASH_FOREACH_END();
}
smart_str_appendc(buf, '}');
return;
}
case IS_REFERENCE:
struc = Z_REFVAL_P(struc);
goto again;
default:
smart_str_appendl(buf, "i:0;", 4);
return;
}
}
/* }}} */
PHPAPI void php_var_serialize(smart_str *buf, zval *struc, php_serialize_data_t *data) /* {{{ */
{
php_var_serialize_intern(buf, struc, *data);
smart_str_0(buf);
}
/* }}} */
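/* {{{ Editorial sketch: serializing an arbitrary zval from C. This mirrors
 the PHP_FUNCTION(serialize) implementation below; "my_serialize_zval" is a
 hypothetical helper name, and exception handling (EG(exception)) is left
 to the caller. */
static zend_string *my_serialize_zval(zval *val)
{
	php_serialize_data_t var_hash;
	smart_str buf = {0};

	PHP_VAR_SERIALIZE_INIT(var_hash);
	php_var_serialize(&buf, val, &var_hash);
	PHP_VAR_SERIALIZE_DESTROY(var_hash);
	return buf.s;	/* may be NULL if nothing was written */
}
/* }}} */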
/* {{{ proto string serialize(mixed variable)
Returns a string representation of variable (which can later be unserialized) */
PHP_FUNCTION(serialize)
{
zval *struc;
php_serialize_data_t var_hash;
smart_str buf = {0};
if (zend_parse_parameters(ZEND_NUM_ARGS(), "z", &struc) == FAILURE) {
return;
}
PHP_VAR_SERIALIZE_INIT(var_hash);
php_var_serialize(&buf, struc, &var_hash);
PHP_VAR_SERIALIZE_DESTROY(var_hash);
if (EG(exception)) {
smart_str_free(&buf);
RETURN_FALSE;
}
if (buf.s) {
RETURN_NEW_STR(buf.s);
} else {
RETURN_NULL();
}
}
/* }}} */
/* {{{ proto mixed unserialize(string variable_representation[, array allowed_classes])
Takes a string representation of variable and recreates it */
PHP_FUNCTION(unserialize)
{
char *buf = NULL;
size_t buf_len;
const unsigned char *p;
php_unserialize_data_t var_hash;
zval *options = NULL, *classes = NULL;
zval *retval;
HashTable *class_hash = NULL;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "s|a", &buf, &buf_len, &options) == FAILURE) {
RETURN_FALSE;
}
if (buf_len == 0) {
RETURN_FALSE;
}
p = (const unsigned char*) buf;
PHP_VAR_UNSERIALIZE_INIT(var_hash);
if(options != NULL) {
classes = zend_hash_str_find(Z_ARRVAL_P(options), "allowed_classes", sizeof("allowed_classes")-1);
if(classes && (Z_TYPE_P(classes) == IS_ARRAY || !zend_is_true(classes))) {
ALLOC_HASHTABLE(class_hash);
zend_hash_init(class_hash, (Z_TYPE_P(classes) == IS_ARRAY)?zend_hash_num_elements(Z_ARRVAL_P(classes)):0, NULL, NULL, 0);
}
if(class_hash && Z_TYPE_P(classes) == IS_ARRAY) {
zval *entry;
zend_string *lcname;
ZEND_HASH_FOREACH_VAL(Z_ARRVAL_P(classes), entry) {
convert_to_string_ex(entry);
lcname = zend_string_tolower(Z_STR_P(entry));
zend_hash_add_empty_element(class_hash, lcname);
zend_string_release(lcname);
} ZEND_HASH_FOREACH_END();
}
}
retval = var_tmp_var(&var_hash);
if (!php_var_unserialize_ex(retval, &p, p + buf_len, &var_hash, class_hash)) {
PHP_VAR_UNSERIALIZE_DESTROY(var_hash);
if (class_hash) {
zend_hash_destroy(class_hash);
FREE_HASHTABLE(class_hash);
}
if (!EG(exception)) {
php_error_docref(NULL, E_NOTICE, "Error at offset " ZEND_LONG_FMT " of %zd bytes",
(zend_long)((char*)p - buf), buf_len);
}
RETURN_FALSE;
}
ZVAL_COPY(return_value, retval);
PHP_VAR_UNSERIALIZE_DESTROY(var_hash);
if (class_hash) {
zend_hash_destroy(class_hash);
FREE_HASHTABLE(class_hash);
}
}
/* }}} */
/* {{{ proto int memory_get_usage([bool real_usage])
   Returns the memory allocated by PHP */
PHP_FUNCTION(memory_get_usage) {
zend_bool real_usage = 0;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "|b", &real_usage) == FAILURE) {
RETURN_FALSE;
}
RETURN_LONG(zend_memory_usage(real_usage));
}
/* }}} */
/* {{{ proto int memory_get_peak_usage([bool real_usage])
   Returns the peak memory allocated by PHP */
PHP_FUNCTION(memory_get_peak_usage) {
zend_bool real_usage = 0;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "|b", &real_usage) == FAILURE) {
RETURN_FALSE;
}
RETURN_LONG(zend_memory_peak_usage(real_usage));
}
/* }}} */
/*
* Local variables:
* tab-width: 4
* c-basic-offset: 4
* End:
* vim600: sw=4 ts=4 fdm=marker
* vim<600: sw=4 ts=4
*/
| ./CrossVul/dataset_final_sorted/CWE-416/c/good_5516_1 |
crossvul-cpp_data_bad_819_0 | /*
FUSE: Filesystem in Userspace
Copyright (C) 2001-2008 Miklos Szeredi <miklos@szeredi.hu>
This program can be distributed under the terms of the GNU GPL.
See the file COPYING.
*/
#include "fuse_i.h"
#include <linux/init.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/sched/signal.h>
#include <linux/uio.h>
#include <linux/miscdevice.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/pipe_fs_i.h>
#include <linux/swap.h>
#include <linux/splice.h>
#include <linux/sched.h>
MODULE_ALIAS_MISCDEV(FUSE_MINOR);
MODULE_ALIAS("devname:fuse");
/* Ordinary requests have even IDs, while interrupt IDs are odd */
#define FUSE_INT_REQ_BIT (1ULL << 0)
#define FUSE_REQ_ID_STEP (1ULL << 1)
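/*
 * Illustrative sketch (not part of the original source): with the scheme
 * above, the interrupt ID belonging to a request is obtained by setting
 * the low bit of the request's (always even) unique ID, matching the
 * ih.unique computation in fuse_read_interrupt() below.
 */
static inline u64 fuse_example_intr_unique(u64 request_unique)
{
	/* e.g. request 0x2 -> interrupt 0x3, request 0x4 -> interrupt 0x5 */
	return request_unique | FUSE_INT_REQ_BIT;
}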
static struct kmem_cache *fuse_req_cachep;
static struct fuse_dev *fuse_get_dev(struct file *file)
{
/*
	 * Lockless access is OK, because file->private_data is set
* once during mount and is valid until the file is released.
*/
return READ_ONCE(file->private_data);
}
static void fuse_request_init(struct fuse_req *req, struct page **pages,
struct fuse_page_desc *page_descs,
unsigned npages)
{
INIT_LIST_HEAD(&req->list);
INIT_LIST_HEAD(&req->intr_entry);
init_waitqueue_head(&req->waitq);
refcount_set(&req->count, 1);
req->pages = pages;
req->page_descs = page_descs;
req->max_pages = npages;
__set_bit(FR_PENDING, &req->flags);
}
static struct page **fuse_req_pages_alloc(unsigned int npages, gfp_t flags,
struct fuse_page_desc **desc)
{
struct page **pages;
pages = kzalloc(npages * (sizeof(struct page *) +
sizeof(struct fuse_page_desc)), flags);
*desc = (void *) pages + npages * sizeof(struct page *);
return pages;
}
static struct fuse_req *__fuse_request_alloc(unsigned npages, gfp_t flags)
{
struct fuse_req *req = kmem_cache_zalloc(fuse_req_cachep, flags);
if (req) {
struct page **pages = NULL;
struct fuse_page_desc *page_descs = NULL;
WARN_ON(npages > FUSE_MAX_MAX_PAGES);
if (npages > FUSE_REQ_INLINE_PAGES) {
pages = fuse_req_pages_alloc(npages, flags,
&page_descs);
if (!pages) {
kmem_cache_free(fuse_req_cachep, req);
return NULL;
}
} else if (npages) {
pages = req->inline_pages;
page_descs = req->inline_page_descs;
}
fuse_request_init(req, pages, page_descs, npages);
}
return req;
}
struct fuse_req *fuse_request_alloc(unsigned npages)
{
return __fuse_request_alloc(npages, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(fuse_request_alloc);
struct fuse_req *fuse_request_alloc_nofs(unsigned npages)
{
return __fuse_request_alloc(npages, GFP_NOFS);
}
static void fuse_req_pages_free(struct fuse_req *req)
{
if (req->pages != req->inline_pages)
kfree(req->pages);
}
bool fuse_req_realloc_pages(struct fuse_conn *fc, struct fuse_req *req,
gfp_t flags)
{
struct page **pages;
struct fuse_page_desc *page_descs;
unsigned int npages = min_t(unsigned int,
max_t(unsigned int, req->max_pages * 2,
FUSE_DEFAULT_MAX_PAGES_PER_REQ),
fc->max_pages);
WARN_ON(npages <= req->max_pages);
pages = fuse_req_pages_alloc(npages, flags, &page_descs);
if (!pages)
return false;
memcpy(pages, req->pages, sizeof(struct page *) * req->max_pages);
memcpy(page_descs, req->page_descs,
sizeof(struct fuse_page_desc) * req->max_pages);
fuse_req_pages_free(req);
req->pages = pages;
req->page_descs = page_descs;
req->max_pages = npages;
return true;
}
void fuse_request_free(struct fuse_req *req)
{
fuse_req_pages_free(req);
kmem_cache_free(fuse_req_cachep, req);
}
void __fuse_get_request(struct fuse_req *req)
{
refcount_inc(&req->count);
}
/* Must be called with > 1 refcount */
static void __fuse_put_request(struct fuse_req *req)
{
refcount_dec(&req->count);
}
void fuse_set_initialized(struct fuse_conn *fc)
{
/* Make sure stores before this are seen on another CPU */
smp_wmb();
fc->initialized = 1;
}
static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background)
{
return !fc->initialized || (for_background && fc->blocked);
}
static void fuse_drop_waiting(struct fuse_conn *fc)
{
/*
	 * lockless check of fc->connected is okay, because atomic_dec_and_test()
	 * provides a memory barrier matched with the one in fuse_wait_aborted()
* to ensure no wake-up is missed.
*/
if (atomic_dec_and_test(&fc->num_waiting) &&
!READ_ONCE(fc->connected)) {
/* wake up aborters */
wake_up_all(&fc->blocked_waitq);
}
}
static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
bool for_background)
{
struct fuse_req *req;
int err;
atomic_inc(&fc->num_waiting);
if (fuse_block_alloc(fc, for_background)) {
err = -EINTR;
if (wait_event_killable_exclusive(fc->blocked_waitq,
!fuse_block_alloc(fc, for_background)))
goto out;
}
/* Matches smp_wmb() in fuse_set_initialized() */
smp_rmb();
err = -ENOTCONN;
if (!fc->connected)
goto out;
err = -ECONNREFUSED;
if (fc->conn_error)
goto out;
req = fuse_request_alloc(npages);
err = -ENOMEM;
if (!req) {
if (for_background)
wake_up(&fc->blocked_waitq);
goto out;
}
req->in.h.uid = from_kuid(fc->user_ns, current_fsuid());
req->in.h.gid = from_kgid(fc->user_ns, current_fsgid());
req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns);
__set_bit(FR_WAITING, &req->flags);
if (for_background)
__set_bit(FR_BACKGROUND, &req->flags);
if (unlikely(req->in.h.uid == ((uid_t)-1) ||
req->in.h.gid == ((gid_t)-1))) {
fuse_put_request(fc, req);
return ERR_PTR(-EOVERFLOW);
}
return req;
out:
fuse_drop_waiting(fc);
return ERR_PTR(err);
}
struct fuse_req *fuse_get_req(struct fuse_conn *fc, unsigned npages)
{
return __fuse_get_req(fc, npages, false);
}
EXPORT_SYMBOL_GPL(fuse_get_req);
struct fuse_req *fuse_get_req_for_background(struct fuse_conn *fc,
unsigned npages)
{
return __fuse_get_req(fc, npages, true);
}
EXPORT_SYMBOL_GPL(fuse_get_req_for_background);
/*
* Return request in fuse_file->reserved_req. However that may
* currently be in use. If that is the case, wait for it to become
* available.
*/
static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
struct file *file)
{
struct fuse_req *req = NULL;
struct fuse_file *ff = file->private_data;
do {
wait_event(fc->reserved_req_waitq, ff->reserved_req);
spin_lock(&fc->lock);
if (ff->reserved_req) {
req = ff->reserved_req;
ff->reserved_req = NULL;
req->stolen_file = get_file(file);
}
spin_unlock(&fc->lock);
} while (!req);
return req;
}
/*
* Put stolen request back into fuse_file->reserved_req
*/
static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req)
{
struct file *file = req->stolen_file;
struct fuse_file *ff = file->private_data;
WARN_ON(req->max_pages);
spin_lock(&fc->lock);
memset(req, 0, sizeof(*req));
fuse_request_init(req, NULL, NULL, 0);
BUG_ON(ff->reserved_req);
ff->reserved_req = req;
wake_up_all(&fc->reserved_req_waitq);
spin_unlock(&fc->lock);
fput(file);
}
/*
 * Gets a request for a file operation; always succeeds
*
* This is used for sending the FLUSH request, which must get to
* userspace, due to POSIX locks which may need to be unlocked.
*
* If allocation fails due to OOM, use the reserved request in
* fuse_file.
*
* This is very unlikely to deadlock accidentally, since the
 * filesystem should not have its own file open. If deadlock is
* intentional, it can still be broken by "aborting" the filesystem.
*/
struct fuse_req *fuse_get_req_nofail_nopages(struct fuse_conn *fc,
struct file *file)
{
struct fuse_req *req;
atomic_inc(&fc->num_waiting);
wait_event(fc->blocked_waitq, fc->initialized);
/* Matches smp_wmb() in fuse_set_initialized() */
smp_rmb();
req = fuse_request_alloc(0);
if (!req)
req = get_reserved_req(fc, file);
req->in.h.uid = from_kuid_munged(fc->user_ns, current_fsuid());
req->in.h.gid = from_kgid_munged(fc->user_ns, current_fsgid());
req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns);
__set_bit(FR_WAITING, &req->flags);
__clear_bit(FR_BACKGROUND, &req->flags);
return req;
}
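/*
 * Usage sketch (illustrative, not part of the original source): the nofail
 * variant pairs with the ordinary send/put calls, roughly:
 *
 *	req = fuse_get_req_nofail_nopages(fc, file);
 *	req->in.h.opcode = FUSE_FLUSH;
 *	...
 *	fuse_request_send(fc, req);
 *	fuse_put_request(fc, req);
 *
 * fuse_force_forget() further down follows the same pattern.
 */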
void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
{
if (refcount_dec_and_test(&req->count)) {
if (test_bit(FR_BACKGROUND, &req->flags)) {
/*
* We get here in the unlikely case that a background
* request was allocated but not sent
*/
spin_lock(&fc->bg_lock);
if (!fc->blocked)
wake_up(&fc->blocked_waitq);
spin_unlock(&fc->bg_lock);
}
if (test_bit(FR_WAITING, &req->flags)) {
__clear_bit(FR_WAITING, &req->flags);
fuse_drop_waiting(fc);
}
if (req->stolen_file)
put_reserved_req(fc, req);
else
fuse_request_free(req);
}
}
EXPORT_SYMBOL_GPL(fuse_put_request);
static unsigned len_args(unsigned numargs, struct fuse_arg *args)
{
unsigned nbytes = 0;
unsigned i;
for (i = 0; i < numargs; i++)
nbytes += args[i].size;
return nbytes;
}
static u64 fuse_get_unique(struct fuse_iqueue *fiq)
{
fiq->reqctr += FUSE_REQ_ID_STEP;
return fiq->reqctr;
}
static unsigned int fuse_req_hash(u64 unique)
{
return hash_long(unique & ~FUSE_INT_REQ_BIT, FUSE_PQ_HASH_BITS);
}
static void queue_request(struct fuse_iqueue *fiq, struct fuse_req *req)
{
req->in.h.len = sizeof(struct fuse_in_header) +
len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
list_add_tail(&req->list, &fiq->pending);
wake_up_locked(&fiq->waitq);
kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
}
void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
u64 nodeid, u64 nlookup)
{
struct fuse_iqueue *fiq = &fc->iq;
forget->forget_one.nodeid = nodeid;
forget->forget_one.nlookup = nlookup;
spin_lock(&fiq->waitq.lock);
if (fiq->connected) {
fiq->forget_list_tail->next = forget;
fiq->forget_list_tail = forget;
wake_up_locked(&fiq->waitq);
kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
} else {
kfree(forget);
}
spin_unlock(&fiq->waitq.lock);
}
static void flush_bg_queue(struct fuse_conn *fc)
{
struct fuse_iqueue *fiq = &fc->iq;
while (fc->active_background < fc->max_background &&
!list_empty(&fc->bg_queue)) {
struct fuse_req *req;
req = list_first_entry(&fc->bg_queue, struct fuse_req, list);
list_del(&req->list);
fc->active_background++;
spin_lock(&fiq->waitq.lock);
req->in.h.unique = fuse_get_unique(fiq);
queue_request(fiq, req);
spin_unlock(&fiq->waitq.lock);
}
}
/*
* This function is called when a request is finished. Either a reply
* has arrived or it was aborted (and not yet sent) or some error
* occurred during communication with userspace, or the device file
* was closed. The requester thread is woken up (if still waiting),
* the 'end' callback is called if given, else the reference to the
* request is released
*/
static void request_end(struct fuse_conn *fc, struct fuse_req *req)
{
struct fuse_iqueue *fiq = &fc->iq;
if (test_and_set_bit(FR_FINISHED, &req->flags))
goto put_request;
spin_lock(&fiq->waitq.lock);
list_del_init(&req->intr_entry);
spin_unlock(&fiq->waitq.lock);
WARN_ON(test_bit(FR_PENDING, &req->flags));
WARN_ON(test_bit(FR_SENT, &req->flags));
if (test_bit(FR_BACKGROUND, &req->flags)) {
spin_lock(&fc->bg_lock);
clear_bit(FR_BACKGROUND, &req->flags);
if (fc->num_background == fc->max_background) {
fc->blocked = 0;
wake_up(&fc->blocked_waitq);
} else if (!fc->blocked) {
/*
* Wake up next waiter, if any. It's okay to use
* waitqueue_active(), as we've already synced up
* fc->blocked with waiters with the wake_up() call
* above.
*/
if (waitqueue_active(&fc->blocked_waitq))
wake_up(&fc->blocked_waitq);
}
if (fc->num_background == fc->congestion_threshold && fc->sb) {
clear_bdi_congested(fc->sb->s_bdi, BLK_RW_SYNC);
clear_bdi_congested(fc->sb->s_bdi, BLK_RW_ASYNC);
}
fc->num_background--;
fc->active_background--;
flush_bg_queue(fc);
spin_unlock(&fc->bg_lock);
}
wake_up(&req->waitq);
if (req->end)
req->end(fc, req);
put_request:
fuse_put_request(fc, req);
}
static void queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
{
spin_lock(&fiq->waitq.lock);
if (test_bit(FR_FINISHED, &req->flags)) {
spin_unlock(&fiq->waitq.lock);
return;
}
if (list_empty(&req->intr_entry)) {
list_add_tail(&req->intr_entry, &fiq->interrupts);
wake_up_locked(&fiq->waitq);
}
spin_unlock(&fiq->waitq.lock);
kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
}
static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
{
struct fuse_iqueue *fiq = &fc->iq;
int err;
if (!fc->no_interrupt) {
/* Any signal may interrupt this */
err = wait_event_interruptible(req->waitq,
test_bit(FR_FINISHED, &req->flags));
if (!err)
return;
set_bit(FR_INTERRUPTED, &req->flags);
/* matches barrier in fuse_dev_do_read() */
smp_mb__after_atomic();
if (test_bit(FR_SENT, &req->flags))
queue_interrupt(fiq, req);
}
if (!test_bit(FR_FORCE, &req->flags)) {
/* Only fatal signals may interrupt this */
err = wait_event_killable(req->waitq,
test_bit(FR_FINISHED, &req->flags));
if (!err)
return;
spin_lock(&fiq->waitq.lock);
/* Request is not yet in userspace, bail out */
if (test_bit(FR_PENDING, &req->flags)) {
list_del(&req->list);
spin_unlock(&fiq->waitq.lock);
__fuse_put_request(req);
req->out.h.error = -EINTR;
return;
}
spin_unlock(&fiq->waitq.lock);
}
/*
* Either request is already in userspace, or it was forced.
* Wait it out.
*/
wait_event(req->waitq, test_bit(FR_FINISHED, &req->flags));
}
static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
{
struct fuse_iqueue *fiq = &fc->iq;
BUG_ON(test_bit(FR_BACKGROUND, &req->flags));
spin_lock(&fiq->waitq.lock);
if (!fiq->connected) {
spin_unlock(&fiq->waitq.lock);
req->out.h.error = -ENOTCONN;
} else {
req->in.h.unique = fuse_get_unique(fiq);
queue_request(fiq, req);
/* acquire extra reference, since request is still needed
after request_end() */
__fuse_get_request(req);
spin_unlock(&fiq->waitq.lock);
request_wait_answer(fc, req);
/* Pairs with smp_wmb() in request_end() */
smp_rmb();
}
}
void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
{
__set_bit(FR_ISREPLY, &req->flags);
if (!test_bit(FR_WAITING, &req->flags)) {
__set_bit(FR_WAITING, &req->flags);
atomic_inc(&fc->num_waiting);
}
__fuse_request_send(fc, req);
}
EXPORT_SYMBOL_GPL(fuse_request_send);
static void fuse_adjust_compat(struct fuse_conn *fc, struct fuse_args *args)
{
if (fc->minor < 4 && args->in.h.opcode == FUSE_STATFS)
args->out.args[0].size = FUSE_COMPAT_STATFS_SIZE;
if (fc->minor < 9) {
switch (args->in.h.opcode) {
case FUSE_LOOKUP:
case FUSE_CREATE:
case FUSE_MKNOD:
case FUSE_MKDIR:
case FUSE_SYMLINK:
case FUSE_LINK:
args->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
break;
case FUSE_GETATTR:
case FUSE_SETATTR:
args->out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
break;
}
}
if (fc->minor < 12) {
switch (args->in.h.opcode) {
case FUSE_CREATE:
args->in.args[0].size = sizeof(struct fuse_open_in);
break;
case FUSE_MKNOD:
args->in.args[0].size = FUSE_COMPAT_MKNOD_IN_SIZE;
break;
}
}
}
ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args)
{
struct fuse_req *req;
ssize_t ret;
req = fuse_get_req(fc, 0);
if (IS_ERR(req))
return PTR_ERR(req);
/* Needs to be done after fuse_get_req() so that fc->minor is valid */
fuse_adjust_compat(fc, args);
req->in.h.opcode = args->in.h.opcode;
req->in.h.nodeid = args->in.h.nodeid;
req->in.numargs = args->in.numargs;
memcpy(req->in.args, args->in.args,
args->in.numargs * sizeof(struct fuse_in_arg));
req->out.argvar = args->out.argvar;
req->out.numargs = args->out.numargs;
memcpy(req->out.args, args->out.args,
args->out.numargs * sizeof(struct fuse_arg));
fuse_request_send(fc, req);
ret = req->out.h.error;
if (!ret && args->out.argvar) {
BUG_ON(args->out.numargs != 1);
ret = req->out.args[0].size;
}
fuse_put_request(fc, req);
return ret;
}
bool fuse_request_queue_background(struct fuse_conn *fc, struct fuse_req *req)
{
bool queued = false;
WARN_ON(!test_bit(FR_BACKGROUND, &req->flags));
if (!test_bit(FR_WAITING, &req->flags)) {
__set_bit(FR_WAITING, &req->flags);
atomic_inc(&fc->num_waiting);
}
__set_bit(FR_ISREPLY, &req->flags);
spin_lock(&fc->bg_lock);
if (likely(fc->connected)) {
fc->num_background++;
if (fc->num_background == fc->max_background)
fc->blocked = 1;
if (fc->num_background == fc->congestion_threshold && fc->sb) {
set_bdi_congested(fc->sb->s_bdi, BLK_RW_SYNC);
set_bdi_congested(fc->sb->s_bdi, BLK_RW_ASYNC);
}
list_add_tail(&req->list, &fc->bg_queue);
flush_bg_queue(fc);
queued = true;
}
spin_unlock(&fc->bg_lock);
return queued;
}
void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
{
WARN_ON(!req->end);
if (!fuse_request_queue_background(fc, req)) {
req->out.h.error = -ENOTCONN;
req->end(fc, req);
fuse_put_request(fc, req);
}
}
EXPORT_SYMBOL_GPL(fuse_request_send_background);
static int fuse_request_send_notify_reply(struct fuse_conn *fc,
struct fuse_req *req, u64 unique)
{
int err = -ENODEV;
struct fuse_iqueue *fiq = &fc->iq;
__clear_bit(FR_ISREPLY, &req->flags);
req->in.h.unique = unique;
spin_lock(&fiq->waitq.lock);
if (fiq->connected) {
queue_request(fiq, req);
err = 0;
}
spin_unlock(&fiq->waitq.lock);
return err;
}
void fuse_force_forget(struct file *file, u64 nodeid)
{
struct inode *inode = file_inode(file);
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_req *req;
struct fuse_forget_in inarg;
memset(&inarg, 0, sizeof(inarg));
inarg.nlookup = 1;
req = fuse_get_req_nofail_nopages(fc, file);
req->in.h.opcode = FUSE_FORGET;
req->in.h.nodeid = nodeid;
req->in.numargs = 1;
req->in.args[0].size = sizeof(inarg);
req->in.args[0].value = &inarg;
__clear_bit(FR_ISREPLY, &req->flags);
__fuse_request_send(fc, req);
/* ignore errors */
fuse_put_request(fc, req);
}
/*
* Lock the request. Up to the next unlock_request() there mustn't be
* anything that could cause a page-fault. If the request was already
 * aborted, bail out.
*/
static int lock_request(struct fuse_req *req)
{
int err = 0;
if (req) {
spin_lock(&req->waitq.lock);
if (test_bit(FR_ABORTED, &req->flags))
err = -ENOENT;
else
set_bit(FR_LOCKED, &req->flags);
spin_unlock(&req->waitq.lock);
}
return err;
}
/*
 * Unlock request. If it was aborted while locked, the caller is responsible
* for unlocking and ending the request.
*/
static int unlock_request(struct fuse_req *req)
{
int err = 0;
if (req) {
spin_lock(&req->waitq.lock);
if (test_bit(FR_ABORTED, &req->flags))
err = -ENOENT;
else
clear_bit(FR_LOCKED, &req->flags);
spin_unlock(&req->waitq.lock);
}
return err;
}
struct fuse_copy_state {
int write;
struct fuse_req *req;
struct iov_iter *iter;
struct pipe_buffer *pipebufs;
struct pipe_buffer *currbuf;
struct pipe_inode_info *pipe;
unsigned long nr_segs;
struct page *pg;
unsigned len;
unsigned offset;
unsigned move_pages:1;
};
static void fuse_copy_init(struct fuse_copy_state *cs, int write,
struct iov_iter *iter)
{
memset(cs, 0, sizeof(*cs));
cs->write = write;
cs->iter = iter;
}
/* Unmap and put previous page of userspace buffer */
static void fuse_copy_finish(struct fuse_copy_state *cs)
{
if (cs->currbuf) {
struct pipe_buffer *buf = cs->currbuf;
if (cs->write)
buf->len = PAGE_SIZE - cs->len;
cs->currbuf = NULL;
} else if (cs->pg) {
if (cs->write) {
flush_dcache_page(cs->pg);
set_page_dirty_lock(cs->pg);
}
put_page(cs->pg);
}
cs->pg = NULL;
}
/*
* Get another pagefull of userspace buffer, and map it to kernel
* address space, and lock request
*/
static int fuse_copy_fill(struct fuse_copy_state *cs)
{
struct page *page;
int err;
err = unlock_request(cs->req);
if (err)
return err;
fuse_copy_finish(cs);
if (cs->pipebufs) {
struct pipe_buffer *buf = cs->pipebufs;
if (!cs->write) {
err = pipe_buf_confirm(cs->pipe, buf);
if (err)
return err;
BUG_ON(!cs->nr_segs);
cs->currbuf = buf;
cs->pg = buf->page;
cs->offset = buf->offset;
cs->len = buf->len;
cs->pipebufs++;
cs->nr_segs--;
} else {
if (cs->nr_segs == cs->pipe->buffers)
return -EIO;
page = alloc_page(GFP_HIGHUSER);
if (!page)
return -ENOMEM;
buf->page = page;
buf->offset = 0;
buf->len = 0;
cs->currbuf = buf;
cs->pg = page;
cs->offset = 0;
cs->len = PAGE_SIZE;
cs->pipebufs++;
cs->nr_segs++;
}
} else {
size_t off;
err = iov_iter_get_pages(cs->iter, &page, PAGE_SIZE, 1, &off);
if (err < 0)
return err;
BUG_ON(!err);
cs->len = err;
cs->offset = off;
cs->pg = page;
iov_iter_advance(cs->iter, err);
}
return lock_request(cs->req);
}
/* Do as much copy to/from userspace buffer as we can */
static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
{
unsigned ncpy = min(*size, cs->len);
if (val) {
void *pgaddr = kmap_atomic(cs->pg);
void *buf = pgaddr + cs->offset;
if (cs->write)
memcpy(buf, *val, ncpy);
else
memcpy(*val, buf, ncpy);
kunmap_atomic(pgaddr);
*val += ncpy;
}
*size -= ncpy;
cs->len -= ncpy;
cs->offset += ncpy;
return ncpy;
}
static int fuse_check_page(struct page *page)
{
if (page_mapcount(page) ||
page->mapping != NULL ||
page_count(page) != 1 ||
(page->flags & PAGE_FLAGS_CHECK_AT_PREP &
~(1 << PG_locked |
1 << PG_referenced |
1 << PG_uptodate |
1 << PG_lru |
1 << PG_active |
1 << PG_reclaim))) {
printk(KERN_WARNING "fuse: trying to steal weird page\n");
printk(KERN_WARNING " page=%p index=%li flags=%08lx, count=%i, mapcount=%i, mapping=%p\n", page, page->index, page->flags, page_count(page), page_mapcount(page), page->mapping);
return 1;
}
return 0;
}
static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
{
int err;
struct page *oldpage = *pagep;
struct page *newpage;
struct pipe_buffer *buf = cs->pipebufs;
err = unlock_request(cs->req);
if (err)
return err;
fuse_copy_finish(cs);
err = pipe_buf_confirm(cs->pipe, buf);
if (err)
return err;
BUG_ON(!cs->nr_segs);
cs->currbuf = buf;
cs->len = buf->len;
cs->pipebufs++;
cs->nr_segs--;
if (cs->len != PAGE_SIZE)
goto out_fallback;
if (pipe_buf_steal(cs->pipe, buf) != 0)
goto out_fallback;
newpage = buf->page;
if (!PageUptodate(newpage))
SetPageUptodate(newpage);
ClearPageMappedToDisk(newpage);
if (fuse_check_page(newpage) != 0)
goto out_fallback_unlock;
/*
	 * This is a new and locked page; it shouldn't be mapped or
* have any special flags on it
*/
if (WARN_ON(page_mapped(oldpage)))
goto out_fallback_unlock;
if (WARN_ON(page_has_private(oldpage)))
goto out_fallback_unlock;
if (WARN_ON(PageDirty(oldpage) || PageWriteback(oldpage)))
goto out_fallback_unlock;
if (WARN_ON(PageMlocked(oldpage)))
goto out_fallback_unlock;
err = replace_page_cache_page(oldpage, newpage, GFP_KERNEL);
if (err) {
unlock_page(newpage);
return err;
}
get_page(newpage);
if (!(buf->flags & PIPE_BUF_FLAG_LRU))
lru_cache_add_file(newpage);
err = 0;
spin_lock(&cs->req->waitq.lock);
if (test_bit(FR_ABORTED, &cs->req->flags))
err = -ENOENT;
else
*pagep = newpage;
spin_unlock(&cs->req->waitq.lock);
if (err) {
unlock_page(newpage);
put_page(newpage);
return err;
}
unlock_page(oldpage);
put_page(oldpage);
cs->len = 0;
return 0;
out_fallback_unlock:
unlock_page(newpage);
out_fallback:
cs->pg = buf->page;
cs->offset = buf->offset;
err = lock_request(cs->req);
if (err)
return err;
return 1;
}
static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
unsigned offset, unsigned count)
{
struct pipe_buffer *buf;
int err;
if (cs->nr_segs == cs->pipe->buffers)
return -EIO;
err = unlock_request(cs->req);
if (err)
return err;
fuse_copy_finish(cs);
buf = cs->pipebufs;
get_page(page);
buf->page = page;
buf->offset = offset;
buf->len = count;
cs->pipebufs++;
cs->nr_segs++;
cs->len = 0;
return 0;
}
/*
* Copy a page in the request to/from the userspace buffer. Must be
* done atomically
*/
static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,
unsigned offset, unsigned count, int zeroing)
{
int err;
struct page *page = *pagep;
if (page && zeroing && count < PAGE_SIZE)
clear_highpage(page);
while (count) {
if (cs->write && cs->pipebufs && page) {
return fuse_ref_page(cs, page, offset, count);
} else if (!cs->len) {
if (cs->move_pages && page &&
offset == 0 && count == PAGE_SIZE) {
err = fuse_try_move_page(cs, pagep);
if (err <= 0)
return err;
} else {
err = fuse_copy_fill(cs);
if (err)
return err;
}
}
if (page) {
void *mapaddr = kmap_atomic(page);
void *buf = mapaddr + offset;
offset += fuse_copy_do(cs, &buf, &count);
kunmap_atomic(mapaddr);
} else
offset += fuse_copy_do(cs, NULL, &count);
}
if (page && !cs->write)
flush_dcache_page(page);
return 0;
}
/* Copy pages in the request to/from userspace buffer */
static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
int zeroing)
{
unsigned i;
struct fuse_req *req = cs->req;
for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
int err;
unsigned offset = req->page_descs[i].offset;
unsigned count = min(nbytes, req->page_descs[i].length);
err = fuse_copy_page(cs, &req->pages[i], offset, count,
zeroing);
if (err)
return err;
nbytes -= count;
}
return 0;
}
/* Copy a single argument in the request to/from userspace buffer */
static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
{
while (size) {
if (!cs->len) {
int err = fuse_copy_fill(cs);
if (err)
return err;
}
fuse_copy_do(cs, &val, &size);
}
return 0;
}
/* Copy request arguments to/from userspace buffer */
static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
unsigned argpages, struct fuse_arg *args,
int zeroing)
{
int err = 0;
unsigned i;
for (i = 0; !err && i < numargs; i++) {
struct fuse_arg *arg = &args[i];
if (i == numargs - 1 && argpages)
err = fuse_copy_pages(cs, arg->size, zeroing);
else
err = fuse_copy_one(cs, arg->value, arg->size);
}
return err;
}
static int forget_pending(struct fuse_iqueue *fiq)
{
return fiq->forget_list_head.next != NULL;
}
static int request_pending(struct fuse_iqueue *fiq)
{
return !list_empty(&fiq->pending) || !list_empty(&fiq->interrupts) ||
forget_pending(fiq);
}
/*
* Transfer an interrupt request to userspace
*
 * Unlike other requests, this is assembled on demand, without the need
* to allocate a separate fuse_req structure.
*
* Called with fiq->waitq.lock held, releases it
*/
static int fuse_read_interrupt(struct fuse_iqueue *fiq,
struct fuse_copy_state *cs,
size_t nbytes, struct fuse_req *req)
__releases(fiq->waitq.lock)
{
struct fuse_in_header ih;
struct fuse_interrupt_in arg;
unsigned reqsize = sizeof(ih) + sizeof(arg);
int err;
list_del_init(&req->intr_entry);
memset(&ih, 0, sizeof(ih));
memset(&arg, 0, sizeof(arg));
ih.len = reqsize;
ih.opcode = FUSE_INTERRUPT;
ih.unique = (req->in.h.unique | FUSE_INT_REQ_BIT);
arg.unique = req->in.h.unique;
spin_unlock(&fiq->waitq.lock);
if (nbytes < reqsize)
return -EINVAL;
err = fuse_copy_one(cs, &ih, sizeof(ih));
if (!err)
err = fuse_copy_one(cs, &arg, sizeof(arg));
fuse_copy_finish(cs);
return err ? err : reqsize;
}
static struct fuse_forget_link *dequeue_forget(struct fuse_iqueue *fiq,
unsigned max,
unsigned *countp)
{
struct fuse_forget_link *head = fiq->forget_list_head.next;
struct fuse_forget_link **newhead = &head;
unsigned count;
for (count = 0; *newhead != NULL && count < max; count++)
newhead = &(*newhead)->next;
fiq->forget_list_head.next = *newhead;
*newhead = NULL;
if (fiq->forget_list_head.next == NULL)
fiq->forget_list_tail = &fiq->forget_list_head;
if (countp != NULL)
*countp = count;
return head;
}
static int fuse_read_single_forget(struct fuse_iqueue *fiq,
struct fuse_copy_state *cs,
size_t nbytes)
__releases(fiq->waitq.lock)
{
int err;
struct fuse_forget_link *forget = dequeue_forget(fiq, 1, NULL);
struct fuse_forget_in arg = {
.nlookup = forget->forget_one.nlookup,
};
struct fuse_in_header ih = {
.opcode = FUSE_FORGET,
.nodeid = forget->forget_one.nodeid,
.unique = fuse_get_unique(fiq),
.len = sizeof(ih) + sizeof(arg),
};
spin_unlock(&fiq->waitq.lock);
kfree(forget);
if (nbytes < ih.len)
return -EINVAL;
err = fuse_copy_one(cs, &ih, sizeof(ih));
if (!err)
err = fuse_copy_one(cs, &arg, sizeof(arg));
fuse_copy_finish(cs);
if (err)
return err;
return ih.len;
}
static int fuse_read_batch_forget(struct fuse_iqueue *fiq,
struct fuse_copy_state *cs, size_t nbytes)
__releases(fiq->waitq.lock)
{
int err;
unsigned max_forgets;
unsigned count;
struct fuse_forget_link *head;
struct fuse_batch_forget_in arg = { .count = 0 };
struct fuse_in_header ih = {
.opcode = FUSE_BATCH_FORGET,
.unique = fuse_get_unique(fiq),
.len = sizeof(ih) + sizeof(arg),
};
if (nbytes < ih.len) {
spin_unlock(&fiq->waitq.lock);
return -EINVAL;
}
max_forgets = (nbytes - ih.len) / sizeof(struct fuse_forget_one);
head = dequeue_forget(fiq, max_forgets, &count);
spin_unlock(&fiq->waitq.lock);
arg.count = count;
ih.len += count * sizeof(struct fuse_forget_one);
err = fuse_copy_one(cs, &ih, sizeof(ih));
if (!err)
err = fuse_copy_one(cs, &arg, sizeof(arg));
while (head) {
struct fuse_forget_link *forget = head;
if (!err) {
err = fuse_copy_one(cs, &forget->forget_one,
sizeof(forget->forget_one));
}
head = forget->next;
kfree(forget);
}
fuse_copy_finish(cs);
if (err)
return err;
return ih.len;
}
static int fuse_read_forget(struct fuse_conn *fc, struct fuse_iqueue *fiq,
struct fuse_copy_state *cs,
size_t nbytes)
__releases(fiq->waitq.lock)
{
if (fc->minor < 16 || fiq->forget_list_head.next->next == NULL)
return fuse_read_single_forget(fiq, cs, nbytes);
else
return fuse_read_batch_forget(fiq, cs, nbytes);
}
/*
* Read a single request into the userspace filesystem's buffer. This
* function waits until a request is available, then removes it from
* the pending list and copies request data to userspace buffer. If
* no reply is needed (FORGET) or request has been aborted or there
* was an error during the copying then it's finished by calling
* request_end(). Otherwise add it to the processing list, and set
* the 'sent' flag.
*/
static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
struct fuse_copy_state *cs, size_t nbytes)
{
ssize_t err;
struct fuse_conn *fc = fud->fc;
struct fuse_iqueue *fiq = &fc->iq;
struct fuse_pqueue *fpq = &fud->pq;
struct fuse_req *req;
struct fuse_in *in;
unsigned reqsize;
unsigned int hash;
restart:
spin_lock(&fiq->waitq.lock);
err = -EAGAIN;
if ((file->f_flags & O_NONBLOCK) && fiq->connected &&
!request_pending(fiq))
goto err_unlock;
err = wait_event_interruptible_exclusive_locked(fiq->waitq,
!fiq->connected || request_pending(fiq));
if (err)
goto err_unlock;
if (!fiq->connected) {
err = (fc->aborted && fc->abort_err) ? -ECONNABORTED : -ENODEV;
goto err_unlock;
}
if (!list_empty(&fiq->interrupts)) {
req = list_entry(fiq->interrupts.next, struct fuse_req,
intr_entry);
return fuse_read_interrupt(fiq, cs, nbytes, req);
}
if (forget_pending(fiq)) {
if (list_empty(&fiq->pending) || fiq->forget_batch-- > 0)
return fuse_read_forget(fc, fiq, cs, nbytes);
if (fiq->forget_batch <= -8)
fiq->forget_batch = 16;
}
req = list_entry(fiq->pending.next, struct fuse_req, list);
clear_bit(FR_PENDING, &req->flags);
list_del_init(&req->list);
spin_unlock(&fiq->waitq.lock);
in = &req->in;
reqsize = in->h.len;
/* If request is too large, reply with an error and restart the read */
if (nbytes < reqsize) {
req->out.h.error = -EIO;
/* SETXATTR is special, since it may contain too large data */
if (in->h.opcode == FUSE_SETXATTR)
req->out.h.error = -E2BIG;
request_end(fc, req);
goto restart;
}
spin_lock(&fpq->lock);
list_add(&req->list, &fpq->io);
spin_unlock(&fpq->lock);
cs->req = req;
err = fuse_copy_one(cs, &in->h, sizeof(in->h));
if (!err)
err = fuse_copy_args(cs, in->numargs, in->argpages,
(struct fuse_arg *) in->args, 0);
fuse_copy_finish(cs);
spin_lock(&fpq->lock);
clear_bit(FR_LOCKED, &req->flags);
if (!fpq->connected) {
err = (fc->aborted && fc->abort_err) ? -ECONNABORTED : -ENODEV;
goto out_end;
}
if (err) {
req->out.h.error = -EIO;
goto out_end;
}
if (!test_bit(FR_ISREPLY, &req->flags)) {
err = reqsize;
goto out_end;
}
hash = fuse_req_hash(req->in.h.unique);
list_move_tail(&req->list, &fpq->processing[hash]);
__fuse_get_request(req);
set_bit(FR_SENT, &req->flags);
spin_unlock(&fpq->lock);
/* matches barrier in request_wait_answer() */
smp_mb__after_atomic();
if (test_bit(FR_INTERRUPTED, &req->flags))
queue_interrupt(fiq, req);
fuse_put_request(fc, req);
return reqsize;
out_end:
if (!test_bit(FR_PRIVATE, &req->flags))
list_del_init(&req->list);
spin_unlock(&fpq->lock);
request_end(fc, req);
return err;
err_unlock:
spin_unlock(&fiq->waitq.lock);
return err;
}
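/*
 * Wire-format sketch (illustrative, derived from queue_request() and the
 * copy loop above): a single request as read by the daemon consists of
 *
 *	struct fuse_in_header	(in->h, whose h.len covers the whole message)
 *	in->args[0] .. in->args[numargs-1], packed back to back
 *
 * which is why the read must supply at least in->h.len bytes.
 */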
static int fuse_dev_open(struct inode *inode, struct file *file)
{
/*
* The fuse device's file's private_data is used to hold
* the fuse_conn(ection) when it is mounted, and is used to
* keep track of whether the file has been mounted already.
*/
file->private_data = NULL;
return 0;
}
static ssize_t fuse_dev_read(struct kiocb *iocb, struct iov_iter *to)
{
struct fuse_copy_state cs;
struct file *file = iocb->ki_filp;
struct fuse_dev *fud = fuse_get_dev(file);
if (!fud)
return -EPERM;
if (!iter_is_iovec(to))
return -EINVAL;
fuse_copy_init(&cs, 1, to);
return fuse_dev_do_read(fud, file, &cs, iov_iter_count(to));
}
static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
struct pipe_inode_info *pipe,
size_t len, unsigned int flags)
{
int total, ret;
int page_nr = 0;
struct pipe_buffer *bufs;
struct fuse_copy_state cs;
struct fuse_dev *fud = fuse_get_dev(in);
if (!fud)
return -EPERM;
bufs = kvmalloc_array(pipe->buffers, sizeof(struct pipe_buffer),
GFP_KERNEL);
if (!bufs)
return -ENOMEM;
fuse_copy_init(&cs, 1, NULL);
cs.pipebufs = bufs;
cs.pipe = pipe;
ret = fuse_dev_do_read(fud, in, &cs, len);
if (ret < 0)
goto out;
if (pipe->nrbufs + cs.nr_segs > pipe->buffers) {
ret = -EIO;
goto out;
}
for (ret = total = 0; page_nr < cs.nr_segs; total += ret) {
/*
* Need to be careful about this. Having buf->ops in module
* code can Oops if the buffer persists after module unload.
*/
bufs[page_nr].ops = &nosteal_pipe_buf_ops;
bufs[page_nr].flags = 0;
ret = add_to_pipe(pipe, &bufs[page_nr++]);
if (unlikely(ret < 0))
break;
}
if (total)
ret = total;
out:
for (; page_nr < cs.nr_segs; page_nr++)
put_page(bufs[page_nr].page);
kvfree(bufs);
return ret;
}
static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
struct fuse_copy_state *cs)
{
struct fuse_notify_poll_wakeup_out outarg;
int err = -EINVAL;
if (size != sizeof(outarg))
goto err;
err = fuse_copy_one(cs, &outarg, sizeof(outarg));
if (err)
goto err;
fuse_copy_finish(cs);
return fuse_notify_poll_wakeup(fc, &outarg);
err:
fuse_copy_finish(cs);
return err;
}
static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size,
struct fuse_copy_state *cs)
{
struct fuse_notify_inval_inode_out outarg;
int err = -EINVAL;
if (size != sizeof(outarg))
goto err;
err = fuse_copy_one(cs, &outarg, sizeof(outarg));
if (err)
goto err;
fuse_copy_finish(cs);
down_read(&fc->killsb);
err = -ENOENT;
if (fc->sb) {
err = fuse_reverse_inval_inode(fc->sb, outarg.ino,
outarg.off, outarg.len);
}
up_read(&fc->killsb);
return err;
err:
fuse_copy_finish(cs);
return err;
}
static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
struct fuse_copy_state *cs)
{
struct fuse_notify_inval_entry_out outarg;
int err = -ENOMEM;
char *buf;
struct qstr name;
buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
if (!buf)
goto err;
err = -EINVAL;
if (size < sizeof(outarg))
goto err;
err = fuse_copy_one(cs, &outarg, sizeof(outarg));
if (err)
goto err;
err = -ENAMETOOLONG;
if (outarg.namelen > FUSE_NAME_MAX)
goto err;
err = -EINVAL;
if (size != sizeof(outarg) + outarg.namelen + 1)
goto err;
name.name = buf;
name.len = outarg.namelen;
err = fuse_copy_one(cs, buf, outarg.namelen + 1);
if (err)
goto err;
fuse_copy_finish(cs);
buf[outarg.namelen] = 0;
down_read(&fc->killsb);
err = -ENOENT;
if (fc->sb)
err = fuse_reverse_inval_entry(fc->sb, outarg.parent, 0, &name);
up_read(&fc->killsb);
kfree(buf);
return err;
err:
kfree(buf);
fuse_copy_finish(cs);
return err;
}
static int fuse_notify_delete(struct fuse_conn *fc, unsigned int size,
struct fuse_copy_state *cs)
{
struct fuse_notify_delete_out outarg;
int err = -ENOMEM;
char *buf;
struct qstr name;
buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
if (!buf)
goto err;
err = -EINVAL;
if (size < sizeof(outarg))
goto err;
err = fuse_copy_one(cs, &outarg, sizeof(outarg));
if (err)
goto err;
err = -ENAMETOOLONG;
if (outarg.namelen > FUSE_NAME_MAX)
goto err;
err = -EINVAL;
if (size != sizeof(outarg) + outarg.namelen + 1)
goto err;
name.name = buf;
name.len = outarg.namelen;
err = fuse_copy_one(cs, buf, outarg.namelen + 1);
if (err)
goto err;
fuse_copy_finish(cs);
buf[outarg.namelen] = 0;
down_read(&fc->killsb);
err = -ENOENT;
if (fc->sb)
err = fuse_reverse_inval_entry(fc->sb, outarg.parent,
outarg.child, &name);
up_read(&fc->killsb);
kfree(buf);
return err;
err:
kfree(buf);
fuse_copy_finish(cs);
return err;
}
static int fuse_notify_store(struct fuse_conn *fc, unsigned int size,
struct fuse_copy_state *cs)
{
struct fuse_notify_store_out outarg;
struct inode *inode;
struct address_space *mapping;
u64 nodeid;
int err;
pgoff_t index;
unsigned int offset;
unsigned int num;
loff_t file_size;
loff_t end;
err = -EINVAL;
if (size < sizeof(outarg))
goto out_finish;
err = fuse_copy_one(cs, &outarg, sizeof(outarg));
if (err)
goto out_finish;
err = -EINVAL;
if (size - sizeof(outarg) != outarg.size)
goto out_finish;
nodeid = outarg.nodeid;
down_read(&fc->killsb);
err = -ENOENT;
if (!fc->sb)
goto out_up_killsb;
inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
if (!inode)
goto out_up_killsb;
mapping = inode->i_mapping;
index = outarg.offset >> PAGE_SHIFT;
offset = outarg.offset & ~PAGE_MASK;
file_size = i_size_read(inode);
end = outarg.offset + outarg.size;
if (end > file_size) {
file_size = end;
fuse_write_update_size(inode, file_size);
}
num = outarg.size;
while (num) {
struct page *page;
unsigned int this_num;
err = -ENOMEM;
page = find_or_create_page(mapping, index,
mapping_gfp_mask(mapping));
if (!page)
goto out_iput;
this_num = min_t(unsigned, num, PAGE_SIZE - offset);
err = fuse_copy_page(cs, &page, offset, this_num, 0);
if (!err && offset == 0 &&
(this_num == PAGE_SIZE || file_size == end))
SetPageUptodate(page);
unlock_page(page);
put_page(page);
if (err)
goto out_iput;
num -= this_num;
offset = 0;
index++;
}
err = 0;
out_iput:
iput(inode);
out_up_killsb:
up_read(&fc->killsb);
out_finish:
fuse_copy_finish(cs);
return err;
}
static void fuse_retrieve_end(struct fuse_conn *fc, struct fuse_req *req)
{
release_pages(req->pages, req->num_pages);
}
static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
struct fuse_notify_retrieve_out *outarg)
{
int err;
struct address_space *mapping = inode->i_mapping;
struct fuse_req *req;
pgoff_t index;
loff_t file_size;
unsigned int num;
unsigned int offset;
size_t total_len = 0;
unsigned int num_pages;
offset = outarg->offset & ~PAGE_MASK;
file_size = i_size_read(inode);
num = outarg->size;
if (outarg->offset > file_size)
num = 0;
else if (outarg->offset + num > file_size)
num = file_size - outarg->offset;
num_pages = (num + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
num_pages = min(num_pages, fc->max_pages);
req = fuse_get_req(fc, num_pages);
if (IS_ERR(req))
return PTR_ERR(req);
req->in.h.opcode = FUSE_NOTIFY_REPLY;
req->in.h.nodeid = outarg->nodeid;
req->in.numargs = 2;
req->in.argpages = 1;
req->end = fuse_retrieve_end;
index = outarg->offset >> PAGE_SHIFT;
while (num && req->num_pages < num_pages) {
struct page *page;
unsigned int this_num;
page = find_get_page(mapping, index);
if (!page)
break;
this_num = min_t(unsigned, num, PAGE_SIZE - offset);
req->pages[req->num_pages] = page;
req->page_descs[req->num_pages].offset = offset;
req->page_descs[req->num_pages].length = this_num;
req->num_pages++;
offset = 0;
num -= this_num;
total_len += this_num;
index++;
}
req->misc.retrieve_in.offset = outarg->offset;
req->misc.retrieve_in.size = total_len;
req->in.args[0].size = sizeof(req->misc.retrieve_in);
req->in.args[0].value = &req->misc.retrieve_in;
req->in.args[1].size = total_len;
err = fuse_request_send_notify_reply(fc, req, outarg->notify_unique);
if (err) {
fuse_retrieve_end(fc, req);
fuse_put_request(fc, req);
}
return err;
}
static int fuse_notify_retrieve(struct fuse_conn *fc, unsigned int size,
struct fuse_copy_state *cs)
{
struct fuse_notify_retrieve_out outarg;
struct inode *inode;
int err;
err = -EINVAL;
if (size != sizeof(outarg))
goto copy_finish;
err = fuse_copy_one(cs, &outarg, sizeof(outarg));
if (err)
goto copy_finish;
fuse_copy_finish(cs);
down_read(&fc->killsb);
err = -ENOENT;
if (fc->sb) {
u64 nodeid = outarg.nodeid;
inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
if (inode) {
err = fuse_retrieve(fc, inode, &outarg);
iput(inode);
}
}
up_read(&fc->killsb);
return err;
copy_finish:
fuse_copy_finish(cs);
return err;
}
static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
unsigned int size, struct fuse_copy_state *cs)
{
/* Don't try to move pages (yet) */
cs->move_pages = 0;
switch (code) {
case FUSE_NOTIFY_POLL:
return fuse_notify_poll(fc, size, cs);
case FUSE_NOTIFY_INVAL_INODE:
return fuse_notify_inval_inode(fc, size, cs);
case FUSE_NOTIFY_INVAL_ENTRY:
return fuse_notify_inval_entry(fc, size, cs);
case FUSE_NOTIFY_STORE:
return fuse_notify_store(fc, size, cs);
case FUSE_NOTIFY_RETRIEVE:
return fuse_notify_retrieve(fc, size, cs);
case FUSE_NOTIFY_DELETE:
return fuse_notify_delete(fc, size, cs);
default:
fuse_copy_finish(cs);
return -EINVAL;
}
}
/* Look up request on processing list by unique ID */
static struct fuse_req *request_find(struct fuse_pqueue *fpq, u64 unique)
{
unsigned int hash = fuse_req_hash(unique);
struct fuse_req *req;
list_for_each_entry(req, &fpq->processing[hash], list) {
if (req->in.h.unique == unique)
return req;
}
return NULL;
}
static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
unsigned nbytes)
{
unsigned reqsize = sizeof(struct fuse_out_header);
if (out->h.error)
return nbytes != reqsize ? -EINVAL : 0;
reqsize += len_args(out->numargs, out->args);
if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
return -EINVAL;
else if (reqsize > nbytes) {
struct fuse_arg *lastarg = &out->args[out->numargs-1];
unsigned diffsize = reqsize - nbytes;
if (diffsize > lastarg->size)
return -EINVAL;
lastarg->size -= diffsize;
}
return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
out->page_zeroing);
}
/*
* Write a single reply to a request. First the header is copied from
* the write buffer. The request is then searched on the processing
* list by the unique ID found in the header. If found, then remove
* it from the list and copy the rest of the buffer to the request.
* The request is finished by calling request_end()
*/
static ssize_t fuse_dev_do_write(struct fuse_dev *fud,
struct fuse_copy_state *cs, size_t nbytes)
{
int err;
struct fuse_conn *fc = fud->fc;
struct fuse_pqueue *fpq = &fud->pq;
struct fuse_req *req;
struct fuse_out_header oh;
if (nbytes < sizeof(struct fuse_out_header))
return -EINVAL;
err = fuse_copy_one(cs, &oh, sizeof(oh));
if (err)
goto err_finish;
err = -EINVAL;
if (oh.len != nbytes)
goto err_finish;
/*
	 * A zero oh.unique indicates an unsolicited notification message,
	 * and the error field contains the notification code.
*/
if (!oh.unique) {
err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs);
return err ? err : nbytes;
}
err = -EINVAL;
if (oh.error <= -1000 || oh.error > 0)
goto err_finish;
spin_lock(&fpq->lock);
err = -ENOENT;
if (!fpq->connected)
goto err_unlock_pq;
req = request_find(fpq, oh.unique & ~FUSE_INT_REQ_BIT);
if (!req)
goto err_unlock_pq;
/* Is it an interrupt reply ID? */
if (oh.unique & FUSE_INT_REQ_BIT) {
__fuse_get_request(req);
spin_unlock(&fpq->lock);
err = -EINVAL;
if (nbytes != sizeof(struct fuse_out_header)) {
fuse_put_request(fc, req);
goto err_finish;
}
if (oh.error == -ENOSYS)
fc->no_interrupt = 1;
else if (oh.error == -EAGAIN)
queue_interrupt(&fc->iq, req);
fuse_put_request(fc, req);
fuse_copy_finish(cs);
return nbytes;
}
clear_bit(FR_SENT, &req->flags);
list_move(&req->list, &fpq->io);
req->out.h = oh;
set_bit(FR_LOCKED, &req->flags);
spin_unlock(&fpq->lock);
cs->req = req;
if (!req->out.page_replace)
cs->move_pages = 0;
err = copy_out_args(cs, &req->out, nbytes);
fuse_copy_finish(cs);
spin_lock(&fpq->lock);
clear_bit(FR_LOCKED, &req->flags);
if (!fpq->connected)
err = -ENOENT;
else if (err)
req->out.h.error = -EIO;
if (!test_bit(FR_PRIVATE, &req->flags))
list_del_init(&req->list);
spin_unlock(&fpq->lock);
request_end(fc, req);
return err ? err : nbytes;
err_unlock_pq:
spin_unlock(&fpq->lock);
err_finish:
fuse_copy_finish(cs);
return err;
}
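/*
 * Wire-format sketch (illustrative, mirroring copy_out_args() above): a
 * reply starts with a struct fuse_out_header whose len field equals the
 * total number of bytes written; when oh.error is zero the out arguments
 * follow back to back, while an error reply carries the header alone.
 */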
static ssize_t fuse_dev_write(struct kiocb *iocb, struct iov_iter *from)
{
struct fuse_copy_state cs;
struct fuse_dev *fud = fuse_get_dev(iocb->ki_filp);
if (!fud)
return -EPERM;
if (!iter_is_iovec(from))
return -EINVAL;
fuse_copy_init(&cs, 0, from);
return fuse_dev_do_write(fud, &cs, iov_iter_count(from));
}
static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
struct file *out, loff_t *ppos,
size_t len, unsigned int flags)
{
unsigned nbuf;
unsigned idx;
struct pipe_buffer *bufs;
struct fuse_copy_state cs;
struct fuse_dev *fud;
size_t rem;
ssize_t ret;
fud = fuse_get_dev(out);
if (!fud)
return -EPERM;
pipe_lock(pipe);
bufs = kvmalloc_array(pipe->nrbufs, sizeof(struct pipe_buffer),
GFP_KERNEL);
if (!bufs) {
pipe_unlock(pipe);
return -ENOMEM;
}
nbuf = 0;
rem = 0;
for (idx = 0; idx < pipe->nrbufs && rem < len; idx++)
rem += pipe->bufs[(pipe->curbuf + idx) & (pipe->buffers - 1)].len;
ret = -EINVAL;
if (rem < len) {
pipe_unlock(pipe);
goto out;
}
rem = len;
while (rem) {
struct pipe_buffer *ibuf;
struct pipe_buffer *obuf;
BUG_ON(nbuf >= pipe->buffers);
BUG_ON(!pipe->nrbufs);
ibuf = &pipe->bufs[pipe->curbuf];
obuf = &bufs[nbuf];
if (rem >= ibuf->len) {
*obuf = *ibuf;
ibuf->ops = NULL;
pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
pipe->nrbufs--;
} else {
pipe_buf_get(pipe, ibuf);
*obuf = *ibuf;
obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
obuf->len = rem;
ibuf->offset += obuf->len;
ibuf->len -= obuf->len;
}
nbuf++;
rem -= obuf->len;
}
pipe_unlock(pipe);
fuse_copy_init(&cs, 0, NULL);
cs.pipebufs = bufs;
cs.nr_segs = nbuf;
cs.pipe = pipe;
if (flags & SPLICE_F_MOVE)
cs.move_pages = 1;
ret = fuse_dev_do_write(fud, &cs, len);
pipe_lock(pipe);
for (idx = 0; idx < nbuf; idx++)
pipe_buf_release(pipe, &bufs[idx]);
pipe_unlock(pipe);
out:
kvfree(bufs);
return ret;
}
static __poll_t fuse_dev_poll(struct file *file, poll_table *wait)
{
__poll_t mask = EPOLLOUT | EPOLLWRNORM;
struct fuse_iqueue *fiq;
struct fuse_dev *fud = fuse_get_dev(file);
if (!fud)
return EPOLLERR;
fiq = &fud->fc->iq;
poll_wait(file, &fiq->waitq, wait);
spin_lock(&fiq->waitq.lock);
if (!fiq->connected)
mask = EPOLLERR;
else if (request_pending(fiq))
mask |= EPOLLIN | EPOLLRDNORM;
spin_unlock(&fiq->waitq.lock);
return mask;
}
/*
* Abort all requests on the given list (pending or processing)
*
 * This function must not be called with fc->lock held; request_end() takes
 * fiq->waitq.lock and fc->bg_lock internally.
*/
static void end_requests(struct fuse_conn *fc, struct list_head *head)
{
while (!list_empty(head)) {
struct fuse_req *req;
req = list_entry(head->next, struct fuse_req, list);
req->out.h.error = -ECONNABORTED;
clear_bit(FR_SENT, &req->flags);
list_del_init(&req->list);
request_end(fc, req);
}
}
static void end_polls(struct fuse_conn *fc)
{
struct rb_node *p;
p = rb_first(&fc->polled_files);
while (p) {
struct fuse_file *ff;
ff = rb_entry(p, struct fuse_file, polled_node);
wake_up_interruptible_all(&ff->poll_wait);
p = rb_next(p);
}
}
/*
* Abort all requests.
*
* Emergency exit in case of a malicious or accidental deadlock, or just a hung
* filesystem.
*
* The same effect is usually achievable through killing the filesystem daemon
* and all users of the filesystem. The exception is the combination of an
* asynchronous request and the tricky deadlock (see
* Documentation/filesystems/fuse.txt).
*
 * Aborting requests under I/O goes as follows: 1: Separate out unlocked
 * requests; they should be finished off immediately. Locked requests will be
 * finished after unlock; see unlock_request(). 2: Finish off the unlocked
 * requests. It is possible that some request will finish before we can. This
 * is OK; in that case the request will be removed from the list before we
 * touch it.
*/
void fuse_abort_conn(struct fuse_conn *fc, bool is_abort)
{
struct fuse_iqueue *fiq = &fc->iq;
spin_lock(&fc->lock);
if (fc->connected) {
struct fuse_dev *fud;
struct fuse_req *req, *next;
LIST_HEAD(to_end);
unsigned int i;
/* Background queuing checks fc->connected under bg_lock */
spin_lock(&fc->bg_lock);
fc->connected = 0;
spin_unlock(&fc->bg_lock);
fc->aborted = is_abort;
fuse_set_initialized(fc);
list_for_each_entry(fud, &fc->devices, entry) {
struct fuse_pqueue *fpq = &fud->pq;
spin_lock(&fpq->lock);
fpq->connected = 0;
list_for_each_entry_safe(req, next, &fpq->io, list) {
req->out.h.error = -ECONNABORTED;
spin_lock(&req->waitq.lock);
set_bit(FR_ABORTED, &req->flags);
if (!test_bit(FR_LOCKED, &req->flags)) {
set_bit(FR_PRIVATE, &req->flags);
__fuse_get_request(req);
list_move(&req->list, &to_end);
}
spin_unlock(&req->waitq.lock);
}
for (i = 0; i < FUSE_PQ_HASH_SIZE; i++)
list_splice_tail_init(&fpq->processing[i],
&to_end);
spin_unlock(&fpq->lock);
}
spin_lock(&fc->bg_lock);
fc->blocked = 0;
fc->max_background = UINT_MAX;
flush_bg_queue(fc);
spin_unlock(&fc->bg_lock);
spin_lock(&fiq->waitq.lock);
fiq->connected = 0;
list_for_each_entry(req, &fiq->pending, list)
clear_bit(FR_PENDING, &req->flags);
list_splice_tail_init(&fiq->pending, &to_end);
while (forget_pending(fiq))
kfree(dequeue_forget(fiq, 1, NULL));
wake_up_all_locked(&fiq->waitq);
spin_unlock(&fiq->waitq.lock);
kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
end_polls(fc);
wake_up_all(&fc->blocked_waitq);
spin_unlock(&fc->lock);
end_requests(fc, &to_end);
} else {
spin_unlock(&fc->lock);
}
}
EXPORT_SYMBOL_GPL(fuse_abort_conn);
void fuse_wait_aborted(struct fuse_conn *fc)
{
/* matches implicit memory barrier in fuse_drop_waiting() */
smp_mb();
wait_event(fc->blocked_waitq, atomic_read(&fc->num_waiting) == 0);
}
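/*
 * Teardown ordering sketch (illustrative, not part of the original
 * source): an abort is expected to be followed by draining the
 * outstanding requests, roughly:
 *
 *	fuse_abort_conn(fc, false);
 *	fuse_wait_aborted(fc);
 *
 * so that num_waiting has dropped to zero before the connection data is
 * freed.
 */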
int fuse_dev_release(struct inode *inode, struct file *file)
{
struct fuse_dev *fud = fuse_get_dev(file);
if (fud) {
struct fuse_conn *fc = fud->fc;
struct fuse_pqueue *fpq = &fud->pq;
LIST_HEAD(to_end);
unsigned int i;
spin_lock(&fpq->lock);
WARN_ON(!list_empty(&fpq->io));
for (i = 0; i < FUSE_PQ_HASH_SIZE; i++)
list_splice_init(&fpq->processing[i], &to_end);
spin_unlock(&fpq->lock);
end_requests(fc, &to_end);
/* Are we the last open device? */
if (atomic_dec_and_test(&fc->dev_count)) {
WARN_ON(fc->iq.fasync != NULL);
fuse_abort_conn(fc, false);
}
fuse_dev_free(fud);
}
return 0;
}
EXPORT_SYMBOL_GPL(fuse_dev_release);
static int fuse_dev_fasync(int fd, struct file *file, int on)
{
struct fuse_dev *fud = fuse_get_dev(file);
if (!fud)
return -EPERM;
/* No locking - fasync_helper does its own locking */
return fasync_helper(fd, file, on, &fud->fc->iq.fasync);
}
static int fuse_device_clone(struct fuse_conn *fc, struct file *new)
{
struct fuse_dev *fud;
if (new->private_data)
return -EINVAL;
fud = fuse_dev_alloc(fc);
if (!fud)
return -ENOMEM;
new->private_data = fud;
atomic_inc(&fc->dev_count);
return 0;
}
static long fuse_dev_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
int err = -ENOTTY;
if (cmd == FUSE_DEV_IOC_CLONE) {
int oldfd;
err = -EFAULT;
if (!get_user(oldfd, (__u32 __user *) arg)) {
struct file *old = fget(oldfd);
err = -EINVAL;
if (old) {
struct fuse_dev *fud = NULL;
/*
* Check against file->f_op because CUSE
* uses the same ioctl handler.
*/
if (old->f_op == file->f_op &&
old->f_cred->user_ns == file->f_cred->user_ns)
fud = fuse_get_dev(old);
if (fud) {
mutex_lock(&fuse_mutex);
err = fuse_device_clone(fud->fc, file);
mutex_unlock(&fuse_mutex);
}
fput(old);
}
}
}
return err;
}
const struct file_operations fuse_dev_operations = {
.owner = THIS_MODULE,
.open = fuse_dev_open,
.llseek = no_llseek,
.read_iter = fuse_dev_read,
.splice_read = fuse_dev_splice_read,
.write_iter = fuse_dev_write,
.splice_write = fuse_dev_splice_write,
.poll = fuse_dev_poll,
.release = fuse_dev_release,
.fasync = fuse_dev_fasync,
.unlocked_ioctl = fuse_dev_ioctl,
.compat_ioctl = fuse_dev_ioctl,
};
EXPORT_SYMBOL_GPL(fuse_dev_operations);
static struct miscdevice fuse_miscdevice = {
.minor = FUSE_MINOR,
.name = "fuse",
.fops = &fuse_dev_operations,
};
int __init fuse_dev_init(void)
{
int err = -ENOMEM;
fuse_req_cachep = kmem_cache_create("fuse_request",
sizeof(struct fuse_req),
0, 0, NULL);
if (!fuse_req_cachep)
goto out;
err = misc_register(&fuse_miscdevice);
if (err)
goto out_cache_clean;
return 0;
out_cache_clean:
kmem_cache_destroy(fuse_req_cachep);
out:
return err;
}
void fuse_dev_cleanup(void)
{
misc_deregister(&fuse_miscdevice);
kmem_cache_destroy(fuse_req_cachep);
}
| ./CrossVul/dataset_final_sorted/CWE-416/c/bad_819_0 |
crossvul-cpp_data_good_2638_0 | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% M M AAA TTTTT L AAA BBBB %
% MM MM A A T L A A B B %
% M M M AAAAA T L AAAAA BBBB %
% M M A A T L A A B B %
% M M A A T LLLLL A A BBBB %
% %
% %
% Read MATLAB Image Format %
% %
% Software Design %
% Jaroslav Fojtik %
% 2001-2008 %
% %
% %
% Permission is hereby granted, free of charge, to any person obtaining a %
% copy of this software and associated documentation files ("ImageMagick"), %
% to deal in ImageMagick without restriction, including without limitation %
% the rights to use, copy, modify, merge, publish, distribute, sublicense, %
% and/or sell copies of ImageMagick, and to permit persons to whom the %
% ImageMagick is furnished to do so, subject to the following conditions: %
% %
% The above copyright notice and this permission notice shall be included in %
% all copies or substantial portions of ImageMagick. %
% %
% The software is provided "as is", without warranty of any kind, express or %
% implied, including but not limited to the warranties of merchantability, %
% fitness for a particular purpose and noninfringement. In no event shall %
% ImageMagick Studio be liable for any claim, damages or other liability, %
% whether in an action of contract, tort or otherwise, arising from, out of %
% or in connection with ImageMagick or the use or other dealings in %
% ImageMagick. %
% %
% Except as contained in this notice, the name of the ImageMagick Studio %
% shall not be used in advertising or otherwise to promote the sale, use or %
% other dealings in ImageMagick without prior written authorization from the %
% ImageMagick Studio. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/distort.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/resource_.h"
#include "MagickCore/static.h"
#include "MagickCore/string_.h"
#include "MagickCore/module.h"
#include "MagickCore/transform.h"
#include "MagickCore/utility-private.h"
#if defined(MAGICKCORE_ZLIB_DELEGATE)
#include "zlib.h"
#endif
/*
Forward declaration.
*/
static MagickBooleanType
WriteMATImage(const ImageInfo *,Image *,ExceptionInfo *);
/* Auto coloring method; sorry, this creates some artefacts inside the data:
MinReal+j*MaxComplex = red MaxReal+j*MaxComplex = black
MinReal+j*0 = white MaxReal+j*0 = black
MinReal+j*MinComplex = blue MaxReal+j*MinComplex = black
*/
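/* Worked example of the mapping above (illustrative, not from the original
   source): for a positive sample *p with a current red value of 0, the code
   adds (*p/MaxVal)*QuantumRange to the red channel, so *p == MaxVal
   saturates red while *p == 0 leaves the pixel untouched; negative samples
   do the same for blue. See InsertComplexDoubleRow() below. */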
typedef struct
{
char identific[124];
unsigned short Version;
char EndianIndicator[2];
unsigned long DataType;
unsigned int ObjectSize;
unsigned long unknown1;
unsigned long unknown2;
unsigned short unknown5;
unsigned char StructureFlag;
unsigned char StructureClass;
unsigned long unknown3;
unsigned long unknown4;
unsigned long DimFlag;
unsigned long SizeX;
unsigned long SizeY;
unsigned short Flag1;
unsigned short NameFlag;
}
MATHeader;
static const char *MonthsTab[12]={"Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"};
static const char *DayOfWTab[7]={"Sun","Mon","Tue","Wed","Thu","Fri","Sat"};
static const char *OsDesc=
#if defined(MAGICKCORE_WINDOWS_SUPPORT)
"PCWIN";
#else
#ifdef __APPLE__
"MAC";
#else
"LNX86";
#endif
#endif
typedef enum
{
miINT8 = 1, /* 8 bit signed */
miUINT8, /* 8 bit unsigned */
miINT16, /* 16 bit signed */
miUINT16, /* 16 bit unsigned */
miINT32, /* 32 bit signed */
miUINT32, /* 32 bit unsigned */
miSINGLE, /* IEEE 754 single precision float */
miRESERVE1,
miDOUBLE, /* IEEE 754 double precision float */
miRESERVE2,
miRESERVE3,
miINT64, /* 64 bit signed */
miUINT64, /* 64 bit unsigned */
miMATRIX, /* MATLAB array */
miCOMPRESSED, /* Compressed Data */
miUTF8, /* Unicode UTF-8 Encoded Character Data */
miUTF16, /* Unicode UTF-16 Encoded Character Data */
miUTF32 /* Unicode UTF-32 Encoded Character Data */
} mat5_data_type;
typedef enum
{
mxCELL_CLASS=1, /* cell array */
mxSTRUCT_CLASS, /* structure */
mxOBJECT_CLASS, /* object */
mxCHAR_CLASS, /* character array */
mxSPARSE_CLASS, /* sparse array */
mxDOUBLE_CLASS, /* double precision array */
mxSINGLE_CLASS, /* single precision floating point */
mxINT8_CLASS, /* 8 bit signed integer */
mxUINT8_CLASS, /* 8 bit unsigned integer */
mxINT16_CLASS, /* 16 bit signed integer */
mxUINT16_CLASS, /* 16 bit unsigned integer */
mxINT32_CLASS, /* 32 bit signed integer */
mxUINT32_CLASS, /* 32 bit unsigned integer */
mxINT64_CLASS, /* 64 bit signed integer */
mxUINT64_CLASS, /* 64 bit unsigned integer */
mxFUNCTION_CLASS /* Function handle */
} arrayclasstype;
#define FLAG_COMPLEX 0x8
#define FLAG_GLOBAL 0x4
#define FLAG_LOGICAL 0x2
static const QuantumType z2qtype[4] = {GrayQuantum, BlueQuantum, GreenQuantum, RedQuantum};
static void InsertComplexDoubleRow(Image *image,double *p,int y,double MinVal,
double MaxVal,ExceptionInfo *exception)
{
double f;
int x;
register Quantum *q;
if (MinVal == 0)
MinVal = -1;
if (MaxVal == 0)
MaxVal = 1;
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
return;
for (x = 0; x < (ssize_t) image->columns; x++)
{
if (*p > 0)
{
f = (*p / MaxVal) * (QuantumRange-GetPixelRed(image,q));
if (f + GetPixelRed(image,q) > QuantumRange)
SetPixelRed(image,QuantumRange,q);
else
SetPixelRed(image,GetPixelRed(image,q)+(int) f,q);
if ((int) f / 2.0 > GetPixelGreen(image,q))
{
SetPixelGreen(image,0,q);
SetPixelBlue(image,0,q);
}
else
{
SetPixelBlue(image,GetPixelBlue(image,q)-(int) (f/2.0),q);
SetPixelGreen(image,GetPixelBlue(image,q),q);
}
}
if (*p < 0)
{
f = (*p / MaxVal) * (QuantumRange-GetPixelBlue(image,q));
if (f+GetPixelBlue(image,q) > QuantumRange)
SetPixelBlue(image,QuantumRange,q);
else
SetPixelBlue(image,GetPixelBlue(image,q)+(int) f,q);
if ((int) f / 2.0 > GetPixelGreen(image,q))
{
SetPixelRed(image,0,q);
SetPixelGreen(image,0,q);
}
else
{
SetPixelRed(image,GetPixelRed(image,q)-(int) (f/2.0),q);
SetPixelGreen(image,GetPixelRed(image,q),q);
}
}
p++;
q+=GetPixelChannels(image);
}
if (!SyncAuthenticPixels(image,exception))
return;
return;
}
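/* Worked example of the tinting above (QuantumRange = 255, illustrative):
   for a pixel currently mid-gray (R=G=B=128) and an imaginary sample
   *p == MaxVal, f = 1.0*(255-128) = 127, so red rises to 255; (int) f/2.0
   = 63.5 is below the green value 128, so blue drops to 128-63 = 65 and
   green is then set to the new blue.  The pixel ends up tinted red
   (255,65,65). */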
static void InsertComplexFloatRow(Image *image,float *p,int y,double MinVal,
double MaxVal,ExceptionInfo *exception)
{
double f;
int x;
register Quantum *q;
if (MinVal == 0)
MinVal = -1;
if (MaxVal == 0)
MaxVal = 1;
q = QueueAuthenticPixels(image, 0, y, image->columns, 1,exception);
if (q == (Quantum *) NULL)
return;
for (x = 0; x < (ssize_t) image->columns; x++)
{
if (*p > 0)
{
f = (*p / MaxVal) * (QuantumRange-GetPixelRed(image,q));
if (f+GetPixelRed(image,q) > QuantumRange)
SetPixelRed(image,QuantumRange,q);
else
SetPixelRed(image,GetPixelRed(image,q)+(int) f,q);
if ((int) f / 2.0 > GetPixelGreen(image,q))
{
SetPixelGreen(image,0,q);
SetPixelBlue(image,0,q);
}
else
{
SetPixelBlue(image,GetPixelBlue(image,q)-(int) (f/2.0),q);
SetPixelGreen(image,GetPixelBlue(image,q),q);
}
}
if (*p < 0)
{
f = (*p / MaxVal) * (QuantumRange - GetPixelBlue(image,q));
if (f + GetPixelBlue(image,q) > QuantumRange)
SetPixelBlue(image,QuantumRange,q);
else
SetPixelBlue(image,GetPixelBlue(image,q)+
(int) f,q);
if ((int) f / 2.0 > GetPixelGreen(image,q))
{
SetPixelGreen(image,0,q);
SetPixelRed(image,0,q);
}
else
{
SetPixelRed(image,GetPixelRed(image,q)-(int) (f/2.0),q);
SetPixelGreen(image,GetPixelRed(image,q),q);
}
}
p++;
q+=GetPixelChannels(image); /* advance a full pixel, matching the double variant */
}
if (!SyncAuthenticPixels(image,exception))
return;
return;
}
/************** READERS ******************/
/* This function reads one block of floats. */
static void ReadBlobFloatsLSB(Image * image, size_t len, float *data)
{
while (len >= 4)
{
*data++ = ReadBlobFloat(image);
len -= sizeof(float);
}
if (len > 0)
(void) SeekBlob(image, len, SEEK_CUR);
}
static void ReadBlobFloatsMSB(Image * image, size_t len, float *data)
{
while (len >= 4)
{
*data++ = ReadBlobFloat(image);
len -= sizeof(float);
}
if (len > 0)
(void) SeekBlob(image, len, SEEK_CUR);
}
/* This function reads one block of doubles. */
static void ReadBlobDoublesLSB(Image * image, size_t len, double *data)
{
while (len >= 8)
{
*data++ = ReadBlobDouble(image);
len -= sizeof(double);
}
if (len > 0)
(void) SeekBlob(image, len, SEEK_CUR);
}
static void ReadBlobDoublesMSB(Image * image, size_t len, double *data)
{
while (len >= 8)
{
*data++ = ReadBlobDouble(image);
len -= sizeof(double);
}
if (len > 0)
(void) SeekBlob(image, len, SEEK_CUR);
}
/* Calculate minimum and maximum from a given block of data */
static void CalcMinMax(Image *image, int endian_indicator, int SizeX, int SizeY, size_t CellType, unsigned ldblk, void *BImgBuff, double *Min, double *Max)
{
MagickOffsetType filepos;
int i, x;
void (*ReadBlobDoublesXXX)(Image * image, size_t len, double *data);
void (*ReadBlobFloatsXXX)(Image * image, size_t len, float *data);
double *dblrow;
float *fltrow;
if (endian_indicator == LSBEndian)
{
ReadBlobDoublesXXX = ReadBlobDoublesLSB;
ReadBlobFloatsXXX = ReadBlobFloatsLSB;
}
else /* MI */
{
ReadBlobDoublesXXX = ReadBlobDoublesMSB;
ReadBlobFloatsXXX = ReadBlobFloatsMSB;
}
filepos = TellBlob(image); /* Remember the current position; it is restored after the scan. */
for (i = 0; i < SizeY; i++)
{
if (CellType==miDOUBLE)
{
ReadBlobDoublesXXX(image, ldblk, (double *)BImgBuff);
dblrow = (double *)BImgBuff;
if (i == 0)
{
*Min = *Max = *dblrow;
}
for (x = 0; x < SizeX; x++)
{
if (*Min > *dblrow)
*Min = *dblrow;
if (*Max < *dblrow)
*Max = *dblrow;
dblrow++;
}
}
if (CellType==miSINGLE)
{
ReadBlobFloatsXXX(image, ldblk, (float *)BImgBuff);
fltrow = (float *)BImgBuff;
if (i == 0)
{
*Min = *Max = *fltrow;
}
for (x = 0; x < (ssize_t) SizeX; x++)
{
if (*Min > *fltrow)
*Min = *fltrow;
if (*Max < *fltrow)
*Max = *fltrow;
fltrow++;
}
}
}
(void) SeekBlob(image, filepos, SEEK_SET);
}
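/* A minimal sketch (not part of this coder) of how a caller can use the
   [Min,Max] interval computed by CalcMinMax() to scale a floating-point
   sample into the quantum range.  ScaleToQuantumRange is a hypothetical
   helper; ClampToQuantum() and QuantumRange are real MagickCore symbols. */
#if 0
static Quantum ScaleToQuantumRange(double sample,double min,double max)
{
  if (max <= min)
    return((Quantum) 0);
  return(ClampToQuantum((double) QuantumRange*(sample-min)/(max-min)));
}
#endif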
static void FixSignedValues(const Image *image,Quantum *q, int y)
{
while(y-->0)
{
/* Please note that negative values will overflow
Q=8; QuantumRange=255: <0;127> + 127+1 = <128; 255>
<-1;-128> + 127+1 = <0; 127> */
SetPixelRed(image,GetPixelRed(image,q)+QuantumRange/2+1,q);
SetPixelGreen(image,GetPixelGreen(image,q)+QuantumRange/2+1,q);
SetPixelBlue(image,GetPixelBlue(image,q)+QuantumRange/2+1,q);
q++;
}
}
/** Fix a whole row of logical/binary data, i.e. pack it into bits. */
static void FixLogical(unsigned char *Buff,int ldblk)
{
unsigned char mask=128;
unsigned char *BuffL = Buff;
unsigned char val = 0;
while(ldblk-->0)
{
if(*Buff++ != 0)
val |= mask;
mask >>= 1;
if(mask==0)
{
*BuffL++ = val;
val = 0;
mask = 128;
}
}
*BuffL = val;
}
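/* Illustrative use of FixLogical() (a sketch, not how this coder calls it):
   eight logical bytes pack MSB-first into one output byte, so the input
   {1,0,1,1,0,0,0,1} packs to 0x80|0x20|0x10|0x01 = 0xB1. */
#if 0
{
  unsigned char row[8] = { 1, 0, 1, 1, 0, 0, 0, 1 };
  FixLogical(row,8);  /* row[0] == 0xB1 */
}
#endif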
#if defined(MAGICKCORE_ZLIB_DELEGATE)
static voidpf AcquireZIPMemory(voidpf context,unsigned int items,
unsigned int size)
{
(void) context;
return((voidpf) AcquireQuantumMemory(items,size));
}
static void RelinquishZIPMemory(voidpf context,voidpf memory)
{
(void) context;
memory=RelinquishMagickMemory(memory);
}
#endif
#if defined(MAGICKCORE_ZLIB_DELEGATE)
/** This procedure decompresses an image block for the new (level 5) MATLAB format. */
static Image *decompress_block(Image *orig, unsigned int *Size, ImageInfo *clone_info, ExceptionInfo *exception)
{
Image *image2;
void *cache_block, *decompress_block;
z_stream zip_info;
FILE *mat_file;
size_t magick_size;
size_t extent;
int file;
int status;
int zip_status;
ssize_t TotalSize = 0;
if(clone_info==NULL) return NULL;
if(clone_info->file) /* Close file opened from previous transaction. */
{
fclose(clone_info->file);
clone_info->file = NULL;
(void) remove_utf8(clone_info->filename);
}
cache_block = AcquireQuantumMemory((size_t)(*Size < 16384) ? *Size: 16384,sizeof(unsigned char *));
if(cache_block==NULL) return NULL;
decompress_block = AcquireQuantumMemory((size_t)(4096),sizeof(unsigned char *));
if(decompress_block==NULL)
{
RelinquishMagickMemory(cache_block);
return NULL;
}
mat_file=0;
file = AcquireUniqueFileResource(clone_info->filename);
if (file != -1)
mat_file = fdopen(file,"w");
if(!mat_file)
{
RelinquishMagickMemory(cache_block);
RelinquishMagickMemory(decompress_block);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),"Cannot create file stream for decompressed image");
return NULL;
}
zip_info.zalloc=AcquireZIPMemory;
zip_info.zfree=RelinquishZIPMemory;
zip_info.opaque = (voidpf) NULL;
zip_status = inflateInit(&zip_info);
if (zip_status != Z_OK)
{
RelinquishMagickMemory(cache_block);
RelinquishMagickMemory(decompress_block);
(void) ThrowMagickException(exception,GetMagickModule(),CorruptImageError,
"UnableToUncompressImage","`%s'",clone_info->filename);
(void) fclose(mat_file);
RelinquishUniqueFileResource(clone_info->filename);
return NULL;
}
/* zip_info.next_out = 8*4;*/
zip_info.avail_in = 0;
zip_info.total_out = 0;
while(*Size>0 && !EOFBlob(orig))
{
magick_size = ReadBlob(orig, (*Size < 16384) ? *Size : 16384, (unsigned char *) cache_block);
zip_info.next_in = (Bytef *) cache_block;
zip_info.avail_in = (uInt) magick_size;
while(zip_info.avail_in>0)
{
zip_info.avail_out = 4096;
zip_info.next_out = (Bytef *) decompress_block;
zip_status = inflate(&zip_info,Z_NO_FLUSH);
if ((zip_status != Z_OK) && (zip_status != Z_STREAM_END))
break;
extent=fwrite(decompress_block, 4096-zip_info.avail_out, 1, mat_file);
(void) extent;
TotalSize += 4096-zip_info.avail_out;
if(zip_status == Z_STREAM_END) goto DblBreak;
}
if ((zip_status != Z_OK) && (zip_status != Z_STREAM_END))
break;
*Size -= magick_size;
}
DblBreak:
inflateEnd(&zip_info);
(void)fclose(mat_file);
RelinquishMagickMemory(cache_block);
RelinquishMagickMemory(decompress_block);
*Size = TotalSize;
if((clone_info->file=fopen(clone_info->filename,"rb"))==NULL) goto UnlinkFile;
if( (image2 = AcquireImage(clone_info,exception))==NULL ) goto EraseFile;
status = OpenBlob(clone_info,image2,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
DeleteImageFromList(&image2);
EraseFile:
fclose(clone_info->file);
clone_info->file = NULL;
UnlinkFile:
RelinquishUniqueFileResource(clone_info->filename);
return NULL;
}
return image2;
}
#endif
static Image *ReadMATImageV4(const ImageInfo *image_info,Image *image,
ExceptionInfo *exception)
{
typedef struct {
unsigned char Type[4];
unsigned int nRows;
unsigned int nCols;
unsigned int imagf;
unsigned int nameLen;
} MAT4_HDR;
long
ldblk;
EndianType
endian;
Image
*rotate_image;
MagickBooleanType
status;
MAT4_HDR
HDR;
QuantumInfo
*quantum_info;
QuantumFormatType
format_type;
register ssize_t
i;
ssize_t
count,
y;
unsigned char
*pixels;
unsigned int
depth;
quantum_info=(QuantumInfo *) NULL;
(void) SeekBlob(image,0,SEEK_SET);
while (EOFBlob(image) == MagickFalse)
{
/*
Object parser loop.
*/
ldblk=ReadBlobLSBLong(image);
if ((ldblk > 9999) || (ldblk < 0))
break;
HDR.Type[3]=ldblk % 10; ldblk /= 10; /* T digit */
HDR.Type[2]=ldblk % 10; ldblk /= 10; /* P digit */
HDR.Type[1]=ldblk % 10; ldblk /= 10; /* O digit */
HDR.Type[0]=ldblk; /* M digit */
if (HDR.Type[3] != 0)
break; /* Data format */
if (HDR.Type[2] != 0)
break; /* Always 0 */
if (HDR.Type[0] == 0)
{
HDR.nRows=ReadBlobLSBLong(image);
HDR.nCols=ReadBlobLSBLong(image);
HDR.imagf=ReadBlobLSBLong(image);
HDR.nameLen=ReadBlobLSBLong(image);
endian=LSBEndian;
}
else
{
HDR.nRows=ReadBlobMSBLong(image);
HDR.nCols=ReadBlobMSBLong(image);
HDR.imagf=ReadBlobMSBLong(image);
HDR.nameLen=ReadBlobMSBLong(image);
endian=MSBEndian;
}
if ((HDR.imagf != 0) && (HDR.imagf != 1))
break;
if (HDR.nameLen > 0xFFFF)
return((Image *) NULL);
for (i=0; i < (ssize_t) HDR.nameLen; i++)
{
int
byte;
/*
Skip matrix name.
*/
byte=ReadBlobByte(image);
if (byte == EOF)
{
ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
image->filename);
break;
}
}
image->columns=(size_t) HDR.nRows;
image->rows=(size_t) HDR.nCols;
SetImageColorspace(image,GRAYColorspace,exception);
if (image_info->ping != MagickFalse)
{
Swap(image->columns,image->rows);
return(image);
}
status=SetImageExtent(image,image->columns,image->rows,exception);
if (status == MagickFalse)
return((Image *) NULL);
quantum_info=AcquireQuantumInfo(image_info,image);
if (quantum_info == (QuantumInfo *) NULL)
return((Image *) NULL);
switch(HDR.Type[1])
{
case 0:
format_type=FloatingPointQuantumFormat;
depth=64;
break;
case 1:
format_type=FloatingPointQuantumFormat;
depth=32;
break;
case 2:
format_type=UnsignedQuantumFormat;
depth=16;
break;
case 3:
format_type=SignedQuantumFormat;
depth=16;
break;
case 4:
format_type=UnsignedQuantumFormat;
depth=8;
break;
default:
format_type=UnsignedQuantumFormat;
depth=8;
break;
}
image->depth=depth;
if (HDR.Type[0] != 0)
SetQuantumEndian(image,quantum_info,MSBEndian);
status=SetQuantumFormat(image,quantum_info,format_type);
status=SetQuantumDepth(image,quantum_info,depth);
status=SetQuantumEndian(image,quantum_info,endian);
SetQuantumScale(quantum_info,1.0);
pixels=(unsigned char *) GetQuantumPixels(quantum_info);
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
count=ReadBlob(image,depth/8*image->columns,(char *) pixels);
if (count == -1)
break;
q=QueueAuthenticPixels(image,0,image->rows-y-1,image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
(void) ImportQuantumPixels(image,(CacheView *) NULL,quantum_info,
GrayQuantum,pixels,exception);
if ((HDR.Type[1] == 2) || (HDR.Type[1] == 3))
FixSignedValues(image,q,(int) image->columns);
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
if (HDR.imagf == 1)
for (y=0; y < (ssize_t) image->rows; y++)
{
/*
Read complex pixels.
*/
count=ReadBlob(image,depth/8*image->columns,(char *) pixels);
if (count == -1)
break;
if (HDR.Type[1] == 0)
InsertComplexDoubleRow(image,(double *) pixels,y,0,0,exception);
else
InsertComplexFloatRow(image,(float *) pixels,y,0,0,exception);
}
if (quantum_info != (QuantumInfo *) NULL)
quantum_info=DestroyQuantumInfo(quantum_info);
rotate_image=RotateImage(image,90.0,exception);
if (rotate_image != (Image *) NULL)
{
image=DestroyImage(image);
image=rotate_image;
}
if (EOFBlob(image) != MagickFalse)
{
ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
image->filename);
break;
}
/*
Proceed to next image.
*/
if (image_info->number_scenes != 0)
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
break;
/*
Allocate next image structure.
*/
AcquireNextImage(image_info,image,exception);
if (GetNextImageInList(image) == (Image *) NULL)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
image=SyncNextImageInList(image);
status=SetImageProgress(image,LoadImagesTag,TellBlob(image),
GetBlobSize(image));
if (status == MagickFalse)
break;
}
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d M A T L A B i m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadMATImage() reads a MATLAB image file and returns it. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the ReadMATImage method is:
%
% Image *ReadMATImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: Method ReadMATImage returns a pointer to the image after
% reading. A null image is returned if there is a memory shortage or if
% the image cannot be read.
%
% o image_info: Specifies a pointer to a ImageInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Image *ReadMATImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
Image *image, *image2=NULL,
*rotated_image;
register Quantum *q;
unsigned int status;
MATHeader MATLAB_HDR;
size_t size;
size_t CellType;
QuantumInfo *quantum_info;
ImageInfo *clone_info;
int i;
ssize_t ldblk;
unsigned char *BImgBuff = NULL;
double MinVal, MaxVal;
unsigned z, z2;
unsigned Frames;
int logging;
int sample_size;
MagickOffsetType filepos=0x80;
BlobInfo *blob;
size_t one;
unsigned int (*ReadBlobXXXLong)(Image *image);
unsigned short (*ReadBlobXXXShort)(Image *image);
void (*ReadBlobDoublesXXX)(Image * image, size_t len, double *data);
void (*ReadBlobFloatsXXX)(Image * image, size_t len, float *data);
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
logging = LogMagickEvent(CoderEvent,GetMagickModule(),"enter");
/*
Open image file.
*/
image = AcquireImage(image_info,exception);
status = OpenBlob(image_info, image, ReadBinaryBlobMode, exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Read MATLAB image.
*/
quantum_info=(QuantumInfo *) NULL;
clone_info=(ImageInfo *) NULL;
if (ReadBlob(image,124,(unsigned char *) &MATLAB_HDR.identific) != 124)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if (strncmp(MATLAB_HDR.identific,"MATLAB",6) != 0)
{
image2=ReadMATImageV4(image_info,image,exception);
if (image2 == NULL)
goto MATLAB_KO;
image=image2;
goto END_OF_READING;
}
MATLAB_HDR.Version = ReadBlobLSBShort(image);
if(ReadBlob(image,2,(unsigned char *) &MATLAB_HDR.EndianIndicator) != 2)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if (logging)
(void) LogMagickEvent(CoderEvent,GetMagickModule()," Endian %c%c",
MATLAB_HDR.EndianIndicator[0],MATLAB_HDR.EndianIndicator[1]);
if (!strncmp(MATLAB_HDR.EndianIndicator, "IM", 2))
{
ReadBlobXXXLong = ReadBlobLSBLong;
ReadBlobXXXShort = ReadBlobLSBShort;
ReadBlobDoublesXXX = ReadBlobDoublesLSB;
ReadBlobFloatsXXX = ReadBlobFloatsLSB;
image->endian = LSBEndian;
}
else if (!strncmp(MATLAB_HDR.EndianIndicator, "MI", 2))
{
ReadBlobXXXLong = ReadBlobMSBLong;
ReadBlobXXXShort = ReadBlobMSBShort;
ReadBlobDoublesXXX = ReadBlobDoublesMSB;
ReadBlobFloatsXXX = ReadBlobFloatsMSB;
image->endian = MSBEndian;
}
else
goto MATLAB_KO; /* unsupported endian */
if (strncmp(MATLAB_HDR.identific, "MATLAB", 6))
{
MATLAB_KO:
if ((image != image2) && (image2 != (Image *) NULL))
image2=DestroyImage(image2);
if (clone_info != (ImageInfo *) NULL)
clone_info=DestroyImageInfo(clone_info);
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
}
filepos = TellBlob(image);
while(!EOFBlob(image)) /* object parser loop */
{
Frames = 1;
(void) SeekBlob(image,filepos,SEEK_SET);
/* printf("pos=%X\n",TellBlob(image)); */
MATLAB_HDR.DataType = ReadBlobXXXLong(image);
if(EOFBlob(image)) break;
MATLAB_HDR.ObjectSize = ReadBlobXXXLong(image);
if(EOFBlob(image)) break;
if((MagickSizeType) (MATLAB_HDR.ObjectSize+filepos) > GetBlobSize(image))
goto MATLAB_KO;
filepos += MATLAB_HDR.ObjectSize + 4 + 4;
clone_info=CloneImageInfo(image_info);
image2 = image;
#if defined(MAGICKCORE_ZLIB_DELEGATE)
if(MATLAB_HDR.DataType == miCOMPRESSED)
{
image2 = decompress_block(image,&MATLAB_HDR.ObjectSize,clone_info,exception);
if(image2==NULL) continue;
MATLAB_HDR.DataType = ReadBlobXXXLong(image2); /* replace compressed object type. */
}
#endif
if (MATLAB_HDR.DataType!=miMATRIX)
{
clone_info=DestroyImageInfo(clone_info);
continue; /* skip other objects. */
}
MATLAB_HDR.unknown1 = ReadBlobXXXLong(image2);
MATLAB_HDR.unknown2 = ReadBlobXXXLong(image2);
MATLAB_HDR.unknown5 = ReadBlobXXXLong(image2);
MATLAB_HDR.StructureClass = MATLAB_HDR.unknown5 & 0xFF;
MATLAB_HDR.StructureFlag = (MATLAB_HDR.unknown5>>8) & 0xFF;
MATLAB_HDR.unknown3 = ReadBlobXXXLong(image2);
if(image!=image2)
MATLAB_HDR.unknown4 = ReadBlobXXXLong(image2); /* an extra field is present when reading from a decompressed stream */
MATLAB_HDR.unknown4 = ReadBlobXXXLong(image2);
MATLAB_HDR.DimFlag = ReadBlobXXXLong(image2);
MATLAB_HDR.SizeX = ReadBlobXXXLong(image2);
MATLAB_HDR.SizeY = ReadBlobXXXLong(image2);
switch(MATLAB_HDR.DimFlag)
{
case 8: z2=z=1; break; /* 2D matrix*/
case 12: z2=z = ReadBlobXXXLong(image2); /* 3D matrix RGB*/
(void) ReadBlobXXXLong(image2);
if(z!=3) ThrowReaderException(CoderError, "MultidimensionalMatricesAreNotSupported");
break;
case 16: z2=z = ReadBlobXXXLong(image2); /* 4D matrix animation */
if(z!=3 && z!=1)
ThrowReaderException(CoderError, "MultidimensionalMatricesAreNotSupported");
Frames = ReadBlobXXXLong(image2);
if (Frames == 0)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
break;
default:
if (clone_info != (ImageInfo *) NULL)
clone_info=DestroyImageInfo(clone_info);
if ((image != image2) && (image2 != (Image *) NULL))
image2=DestroyImage(image2);
ThrowReaderException(CoderError, "MultidimensionalMatricesAreNotSupported");
}
MATLAB_HDR.Flag1 = ReadBlobXXXShort(image2);
MATLAB_HDR.NameFlag = ReadBlobXXXShort(image2);
if (logging) (void)LogMagickEvent(CoderEvent,GetMagickModule(),
"MATLAB_HDR.StructureClass %d",MATLAB_HDR.StructureClass);
if (MATLAB_HDR.StructureClass != mxCHAR_CLASS &&
MATLAB_HDR.StructureClass != mxSINGLE_CLASS && /* float + complex float */
MATLAB_HDR.StructureClass != mxDOUBLE_CLASS && /* double + complex double */
MATLAB_HDR.StructureClass != mxINT8_CLASS &&
MATLAB_HDR.StructureClass != mxUINT8_CLASS && /* uint8 + uint8 3D */
MATLAB_HDR.StructureClass != mxINT16_CLASS &&
MATLAB_HDR.StructureClass != mxUINT16_CLASS && /* uint16 + uint16 3D */
MATLAB_HDR.StructureClass != mxINT32_CLASS &&
MATLAB_HDR.StructureClass != mxUINT32_CLASS && /* uint32 + uint32 3D */
MATLAB_HDR.StructureClass != mxINT64_CLASS &&
MATLAB_HDR.StructureClass != mxUINT64_CLASS) /* uint64 + uint64 3D */
ThrowReaderException(CoderError,"UnsupportedCellTypeInTheMatrix");
switch (MATLAB_HDR.NameFlag)
{
case 0:
size = ReadBlobXXXLong(image2); /* Object name string size */
size = 4 * (ssize_t) ((size + 3 + 1) / 4);
(void) SeekBlob(image2, size, SEEK_CUR);
break;
case 1:
case 2:
case 3:
case 4:
(void) ReadBlob(image2, 4, (unsigned char *) &size); /* Object name string */
break;
default:
goto MATLAB_KO;
}
CellType = ReadBlobXXXLong(image2); /* Additional object type */
if (logging)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"MATLAB_HDR.CellType: %.20g",(double) CellType);
(void) ReadBlob(image2, 4, (unsigned char *) &size); /* data size */
NEXT_FRAME:
switch (CellType)
{
case miINT8:
case miUINT8:
sample_size = 8;
if(MATLAB_HDR.StructureFlag & FLAG_LOGICAL)
image->depth = 1;
else
image->depth = 8; /* Byte type cell */
ldblk = (ssize_t) MATLAB_HDR.SizeX;
break;
case miINT16:
case miUINT16:
sample_size = 16;
image->depth = 16; /* Word type cell */
ldblk = (ssize_t) (2 * MATLAB_HDR.SizeX);
break;
case miINT32:
case miUINT32:
sample_size = 32;
image->depth = 32; /* Dword type cell */
ldblk = (ssize_t) (4 * MATLAB_HDR.SizeX);
break;
case miINT64:
case miUINT64:
sample_size = 64;
image->depth = 64; /* Qword type cell */
ldblk = (ssize_t) (8 * MATLAB_HDR.SizeX);
break;
case miSINGLE:
sample_size = 32;
image->depth = 32; /* float type cell */
(void) SetImageOption(clone_info,"quantum:format","floating-point");
if (MATLAB_HDR.StructureFlag & FLAG_COMPLEX)
{ /* complex float type cell */
}
ldblk = (ssize_t) (4 * MATLAB_HDR.SizeX);
break;
case miDOUBLE:
sample_size = 64;
image->depth = 64; /* double type cell */
(void) SetImageOption(clone_info,"quantum:format","floating-point");
DisableMSCWarning(4127)
if (sizeof(double) != 8)
RestoreMSCWarning
ThrowReaderException(CoderError, "IncompatibleSizeOfDouble");
if (MATLAB_HDR.StructureFlag & FLAG_COMPLEX)
{ /* complex double type cell */
}
ldblk = (ssize_t) (8 * MATLAB_HDR.SizeX);
break;
default:
if ((image != image2) && (image2 != (Image *) NULL))
image2=DestroyImage(image2);
if (clone_info)
clone_info=DestroyImageInfo(clone_info);
ThrowReaderException(CoderError, "UnsupportedCellTypeInTheMatrix");
}
(void) sample_size;
image->columns = MATLAB_HDR.SizeX;
image->rows = MATLAB_HDR.SizeY;
one=1;
image->colors = one << image->depth;
if (image->columns == 0 || image->rows == 0)
goto MATLAB_KO;
if((unsigned long)ldblk*MATLAB_HDR.SizeY > MATLAB_HDR.ObjectSize)
goto MATLAB_KO;
/* Image is gray when no complex flag is set and 2D Matrix */
if ((MATLAB_HDR.DimFlag == 8) &&
((MATLAB_HDR.StructureFlag & FLAG_COMPLEX) == 0))
{
image->type=GrayscaleType;
SetImageColorspace(image,GRAYColorspace,exception);
}
/*
If ping is true, then only set image size and colors without
reading any image data.
*/
if (image_info->ping)
{
size_t temp = image->columns;
image->columns = image->rows;
image->rows = temp;
goto done_reading; /* FIXME: bypasses the normal read and rotation path */
}
status=SetImageExtent(image,image->columns,image->rows,exception);
if (status == MagickFalse)
{
if ((image != image2) && (image2 != (Image *) NULL))
image2=DestroyImage(image2);
return(DestroyImageList(image));
}
quantum_info=AcquireQuantumInfo(clone_info,image);
if (quantum_info == (QuantumInfo *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
/* ----- Load raster data ----- */
BImgBuff = (unsigned char *) AcquireQuantumMemory((size_t) (ldblk),sizeof(double)); /* Ldblk was set in the check phase */
if (BImgBuff == NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
(void) ResetMagickMemory(BImgBuff,0,ldblk*sizeof(double));
MinVal = 0;
MaxVal = 0;
if (CellType==miDOUBLE || CellType==miSINGLE) /* Find Min and Max Values for floats */
{
CalcMinMax(image2, image_info->endian, MATLAB_HDR.SizeX, MATLAB_HDR.SizeY, CellType, ldblk, BImgBuff, &quantum_info->minimum, &quantum_info->maximum);
}
/* Main loop for reading all scanlines */
if(z==1) z=0; /* read grey scanlines */
/* else read color scanlines */
do
{
for (i = 0; i < (ssize_t) MATLAB_HDR.SizeY; i++)
{
q=GetAuthenticPixels(image,0,MATLAB_HDR.SizeY-i-1,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
if (logging) (void)LogMagickEvent(CoderEvent,GetMagickModule(),
" MAT set image pixels returns unexpected NULL on a row %u.", (unsigned)(MATLAB_HDR.SizeY-i-1));
goto done_reading; /* Skip image rotation, when cannot set image pixels */
}
if(ReadBlob(image2,ldblk,(unsigned char *)BImgBuff) != (ssize_t) ldblk)
{
if (logging) (void)LogMagickEvent(CoderEvent,GetMagickModule(),
" MAT cannot read scanrow %u from a file.", (unsigned)(MATLAB_HDR.SizeY-i-1));
goto ExitLoop;
}
if((CellType==miINT8 || CellType==miUINT8) && (MATLAB_HDR.StructureFlag & FLAG_LOGICAL))
{
FixLogical((unsigned char *)BImgBuff,ldblk);
if(ImportQuantumPixels(image,(CacheView *) NULL,quantum_info,z2qtype[z],BImgBuff,exception) <= 0)
{
ImportQuantumPixelsFailed:
if (logging) (void)LogMagickEvent(CoderEvent,GetMagickModule(),
" MAT failed to ImportQuantumPixels for a row %u", (unsigned)(MATLAB_HDR.SizeY-i-1));
break;
}
}
else
{
if(ImportQuantumPixels(image,(CacheView *) NULL,quantum_info,z2qtype[z],BImgBuff,exception) <= 0)
goto ImportQuantumPixelsFailed;
if (z<=1 && /* fix only during a last pass z==0 || z==1 */
(CellType==miINT8 || CellType==miINT16 || CellType==miINT32 || CellType==miINT64))
FixSignedValues(image,q,MATLAB_HDR.SizeX);
}
if (!SyncAuthenticPixels(image,exception))
{
if (logging) (void)LogMagickEvent(CoderEvent,GetMagickModule(),
" MAT failed to sync image pixels for a row %u", (unsigned)(MATLAB_HDR.SizeY-i-1));
goto ExitLoop;
}
}
} while(z-- >= 2);
ExitLoop:
/* Read complex part of numbers here */
if (MATLAB_HDR.StructureFlag & FLAG_COMPLEX)
{ /* Find Min and Max Values for complex parts of floats */
CellType = ReadBlobXXXLong(image2); /* Additional object type */
i = ReadBlobXXXLong(image2); /* size of a complex part - toss away*/
if (CellType==miDOUBLE || CellType==miSINGLE)
{
CalcMinMax(image2, image_info->endian, MATLAB_HDR.SizeX, MATLAB_HDR.SizeY, CellType, ldblk, BImgBuff, &MinVal, &MaxVal);
}
if (CellType==miDOUBLE)
for (i = 0; i < (ssize_t) MATLAB_HDR.SizeY; i++)
{
ReadBlobDoublesXXX(image2, ldblk, (double *)BImgBuff);
InsertComplexDoubleRow(image, (double *)BImgBuff, i, MinVal, MaxVal,
exception);
}
if (CellType==miSINGLE)
for (i = 0; i < (ssize_t) MATLAB_HDR.SizeY; i++)
{
ReadBlobFloatsXXX(image2, ldblk, (float *)BImgBuff);
InsertComplexFloatRow(image,(float *)BImgBuff,i,MinVal,MaxVal,
exception);
}
}
/* Image is gray when no complex flag is set and the matrix is 2D (checked again after the complex pass). */
if ((MATLAB_HDR.DimFlag == 8) &&
((MATLAB_HDR.StructureFlag & FLAG_COMPLEX) == 0))
image->type=GrayscaleType;
if (image->depth == 1)
image->type=BilevelType;
if(image2==image)
image2 = NULL; /* Remove shadow copy to an image before rotation. */
/* Rotate image. */
rotated_image = RotateImage(image, 90.0, exception);
if (rotated_image != (Image *) NULL)
{
/* Remove page offsets added by RotateImage */
rotated_image->page.x=0;
rotated_image->page.y=0;
blob = rotated_image->blob;
rotated_image->blob = image->blob;
rotated_image->colors = image->colors;
image->blob = blob;
AppendImageToList(&image,rotated_image);
DeleteImageFromList(&image);
}
done_reading:
if(image2!=NULL)
if(image2!=image)
{
DeleteImageFromList(&image2);
if(clone_info)
{
if(clone_info->file)
{
fclose(clone_info->file);
clone_info->file = NULL;
(void) remove_utf8(clone_info->filename);
}
}
}
/* Allocate next image structure. */
AcquireNextImage(image_info,image,exception);
if (image->next == (Image *) NULL) break;
image=SyncNextImageInList(image);
image->columns=image->rows=0;
image->colors=0;
/* row scan buffer is no longer needed */
RelinquishMagickMemory(BImgBuff);
BImgBuff = NULL;
if(--Frames>0)
{
z = z2;
if(image2==NULL) image2 = image;
goto NEXT_FRAME;
}
if ((image2!=NULL) && (image2!=image)) /* Does shadow temporary decompressed image exist? */
{
/* CloseBlob(image2); */
DeleteImageFromList(&image2);
if(clone_info)
{
if(clone_info->file)
{
fclose(clone_info->file);
clone_info->file = NULL;
(void) remove_utf8(clone_info->filename);
}
}
}
if (quantum_info != (QuantumInfo *) NULL)
quantum_info=DestroyQuantumInfo(quantum_info);
if (clone_info)
clone_info=DestroyImageInfo(clone_info);
}
RelinquishMagickMemory(BImgBuff);
if (quantum_info != (QuantumInfo *) NULL)
quantum_info=DestroyQuantumInfo(quantum_info);
END_OF_READING:
CloseBlob(image);
{
Image *p;
ssize_t scene=0;
/*
Rewind list, removing any empty images while rewinding.
*/
p=image;
image=NULL;
while (p != (Image *) NULL)
{
Image *tmp=p;
if ((p->rows == 0) || (p->columns == 0)) {
p=p->previous;
if (tmp == image2)
image2=(Image *) NULL;
DeleteImageFromList(&tmp);
} else {
image=p;
p=p->previous;
}
}
/*
Fix scene numbers
*/
for (p=image; p != (Image *) NULL; p=p->next)
p->scene=scene++;
}
if(clone_info != NULL) /* cleanup garbage file from compression */
{
if(clone_info->file)
{
fclose(clone_info->file);
clone_info->file = NULL;
(void) remove_utf8(clone_info->filename);
}
DestroyImageInfo(clone_info);
clone_info = NULL;
}
if (logging) (void)LogMagickEvent(CoderEvent,GetMagickModule(),"return");
if (image==NULL)
ThrowReaderException(CorruptImageError,"ImproperImageHeader")
else
if ((image != image2) && (image2 != (Image *) NULL))
image2=DestroyImage(image2);
return (image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e g i s t e r M A T I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Method RegisterMATImage adds attributes for the MAT image format to
% the list of supported formats. The attributes include the image format
% tag, a method to read and/or write the format, whether the format
% supports the saving of more than one frame to the same file or blob,
% whether the format supports native in-memory I/O, and a brief
% description of the format.
%
% The format of the RegisterMATImage method is:
%
% size_t RegisterMATImage(void)
%
*/
ModuleExport size_t RegisterMATImage(void)
{
MagickInfo
*entry;
entry=AcquireMagickInfo("MAT","MAT","MATLAB level 5 image format");
entry->decoder=(DecodeImageHandler *) ReadMATImage;
entry->encoder=(EncodeImageHandler *) WriteMATImage;
entry->flags^=CoderBlobSupportFlag;
entry->flags|=CoderDecoderSeekableStreamFlag;
(void) RegisterMagickInfo(entry);
return(MagickImageCoderSignature);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n r e g i s t e r M A T I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Method UnregisterMATImage removes format registrations made by the
% MAT module from the list of supported formats.
%
% The format of the UnregisterMATImage method is:
%
% UnregisterMATImage(void)
%
*/
ModuleExport void UnregisterMATImage(void)
{
(void) UnregisterMagickInfo("MAT");
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e M A T L A B I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Function WriteMATImage writes a MATLAB matrix to a file.
%
% The format of the WriteMATImage method is:
%
% MagickBooleanType WriteMATImage(const ImageInfo *image_info,
% Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image_info: Specifies a pointer to a ImageInfo structure.
%
% o image: A pointer to an Image structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType WriteMATImage(const ImageInfo *image_info,Image *image,
ExceptionInfo *exception)
{
char
MATLAB_HDR[0x80];
MagickBooleanType
status;
MagickOffsetType
scene;
struct tm
local_time;
time_t
current_time;
/*
Open output image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),"enter MAT");
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
if (status == MagickFalse)
return(MagickFalse);
image->depth=8;
current_time=time((time_t *) NULL);
#if defined(MAGICKCORE_HAVE_LOCALTIME_R)
(void) localtime_r(&current_time,&local_time);
#else
(void) memcpy(&local_time,localtime(&current_time),sizeof(local_time));
#endif
(void) memset(MATLAB_HDR,' ',MagickMin(sizeof(MATLAB_HDR),124));
FormatLocaleString(MATLAB_HDR,sizeof(MATLAB_HDR),
"MATLAB 5.0 MAT-file, Platform: %s, Created on: %s %s %2d %2d:%2d:%2d %d",
OsDesc,DayOfWTab[local_time.tm_wday],MonthsTab[local_time.tm_mon],
local_time.tm_mday,local_time.tm_hour,local_time.tm_min,
local_time.tm_sec,local_time.tm_year+1900);
MATLAB_HDR[0x7C]=0;
MATLAB_HDR[0x7D]=1;
MATLAB_HDR[0x7E]='I';
MATLAB_HDR[0x7F]='M';
(void) WriteBlob(image,sizeof(MATLAB_HDR),(unsigned char *) MATLAB_HDR);
scene=0;
do
{
char
padding;
MagickBooleanType
is_gray;
QuantumInfo
*quantum_info;
size_t
data_size;
unsigned char
*pixels;
unsigned int
z;
(void) TransformImageColorspace(image,sRGBColorspace,exception);
is_gray=SetImageGray(image,exception);
z=(is_gray != MagickFalse) ? 0 : 3;
/*
Store MAT header.
*/
data_size = image->rows * image->columns;
if (is_gray == MagickFalse)
data_size*=3;
padding=((unsigned char)(data_size-1) & 0x7) ^ 0x7;
(void) WriteBlobLSBLong(image,miMATRIX);
(void) WriteBlobLSBLong(image,(unsigned int) data_size+padding+
((is_gray != MagickFalse) ? 48 : 56));
(void) WriteBlobLSBLong(image,0x6); /* 0x88 */
(void) WriteBlobLSBLong(image,0x8); /* 0x8C */
(void) WriteBlobLSBLong(image,0x6); /* 0x90 */
(void) WriteBlobLSBLong(image,0);
(void) WriteBlobLSBLong(image,0x5); /* 0x98 */
(void) WriteBlobLSBLong(image,(is_gray != MagickFalse) ? 0x8 : 0xC); /* 0x9C - DimFlag */
(void) WriteBlobLSBLong(image,(unsigned int) image->rows); /* x: 0xA0 */
(void) WriteBlobLSBLong(image,(unsigned int) image->columns); /* y: 0xA4 */
if (is_gray == MagickFalse)
{
(void) WriteBlobLSBLong(image,3); /* z: 0xA8 */
(void) WriteBlobLSBLong(image,0);
}
(void) WriteBlobLSBShort(image,1); /* 0xB0 */
(void) WriteBlobLSBShort(image,1); /* 0xB2 */
(void) WriteBlobLSBLong(image,'M'); /* 0xB4 */
(void) WriteBlobLSBLong(image,0x2); /* 0xB8 */
(void) WriteBlobLSBLong(image,(unsigned int) data_size); /* 0xBC */
/*
Store image data.
*/
quantum_info=AcquireQuantumInfo(image_info,image);
if (quantum_info == (QuantumInfo *) NULL)
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
pixels=(unsigned char *) GetQuantumPixels(quantum_info);
do
{
const Quantum
*p;
ssize_t
y;
for (y=0; y < (ssize_t)image->columns; y++)
{
p=GetVirtualPixels(image,y,0,1,image->rows,exception);
if (p == (const Quantum *) NULL)
break;
(void) ExportQuantumPixels(image,(CacheView *) NULL,quantum_info,
z2qtype[z],pixels,exception);
(void) WriteBlob(image,image->rows,pixels);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
} while (z-- >= 2);
while (padding-- > 0)
(void) WriteBlobByte(image,0);
quantum_info=DestroyQuantumInfo(quantum_info);
if (GetNextImageInList(image) == (Image *) NULL)
break;
image=SyncNextImageInList(image);
status=SetImageProgress(image,SaveImagesTag,scene++,
GetImageListLength(image));
if (status == MagickFalse)
break;
} while (image_info->adjoin != MagickFalse);
(void) CloseBlob(image);
return(status);
}
| ./CrossVul/dataset_final_sorted/CWE-416/c/good_2638_0 |
crossvul-cpp_data_good_2838_0 | /* SCTP kernel implementation
* (C) Copyright IBM Corp. 2001, 2004
* Copyright (c) 1999-2000 Cisco, Inc.
* Copyright (c) 1999-2001 Motorola, Inc.
* Copyright (c) 2001-2003 Intel Corp.
* Copyright (c) 2001-2002 Nokia, Inc.
* Copyright (c) 2001 La Monte H.P. Yarroll
*
* This file is part of the SCTP kernel implementation
*
* These functions interface with the sockets layer to implement the
* SCTP Extensions for the Sockets API.
*
* Note that the descriptions from the specification are USER level
* functions--this file is the functions which populate the struct proto
* for SCTP which is the BOTTOM of the sockets interface.
*
* This SCTP implementation is free software;
* you can redistribute it and/or modify it under the terms of
* the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This SCTP implementation is distributed in the hope that it
* will be useful, but WITHOUT ANY WARRANTY; without even the implied
* ************************
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with GNU CC; see the file COPYING. If not, see
* <http://www.gnu.org/licenses/>.
*
* Please send any bug reports or fixes you make to the
* email address(es):
* lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
* Narasimha Budihal <narsi@refcode.org>
* Karl Knutson <karl@athena.chicago.il.us>
* Jon Grimm <jgrimm@us.ibm.com>
* Xingang Guo <xingang.guo@intel.com>
* Daisy Chang <daisyc@us.ibm.com>
* Sridhar Samudrala <samudrala@us.ibm.com>
* Inaky Perez-Gonzalez <inaky.gonzalez@intel.com>
* Ardelle Fan <ardelle.fan@intel.com>
* Ryan Layer <rmlayer@us.ibm.com>
* Anup Pemmaiah <pemmaiah@cc.usu.edu>
* Kevin Gao <kevin.gao@intel.com>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <crypto/hash.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/time.h>
#include <linux/sched/signal.h>
#include <linux/ip.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/compat.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/route.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/busy_poll.h>
#include <linux/socket.h> /* for sa_family_t */
#include <linux/export.h>
#include <net/sock.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
/* Forward declarations for internal helper functions. */
static int sctp_writeable(struct sock *sk);
static void sctp_wfree(struct sk_buff *skb);
static int sctp_wait_for_sndbuf(struct sctp_association *, long *timeo_p,
size_t msg_len);
static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p);
static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p);
static int sctp_wait_for_accept(struct sock *sk, long timeo);
static void sctp_wait_for_close(struct sock *sk, long timeo);
static void sctp_destruct_sock(struct sock *sk);
static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
union sctp_addr *addr, int len);
static int sctp_bindx_add(struct sock *, struct sockaddr *, int);
static int sctp_bindx_rem(struct sock *, struct sockaddr *, int);
static int sctp_send_asconf_add_ip(struct sock *, struct sockaddr *, int);
static int sctp_send_asconf_del_ip(struct sock *, struct sockaddr *, int);
static int sctp_send_asconf(struct sctp_association *asoc,
struct sctp_chunk *chunk);
static int sctp_do_bind(struct sock *, union sctp_addr *, int);
static int sctp_autobind(struct sock *sk);
static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
struct sctp_association *assoc,
enum sctp_socket_type type);
static unsigned long sctp_memory_pressure;
static atomic_long_t sctp_memory_allocated;
struct percpu_counter sctp_sockets_allocated;
static void sctp_enter_memory_pressure(struct sock *sk)
{
sctp_memory_pressure = 1;
}
/* Get the sndbuf space available at the time on the association. */
static inline int sctp_wspace(struct sctp_association *asoc)
{
int amt;
if (asoc->ep->sndbuf_policy)
amt = asoc->sndbuf_used;
else
amt = sk_wmem_alloc_get(asoc->base.sk);
if (amt >= asoc->base.sk->sk_sndbuf) {
if (asoc->base.sk->sk_userlocks & SOCK_SNDBUF_LOCK)
amt = 0;
else {
amt = sk_stream_wspace(asoc->base.sk);
if (amt < 0)
amt = 0;
}
} else {
amt = asoc->base.sk->sk_sndbuf - amt;
}
return amt;
}
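/* Worked example (illustrative): with sndbuf_policy set, sk_sndbuf = 64KB
 * and sndbuf_used = 16KB, the association still has 64KB - 16KB = 48KB of
 * send buffer space available.
 */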
/* Increment the used sndbuf space count of the corresponding association by
* the size of the outgoing data chunk.
* Also, set the skb destructor for sndbuf accounting later.
*
* Since it is always 1-1 between chunk and skb, and also a new skb is always
* allocated for chunk bundling in sctp_packet_transmit(), we can use the
* destructor in the data chunk skb for the purpose of the sndbuf space
* tracking.
*/
static inline void sctp_set_owner_w(struct sctp_chunk *chunk)
{
struct sctp_association *asoc = chunk->asoc;
struct sock *sk = asoc->base.sk;
/* The sndbuf space is tracked per association. */
sctp_association_hold(asoc);
skb_set_owner_w(chunk->skb, sk);
chunk->skb->destructor = sctp_wfree;
/* Save the chunk pointer in skb for sctp_wfree to use later. */
skb_shinfo(chunk->skb)->destructor_arg = chunk;
asoc->sndbuf_used += SCTP_DATA_SNDSIZE(chunk) +
sizeof(struct sk_buff) +
sizeof(struct sctp_chunk);
refcount_add(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc);
sk->sk_wmem_queued += chunk->skb->truesize;
sk_mem_charge(sk, chunk->skb->truesize);
}
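/* A sketch of the inverse accounting a destructor must perform when the skb
 * is freed (illustrative only; the real sctp_wfree() defined later in this
 * file also calls sock_wfree() and wakes up blocked writers):
 */
#if 0
static void sctp_wfree_sketch(struct sk_buff *skb)
{
	struct sctp_chunk *chunk = skb_shinfo(skb)->destructor_arg;
	struct sctp_association *asoc = chunk->asoc;
	struct sock *sk = asoc->base.sk;

	asoc->sndbuf_used -= SCTP_DATA_SNDSIZE(chunk) +
			     sizeof(struct sk_buff) +
			     sizeof(struct sctp_chunk);
	WARN_ON(refcount_sub_and_test(sizeof(struct sctp_chunk),
				      &sk->sk_wmem_alloc));
	sk->sk_wmem_queued -= skb->truesize;
	sk_mem_uncharge(sk, skb->truesize);
	sctp_association_put(asoc);
}
#endif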
/* Verify that this is a valid address. */
static inline int sctp_verify_addr(struct sock *sk, union sctp_addr *addr,
int len)
{
struct sctp_af *af;
/* Verify basic sockaddr. */
af = sctp_sockaddr_af(sctp_sk(sk), addr, len);
if (!af)
return -EINVAL;
/* Is this a valid SCTP address? */
if (!af->addr_valid(addr, sctp_sk(sk), NULL))
return -EINVAL;
if (!sctp_sk(sk)->pf->send_verify(sctp_sk(sk), (addr)))
return -EINVAL;
return 0;
}
/* Look up the association by its id. If this is not a UDP-style
* socket, the ID field is always ignored.
*/
struct sctp_association *sctp_id2assoc(struct sock *sk, sctp_assoc_t id)
{
struct sctp_association *asoc = NULL;
/* If this is not a UDP-style socket, assoc id should be ignored. */
if (!sctp_style(sk, UDP)) {
/* Return NULL if the socket state is not ESTABLISHED. It
* could be a TCP-style listening socket or a socket which
* hasn't yet called connect() to establish an association.
*/
if (!sctp_sstate(sk, ESTABLISHED) && !sctp_sstate(sk, CLOSING))
return NULL;
/* Get the first and the only association from the list. */
if (!list_empty(&sctp_sk(sk)->ep->asocs))
asoc = list_entry(sctp_sk(sk)->ep->asocs.next,
struct sctp_association, asocs);
return asoc;
}
/* Otherwise this is a UDP-style socket. */
if (!id || (id == (sctp_assoc_t)-1))
return NULL;
spin_lock_bh(&sctp_assocs_id_lock);
asoc = (struct sctp_association *)idr_find(&sctp_assocs_id, (int)id);
spin_unlock_bh(&sctp_assocs_id_lock);
if (!asoc || (asoc->base.sk != sk) || asoc->base.dead)
return NULL;
return asoc;
}
/* Look up the transport from an address and an assoc id. If both address and
* id are specified, the associations matching the address and the id should be
* the same.
*/
static struct sctp_transport *sctp_addr_id2transport(struct sock *sk,
struct sockaddr_storage *addr,
sctp_assoc_t id)
{
struct sctp_association *addr_asoc = NULL, *id_asoc = NULL;
struct sctp_af *af = sctp_get_af_specific(addr->ss_family);
union sctp_addr *laddr = (union sctp_addr *)addr;
struct sctp_transport *transport;
if (!af || sctp_verify_addr(sk, laddr, af->sockaddr_len))
return NULL;
addr_asoc = sctp_endpoint_lookup_assoc(sctp_sk(sk)->ep,
laddr,
&transport);
if (!addr_asoc)
return NULL;
id_asoc = sctp_id2assoc(sk, id);
if (id_asoc && (id_asoc != addr_asoc))
return NULL;
sctp_get_pf_specific(sk->sk_family)->addr_to_user(sctp_sk(sk),
(union sctp_addr *)addr);
return transport;
}
/* API 3.1.2 bind() - UDP Style Syntax
* The syntax of bind() is,
*
* ret = bind(int sd, struct sockaddr *addr, int addrlen);
*
* sd - the socket descriptor returned by socket().
* addr - the address structure (struct sockaddr_in or struct
* sockaddr_in6 [RFC 2553]),
* addr_len - the size of the address structure.
*/
static int sctp_bind(struct sock *sk, struct sockaddr *addr, int addr_len)
{
int retval = 0;
lock_sock(sk);
pr_debug("%s: sk:%p, addr:%p, addr_len:%d\n", __func__, sk,
addr, addr_len);
/* Disallow binding twice. */
if (!sctp_sk(sk)->ep->base.bind_addr.port)
retval = sctp_do_bind(sk, (union sctp_addr *)addr,
addr_len);
else
retval = -EINVAL;
release_sock(sk);
return retval;
}
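/* Userspace usage sketch for the syntax above (illustrative, not kernel
 * code; the port number is an arbitrary example):
 *
 *	int sd = socket(AF_INET, SOCK_SEQPACKET, IPPROTO_SCTP);
 *	struct sockaddr_in a = {
 *		.sin_family = AF_INET,
 *		.sin_port   = htons(5000),
 *		.sin_addr   = { .s_addr = htonl(INADDR_ANY) },
 *	};
 *	if (bind(sd, (struct sockaddr *)&a, sizeof(a)) < 0)
 *		perror("bind");
 */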
static long sctp_get_port_local(struct sock *, union sctp_addr *);
/* Verify this is a valid sockaddr. */
static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
union sctp_addr *addr, int len)
{
struct sctp_af *af;
/* Check minimum size. */
if (len < sizeof (struct sockaddr))
return NULL;
/* V4 mapped address are really of AF_INET family */
if (addr->sa.sa_family == AF_INET6 &&
ipv6_addr_v4mapped(&addr->v6.sin6_addr)) {
if (!opt->pf->af_supported(AF_INET, opt))
return NULL;
} else {
/* Does this PF support this AF? */
if (!opt->pf->af_supported(addr->sa.sa_family, opt))
return NULL;
}
/* If we get this far, af is valid. */
af = sctp_get_af_specific(addr->sa.sa_family);
if (len < af->sockaddr_len)
return NULL;
return af;
}
/* Bind a local address either to an endpoint or to an association. */
static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
{
struct net *net = sock_net(sk);
struct sctp_sock *sp = sctp_sk(sk);
struct sctp_endpoint *ep = sp->ep;
struct sctp_bind_addr *bp = &ep->base.bind_addr;
struct sctp_af *af;
unsigned short snum;
int ret = 0;
/* Common sockaddr verification. */
af = sctp_sockaddr_af(sp, addr, len);
if (!af) {
pr_debug("%s: sk:%p, newaddr:%p, len:%d EINVAL\n",
__func__, sk, addr, len);
return -EINVAL;
}
snum = ntohs(addr->v4.sin_port);
pr_debug("%s: sk:%p, new addr:%pISc, port:%d, new port:%d, len:%d\n",
__func__, sk, &addr->sa, bp->port, snum, len);
/* PF specific bind() address verification. */
if (!sp->pf->bind_verify(sp, addr))
return -EADDRNOTAVAIL;
/* We must either be unbound, or bind to the same port.
* It's OK to allow 0 ports if we are already bound.
* We'll just inherit an already bound port in this case.
*/
if (bp->port) {
if (!snum)
snum = bp->port;
else if (snum != bp->port) {
pr_debug("%s: new port %d doesn't match existing port "
"%d\n", __func__, snum, bp->port);
return -EINVAL;
}
}
if (snum && snum < inet_prot_sock(net) &&
!ns_capable(net->user_ns, CAP_NET_BIND_SERVICE))
return -EACCES;
/* See if the address matches any of the addresses we may have
* already bound before checking against other endpoints.
*/
if (sctp_bind_addr_match(bp, addr, sp))
return -EINVAL;
/* Make sure we are allowed to bind here.
* The function sctp_get_port_local() does duplicate address
* detection.
*/
addr->v4.sin_port = htons(snum);
if ((ret = sctp_get_port_local(sk, addr))) {
return -EADDRINUSE;
}
/* Refresh ephemeral port. */
if (!bp->port)
bp->port = inet_sk(sk)->inet_num;
/* Add the address to the bind address list.
* Use GFP_ATOMIC since BHs will be disabled.
*/
ret = sctp_add_bind_addr(bp, addr, af->sockaddr_len,
SCTP_ADDR_SRC, GFP_ATOMIC);
/* Copy back into socket for getsockname() use. */
if (!ret) {
inet_sk(sk)->inet_sport = htons(inet_sk(sk)->inet_num);
sp->pf->to_sk_saddr(addr, sk);
}
return ret;
}
/* ADDIP Section 4.1.1 Congestion Control of ASCONF Chunks
*
* R1) One and only one ASCONF Chunk MAY be in transit and unacknowledged
* at any one time. If a sender, after sending an ASCONF chunk, decides
* it needs to transfer another ASCONF Chunk, it MUST wait until the
* ASCONF-ACK Chunk returns from the previous ASCONF Chunk before sending a
* subsequent ASCONF. Note this restriction binds each side, so at any
* time two ASCONF may be in-transit on any given association (one sent
* from each endpoint).
*/
static int sctp_send_asconf(struct sctp_association *asoc,
struct sctp_chunk *chunk)
{
struct net *net = sock_net(asoc->base.sk);
int retval = 0;
/* If there is an outstanding ASCONF chunk, queue it for later
* transmission.
*/
if (asoc->addip_last_asconf) {
list_add_tail(&chunk->list, &asoc->addip_chunk_list);
goto out;
}
/* Hold the chunk until an ASCONF_ACK is received. */
sctp_chunk_hold(chunk);
retval = sctp_primitive_ASCONF(net, asoc, chunk);
if (retval)
sctp_chunk_free(chunk);
else
asoc->addip_last_asconf = chunk;
out:
return retval;
}
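/* Example of rule R1 above (illustrative): if a second address change is
 * requested while the ASCONF for the first is still unacknowledged, the new
 * chunk is appended to asoc->addip_chunk_list and is only transmitted once
 * the outstanding ASCONF-ACK arrives.
 */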
/* Add a list of addresses as bind addresses to local endpoint or
* association.
*
* Basically run through each address specified in the addrs/addrcnt
* array/length pair, determine if it is IPv6 or IPv4 and call
* sctp_do_bind() on it.
*
* If any of them fails, then the operation will be reversed and the
* ones that were added will be removed.
*
* Only sctp_setsockopt_bindx() is supposed to call this function.
*/
static int sctp_bindx_add(struct sock *sk, struct sockaddr *addrs, int addrcnt)
{
int cnt;
int retval = 0;
void *addr_buf;
struct sockaddr *sa_addr;
struct sctp_af *af;
pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n", __func__, sk,
addrs, addrcnt);
addr_buf = addrs;
for (cnt = 0; cnt < addrcnt; cnt++) {
/* The list may contain either IPv4 or IPv6 address;
* determine the address length for walking thru the list.
*/
sa_addr = addr_buf;
af = sctp_get_af_specific(sa_addr->sa_family);
if (!af) {
retval = -EINVAL;
goto err_bindx_add;
}
retval = sctp_do_bind(sk, (union sctp_addr *)sa_addr,
af->sockaddr_len);
addr_buf += af->sockaddr_len;
err_bindx_add:
if (retval < 0) {
/* Failed.  Clean up the ones that have been added */
if (cnt > 0)
sctp_bindx_rem(sk, addrs, cnt);
return retval;
}
}
return retval;
}
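/* Userspace sketch of the packed address array this expects (illustrative;
 * SCTP_BINDX_ADD_ADDR is the lksctp flag for the add operation).  Addresses
 * of mixed families sit back to back, each occupying its own sockaddr size:
 *
 *	unsigned char buf[sizeof(struct sockaddr_in) +
 *			  sizeof(struct sockaddr_in6)];
 *	struct sockaddr_in  *a4 = (struct sockaddr_in *)buf;
 *	struct sockaddr_in6 *a6 = (struct sockaddr_in6 *)
 *				  (buf + sizeof(struct sockaddr_in));
 *	... fill *a4 and *a6, then:
 *	sctp_bindx(sd, (struct sockaddr *)buf, 2, SCTP_BINDX_ADD_ADDR);
 */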
/* Send an ASCONF chunk with Add IP address parameters to all the peers of the
* associations that are part of the endpoint indicating that a list of local
* addresses are added to the endpoint.
*
* If any of the addresses is already in the bind address list of the
* association, we do not send the chunk for that association. But it will not
* affect other associations.
*
* Only sctp_setsockopt_bindx() is supposed to call this function.
*/
static int sctp_send_asconf_add_ip(struct sock *sk,
struct sockaddr *addrs,
int addrcnt)
{
struct net *net = sock_net(sk);
struct sctp_sock *sp;
struct sctp_endpoint *ep;
struct sctp_association *asoc;
struct sctp_bind_addr *bp;
struct sctp_chunk *chunk;
struct sctp_sockaddr_entry *laddr;
union sctp_addr *addr;
union sctp_addr saveaddr;
void *addr_buf;
struct sctp_af *af;
struct list_head *p;
int i;
int retval = 0;
if (!net->sctp.addip_enable)
return retval;
sp = sctp_sk(sk);
ep = sp->ep;
pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n",
__func__, sk, addrs, addrcnt);
list_for_each_entry(asoc, &ep->asocs, asocs) {
if (!asoc->peer.asconf_capable)
continue;
if (asoc->peer.addip_disabled_mask & SCTP_PARAM_ADD_IP)
continue;
if (!sctp_state(asoc, ESTABLISHED))
continue;
/* Check if any address in the packed array of addresses is
* in the bind address list of the association. If so,
* do not send the asconf chunk to its peer, but continue with
* other associations.
*/
addr_buf = addrs;
for (i = 0; i < addrcnt; i++) {
addr = addr_buf;
af = sctp_get_af_specific(addr->v4.sin_family);
if (!af) {
retval = -EINVAL;
goto out;
}
if (sctp_assoc_lookup_laddr(asoc, addr))
break;
addr_buf += af->sockaddr_len;
}
if (i < addrcnt)
continue;
/* Use the first valid address in bind addr list of
* association as Address Parameter of ASCONF CHUNK.
*/
bp = &asoc->base.bind_addr;
p = bp->address_list.next;
laddr = list_entry(p, struct sctp_sockaddr_entry, list);
chunk = sctp_make_asconf_update_ip(asoc, &laddr->a, addrs,
addrcnt, SCTP_PARAM_ADD_IP);
if (!chunk) {
retval = -ENOMEM;
goto out;
}
/* Add the new addresses to the bind address list with
* use_as_src set to 0.
*/
addr_buf = addrs;
for (i = 0; i < addrcnt; i++) {
addr = addr_buf;
af = sctp_get_af_specific(addr->v4.sin_family);
memcpy(&saveaddr, addr, af->sockaddr_len);
retval = sctp_add_bind_addr(bp, &saveaddr,
sizeof(saveaddr),
SCTP_ADDR_NEW, GFP_ATOMIC);
addr_buf += af->sockaddr_len;
}
if (asoc->src_out_of_asoc_ok) {
struct sctp_transport *trans;
list_for_each_entry(trans,
&asoc->peer.transport_addr_list, transports) {
/* Clear the source and route cache */
sctp_transport_dst_release(trans);
trans->cwnd = min(4*asoc->pathmtu, max_t(__u32,
2*asoc->pathmtu, 4380));
trans->ssthresh = asoc->peer.i.a_rwnd;
trans->rto = asoc->rto_initial;
sctp_max_rto(asoc, trans);
trans->rtt = trans->srtt = trans->rttvar = 0;
sctp_transport_route(trans, NULL,
sctp_sk(asoc->base.sk));
}
}
retval = sctp_send_asconf(asoc, chunk);
}
out:
return retval;
}
/* Remove a list of addresses from bind addresses list. Do not remove the
* last address.
*
* Basically run through each address specified in the addrs/addrcnt
* array/length pair, determine if it is IPv6 or IPv4 and call
* sctp_del_bind() on it.
*
* If any of them fails, then the operation will be reversed and the
* ones that were removed will be added back.
*
* At least one address has to be left; if only one address is
* available, the operation will return -EBUSY.
*
* Only sctp_setsockopt_bindx() is supposed to call this function.
*/
static int sctp_bindx_rem(struct sock *sk, struct sockaddr *addrs, int addrcnt)
{
struct sctp_sock *sp = sctp_sk(sk);
struct sctp_endpoint *ep = sp->ep;
int cnt;
struct sctp_bind_addr *bp = &ep->base.bind_addr;
int retval = 0;
void *addr_buf;
union sctp_addr *sa_addr;
struct sctp_af *af;
pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n",
__func__, sk, addrs, addrcnt);
addr_buf = addrs;
for (cnt = 0; cnt < addrcnt; cnt++) {
/* If the bind address list is empty or if there is only one
* bind address, there is nothing more to be removed (we need
* at least one address here).
*/
if (list_empty(&bp->address_list) ||
(sctp_list_single_entry(&bp->address_list))) {
retval = -EBUSY;
goto err_bindx_rem;
}
sa_addr = addr_buf;
af = sctp_get_af_specific(sa_addr->sa.sa_family);
if (!af) {
retval = -EINVAL;
goto err_bindx_rem;
}
if (!af->addr_valid(sa_addr, sp, NULL)) {
retval = -EADDRNOTAVAIL;
goto err_bindx_rem;
}
if (sa_addr->v4.sin_port &&
sa_addr->v4.sin_port != htons(bp->port)) {
retval = -EINVAL;
goto err_bindx_rem;
}
if (!sa_addr->v4.sin_port)
sa_addr->v4.sin_port = htons(bp->port);
/* FIXME - There is probably a need to check if sk->sk_saddr and
* sk->sk_rcv_addr are currently set to one of the addresses to
* be removed. This is something which needs to be looked into
* when we are fixing the outstanding issues with multi-homing
* socket routing and failover schemes. Refer to comments in
* sctp_do_bind(). -daisy
*/
retval = sctp_del_bind_addr(bp, sa_addr);
addr_buf += af->sockaddr_len;
err_bindx_rem:
if (retval < 0) {
/* Failed.  Add back the ones that have been removed */
if (cnt > 0)
sctp_bindx_add(sk, addrs, cnt);
return retval;
}
}
return retval;
}
/* Send an ASCONF chunk with Delete IP address parameters to all the peers of
* the associations that are part of the endpoint indicating that a list of
* local addresses are removed from the endpoint.
*
* If any of the addresses is already in the bind address list of the
* association, we do not send the chunk for that association. But it will not
* affect other associations.
*
* Only sctp_setsockopt_bindx() is supposed to call this function.
*/
static int sctp_send_asconf_del_ip(struct sock *sk,
struct sockaddr *addrs,
int addrcnt)
{
struct net *net = sock_net(sk);
struct sctp_sock *sp;
struct sctp_endpoint *ep;
struct sctp_association *asoc;
struct sctp_transport *transport;
struct sctp_bind_addr *bp;
struct sctp_chunk *chunk;
union sctp_addr *laddr;
void *addr_buf;
struct sctp_af *af;
struct sctp_sockaddr_entry *saddr;
int i;
int retval = 0;
int stored = 0;
chunk = NULL;
if (!net->sctp.addip_enable)
return retval;
sp = sctp_sk(sk);
ep = sp->ep;
pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n",
__func__, sk, addrs, addrcnt);
list_for_each_entry(asoc, &ep->asocs, asocs) {
if (!asoc->peer.asconf_capable)
continue;
if (asoc->peer.addip_disabled_mask & SCTP_PARAM_DEL_IP)
continue;
if (!sctp_state(asoc, ESTABLISHED))
continue;
/* Check if any address in the packed array of addresses is
* not present in the bind address list of the association.
* If so, do not send the asconf chunk to its peer, but
* continue with other associations.
*/
addr_buf = addrs;
for (i = 0; i < addrcnt; i++) {
laddr = addr_buf;
af = sctp_get_af_specific(laddr->v4.sin_family);
if (!af) {
retval = -EINVAL;
goto out;
}
if (!sctp_assoc_lookup_laddr(asoc, laddr))
break;
addr_buf += af->sockaddr_len;
}
if (i < addrcnt)
continue;
/* Find one address in the association's bind address list
* that is not in the packed array of addresses. This is to
* make sure that we do not delete all the addresses in the
* association.
*/
bp = &asoc->base.bind_addr;
laddr = sctp_find_unmatch_addr(bp, (union sctp_addr *)addrs,
addrcnt, sp);
if ((laddr == NULL) && (addrcnt == 1)) {
if (asoc->asconf_addr_del_pending)
continue;
asoc->asconf_addr_del_pending =
kzalloc(sizeof(union sctp_addr), GFP_ATOMIC);
if (asoc->asconf_addr_del_pending == NULL) {
retval = -ENOMEM;
goto out;
}
asoc->asconf_addr_del_pending->sa.sa_family =
addrs->sa_family;
asoc->asconf_addr_del_pending->v4.sin_port =
htons(bp->port);
if (addrs->sa_family == AF_INET) {
struct sockaddr_in *sin;
sin = (struct sockaddr_in *)addrs;
asoc->asconf_addr_del_pending->v4.sin_addr.s_addr = sin->sin_addr.s_addr;
} else if (addrs->sa_family == AF_INET6) {
struct sockaddr_in6 *sin6;
sin6 = (struct sockaddr_in6 *)addrs;
asoc->asconf_addr_del_pending->v6.sin6_addr = sin6->sin6_addr;
}
pr_debug("%s: keep the last address asoc:%p %pISc at %p\n",
__func__, asoc, &asoc->asconf_addr_del_pending->sa,
asoc->asconf_addr_del_pending);
asoc->src_out_of_asoc_ok = 1;
stored = 1;
goto skip_mkasconf;
}
if (laddr == NULL)
return -EINVAL;
/* We do not need RCU protection throughout this loop
* because this is done under a socket lock from the
* setsockopt call.
*/
chunk = sctp_make_asconf_update_ip(asoc, laddr, addrs, addrcnt,
SCTP_PARAM_DEL_IP);
if (!chunk) {
retval = -ENOMEM;
goto out;
}
skip_mkasconf:
/* Reset use_as_src flag for the addresses in the bind address
* list that are to be deleted.
*/
addr_buf = addrs;
for (i = 0; i < addrcnt; i++) {
laddr = addr_buf;
af = sctp_get_af_specific(laddr->v4.sin_family);
list_for_each_entry(saddr, &bp->address_list, list) {
if (sctp_cmp_addr_exact(&saddr->a, laddr))
saddr->state = SCTP_ADDR_DEL;
}
addr_buf += af->sockaddr_len;
}
/* Update the route and saddr entries for all the transports
* as some of the addresses in the bind address list are
* about to be deleted and cannot be used as source addresses.
*/
list_for_each_entry(transport, &asoc->peer.transport_addr_list,
transports) {
sctp_transport_dst_release(transport);
sctp_transport_route(transport, NULL,
sctp_sk(asoc->base.sk));
}
if (stored)
/* We don't need to transmit ASCONF */
continue;
retval = sctp_send_asconf(asoc, chunk);
}
out:
return retval;
}
/* set addr events to assocs in the endpoint. ep and addr_wq must be locked */
int sctp_asconf_mgmt(struct sctp_sock *sp, struct sctp_sockaddr_entry *addrw)
{
struct sock *sk = sctp_opt2sk(sp);
union sctp_addr *addr;
struct sctp_af *af;
/* It is safe to write port space in caller. */
addr = &addrw->a;
addr->v4.sin_port = htons(sp->ep->base.bind_addr.port);
af = sctp_get_af_specific(addr->sa.sa_family);
if (!af)
return -EINVAL;
if (sctp_verify_addr(sk, addr, af->sockaddr_len))
return -EINVAL;
if (addrw->state == SCTP_ADDR_NEW)
return sctp_send_asconf_add_ip(sk, (struct sockaddr *)addr, 1);
else
return sctp_send_asconf_del_ip(sk, (struct sockaddr *)addr, 1);
}
/* Helper for tunneling sctp_bindx() requests through sctp_setsockopt()
*
* API 8.1
* int sctp_bindx(int sd, struct sockaddr *addrs, int addrcnt,
* int flags);
*
* If sd is an IPv4 socket, the addresses passed must be IPv4 addresses.
* If the sd is an IPv6 socket, the addresses passed can either be IPv4
* or IPv6 addresses.
*
* A single address may be specified as INADDR_ANY or IN6ADDR_ANY, see
* Section 3.1.2 for this usage.
*
* addrs is a pointer to an array of one or more socket addresses. Each
* address is contained in its appropriate structure (i.e. struct
 * sockaddr_in or struct sockaddr_in6); the family of the address type
* must be used to distinguish the address length (note that this
* representation is termed a "packed array" of addresses). The caller
* specifies the number of addresses in the array with addrcnt.
*
* On success, sctp_bindx() returns 0. On failure, sctp_bindx() returns
* -1, and sets errno to the appropriate error code.
*
* For SCTP, the port given in each socket address must be the same, or
* sctp_bindx() will fail, setting errno to EINVAL.
*
* The flags parameter is formed from the bitwise OR of zero or more of
* the following currently defined flags:
*
* SCTP_BINDX_ADD_ADDR
*
* SCTP_BINDX_REM_ADDR
*
* SCTP_BINDX_ADD_ADDR directs SCTP to add the given addresses to the
* association, and SCTP_BINDX_REM_ADDR directs SCTP to remove the given
* addresses from the association. The two flags are mutually exclusive;
* if both are given, sctp_bindx() will fail with EINVAL. A caller may
* not remove all addresses from an association; sctp_bindx() will
* reject such an attempt with EINVAL.
*
* An application can use sctp_bindx(SCTP_BINDX_ADD_ADDR) to associate
 * additional addresses with an endpoint after calling bind(), or use
 * sctp_bindx(SCTP_BINDX_REM_ADDR) to remove some addresses a listening
 * socket is associated with, so that no newly accepted association will
 * be associated with those addresses. If the endpoint supports dynamic
 * address reconfiguration, an SCTP_BINDX_REM_ADDR or SCTP_BINDX_ADD_ADDR
 * may cause the endpoint to send the appropriate message to the peer to
 * change the peer's address lists.
*
* Adding and removing addresses from a connected association is
* optional functionality. Implementations that do not support this
* functionality should return EOPNOTSUPP.
*
 * Basically do nothing but copy the addresses from user to kernel
 * land and invoke either sctp_bindx_add() or sctp_bindx_rem() on the sk.
* This is used for tunneling the sctp_bindx() request through sctp_setsockopt()
* from userspace.
*
* We don't use copy_from_user() for optimization: we first do the
* sanity checks (buffer size -fast- and access check-healthy
* pointer); if all of those succeed, then we can alloc the memory
* (expensive operation) needed to copy the data to kernel. Then we do
* the copying without checking the user space area
* (__copy_from_user()).
*
* On exit there is no need to do sockfd_put(), sys_setsockopt() does
* it.
*
* sk The sk of the socket
* addrs The pointer to the addresses in user land
* addrssize Size of the addrs buffer
* op Operation to perform (add or remove, see the flags of
* sctp_bindx)
*
* Returns 0 if ok, <0 errno code on error.
*/
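/* Illustrative user-space sketch (not part of the kernel): one way the
 * "packed array" described above might be built and handed to the
 * sctp_bindx() wrapper from lksctp-tools. Assumes <netinet/sctp.h> and
 * an already created SCTP socket 'sd' (hypothetical); error handling
 * elided. Both entries are sockaddr_in, so the array is already packed.
 *
 *    #include <string.h>
 *    #include <arpa/inet.h>
 *    #include <netinet/sctp.h>
 *
 *    struct sockaddr_in addrs[2];
 *    memset(addrs, 0, sizeof(addrs));
 *    addrs[0].sin_family = AF_INET;
 *    addrs[0].sin_port = htons(5000);   // same port in every entry
 *    inet_pton(AF_INET, "192.0.2.1", &addrs[0].sin_addr);
 *    addrs[1] = addrs[0];
 *    inet_pton(AF_INET, "192.0.2.2", &addrs[1].sin_addr);
 *
 *    sctp_bindx(sd, (struct sockaddr *)addrs, 2, SCTP_BINDX_ADD_ADDR);
 */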
static int sctp_setsockopt_bindx(struct sock *sk,
struct sockaddr __user *addrs,
int addrs_size, int op)
{
struct sockaddr *kaddrs;
int err;
int addrcnt = 0;
int walk_size = 0;
struct sockaddr *sa_addr;
void *addr_buf;
struct sctp_af *af;
pr_debug("%s: sk:%p addrs:%p addrs_size:%d opt:%d\n",
__func__, sk, addrs, addrs_size, op);
if (unlikely(addrs_size <= 0))
return -EINVAL;
/* Check the user passed a healthy pointer. */
if (unlikely(!access_ok(VERIFY_READ, addrs, addrs_size)))
return -EFAULT;
/* Alloc space for the address array in kernel memory. */
kaddrs = kmalloc(addrs_size, GFP_USER | __GFP_NOWARN);
if (unlikely(!kaddrs))
return -ENOMEM;
if (__copy_from_user(kaddrs, addrs, addrs_size)) {
kfree(kaddrs);
return -EFAULT;
}
/* Walk through the addrs buffer and count the number of addresses. */
addr_buf = kaddrs;
while (walk_size < addrs_size) {
if (walk_size + sizeof(sa_family_t) > addrs_size) {
kfree(kaddrs);
return -EINVAL;
}
sa_addr = addr_buf;
af = sctp_get_af_specific(sa_addr->sa_family);
/* If the address family is not supported or if this address
* causes the address buffer to overflow return EINVAL.
*/
if (!af || (walk_size + af->sockaddr_len) > addrs_size) {
kfree(kaddrs);
return -EINVAL;
}
addrcnt++;
addr_buf += af->sockaddr_len;
walk_size += af->sockaddr_len;
}
/* Do the work. */
switch (op) {
case SCTP_BINDX_ADD_ADDR:
err = sctp_bindx_add(sk, kaddrs, addrcnt);
if (err)
goto out;
err = sctp_send_asconf_add_ip(sk, kaddrs, addrcnt);
break;
case SCTP_BINDX_REM_ADDR:
err = sctp_bindx_rem(sk, kaddrs, addrcnt);
if (err)
goto out;
err = sctp_send_asconf_del_ip(sk, kaddrs, addrcnt);
break;
default:
err = -EINVAL;
break;
}
out:
kfree(kaddrs);
return err;
}
/* __sctp_connect(struct sock* sk, struct sockaddr *kaddrs, int addrs_size)
*
* Common routine for handling connect() and sctp_connectx().
* Connect will come in with just a single address.
*/
static int __sctp_connect(struct sock *sk,
struct sockaddr *kaddrs,
int addrs_size,
sctp_assoc_t *assoc_id)
{
struct net *net = sock_net(sk);
struct sctp_sock *sp;
struct sctp_endpoint *ep;
struct sctp_association *asoc = NULL;
struct sctp_association *asoc2;
struct sctp_transport *transport;
union sctp_addr to;
enum sctp_scope scope;
long timeo;
int err = 0;
int addrcnt = 0;
int walk_size = 0;
union sctp_addr *sa_addr = NULL;
void *addr_buf;
unsigned short port;
unsigned int f_flags = 0;
sp = sctp_sk(sk);
ep = sp->ep;
/* connect() cannot be done on a socket that is already in ESTABLISHED
* state - UDP-style peeled off socket or a TCP-style socket that
* is already connected.
* It cannot be done even on a TCP-style listening socket.
*/
if (sctp_sstate(sk, ESTABLISHED) || sctp_sstate(sk, CLOSING) ||
(sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))) {
err = -EISCONN;
goto out_free;
}
/* Walk through the addrs buffer and count the number of addresses. */
addr_buf = kaddrs;
while (walk_size < addrs_size) {
struct sctp_af *af;
if (walk_size + sizeof(sa_family_t) > addrs_size) {
err = -EINVAL;
goto out_free;
}
sa_addr = addr_buf;
af = sctp_get_af_specific(sa_addr->sa.sa_family);
/* If the address family is not supported or if this address
* causes the address buffer to overflow return EINVAL.
*/
if (!af || (walk_size + af->sockaddr_len) > addrs_size) {
err = -EINVAL;
goto out_free;
}
port = ntohs(sa_addr->v4.sin_port);
/* Save current address so we can work with it */
memcpy(&to, sa_addr, af->sockaddr_len);
err = sctp_verify_addr(sk, &to, af->sockaddr_len);
if (err)
goto out_free;
/* Make sure the destination port is correctly set
* in all addresses.
*/
if (asoc && asoc->peer.port && asoc->peer.port != port) {
err = -EINVAL;
goto out_free;
}
/* Check if there already is a matching association on the
* endpoint (other than the one created here).
*/
asoc2 = sctp_endpoint_lookup_assoc(ep, &to, &transport);
if (asoc2 && asoc2 != asoc) {
if (asoc2->state >= SCTP_STATE_ESTABLISHED)
err = -EISCONN;
else
err = -EALREADY;
goto out_free;
}
/* If we could not find a matching association on the endpoint,
* make sure that there is no peeled-off association matching
* the peer address even on another socket.
*/
if (sctp_endpoint_is_peeled_off(ep, &to)) {
err = -EADDRNOTAVAIL;
goto out_free;
}
if (!asoc) {
/* If a bind() or sctp_bindx() is not called prior to
* an sctp_connectx() call, the system picks an
* ephemeral port and will choose an address set
* equivalent to binding with a wildcard address.
*/
if (!ep->base.bind_addr.port) {
if (sctp_autobind(sk)) {
err = -EAGAIN;
goto out_free;
}
} else {
/*
* If an unprivileged user inherits a 1-many
* style socket with open associations on a
* privileged port, it MAY be permitted to
* accept new associations, but it SHOULD NOT
* be permitted to open new associations.
*/
if (ep->base.bind_addr.port <
inet_prot_sock(net) &&
!ns_capable(net->user_ns,
CAP_NET_BIND_SERVICE)) {
err = -EACCES;
goto out_free;
}
}
scope = sctp_scope(&to);
asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL);
if (!asoc) {
err = -ENOMEM;
goto out_free;
}
err = sctp_assoc_set_bind_addr_from_ep(asoc, scope,
GFP_KERNEL);
if (err < 0) {
goto out_free;
}
}
/* Prime the peer's transport structures. */
transport = sctp_assoc_add_peer(asoc, &to, GFP_KERNEL,
SCTP_UNKNOWN);
if (!transport) {
err = -ENOMEM;
goto out_free;
}
addrcnt++;
addr_buf += af->sockaddr_len;
walk_size += af->sockaddr_len;
}
/* In case the user of sctp_connectx() wants an association
* id back, assign one now.
*/
if (assoc_id) {
err = sctp_assoc_set_id(asoc, GFP_KERNEL);
if (err < 0)
goto out_free;
}
err = sctp_primitive_ASSOCIATE(net, asoc, NULL);
if (err < 0) {
goto out_free;
}
/* Initialize sk's dport and daddr for getpeername() */
inet_sk(sk)->inet_dport = htons(asoc->peer.port);
sp->pf->to_sk_daddr(sa_addr, sk);
sk->sk_err = 0;
/* in-kernel sockets don't generally have a file allocated to them
* if all they do is call sock_create_kern().
*/
if (sk->sk_socket->file)
f_flags = sk->sk_socket->file->f_flags;
timeo = sock_sndtimeo(sk, f_flags & O_NONBLOCK);
if (assoc_id)
*assoc_id = asoc->assoc_id;
err = sctp_wait_for_connect(asoc, &timeo);
/* Note: the asoc may be freed after the return of
* sctp_wait_for_connect.
*/
/* Don't free association on exit. */
asoc = NULL;
out_free:
pr_debug("%s: took out_free path with asoc:%p kaddrs:%p err:%d\n",
__func__, asoc, kaddrs, err);
if (asoc) {
/* sctp_primitive_ASSOCIATE may have added this association
 * to the hash table; try to unhash it just in case. It's a no-op
 * if it wasn't hashed, so we're safe.
*/
sctp_association_free(asoc);
}
return err;
}
/* Helper for tunneling sctp_connectx() requests through sctp_setsockopt()
*
* API 8.9
* int sctp_connectx(int sd, struct sockaddr *addrs, int addrcnt,
* sctp_assoc_t *asoc);
*
* If sd is an IPv4 socket, the addresses passed must be IPv4 addresses.
* If the sd is an IPv6 socket, the addresses passed can either be IPv4
* or IPv6 addresses.
*
* A single address may be specified as INADDR_ANY or IN6ADDR_ANY, see
* Section 3.1.2 for this usage.
*
* addrs is a pointer to an array of one or more socket addresses. Each
* address is contained in its appropriate structure (i.e. struct
 * sockaddr_in or struct sockaddr_in6); the family of the address type
 * must be used to distinguish the address length (note that this
* representation is termed a "packed array" of addresses). The caller
* specifies the number of addresses in the array with addrcnt.
*
* On success, sctp_connectx() returns 0. It also sets the assoc_id to
* the association id of the new association. On failure, sctp_connectx()
* returns -1, and sets errno to the appropriate error code. The assoc_id
* is not touched by the kernel.
*
* For SCTP, the port given in each socket address must be the same, or
* sctp_connectx() will fail, setting errno to EINVAL.
*
* An application can use sctp_connectx to initiate an association with
* an endpoint that is multi-homed. Much like sctp_bindx() this call
* allows a caller to specify multiple addresses at which a peer can be
* reached. The way the SCTP stack uses the list of addresses to set up
* the association is implementation dependent. This function only
* specifies that the stack will try to make use of all the addresses in
* the list when needed.
*
* Note that the list of addresses passed in is only used for setting up
* the association. It does not necessarily equal the set of addresses
* the peer uses for the resulting association. If the caller wants to
* find out the set of peer addresses, it must use sctp_getpaddrs() to
* retrieve them after the association has been set up.
*
 * Basically do nothing but copy the addresses from user to kernel
 * land and invoke __sctp_connect(). This is used for tunneling
 * the sctp_connectx() request through sctp_setsockopt() from userspace.
*
* We don't use copy_from_user() for optimization: we first do the
* sanity checks (buffer size -fast- and access check-healthy
* pointer); if all of those succeed, then we can alloc the memory
* (expensive operation) needed to copy the data to kernel. Then we do
* the copying without checking the user space area
* (__copy_from_user()).
*
* On exit there is no need to do sockfd_put(), sys_setsockopt() does
* it.
*
* sk The sk of the socket
* addrs The pointer to the addresses in user land
* addrssize Size of the addrs buffer
*
* Returns >=0 if ok, <0 errno code on error.
*/
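/* Illustrative user-space sketch (not part of the kernel): initiating a
 * multi-homed association with the sctp_connectx() wrapper from
 * lksctp-tools, matching the API described above. 'sd' is an assumed
 * SCTP socket descriptor; error handling elided.
 *
 *    sctp_assoc_t asoc_id;
 *    struct sockaddr_in peers[2];
 *    memset(peers, 0, sizeof(peers));
 *    peers[0].sin_family = AF_INET;
 *    peers[0].sin_port = htons(5000);
 *    inet_pton(AF_INET, "198.51.100.1", &peers[0].sin_addr);
 *    peers[1] = peers[0];
 *    inet_pton(AF_INET, "198.51.100.2", &peers[1].sin_addr);
 *
 *    if (sctp_connectx(sd, (struct sockaddr *)peers, 2, &asoc_id) == 0)
 *        ;   // asoc_id now holds the id of the new association
 */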
static int __sctp_setsockopt_connectx(struct sock *sk,
struct sockaddr __user *addrs,
int addrs_size,
sctp_assoc_t *assoc_id)
{
struct sockaddr *kaddrs;
gfp_t gfp = GFP_KERNEL;
int err = 0;
pr_debug("%s: sk:%p addrs:%p addrs_size:%d\n",
__func__, sk, addrs, addrs_size);
if (unlikely(addrs_size <= 0))
return -EINVAL;
/* Check the user passed a healthy pointer. */
if (unlikely(!access_ok(VERIFY_READ, addrs, addrs_size)))
return -EFAULT;
/* Alloc space for the address array in kernel memory. */
if (sk->sk_socket->file)
gfp = GFP_USER | __GFP_NOWARN;
kaddrs = kmalloc(addrs_size, gfp);
if (unlikely(!kaddrs))
return -ENOMEM;
if (__copy_from_user(kaddrs, addrs, addrs_size)) {
err = -EFAULT;
} else {
err = __sctp_connect(sk, kaddrs, addrs_size, assoc_id);
}
kfree(kaddrs);
return err;
}
/*
* This is an older interface. It's kept for backward compatibility
* to the option that doesn't provide association id.
*/
static int sctp_setsockopt_connectx_old(struct sock *sk,
struct sockaddr __user *addrs,
int addrs_size)
{
return __sctp_setsockopt_connectx(sk, addrs, addrs_size, NULL);
}
/*
 * New interface for the API. Since the API is done with a socket
 * option, to make it simple we feed the association id back as the
 * return indication to the call. Errors are always negative and
 * association ids are always positive.
*/
static int sctp_setsockopt_connectx(struct sock *sk,
struct sockaddr __user *addrs,
int addrs_size)
{
sctp_assoc_t assoc_id = 0;
int err = 0;
err = __sctp_setsockopt_connectx(sk, addrs, addrs_size, &assoc_id);
if (err)
return err;
else
return assoc_id;
}
/*
* New (hopefully final) interface for the API.
 * We use the sctp_getaddrs_old structure so that the user-space
 * library can avoid any unnecessary allocations. The only difference
 * is that we store the actual length of the address buffer in the
 * addr_num structure member. That way we can re-use the existing
* code.
*/
#ifdef CONFIG_COMPAT
struct compat_sctp_getaddrs_old {
sctp_assoc_t assoc_id;
s32 addr_num;
compat_uptr_t addrs; /* struct sockaddr * */
};
#endif
static int sctp_getsockopt_connectx3(struct sock *sk, int len,
char __user *optval,
int __user *optlen)
{
struct sctp_getaddrs_old param;
sctp_assoc_t assoc_id = 0;
int err = 0;
#ifdef CONFIG_COMPAT
if (in_compat_syscall()) {
struct compat_sctp_getaddrs_old param32;
if (len < sizeof(param32))
return -EINVAL;
if (copy_from_user(¶m32, optval, sizeof(param32)))
return -EFAULT;
param.assoc_id = param32.assoc_id;
param.addr_num = param32.addr_num;
param.addrs = compat_ptr(param32.addrs);
} else
#endif
{
if (len < sizeof(param))
return -EINVAL;
if (copy_from_user(¶m, optval, sizeof(param)))
return -EFAULT;
}
err = __sctp_setsockopt_connectx(sk, (struct sockaddr __user *)
param.addrs, param.addr_num,
&assoc_id);
if (err == 0 || err == -EINPROGRESS) {
if (copy_to_user(optval, &assoc_id, sizeof(assoc_id)))
return -EFAULT;
if (put_user(sizeof(assoc_id), optlen))
return -EFAULT;
}
return err;
}
/* API 3.1.4 close() - UDP Style Syntax
* Applications use close() to perform graceful shutdown (as described in
* Section 10.1 of [SCTP]) on ALL the associations currently represented
* by a UDP-style socket.
*
* The syntax is
*
* ret = close(int sd);
*
* sd - the socket descriptor of the associations to be closed.
*
* To gracefully shutdown a specific association represented by the
* UDP-style socket, an application should use the sendmsg() call,
* passing no user data, but including the appropriate flag in the
* ancillary data (see Section xxxx).
*
* If sd in the close() call is a branched-off socket representing only
* one association, the shutdown is performed on that association only.
*
* 4.1.6 close() - TCP Style Syntax
*
* Applications use close() to gracefully close down an association.
*
* The syntax is:
*
* int close(int sd);
*
* sd - the socket descriptor of the association to be closed.
*
* After an application calls close() on a socket descriptor, no further
* socket operations will succeed on that descriptor.
*
* API 7.1.4 SO_LINGER
*
* An application using the TCP-style socket can use this option to
* perform the SCTP ABORT primitive. The linger option structure is:
*
* struct linger {
* int l_onoff; // option on/off
* int l_linger; // linger time
* };
*
* To enable the option, set l_onoff to 1. If the l_linger value is set
* to 0, calling close() is the same as the ABORT primitive. If the
* value is set to a negative value, the setsockopt() call will return
* an error. If the value is set to a positive value linger_time, the
* close() can be blocked for at most linger_time ms. If the graceful
* shutdown phase does not finish during this period, close() will
* return but the graceful shutdown phase continues in the system.
*/
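/* Illustrative user-space sketch (not part of the kernel): using
 * SO_LINGER as described above so that close() performs an ABORT on a
 * TCP-style SCTP socket. Standard sockets API only; 'sd' is an assumed
 * connected socket and error handling is elided.
 *
 *    #include <sys/socket.h>
 *    #include <unistd.h>
 *
 *    struct linger ling = { .l_onoff = 1, .l_linger = 0 };
 *    setsockopt(sd, SOL_SOCKET, SO_LINGER, &ling, sizeof(ling));
 *    close(sd);   // sends ABORT instead of a graceful SHUTDOWN
 */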
static void sctp_close(struct sock *sk, long timeout)
{
struct net *net = sock_net(sk);
struct sctp_endpoint *ep;
struct sctp_association *asoc;
struct list_head *pos, *temp;
unsigned int data_was_unread;
pr_debug("%s: sk:%p, timeout:%ld\n", __func__, sk, timeout);
lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
sk->sk_shutdown = SHUTDOWN_MASK;
sk->sk_state = SCTP_SS_CLOSING;
ep = sctp_sk(sk)->ep;
/* Clean up any skbs sitting on the receive queue. */
data_was_unread = sctp_queue_purge_ulpevents(&sk->sk_receive_queue);
data_was_unread += sctp_queue_purge_ulpevents(&sctp_sk(sk)->pd_lobby);
/* Walk all associations on an endpoint. */
list_for_each_safe(pos, temp, &ep->asocs) {
asoc = list_entry(pos, struct sctp_association, asocs);
if (sctp_style(sk, TCP)) {
/* A closed association can still be in the list if
* it belongs to a TCP-style listening socket that is
* not yet accepted. If so, free it. If not, send an
* ABORT or SHUTDOWN based on the linger options.
*/
if (sctp_state(asoc, CLOSED)) {
sctp_association_free(asoc);
continue;
}
}
if (data_was_unread || !skb_queue_empty(&asoc->ulpq.lobby) ||
!skb_queue_empty(&asoc->ulpq.reasm) ||
(sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime)) {
struct sctp_chunk *chunk;
chunk = sctp_make_abort_user(asoc, NULL, 0);
sctp_primitive_ABORT(net, asoc, chunk);
} else
sctp_primitive_SHUTDOWN(net, asoc, NULL);
}
/* On a TCP-style socket, block for at most linger_time if set. */
if (sctp_style(sk, TCP) && timeout)
sctp_wait_for_close(sk, timeout);
/* This will run the backlog queue. */
release_sock(sk);
/* Supposedly, no process has access to the socket, but
* the net layers still may.
* Also, sctp_destroy_sock() needs to be called with addr_wq_lock
* held and that should be grabbed before socket lock.
*/
spin_lock_bh(&net->sctp.addr_wq_lock);
bh_lock_sock_nested(sk);
/* Hold the sock, since sk_common_release() will call sock_put() and
 * we still have a little more cleanup to do.
*/
sock_hold(sk);
sk_common_release(sk);
bh_unlock_sock(sk);
spin_unlock_bh(&net->sctp.addr_wq_lock);
sock_put(sk);
SCTP_DBG_OBJCNT_DEC(sock);
}
/* Handle EPIPE error. */
static int sctp_error(struct sock *sk, int flags, int err)
{
if (err == -EPIPE)
err = sock_error(sk) ? : -EPIPE;
if (err == -EPIPE && !(flags & MSG_NOSIGNAL))
send_sig(SIGPIPE, current, 0);
return err;
}
/* API 3.1.3 sendmsg() - UDP Style Syntax
*
* An application uses sendmsg() and recvmsg() calls to transmit data to
* and receive data from its peer.
*
* ssize_t sendmsg(int socket, const struct msghdr *message,
* int flags);
*
* socket - the socket descriptor of the endpoint.
* message - pointer to the msghdr structure which contains a single
* user message and possibly some ancillary data.
*
* See Section 5 for complete description of the data
* structures.
*
* flags - flags sent or received with the user message, see Section
* 5 for complete description of the flags.
*
* Note: This function could use a rewrite especially when explicit
* connect support comes in.
*/
/* BUG: We do not implement the equivalent of sk_stream_wait_memory(). */
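/* Illustrative user-space sketch (not part of the kernel): sending one
 * message on a one-to-many socket with the sctp_sendmsg() wrapper from
 * lksctp-tools, which packages the destination address and SCTP_SNDRCV
 * ancillary data described above. 'sd' and the initialized sockaddr_in
 * 'peer' are assumed; error handling elided.
 *
 *    const char buf[] = "hello";
 *    // args after the address: ppid, flags, stream, ttl, context
 *    sctp_sendmsg(sd, buf, sizeof(buf),
 *                 (struct sockaddr *)&peer, sizeof(peer),
 *                 htonl(42), 0, 1, 0, 0);
 */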
static int sctp_msghdr_parse(const struct msghdr *msg,
struct sctp_cmsgs *cmsgs);
static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
{
struct net *net = sock_net(sk);
struct sctp_sock *sp;
struct sctp_endpoint *ep;
struct sctp_association *new_asoc = NULL, *asoc = NULL;
struct sctp_transport *transport, *chunk_tp;
struct sctp_chunk *chunk;
union sctp_addr to;
struct sockaddr *msg_name = NULL;
struct sctp_sndrcvinfo default_sinfo;
struct sctp_sndrcvinfo *sinfo;
struct sctp_initmsg *sinit;
sctp_assoc_t associd = 0;
struct sctp_cmsgs cmsgs = { NULL };
enum sctp_scope scope;
bool fill_sinfo_ttl = false, wait_connect = false;
struct sctp_datamsg *datamsg;
int msg_flags = msg->msg_flags;
__u16 sinfo_flags = 0;
long timeo;
int err;
err = 0;
sp = sctp_sk(sk);
ep = sp->ep;
pr_debug("%s: sk:%p, msg:%p, msg_len:%zu ep:%p\n", __func__, sk,
msg, msg_len, ep);
/* We cannot send a message over a TCP-style listening socket. */
if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)) {
err = -EPIPE;
goto out_nounlock;
}
/* Parse out the SCTP CMSGs. */
err = sctp_msghdr_parse(msg, &cmsgs);
if (err) {
pr_debug("%s: msghdr parse err:%x\n", __func__, err);
goto out_nounlock;
}
/* Fetch the destination address for this packet. This
* address only selects the association--it is not necessarily
* the address we will send to.
* For a peeled-off socket, msg_name is ignored.
*/
if (!sctp_style(sk, UDP_HIGH_BANDWIDTH) && msg->msg_name) {
int msg_namelen = msg->msg_namelen;
err = sctp_verify_addr(sk, (union sctp_addr *)msg->msg_name,
msg_namelen);
if (err)
return err;
if (msg_namelen > sizeof(to))
msg_namelen = sizeof(to);
memcpy(&to, msg->msg_name, msg_namelen);
msg_name = msg->msg_name;
}
sinit = cmsgs.init;
if (cmsgs.sinfo != NULL) {
memset(&default_sinfo, 0, sizeof(default_sinfo));
default_sinfo.sinfo_stream = cmsgs.sinfo->snd_sid;
default_sinfo.sinfo_flags = cmsgs.sinfo->snd_flags;
default_sinfo.sinfo_ppid = cmsgs.sinfo->snd_ppid;
default_sinfo.sinfo_context = cmsgs.sinfo->snd_context;
default_sinfo.sinfo_assoc_id = cmsgs.sinfo->snd_assoc_id;
sinfo = &default_sinfo;
fill_sinfo_ttl = true;
} else {
sinfo = cmsgs.srinfo;
}
/* Did the user specify SNDINFO/SNDRCVINFO? */
if (sinfo) {
sinfo_flags = sinfo->sinfo_flags;
associd = sinfo->sinfo_assoc_id;
}
pr_debug("%s: msg_len:%zu, sinfo_flags:0x%x\n", __func__,
msg_len, sinfo_flags);
/* SCTP_EOF or SCTP_ABORT cannot be set on a TCP-style socket. */
if (sctp_style(sk, TCP) && (sinfo_flags & (SCTP_EOF | SCTP_ABORT))) {
err = -EINVAL;
goto out_nounlock;
}
/* If SCTP_EOF is set, no data can be sent. Disallow sending zero
* length messages when SCTP_EOF|SCTP_ABORT is not set.
* If SCTP_ABORT is set, the message length could be non zero with
* the msg_iov set to the user abort reason.
*/
if (((sinfo_flags & SCTP_EOF) && (msg_len > 0)) ||
(!(sinfo_flags & (SCTP_EOF|SCTP_ABORT)) && (msg_len == 0))) {
err = -EINVAL;
goto out_nounlock;
}
/* If SCTP_ADDR_OVER is set, there must be an address
* specified in msg_name.
*/
if ((sinfo_flags & SCTP_ADDR_OVER) && (!msg->msg_name)) {
err = -EINVAL;
goto out_nounlock;
}
transport = NULL;
pr_debug("%s: about to look up association\n", __func__);
lock_sock(sk);
/* If a msg_name has been specified, assume this is to be used. */
if (msg_name) {
/* Look for a matching association on the endpoint. */
asoc = sctp_endpoint_lookup_assoc(ep, &to, &transport);
/* If we could not find a matching association on the
* endpoint, make sure that it is not a TCP-style
* socket that already has an association or there is
* no peeled-off association on another socket.
*/
if (!asoc &&
((sctp_style(sk, TCP) &&
(sctp_sstate(sk, ESTABLISHED) ||
sctp_sstate(sk, CLOSING))) ||
sctp_endpoint_is_peeled_off(ep, &to))) {
err = -EADDRNOTAVAIL;
goto out_unlock;
}
} else {
asoc = sctp_id2assoc(sk, associd);
if (!asoc) {
err = -EPIPE;
goto out_unlock;
}
}
if (asoc) {
pr_debug("%s: just looked up association:%p\n", __func__, asoc);
/* We cannot send a message on a TCP-style SCTP_SS_ESTABLISHED
* socket that has an association in CLOSED state. This can
* happen when an accepted socket has an association that is
* already CLOSED.
*/
if (sctp_state(asoc, CLOSED) && sctp_style(sk, TCP)) {
err = -EPIPE;
goto out_unlock;
}
if (sinfo_flags & SCTP_EOF) {
pr_debug("%s: shutting down association:%p\n",
__func__, asoc);
sctp_primitive_SHUTDOWN(net, asoc, NULL);
err = 0;
goto out_unlock;
}
if (sinfo_flags & SCTP_ABORT) {
chunk = sctp_make_abort_user(asoc, msg, msg_len);
if (!chunk) {
err = -ENOMEM;
goto out_unlock;
}
pr_debug("%s: aborting association:%p\n",
__func__, asoc);
sctp_primitive_ABORT(net, asoc, chunk);
err = 0;
goto out_unlock;
}
}
/* Do we need to create the association? */
if (!asoc) {
pr_debug("%s: there is no association yet\n", __func__);
if (sinfo_flags & (SCTP_EOF | SCTP_ABORT)) {
err = -EINVAL;
goto out_unlock;
}
/* Check for invalid stream against the stream counts,
* either the default or the user specified stream counts.
*/
if (sinfo) {
if (!sinit || !sinit->sinit_num_ostreams) {
/* Check against the defaults. */
if (sinfo->sinfo_stream >=
sp->initmsg.sinit_num_ostreams) {
err = -EINVAL;
goto out_unlock;
}
} else {
/* Check against the requested. */
if (sinfo->sinfo_stream >=
sinit->sinit_num_ostreams) {
err = -EINVAL;
goto out_unlock;
}
}
}
/*
* API 3.1.2 bind() - UDP Style Syntax
* If a bind() or sctp_bindx() is not called prior to a
* sendmsg() call that initiates a new association, the
* system picks an ephemeral port and will choose an address
* set equivalent to binding with a wildcard address.
*/
if (!ep->base.bind_addr.port) {
if (sctp_autobind(sk)) {
err = -EAGAIN;
goto out_unlock;
}
} else {
/*
* If an unprivileged user inherits a one-to-many
* style socket with open associations on a privileged
* port, it MAY be permitted to accept new associations,
* but it SHOULD NOT be permitted to open new
* associations.
*/
if (ep->base.bind_addr.port < inet_prot_sock(net) &&
!ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) {
err = -EACCES;
goto out_unlock;
}
}
scope = sctp_scope(&to);
new_asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL);
if (!new_asoc) {
err = -ENOMEM;
goto out_unlock;
}
asoc = new_asoc;
err = sctp_assoc_set_bind_addr_from_ep(asoc, scope, GFP_KERNEL);
if (err < 0) {
err = -ENOMEM;
goto out_free;
}
/* If the SCTP_INIT ancillary data is specified, set all
* the association init values accordingly.
*/
if (sinit) {
if (sinit->sinit_num_ostreams) {
asoc->c.sinit_num_ostreams =
sinit->sinit_num_ostreams;
}
if (sinit->sinit_max_instreams) {
asoc->c.sinit_max_instreams =
sinit->sinit_max_instreams;
}
if (sinit->sinit_max_attempts) {
asoc->max_init_attempts
= sinit->sinit_max_attempts;
}
if (sinit->sinit_max_init_timeo) {
asoc->max_init_timeo =
msecs_to_jiffies(sinit->sinit_max_init_timeo);
}
}
/* Prime the peer's transport structures. */
transport = sctp_assoc_add_peer(asoc, &to, GFP_KERNEL, SCTP_UNKNOWN);
if (!transport) {
err = -ENOMEM;
goto out_free;
}
}
/* ASSERT: we have a valid association at this point. */
pr_debug("%s: we have a valid association\n", __func__);
if (!sinfo) {
/* If the user didn't specify SNDINFO/SNDRCVINFO, make up
* one with some defaults.
*/
memset(&default_sinfo, 0, sizeof(default_sinfo));
default_sinfo.sinfo_stream = asoc->default_stream;
default_sinfo.sinfo_flags = asoc->default_flags;
default_sinfo.sinfo_ppid = asoc->default_ppid;
default_sinfo.sinfo_context = asoc->default_context;
default_sinfo.sinfo_timetolive = asoc->default_timetolive;
default_sinfo.sinfo_assoc_id = sctp_assoc2id(asoc);
sinfo = &default_sinfo;
} else if (fill_sinfo_ttl) {
/* In case SNDINFO was specified, we still need to fill
* it with a default ttl from the assoc here.
*/
sinfo->sinfo_timetolive = asoc->default_timetolive;
}
/* API 7.1.7, the sndbuf size per association bounds the
* maximum size of data that can be sent in a single send call.
*/
if (msg_len > sk->sk_sndbuf) {
err = -EMSGSIZE;
goto out_free;
}
if (asoc->pmtu_pending)
sctp_assoc_pending_pmtu(asoc);
/* If fragmentation is disabled and the message length exceeds the
* association fragmentation point, return EMSGSIZE. The I-D
* does not specify what this error is, but this looks like
* a great fit.
*/
if (sctp_sk(sk)->disable_fragments && (msg_len > asoc->frag_point)) {
err = -EMSGSIZE;
goto out_free;
}
/* Check for invalid stream. */
if (sinfo->sinfo_stream >= asoc->stream.outcnt) {
err = -EINVAL;
goto out_free;
}
if (sctp_wspace(asoc) < msg_len)
sctp_prsctp_prune(asoc, sinfo, msg_len - sctp_wspace(asoc));
timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
if (!sctp_wspace(asoc)) {
err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len);
if (err)
goto out_free;
}
/* If an address is passed with the sendto/sendmsg call, it is used
* to override the primary destination address in the TCP model, or
* when SCTP_ADDR_OVER flag is set in the UDP model.
*/
if ((sctp_style(sk, TCP) && msg_name) ||
(sinfo_flags & SCTP_ADDR_OVER)) {
chunk_tp = sctp_assoc_lookup_paddr(asoc, &to);
if (!chunk_tp) {
err = -EINVAL;
goto out_free;
}
} else
chunk_tp = NULL;
/* Auto-connect, if we aren't connected already. */
if (sctp_state(asoc, CLOSED)) {
err = sctp_primitive_ASSOCIATE(net, asoc, NULL);
if (err < 0)
goto out_free;
wait_connect = true;
pr_debug("%s: we associated primitively\n", __func__);
}
/* Break the message into multiple chunks of maximum size. */
datamsg = sctp_datamsg_from_user(asoc, sinfo, &msg->msg_iter);
if (IS_ERR(datamsg)) {
err = PTR_ERR(datamsg);
goto out_free;
}
asoc->force_delay = !!(msg->msg_flags & MSG_MORE);
/* Now send the (possibly) fragmented message. */
list_for_each_entry(chunk, &datamsg->chunks, frag_list) {
sctp_chunk_hold(chunk);
/* Do accounting for the write space. */
sctp_set_owner_w(chunk);
chunk->transport = chunk_tp;
}
/* Send it to the lower layers. Note: all chunks
* must either fail or succeed. The lower layer
* works that way today. Keep it that way or this
* breaks.
*/
err = sctp_primitive_SEND(net, asoc, datamsg);
/* Did the lower layer accept the chunk? */
if (err) {
sctp_datamsg_free(datamsg);
goto out_free;
}
pr_debug("%s: we sent primitively\n", __func__);
sctp_datamsg_put(datamsg);
err = msg_len;
if (unlikely(wait_connect)) {
timeo = sock_sndtimeo(sk, msg_flags & MSG_DONTWAIT);
sctp_wait_for_connect(asoc, &timeo);
}
/* If we are already past ASSOCIATE, the lower
* layers are responsible for association cleanup.
*/
goto out_unlock;
out_free:
if (new_asoc)
sctp_association_free(asoc);
out_unlock:
release_sock(sk);
out_nounlock:
return sctp_error(sk, msg_flags, err);
#if 0
do_sock_err:
if (msg_len)
err = msg_len;
else
err = sock_error(sk);
goto out;
do_interrupted:
if (msg_len)
err = msg_len;
goto out;
#endif /* 0 */
}
/* This is an extended version of skb_pull() that removes the data from the
* start of a skb even when data is spread across the list of skb's in the
* frag_list. len specifies the total amount of data that needs to be removed.
 * When 'len' bytes could be removed from the skb, it returns 0.
 * If 'len' exceeds the total skb length, it returns the number of
 * bytes that could not be removed.
*/
static int sctp_skb_pull(struct sk_buff *skb, int len)
{
struct sk_buff *list;
int skb_len = skb_headlen(skb);
int rlen;
if (len <= skb_len) {
__skb_pull(skb, len);
return 0;
}
len -= skb_len;
__skb_pull(skb, skb_len);
skb_walk_frags(skb, list) {
rlen = sctp_skb_pull(list, len);
skb->len -= (len-rlen);
skb->data_len -= (len-rlen);
if (!rlen)
return 0;
len = rlen;
}
return len;
}
/* API 3.1.3 recvmsg() - UDP Style Syntax
*
* ssize_t recvmsg(int socket, struct msghdr *message,
* int flags);
*
* socket - the socket descriptor of the endpoint.
* message - pointer to the msghdr structure which contains a single
* user message and possibly some ancillary data.
*
* See Section 5 for complete description of the data
* structures.
*
* flags - flags sent or received with the user message, see Section
* 5 for complete description of the flags.
*/
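/* Illustrative user-space sketch (not part of the kernel): receiving
 * with the sctp_recvmsg() wrapper from lksctp-tools and separating
 * notifications from data via MSG_NOTIFICATION, as set by the kernel
 * code below. 'sd' is an assumed socket; error handling elided.
 *
 *    char buf[8192];
 *    struct sockaddr_in from;
 *    socklen_t fromlen = sizeof(from);
 *    struct sctp_sndrcvinfo sinfo;
 *    int msg_flags = 0;
 *
 *    int n = sctp_recvmsg(sd, buf, sizeof(buf),
 *                         (struct sockaddr *)&from, &fromlen,
 *                         &sinfo, &msg_flags);
 *    if (n > 0 && (msg_flags & MSG_NOTIFICATION))
 *        ;   // buf holds a notification, not user data
 */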
static int sctp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
int noblock, int flags, int *addr_len)
{
struct sctp_ulpevent *event = NULL;
struct sctp_sock *sp = sctp_sk(sk);
struct sk_buff *skb, *head_skb;
int copied;
int err = 0;
int skb_len;
pr_debug("%s: sk:%p, msghdr:%p, len:%zd, noblock:%d, flags:0x%x, "
"addr_len:%p)\n", __func__, sk, msg, len, noblock, flags,
addr_len);
lock_sock(sk);
if (sctp_style(sk, TCP) && !sctp_sstate(sk, ESTABLISHED) &&
!sctp_sstate(sk, CLOSING) && !sctp_sstate(sk, CLOSED)) {
err = -ENOTCONN;
goto out;
}
skb = sctp_skb_recv_datagram(sk, flags, noblock, &err);
if (!skb)
goto out;
/* Get the total length of the skb including any skb's in the
* frag_list.
*/
skb_len = skb->len;
copied = skb_len;
if (copied > len)
copied = len;
err = skb_copy_datagram_msg(skb, 0, msg, copied);
event = sctp_skb2event(skb);
if (err)
goto out_free;
if (event->chunk && event->chunk->head_skb)
head_skb = event->chunk->head_skb;
else
head_skb = skb;
sock_recv_ts_and_drops(msg, sk, head_skb);
if (sctp_ulpevent_is_notification(event)) {
msg->msg_flags |= MSG_NOTIFICATION;
sp->pf->event_msgname(event, msg->msg_name, addr_len);
} else {
sp->pf->skb_msgname(head_skb, msg->msg_name, addr_len);
}
/* Check if we allow SCTP_NXTINFO. */
if (sp->recvnxtinfo)
sctp_ulpevent_read_nxtinfo(event, msg, sk);
/* Check if we allow SCTP_RCVINFO. */
if (sp->recvrcvinfo)
sctp_ulpevent_read_rcvinfo(event, msg);
/* Check if we allow SCTP_SNDRCVINFO. */
if (sp->subscribe.sctp_data_io_event)
sctp_ulpevent_read_sndrcvinfo(event, msg);
err = copied;
/* If skb's length exceeds the user's buffer, update the skb and
* push it back to the receive_queue so that the next call to
* recvmsg() will return the remaining data. Don't set MSG_EOR.
*/
if (skb_len > copied) {
msg->msg_flags &= ~MSG_EOR;
if (flags & MSG_PEEK)
goto out_free;
sctp_skb_pull(skb, copied);
skb_queue_head(&sk->sk_receive_queue, skb);
/* When only partial message is copied to the user, increase
* rwnd by that amount. If all the data in the skb is read,
* rwnd is updated when the event is freed.
*/
if (!sctp_ulpevent_is_notification(event))
sctp_assoc_rwnd_increase(event->asoc, copied);
goto out;
} else if ((event->msg_flags & MSG_NOTIFICATION) ||
(event->msg_flags & MSG_EOR))
msg->msg_flags |= MSG_EOR;
else
msg->msg_flags &= ~MSG_EOR;
out_free:
if (flags & MSG_PEEK) {
/* Release the skb reference acquired after peeking the skb in
* sctp_skb_recv_datagram().
*/
kfree_skb(skb);
} else {
/* Free the event which includes releasing the reference to
* the owner of the skb, freeing the skb and updating the
* rwnd.
*/
sctp_ulpevent_free(event);
}
out:
release_sock(sk);
return err;
}
/* 7.1.12 Enable/Disable message fragmentation (SCTP_DISABLE_FRAGMENTS)
*
 * This option is an on/off flag. If enabled, no SCTP message
 * fragmentation will be performed. Instead, if a message being sent
 * exceeds the current PMTU size, the message will NOT be sent and
 * an error will be indicated to the user.
*/
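/* Illustrative user-space sketch (not part of the kernel): enabling the
 * flag described above via setsockopt(). Assumes <netinet/sctp.h> and an
 * SCTP socket 'sd'; error handling elided.
 *
 *    int on = 1;
 *    setsockopt(sd, IPPROTO_SCTP, SCTP_DISABLE_FRAGMENTS,
 *               &on, sizeof(on));
 */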
static int sctp_setsockopt_disable_fragments(struct sock *sk,
char __user *optval,
unsigned int optlen)
{
int val;
if (optlen < sizeof(int))
return -EINVAL;
if (get_user(val, (int __user *)optval))
return -EFAULT;
sctp_sk(sk)->disable_fragments = (val == 0) ? 0 : 1;
return 0;
}
static int sctp_setsockopt_events(struct sock *sk, char __user *optval,
unsigned int optlen)
{
struct sctp_association *asoc;
struct sctp_ulpevent *event;
if (optlen > sizeof(struct sctp_event_subscribe))
return -EINVAL;
if (copy_from_user(&sctp_sk(sk)->subscribe, optval, optlen))
return -EFAULT;
/* At the time when a user app subscribes to SCTP_SENDER_DRY_EVENT,
* if there is no data to be sent or retransmit, the stack will
* immediately send up this notification.
*/
if (sctp_ulpevent_type_enabled(SCTP_SENDER_DRY_EVENT,
&sctp_sk(sk)->subscribe)) {
asoc = sctp_id2assoc(sk, 0);
if (asoc && sctp_outq_is_empty(&asoc->outqueue)) {
event = sctp_ulpevent_make_sender_dry_event(asoc,
GFP_ATOMIC);
if (!event)
return -ENOMEM;
sctp_ulpq_tail_event(&asoc->ulpq, event);
}
}
return 0;
}
/* 7.1.8 Automatic Close of associations (SCTP_AUTOCLOSE)
*
* This socket option is applicable to the UDP-style socket only. When
* set it will cause associations that are idle for more than the
* specified number of seconds to automatically close. An association
 * being idle is defined as an association that has NOT sent or received
* user data. The special value of '0' indicates that no automatic
* close of any associations should be performed. The option expects an
* integer defining the number of seconds of idle time before an
* association is closed.
*/
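/* Illustrative user-space sketch (not part of the kernel): asking for
 * idle associations on a one-to-many socket 'sd' (assumed) to be closed
 * after 60 seconds, per the description above. Error handling elided.
 *
 *    int secs = 60;
 *    setsockopt(sd, IPPROTO_SCTP, SCTP_AUTOCLOSE, &secs, sizeof(secs));
 */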
static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval,
unsigned int optlen)
{
struct sctp_sock *sp = sctp_sk(sk);
struct net *net = sock_net(sk);
/* Applicable to UDP-style socket only */
if (sctp_style(sk, TCP))
return -EOPNOTSUPP;
if (optlen != sizeof(int))
return -EINVAL;
if (copy_from_user(&sp->autoclose, optval, optlen))
return -EFAULT;
if (sp->autoclose > net->sctp.max_autoclose)
sp->autoclose = net->sctp.max_autoclose;
return 0;
}
/* 7.1.13 Peer Address Parameters (SCTP_PEER_ADDR_PARAMS)
*
* Applications can enable or disable heartbeats for any peer address of
* an association, modify an address's heartbeat interval, force a
* heartbeat to be sent immediately, and adjust the address's maximum
* number of retransmissions sent before an address is considered
* unreachable. The following structure is used to access and modify an
* address's parameters:
*
* struct sctp_paddrparams {
* sctp_assoc_t spp_assoc_id;
* struct sockaddr_storage spp_address;
* uint32_t spp_hbinterval;
* uint16_t spp_pathmaxrxt;
* uint32_t spp_pathmtu;
* uint32_t spp_sackdelay;
* uint32_t spp_flags;
* };
*
 * spp_assoc_id - (one-to-many style socket) This is filled in by the
* application, and identifies the association for
* this query.
* spp_address - This specifies which address is of interest.
* spp_hbinterval - This contains the value of the heartbeat interval,
* in milliseconds. If a value of zero
* is present in this field then no changes are to
* be made to this parameter.
* spp_pathmaxrxt - This contains the maximum number of
* retransmissions before this address shall be
* considered unreachable. If a value of zero
* is present in this field then no changes are to
* be made to this parameter.
* spp_pathmtu - When Path MTU discovery is disabled the value
* specified here will be the "fixed" path mtu.
* Note that if the spp_address field is empty
* then all associations on this address will
* have this fixed path mtu set upon them.
*
* spp_sackdelay - When delayed sack is enabled, this value specifies
* the number of milliseconds that sacks will be delayed
* for. This value will apply to all addresses of an
* association if the spp_address field is empty. Note
* also, that if delayed sack is enabled and this
* value is set to 0, no change is made to the last
* recorded delayed sack timer value.
*
* spp_flags - These flags are used to control various features
* on an association. The flag field may contain
* zero or more of the following options.
*
* SPP_HB_ENABLE - Enable heartbeats on the
* specified address. Note that if the address
* field is empty all addresses for the association
* have heartbeats enabled upon them.
*
* SPP_HB_DISABLE - Disable heartbeats on the
 * specified address. Note that if the address
* field is empty all addresses for the association
* will have their heartbeats disabled. Note also
* that SPP_HB_ENABLE and SPP_HB_DISABLE are
* mutually exclusive, only one of these two should
* be specified. Enabling both fields will have
* undetermined results.
*
* SPP_HB_DEMAND - Request a user initiated heartbeat
* to be made immediately.
*
 * SPP_HB_TIME_IS_ZERO - Specifies that the time for
 * heartbeat delay is to be set to the value of 0
* milliseconds.
*
* SPP_PMTUD_ENABLE - This field will enable PMTU
* discovery upon the specified address. Note that
 * if the address field is empty then all addresses
 * on the association are affected.
*
* SPP_PMTUD_DISABLE - This field will disable PMTU
* discovery upon the specified address. Note that
 * if the address field is empty then all addresses
 * on the association are affected. Note also that
* SPP_PMTUD_ENABLE and SPP_PMTUD_DISABLE are mutually
* exclusive. Enabling both will have undetermined
* results.
*
* SPP_SACKDELAY_ENABLE - Setting this flag turns
* on delayed sack. The time specified in spp_sackdelay
* is used to specify the sack delay for this address. Note
* that if spp_address is empty then all addresses will
* enable delayed sack and take on the sack delay
* value specified in spp_sackdelay.
* SPP_SACKDELAY_DISABLE - Setting this flag turns
* off delayed sack. If the spp_address field is blank then
* delayed sack is disabled for the entire association. Note
* also that this field is mutually exclusive to
* SPP_SACKDELAY_ENABLE, setting both will have undefined
* results.
*/
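/* Illustrative user-space sketch (not part of the kernel): setting a
 * 5-second heartbeat interval on every address of one association, per
 * the structure described above. Leaving spp_address zeroed applies the
 * change association-wide; 'sd' and 'asoc_id' are assumed and error
 * handling is elided.
 *
 *    struct sctp_paddrparams pp;
 *    memset(&pp, 0, sizeof(pp));
 *    pp.spp_assoc_id = asoc_id;
 *    pp.spp_flags = SPP_HB_ENABLE;
 *    pp.spp_hbinterval = 5000;   // milliseconds
 *    setsockopt(sd, IPPROTO_SCTP, SCTP_PEER_ADDR_PARAMS,
 *               &pp, sizeof(pp));
 */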
static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
struct sctp_transport *trans,
struct sctp_association *asoc,
struct sctp_sock *sp,
int hb_change,
int pmtud_change,
int sackdelay_change)
{
int error;
if (params->spp_flags & SPP_HB_DEMAND && trans) {
struct net *net = sock_net(trans->asoc->base.sk);
error = sctp_primitive_REQUESTHEARTBEAT(net, trans->asoc, trans);
if (error)
return error;
}
/* Note that unless the spp_flag is set to SPP_HB_ENABLE the value of
* this field is ignored. Note also that a value of zero indicates
* the current setting should be left unchanged.
*/
if (params->spp_flags & SPP_HB_ENABLE) {
/* Re-zero the interval if the SPP_HB_TIME_IS_ZERO is
* set. This lets us use 0 value when this flag
* is set.
*/
if (params->spp_flags & SPP_HB_TIME_IS_ZERO)
params->spp_hbinterval = 0;
if (params->spp_hbinterval ||
(params->spp_flags & SPP_HB_TIME_IS_ZERO)) {
if (trans) {
trans->hbinterval =
msecs_to_jiffies(params->spp_hbinterval);
} else if (asoc) {
asoc->hbinterval =
msecs_to_jiffies(params->spp_hbinterval);
} else {
sp->hbinterval = params->spp_hbinterval;
}
}
}
if (hb_change) {
if (trans) {
trans->param_flags =
(trans->param_flags & ~SPP_HB) | hb_change;
} else if (asoc) {
asoc->param_flags =
(asoc->param_flags & ~SPP_HB) | hb_change;
} else {
sp->param_flags =
(sp->param_flags & ~SPP_HB) | hb_change;
}
}
/* When Path MTU discovery is disabled the value specified here will
* be the "fixed" path mtu (i.e. the value of the spp_flags field must
* include the flag SPP_PMTUD_DISABLE for this field to have any
* effect).
*/
if ((params->spp_flags & SPP_PMTUD_DISABLE) && params->spp_pathmtu) {
if (trans) {
trans->pathmtu = params->spp_pathmtu;
sctp_assoc_sync_pmtu(asoc);
} else if (asoc) {
asoc->pathmtu = params->spp_pathmtu;
} else {
sp->pathmtu = params->spp_pathmtu;
}
}
if (pmtud_change) {
if (trans) {
int update = (trans->param_flags & SPP_PMTUD_DISABLE) &&
(params->spp_flags & SPP_PMTUD_ENABLE);
trans->param_flags =
(trans->param_flags & ~SPP_PMTUD) | pmtud_change;
if (update) {
sctp_transport_pmtu(trans, sctp_opt2sk(sp));
sctp_assoc_sync_pmtu(asoc);
}
} else if (asoc) {
asoc->param_flags =
(asoc->param_flags & ~SPP_PMTUD) | pmtud_change;
} else {
sp->param_flags =
(sp->param_flags & ~SPP_PMTUD) | pmtud_change;
}
}
/* Note that unless the spp_flag is set to SPP_SACKDELAY_ENABLE the
* value of this field is ignored. Note also that a value of zero
* indicates the current setting should be left unchanged.
*/
if ((params->spp_flags & SPP_SACKDELAY_ENABLE) && params->spp_sackdelay) {
if (trans) {
trans->sackdelay =
msecs_to_jiffies(params->spp_sackdelay);
} else if (asoc) {
asoc->sackdelay =
msecs_to_jiffies(params->spp_sackdelay);
} else {
sp->sackdelay = params->spp_sackdelay;
}
}
if (sackdelay_change) {
if (trans) {
trans->param_flags =
(trans->param_flags & ~SPP_SACKDELAY) |
sackdelay_change;
} else if (asoc) {
asoc->param_flags =
(asoc->param_flags & ~SPP_SACKDELAY) |
sackdelay_change;
} else {
sp->param_flags =
(sp->param_flags & ~SPP_SACKDELAY) |
sackdelay_change;
}
}
/* Note that a value of zero indicates the current setting should be
 * left unchanged.
*/
if (params->spp_pathmaxrxt) {
if (trans) {
trans->pathmaxrxt = params->spp_pathmaxrxt;
} else if (asoc) {
asoc->pathmaxrxt = params->spp_pathmaxrxt;
} else {
sp->pathmaxrxt = params->spp_pathmaxrxt;
}
}
return 0;
}
static int sctp_setsockopt_peer_addr_params(struct sock *sk,
char __user *optval,
unsigned int optlen)
{
struct sctp_paddrparams params;
struct sctp_transport *trans = NULL;
struct sctp_association *asoc = NULL;
struct sctp_sock *sp = sctp_sk(sk);
int error;
int hb_change, pmtud_change, sackdelay_change;
if (optlen != sizeof(struct sctp_paddrparams))
return -EINVAL;
if (copy_from_user(¶ms, optval, optlen))
return -EFAULT;
/* Validate flags and value parameters. */
hb_change = params.spp_flags & SPP_HB;
pmtud_change = params.spp_flags & SPP_PMTUD;
sackdelay_change = params.spp_flags & SPP_SACKDELAY;
if (hb_change == SPP_HB ||
pmtud_change == SPP_PMTUD ||
sackdelay_change == SPP_SACKDELAY ||
params.spp_sackdelay > 500 ||
(params.spp_pathmtu &&
params.spp_pathmtu < SCTP_DEFAULT_MINSEGMENT))
return -EINVAL;
/* If an address other than INADDR_ANY is specified, and
* no transport is found, then the request is invalid.
*/
if (!sctp_is_any(sk, (union sctp_addr *)¶ms.spp_address)) {
trans = sctp_addr_id2transport(sk, ¶ms.spp_address,
params.spp_assoc_id);
if (!trans)
return -EINVAL;
}
/* Get association, if assoc_id != 0 and the socket is a one
* to many style socket, and an association was not found, then
* the id was invalid.
*/
asoc = sctp_id2assoc(sk, params.spp_assoc_id);
if (!asoc && params.spp_assoc_id && sctp_style(sk, UDP))
return -EINVAL;
/* Heartbeat demand can only be sent on a transport or
* association, but not a socket.
*/
if (params.spp_flags & SPP_HB_DEMAND && !trans && !asoc)
return -EINVAL;
/* Process parameters. */
error = sctp_apply_peer_addr_params(¶ms, trans, asoc, sp,
hb_change, pmtud_change,
sackdelay_change);
if (error)
return error;
/* If changes are for association, also apply parameters to each
* transport.
*/
if (!trans && asoc) {
list_for_each_entry(trans, &asoc->peer.transport_addr_list,
transports) {
sctp_apply_peer_addr_params(¶ms, trans, asoc, sp,
hb_change, pmtud_change,
sackdelay_change);
}
}
return 0;
}
static inline __u32 sctp_spp_sackdelay_enable(__u32 param_flags)
{
return (param_flags & ~SPP_SACKDELAY) | SPP_SACKDELAY_ENABLE;
}
static inline __u32 sctp_spp_sackdelay_disable(__u32 param_flags)
{
return (param_flags & ~SPP_SACKDELAY) | SPP_SACKDELAY_DISABLE;
}
/*
* 7.1.23. Get or set delayed ack timer (SCTP_DELAYED_SACK)
*
 * This option will affect the way delayed acks are performed. This
* option allows you to get or set the delayed ack time, in
* milliseconds. It also allows changing the delayed ack frequency.
* Changing the frequency to 1 disables the delayed sack algorithm. If
* the assoc_id is 0, then this sets or gets the endpoints default
* values. If the assoc_id field is non-zero, then the set or get
 * affects the specified association for the one to many model (the
* assoc_id field is ignored by the one to one model). Note that if
* sack_delay or sack_freq are 0 when setting this option, then the
* current values will remain unchanged.
*
* struct sctp_sack_info {
* sctp_assoc_t sack_assoc_id;
* uint32_t sack_delay;
* uint32_t sack_freq;
* };
*
 * sack_assoc_id - This parameter indicates which association the user
 * is performing an action upon. Note that if this field's value is
 * zero then the endpoint's default value is changed (affecting future
* associations only).
*
* sack_delay - This parameter contains the number of milliseconds that
* the user is requesting the delayed ACK timer be set to. Note that
* this value is defined in the standard to be between 200 and 500
* milliseconds.
*
* sack_freq - This parameter contains the number of packets that must
* be received before a sack is sent without waiting for the delay
* timer to expire. The default value for this is 2, setting this
* value to 1 will disable the delayed sack algorithm.
*/
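/* Illustrative user-space sketch (not part of the kernel): setting a
 * 200 ms delayed-ack time with struct sctp_sack_info as described
 * above; sack_freq left at 0 keeps the current frequency. 'sd' and
 * 'asoc_id' are assumed; error handling elided.
 *
 *    struct sctp_sack_info si;
 *    memset(&si, 0, sizeof(si));
 *    si.sack_assoc_id = asoc_id;
 *    si.sack_delay = 200;   // milliseconds
 *    setsockopt(sd, IPPROTO_SCTP, SCTP_DELAYED_SACK, &si, sizeof(si));
 */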
static int sctp_setsockopt_delayed_ack(struct sock *sk,
char __user *optval, unsigned int optlen)
{
struct sctp_sack_info params;
struct sctp_transport *trans = NULL;
struct sctp_association *asoc = NULL;
struct sctp_sock *sp = sctp_sk(sk);
if (optlen == sizeof(struct sctp_sack_info)) {
if (copy_from_user(¶ms, optval, optlen))
return -EFAULT;
if (params.sack_delay == 0 && params.sack_freq == 0)
return 0;
} else if (optlen == sizeof(struct sctp_assoc_value)) {
pr_warn_ratelimited(DEPRECATED
"%s (pid %d) "
"Use of struct sctp_assoc_value in delayed_ack socket option.\n"
"Use struct sctp_sack_info instead\n",
current->comm, task_pid_nr(current));
if (copy_from_user(¶ms, optval, optlen))
return -EFAULT;
if (params.sack_delay == 0)
params.sack_freq = 1;
else
params.sack_freq = 0;
} else
return -EINVAL;
/* Validate value parameter. */
if (params.sack_delay > 500)
return -EINVAL;
/* Get association, if sack_assoc_id != 0 and the socket is a one
* to many style socket, and an association was not found, then
* the id was invalid.
*/
asoc = sctp_id2assoc(sk, params.sack_assoc_id);
if (!asoc && params.sack_assoc_id && sctp_style(sk, UDP))
return -EINVAL;
if (params.sack_delay) {
if (asoc) {
asoc->sackdelay =
msecs_to_jiffies(params.sack_delay);
asoc->param_flags =
sctp_spp_sackdelay_enable(asoc->param_flags);
} else {
sp->sackdelay = params.sack_delay;
sp->param_flags =
sctp_spp_sackdelay_enable(sp->param_flags);
}
}
if (params.sack_freq == 1) {
if (asoc) {
asoc->param_flags =
sctp_spp_sackdelay_disable(asoc->param_flags);
} else {
sp->param_flags =
sctp_spp_sackdelay_disable(sp->param_flags);
}
} else if (params.sack_freq > 1) {
if (asoc) {
asoc->sackfreq = params.sack_freq;
asoc->param_flags =
sctp_spp_sackdelay_enable(asoc->param_flags);
} else {
sp->sackfreq = params.sack_freq;
sp->param_flags =
sctp_spp_sackdelay_enable(sp->param_flags);
}
}
/* If change is for association, also apply to each transport. */
if (asoc) {
list_for_each_entry(trans, &asoc->peer.transport_addr_list,
transports) {
if (params.sack_delay) {
trans->sackdelay =
msecs_to_jiffies(params.sack_delay);
trans->param_flags =
sctp_spp_sackdelay_enable(trans->param_flags);
}
if (params.sack_freq == 1) {
trans->param_flags =
sctp_spp_sackdelay_disable(trans->param_flags);
} else if (params.sack_freq > 1) {
trans->sackfreq = params.sack_freq;
trans->param_flags =
sctp_spp_sackdelay_enable(trans->param_flags);
}
}
}
return 0;
}
/* 7.1.3 Initialization Parameters (SCTP_INITMSG)
*
* Applications can specify protocol parameters for the default association
* initialization. The option name argument to setsockopt() and getsockopt()
* is SCTP_INITMSG.
*
* Setting initialization parameters is effective only on an unconnected
 * socket (for UDP-style sockets only future associations are affected
* by the change). With TCP-style sockets, this option is inherited by
* sockets derived from a listener socket.
*/
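/* Illustrative user-space sketch (not part of the kernel): requesting
 * INIT defaults for future associations on an unconnected socket 'sd'
 * (assumed), per the description above. Zero fields leave the current
 * values alone; error handling elided.
 *
 *    struct sctp_initmsg im;
 *    memset(&im, 0, sizeof(im));
 *    im.sinit_num_ostreams = 10;
 *    im.sinit_max_instreams = 10;
 *    im.sinit_max_attempts = 4;
 *    setsockopt(sd, IPPROTO_SCTP, SCTP_INITMSG, &im, sizeof(im));
 */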
static int sctp_setsockopt_initmsg(struct sock *sk, char __user *optval, unsigned int optlen)
{
struct sctp_initmsg sinit;
struct sctp_sock *sp = sctp_sk(sk);
if (optlen != sizeof(struct sctp_initmsg))
return -EINVAL;
if (copy_from_user(&sinit, optval, optlen))
return -EFAULT;
if (sinit.sinit_num_ostreams)
sp->initmsg.sinit_num_ostreams = sinit.sinit_num_ostreams;
if (sinit.sinit_max_instreams)
sp->initmsg.sinit_max_instreams = sinit.sinit_max_instreams;
if (sinit.sinit_max_attempts)
sp->initmsg.sinit_max_attempts = sinit.sinit_max_attempts;
if (sinit.sinit_max_init_timeo)
sp->initmsg.sinit_max_init_timeo = sinit.sinit_max_init_timeo;
return 0;
}
/*
* 7.1.14 Set default send parameters (SCTP_DEFAULT_SEND_PARAM)
*
* Applications that wish to use the sendto() system call may wish to
* specify a default set of parameters that would normally be supplied
* through the inclusion of ancillary data. This socket option allows
* such an application to set the default sctp_sndrcvinfo structure.
* The application that wishes to use this socket option simply passes
* in to this call the sctp_sndrcvinfo structure defined in Section
 * 5.2.2). The input parameters accepted by this call include
* sinfo_stream, sinfo_flags, sinfo_ppid, sinfo_context,
* sinfo_timetolive. The user must provide the sinfo_assoc_id field in
* to this call if the caller is using the UDP model.
*/
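/* Illustrative user-space sketch (not part of the kernel): setting
 * default send parameters so a plain sendto() behaves as if the
 * corresponding ancillary data were supplied, per the description
 * above. 'sd' and 'asoc_id' are assumed; error handling elided.
 *
 *    struct sctp_sndrcvinfo def;
 *    memset(&def, 0, sizeof(def));
 *    def.sinfo_stream = 2;
 *    def.sinfo_ppid = htonl(42);
 *    def.sinfo_assoc_id = asoc_id;   // required in the UDP model
 *    setsockopt(sd, IPPROTO_SCTP, SCTP_DEFAULT_SEND_PARAM,
 *               &def, sizeof(def));
 */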
static int sctp_setsockopt_default_send_param(struct sock *sk,
char __user *optval,
unsigned int optlen)
{
struct sctp_sock *sp = sctp_sk(sk);
struct sctp_association *asoc;
struct sctp_sndrcvinfo info;
if (optlen != sizeof(info))
return -EINVAL;
if (copy_from_user(&info, optval, optlen))
return -EFAULT;
if (info.sinfo_flags &
~(SCTP_UNORDERED | SCTP_ADDR_OVER |
SCTP_ABORT | SCTP_EOF))
return -EINVAL;
asoc = sctp_id2assoc(sk, info.sinfo_assoc_id);
if (!asoc && info.sinfo_assoc_id && sctp_style(sk, UDP))
return -EINVAL;
if (asoc) {
asoc->default_stream = info.sinfo_stream;
asoc->default_flags = info.sinfo_flags;
asoc->default_ppid = info.sinfo_ppid;
asoc->default_context = info.sinfo_context;
asoc->default_timetolive = info.sinfo_timetolive;
} else {
sp->default_stream = info.sinfo_stream;
sp->default_flags = info.sinfo_flags;
sp->default_ppid = info.sinfo_ppid;
sp->default_context = info.sinfo_context;
sp->default_timetolive = info.sinfo_timetolive;
}
return 0;
}
/* RFC6458, Section 8.1.31. Set/get Default Send Parameters
* (SCTP_DEFAULT_SNDINFO)
*/
static int sctp_setsockopt_default_sndinfo(struct sock *sk,
char __user *optval,
unsigned int optlen)
{
struct sctp_sock *sp = sctp_sk(sk);
struct sctp_association *asoc;
struct sctp_sndinfo info;
if (optlen != sizeof(info))
return -EINVAL;
if (copy_from_user(&info, optval, optlen))
return -EFAULT;
if (info.snd_flags &
~(SCTP_UNORDERED | SCTP_ADDR_OVER |
SCTP_ABORT | SCTP_EOF))
return -EINVAL;
asoc = sctp_id2assoc(sk, info.snd_assoc_id);
if (!asoc && info.snd_assoc_id && sctp_style(sk, UDP))
return -EINVAL;
if (asoc) {
asoc->default_stream = info.snd_sid;
asoc->default_flags = info.snd_flags;
asoc->default_ppid = info.snd_ppid;
asoc->default_context = info.snd_context;
} else {
sp->default_stream = info.snd_sid;
sp->default_flags = info.snd_flags;
sp->default_ppid = info.snd_ppid;
sp->default_context = info.snd_context;
}
return 0;
}
/* 7.1.10 Set Primary Address (SCTP_PRIMARY_ADDR)
*
* Requests that the local SCTP stack use the enclosed peer address as
* the association primary. The enclosed address must be one of the
* association peer's addresses.
*/
static int sctp_setsockopt_primary_addr(struct sock *sk, char __user *optval,
unsigned int optlen)
{
struct sctp_prim prim;
struct sctp_transport *trans;
if (optlen != sizeof(struct sctp_prim))
return -EINVAL;
if (copy_from_user(&prim, optval, sizeof(struct sctp_prim)))
return -EFAULT;
trans = sctp_addr_id2transport(sk, &prim.ssp_addr, prim.ssp_assoc_id);
if (!trans)
return -EINVAL;
sctp_assoc_set_primary(trans->asoc, trans);
return 0;
}
/*
* 7.1.5 SCTP_NODELAY
*
* Turn on/off any Nagle-like algorithm. This means that packets are
* generally sent as soon as possible and no unnecessary delays are
* introduced, at the cost of more packets in the network. Expects an
* integer boolean flag.
*/
static int sctp_setsockopt_nodelay(struct sock *sk, char __user *optval,
unsigned int optlen)
{
int val;
if (optlen < sizeof(int))
return -EINVAL;
if (get_user(val, (int __user *)optval))
return -EFAULT;
sctp_sk(sk)->nodelay = (val == 0) ? 0 : 1;
return 0;
}
/*
*
* 7.1.1 SCTP_RTOINFO
*
* The protocol parameters used to initialize and bound retransmission
* timeout (RTO) are tunable. sctp_rtoinfo structure is used to access
* and modify these parameters.
* All parameters are time values, in milliseconds. A value of 0, when
* modifying the parameters, indicates that the current value should not
* be changed.
*
*/
static int sctp_setsockopt_rtoinfo(struct sock *sk, char __user *optval, unsigned int optlen)
{
struct sctp_rtoinfo rtoinfo;
struct sctp_association *asoc;
unsigned long rto_min, rto_max;
struct sctp_sock *sp = sctp_sk(sk);
if (optlen != sizeof (struct sctp_rtoinfo))
return -EINVAL;
if (copy_from_user(&rtoinfo, optval, optlen))
return -EFAULT;
asoc = sctp_id2assoc(sk, rtoinfo.srto_assoc_id);
/* Set the values to the specific association */
if (!asoc && rtoinfo.srto_assoc_id && sctp_style(sk, UDP))
return -EINVAL;
rto_max = rtoinfo.srto_max;
rto_min = rtoinfo.srto_min;
if (rto_max)
rto_max = asoc ? msecs_to_jiffies(rto_max) : rto_max;
else
rto_max = asoc ? asoc->rto_max : sp->rtoinfo.srto_max;
if (rto_min)
rto_min = asoc ? msecs_to_jiffies(rto_min) : rto_min;
else
rto_min = asoc ? asoc->rto_min : sp->rtoinfo.srto_min;
if (rto_min > rto_max)
return -EINVAL;
if (asoc) {
if (rtoinfo.srto_initial != 0)
asoc->rto_initial =
msecs_to_jiffies(rtoinfo.srto_initial);
asoc->rto_max = rto_max;
asoc->rto_min = rto_min;
} else {
/* If there is no association or the association-id = 0
* set the values to the endpoint.
*/
if (rtoinfo.srto_initial != 0)
sp->rtoinfo.srto_initial = rtoinfo.srto_initial;
sp->rtoinfo.srto_max = rto_max;
sp->rtoinfo.srto_min = rto_min;
}
return 0;
}
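/* Illustrative user-space sketch, not part of this file: clamp the RTO of
* an existing association between 100 ms and 2 s, leaving srto_initial
* untouched (zero means "keep the current value", as implemented above).
* "sd" and "assoc_id" are assumed:
*
*   struct sctp_rtoinfo rto = {
*       .srto_assoc_id = assoc_id,
*       .srto_min = 100,
*       .srto_max = 2000,
*   };
*   setsockopt(sd, IPPROTO_SCTP, SCTP_RTOINFO, &rto, sizeof(rto));
*
* A request with srto_min > srto_max fails with EINVAL before any value
* is applied.
*/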
/*
*
* 7.1.2 SCTP_ASSOCINFO
*
* This option is used to tune the maximum retransmission attempts
* of the association.
* Returns an error if the new association retransmission value is
* greater than the sum of the retransmission values of the peer's paths.
* See [SCTP] for more information.
*
*/
static int sctp_setsockopt_associnfo(struct sock *sk, char __user *optval, unsigned int optlen)
{
struct sctp_assocparams assocparams;
struct sctp_association *asoc;
if (optlen != sizeof(struct sctp_assocparams))
return -EINVAL;
if (copy_from_user(&assocparams, optval, optlen))
return -EFAULT;
asoc = sctp_id2assoc(sk, assocparams.sasoc_assoc_id);
if (!asoc && assocparams.sasoc_assoc_id && sctp_style(sk, UDP))
return -EINVAL;
/* Set the values to the specific association */
if (asoc) {
if (assocparams.sasoc_asocmaxrxt != 0) {
__u32 path_sum = 0;
int paths = 0;
struct sctp_transport *peer_addr;
list_for_each_entry(peer_addr, &asoc->peer.transport_addr_list,
transports) {
path_sum += peer_addr->pathmaxrxt;
paths++;
}
/* Only validate asocmaxrxt if we have more than
* one path/transport. We do this because path
* retransmissions are only counted when we have more
* than one path.
*/
if (paths > 1 &&
assocparams.sasoc_asocmaxrxt > path_sum)
return -EINVAL;
asoc->max_retrans = assocparams.sasoc_asocmaxrxt;
}
if (assocparams.sasoc_cookie_life != 0)
asoc->cookie_life = ms_to_ktime(assocparams.sasoc_cookie_life);
} else {
/* Set the values to the endpoint */
struct sctp_sock *sp = sctp_sk(sk);
if (assocparams.sasoc_asocmaxrxt != 0)
sp->assocparams.sasoc_asocmaxrxt =
assocparams.sasoc_asocmaxrxt;
if (assocparams.sasoc_cookie_life != 0)
sp->assocparams.sasoc_cookie_life =
assocparams.sasoc_cookie_life;
}
return 0;
}
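/* Illustrative user-space sketch, not part of this file: raise the
* association retransmission limit. When the association has more than
* one peer path, the handler above rejects values exceeding the sum of
* the per-path pathmaxrxt limits. "sd" and "assoc_id" are assumed:
*
*   struct sctp_assocparams ap = {
*       .sasoc_assoc_id = assoc_id,
*       .sasoc_asocmaxrxt = 8,
*   };
*   setsockopt(sd, IPPROTO_SCTP, SCTP_ASSOCINFO, &ap, sizeof(ap));
*/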
/*
* 7.1.16 Set/clear IPv4 mapped addresses (SCTP_I_WANT_MAPPED_V4_ADDR)
*
* This socket option is a boolean flag which turns on or off mapped V4
* addresses. If this option is turned on and the socket is type
* PF_INET6, then IPv4 addresses will be mapped to V6 representation.
* If this option is turned off, then no mapping will be done of V4
* addresses and a user will receive both PF_INET6 and PF_INET type
* addresses on the socket.
*/
static int sctp_setsockopt_mappedv4(struct sock *sk, char __user *optval, unsigned int optlen)
{
int val;
struct sctp_sock *sp = sctp_sk(sk);
if (optlen < sizeof(int))
return -EINVAL;
if (get_user(val, (int __user *)optval))
return -EFAULT;
if (val)
sp->v4mapped = 1;
else
sp->v4mapped = 0;
return 0;
}
/*
* 8.1.16. Get or Set the Maximum Fragmentation Size (SCTP_MAXSEG)
* This option will get or set the maximum size to put in any outgoing
* SCTP DATA chunk. If a message is larger than this size it will be
* fragmented by SCTP into the specified size. Note that the underlying
* SCTP implementation may fragment into smaller sized chunks when the
* PMTU of the underlying association is smaller than the value set by
* the user. The default value for this option is '0' which indicates
* the user is NOT limiting fragmentation and only the PMTU will affect
* SCTP's choice of DATA chunk size. Note also that values set larger
* than the maximum size of an IP datagram will effectively let SCTP
* control fragmentation (i.e. the same as setting this option to 0).
*
* The following structure is used to access and modify this parameter:
*
* struct sctp_assoc_value {
* sctp_assoc_t assoc_id;
* uint32_t assoc_value;
* };
*
* assoc_id: This parameter is ignored for one-to-one style sockets.
* For one-to-many style sockets this parameter indicates which
* association the user is performing an action upon. Note that if
* this field's value is zero then the endpoint's default value is
* changed (affecting future associations only).
* assoc_value: This parameter specifies the maximum size in bytes.
*/
static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, unsigned int optlen)
{
struct sctp_assoc_value params;
struct sctp_association *asoc;
struct sctp_sock *sp = sctp_sk(sk);
int val;
if (optlen == sizeof(int)) {
pr_warn_ratelimited(DEPRECATED
"%s (pid %d) "
"Use of int in maxseg socket option.\n"
"Use struct sctp_assoc_value instead\n",
current->comm, task_pid_nr(current));
if (copy_from_user(&val, optval, optlen))
return -EFAULT;
params.assoc_id = 0;
} else if (optlen == sizeof(struct sctp_assoc_value)) {
if (copy_from_user(&params, optval, optlen))
return -EFAULT;
val = params.assoc_value;
} else
return -EINVAL;
if ((val != 0) && ((val < 8) || (val > SCTP_MAX_CHUNK_LEN)))
return -EINVAL;
asoc = sctp_id2assoc(sk, params.assoc_id);
if (!asoc && params.assoc_id && sctp_style(sk, UDP))
return -EINVAL;
if (asoc) {
if (val == 0) {
val = asoc->pathmtu;
val -= sp->pf->af->net_header_len;
val -= sizeof(struct sctphdr) +
sizeof(struct sctp_data_chunk);
}
asoc->user_frag = val;
asoc->frag_point = sctp_frag_point(asoc, asoc->pathmtu);
} else {
sp->user_frag = val;
}
return 0;
}
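/* Illustrative user-space sketch, not part of this file: cap outgoing DATA
* chunks at 1200 bytes using the sctp_assoc_value form documented above
* (the bare-int form is deprecated by this handler). An assoc_id of zero
* changes the endpoint default for future associations; "sd" is assumed:
*
*   struct sctp_assoc_value av = {
*       .assoc_id = 0,
*       .assoc_value = 1200,
*   };
*   setsockopt(sd, IPPROTO_SCTP, SCTP_MAXSEG, &av, sizeof(av));
*/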
/*
* 7.1.9 Set Peer Primary Address (SCTP_SET_PEER_PRIMARY_ADDR)
*
* Requests that the peer mark the enclosed address as the association
* primary. The enclosed address must be one of the association's
* locally bound addresses. The following structure is used to make a
* set primary request:
*/
static int sctp_setsockopt_peer_primary_addr(struct sock *sk, char __user *optval,
unsigned int optlen)
{
struct net *net = sock_net(sk);
struct sctp_sock *sp;
struct sctp_association *asoc = NULL;
struct sctp_setpeerprim prim;
struct sctp_chunk *chunk;
struct sctp_af *af;
int err;
sp = sctp_sk(sk);
if (!net->sctp.addip_enable)
return -EPERM;
if (optlen != sizeof(struct sctp_setpeerprim))
return -EINVAL;
if (copy_from_user(&prim, optval, optlen))
return -EFAULT;
asoc = sctp_id2assoc(sk, prim.sspp_assoc_id);
if (!asoc)
return -EINVAL;
if (!asoc->peer.asconf_capable)
return -EPERM;
if (asoc->peer.addip_disabled_mask & SCTP_PARAM_SET_PRIMARY)
return -EPERM;
if (!sctp_state(asoc, ESTABLISHED))
return -ENOTCONN;
af = sctp_get_af_specific(prim.sspp_addr.ss_family);
if (!af)
return -EINVAL;
if (!af->addr_valid((union sctp_addr *)&prim.sspp_addr, sp, NULL))
return -EADDRNOTAVAIL;
if (!sctp_assoc_lookup_laddr(asoc, (union sctp_addr *)&prim.sspp_addr))
return -EADDRNOTAVAIL;
/* Create an ASCONF chunk with SET_PRIMARY parameter */
chunk = sctp_make_asconf_set_prim(asoc,
(union sctp_addr *)&prim.sspp_addr);
if (!chunk)
return -ENOMEM;
err = sctp_send_asconf(asoc, chunk);
pr_debug("%s: we set peer primary addr primitively\n", __func__);
return err;
}
static int sctp_setsockopt_adaptation_layer(struct sock *sk, char __user *optval,
unsigned int optlen)
{
struct sctp_setadaptation adaptation;
if (optlen != sizeof(struct sctp_setadaptation))
return -EINVAL;
if (copy_from_user(&adaptation, optval, optlen))
return -EFAULT;
sctp_sk(sk)->adaptation_ind = adaptation.ssb_adaptation_ind;
return 0;
}
/*
* 7.1.29. Set or Get the default context (SCTP_CONTEXT)
*
* The context field in the sctp_sndrcvinfo structure is normally only
* used when a failed message is retrieved holding the value that was
* sent down on the actual send call. This option allows the setting of
* a default context on an association basis that will be received on
* reading messages from the peer. This is especially helpful in the
* one-to-many model for an application to keep some reference to an
* internal state machine that is processing messages on the
* association. Note that the setting of this value only affects
* received messages from the peer and does not affect the value that is
* saved with outbound messages.
*/
static int sctp_setsockopt_context(struct sock *sk, char __user *optval,
unsigned int optlen)
{
struct sctp_assoc_value params;
struct sctp_sock *sp;
struct sctp_association *asoc;
if (optlen != sizeof(struct sctp_assoc_value))
return -EINVAL;
if (copy_from_user(&params, optval, optlen))
return -EFAULT;
sp = sctp_sk(sk);
if (params.assoc_id != 0) {
asoc = sctp_id2assoc(sk, params.assoc_id);
if (!asoc)
return -EINVAL;
asoc->default_rcv_context = params.assoc_value;
} else {
sp->default_rcv_context = params.assoc_value;
}
return 0;
}
/*
* 7.1.24. Get or set fragmented interleave (SCTP_FRAGMENT_INTERLEAVE)
*
* This option will at a minimum specify if the implementation is doing
* fragmented interleave. Fragmented interleave, for a one to many
* socket, is when subsequent calls to receive a message may return
* parts of messages from different associations. Some implementations
* may allow you to turn this value on or off. If so, when turned off,
* no fragment interleave will occur (which will cause a head of line
* blocking amongst multiple associations sharing the same one to many
* socket). When this option is turned on, then each receive call may
* come from a different association (thus the user must receive data
* with the extended calls (e.g. sctp_recvmsg) to keep track of which
* association each receive belongs to).
*
* This option takes a boolean value. A non-zero value indicates that
* fragmented interleave is on. A value of zero indicates that
* fragmented interleave is off.
*
* Note that it is important that an implementation that allows this
* option to be turned on, have it off by default. Otherwise an unaware
* application using the one to many model may become confused and act
* incorrectly.
*/
static int sctp_setsockopt_fragment_interleave(struct sock *sk,
char __user *optval,
unsigned int optlen)
{
int val;
if (optlen != sizeof(int))
return -EINVAL;
if (get_user(val, (int __user *)optval))
return -EFAULT;
sctp_sk(sk)->frag_interleave = (val == 0) ? 0 : 1;
return 0;
}
/*
* 8.1.21. Set or Get the SCTP Partial Delivery Point
* (SCTP_PARTIAL_DELIVERY_POINT)
*
* This option will set or get the SCTP partial delivery point. This
* point is the size of a message where the partial delivery API will be
* invoked to help free up rwnd space for the peer. Setting this to a
* lower value will cause partial deliveries to happen more often. The
* call's argument is an integer that sets or gets the partial delivery
* point. Note also that the call will fail if the user attempts to set
* this value larger than the socket receive buffer size.
*
* Note that any single message having a length smaller than or equal to
* the SCTP partial delivery point will be delivered in one single read
* call as long as the user provided buffer is large enough to hold the
* message.
*/
static int sctp_setsockopt_partial_delivery_point(struct sock *sk,
char __user *optval,
unsigned int optlen)
{
u32 val;
if (optlen != sizeof(u32))
return -EINVAL;
if (get_user(val, (int __user *)optval))
return -EFAULT;
/* Note: We double the receive buffer from what the user sets
* it to be, also initial rwnd is based on rcvbuf/2.
*/
if (val > (sk->sk_rcvbuf >> 1))
return -EINVAL;
sctp_sk(sk)->pd_point = val;
return 0;
}
/*
* 7.1.28. Set or Get the maximum burst (SCTP_MAX_BURST)
*
* This option will allow a user to change the maximum burst of packets
* that can be emitted by this association. Note that the default value
* is 4, and some implementations may restrict this setting so that it
* can only be lowered.
*
* NOTE: This text doesn't seem right. Do this on a socket basis with
* future associations inheriting the socket value.
*/
static int sctp_setsockopt_maxburst(struct sock *sk,
char __user *optval,
unsigned int optlen)
{
struct sctp_assoc_value params;
struct sctp_sock *sp;
struct sctp_association *asoc;
int val;
int assoc_id = 0;
if (optlen == sizeof(int)) {
pr_warn_ratelimited(DEPRECATED
"%s (pid %d) "
"Use of int in max_burst socket option deprecated.\n"
"Use struct sctp_assoc_value instead\n",
current->comm, task_pid_nr(current));
if (copy_from_user(&val, optval, optlen))
return -EFAULT;
} else if (optlen == sizeof(struct sctp_assoc_value)) {
if (copy_from_user(&params, optval, optlen))
return -EFAULT;
val = params.assoc_value;
assoc_id = params.assoc_id;
} else
return -EINVAL;
sp = sctp_sk(sk);
if (assoc_id != 0) {
asoc = sctp_id2assoc(sk, assoc_id);
if (!asoc)
return -EINVAL;
asoc->max_burst = val;
} else
sp->max_burst = val;
return 0;
}
/*
* 7.1.18. Add a chunk that must be authenticated (SCTP_AUTH_CHUNK)
*
* This set option adds a chunk type that the user is requesting to be
* received only in an authenticated way. Changes to the list of chunks
* will only affect future associations on the socket.
*/
static int sctp_setsockopt_auth_chunk(struct sock *sk,
char __user *optval,
unsigned int optlen)
{
struct sctp_endpoint *ep = sctp_sk(sk)->ep;
struct sctp_authchunk val;
if (!ep->auth_enable)
return -EACCES;
if (optlen != sizeof(struct sctp_authchunk))
return -EINVAL;
if (copy_from_user(&val, optval, optlen))
return -EFAULT;
switch (val.sauth_chunk) {
case SCTP_CID_INIT:
case SCTP_CID_INIT_ACK:
case SCTP_CID_SHUTDOWN_COMPLETE:
case SCTP_CID_AUTH:
return -EINVAL;
}
/* add this chunk id to the endpoint */
return sctp_auth_ep_add_chunkid(ep, val.sauth_chunk);
}
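/* Illustrative user-space sketch, not part of this file: require DATA
* chunks (chunk type 0) to be authenticated on future associations. INIT,
* INIT-ACK, SHUTDOWN-COMPLETE and AUTH itself are rejected by the switch
* above; "sd" is assumed:
*
*   struct sctp_authchunk ac = { .sauth_chunk = 0 };
*   setsockopt(sd, IPPROTO_SCTP, SCTP_AUTH_CHUNK, &ac, sizeof(ac));
*/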
/*
* 7.1.19. Get or set the list of supported HMAC Identifiers (SCTP_HMAC_IDENT)
*
* This option gets or sets the list of HMAC algorithms that the local
* endpoint requires the peer to use.
*/
static int sctp_setsockopt_hmac_ident(struct sock *sk,
char __user *optval,
unsigned int optlen)
{
struct sctp_endpoint *ep = sctp_sk(sk)->ep;
struct sctp_hmacalgo *hmacs;
u32 idents;
int err;
if (!ep->auth_enable)
return -EACCES;
if (optlen < sizeof(struct sctp_hmacalgo))
return -EINVAL;
hmacs = memdup_user(optval, optlen);
if (IS_ERR(hmacs))
return PTR_ERR(hmacs);
idents = hmacs->shmac_num_idents;
if (idents == 0 || idents > SCTP_AUTH_NUM_HMACS ||
(idents * sizeof(u16)) > (optlen - sizeof(struct sctp_hmacalgo))) {
err = -EINVAL;
goto out;
}
err = sctp_auth_ep_set_hmacs(ep, hmacs);
out:
kfree(hmacs);
return err;
}
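/* Illustrative user-space sketch, not part of this file: prefer SHA-256
* over SHA-1 for the AUTH HMAC. shmac_idents is a flexible array, so the
* trailing identifiers must be included in optlen, which is exactly what
* the bounds check above verifies. "sd" is assumed, and SCTP_AUTH_HMAC_ID_*
* are assumed to be the user-API identifier constants:
*
*   socklen_t len = sizeof(struct sctp_hmacalgo) + 2 * sizeof(__u16);
*   struct sctp_hmacalgo *h = calloc(1, len);
*   h->shmac_num_idents = 2;
*   h->shmac_idents[0] = SCTP_AUTH_HMAC_ID_SHA256;
*   h->shmac_idents[1] = SCTP_AUTH_HMAC_ID_SHA1;
*   setsockopt(sd, IPPROTO_SCTP, SCTP_HMAC_IDENT, h, len);
*   free(h);
*/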
/*
* 7.1.20. Set a shared key (SCTP_AUTH_KEY)
*
* This option will set a shared secret key which is used to build an
* association shared key.
*/
static int sctp_setsockopt_auth_key(struct sock *sk,
char __user *optval,
unsigned int optlen)
{
struct sctp_endpoint *ep = sctp_sk(sk)->ep;
struct sctp_authkey *authkey;
struct sctp_association *asoc;
int ret;
if (!ep->auth_enable)
return -EACCES;
if (optlen <= sizeof(struct sctp_authkey))
return -EINVAL;
authkey = memdup_user(optval, optlen);
if (IS_ERR(authkey))
return PTR_ERR(authkey);
if (authkey->sca_keylength > optlen - sizeof(struct sctp_authkey)) {
ret = -EINVAL;
goto out;
}
asoc = sctp_id2assoc(sk, authkey->sca_assoc_id);
if (!asoc && authkey->sca_assoc_id && sctp_style(sk, UDP)) {
ret = -EINVAL;
goto out;
}
ret = sctp_auth_set_key(ep, asoc, authkey);
out:
kzfree(authkey);
return ret;
}
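/* Illustrative user-space sketch, not part of this file: install a 16-byte
* shared secret as key number 1. sca_key is a flexible array, so optlen
* must be strictly larger than sizeof(struct sctp_authkey), matching the
* check above. "sd", "assoc_id" and "key" are assumed:
*
*   socklen_t len = sizeof(struct sctp_authkey) + 16;
*   struct sctp_authkey *ak = calloc(1, len);
*   ak->sca_assoc_id = assoc_id;
*   ak->sca_keynumber = 1;
*   ak->sca_keylength = 16;
*   memcpy(ak->sca_key, key, 16);
*   setsockopt(sd, IPPROTO_SCTP, SCTP_AUTH_KEY, ak, len);
*   free(ak);
*/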
/*
* 7.1.21. Get or set the active shared key (SCTP_AUTH_ACTIVE_KEY)
*
* This option will get or set the active shared key to be used to build
* the association shared key.
*/
static int sctp_setsockopt_active_key(struct sock *sk,
char __user *optval,
unsigned int optlen)
{
struct sctp_endpoint *ep = sctp_sk(sk)->ep;
struct sctp_authkeyid val;
struct sctp_association *asoc;
if (!ep->auth_enable)
return -EACCES;
if (optlen != sizeof(struct sctp_authkeyid))
return -EINVAL;
if (copy_from_user(&val, optval, optlen))
return -EFAULT;
asoc = sctp_id2assoc(sk, val.scact_assoc_id);
if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP))
return -EINVAL;
return sctp_auth_set_active_key(ep, asoc, val.scact_keynumber);
}
/*
* 7.1.22. Delete a shared key (SCTP_AUTH_DELETE_KEY)
*
* This set option will delete a shared secret key from use.
*/
static int sctp_setsockopt_del_key(struct sock *sk,
char __user *optval,
unsigned int optlen)
{
struct sctp_endpoint *ep = sctp_sk(sk)->ep;
struct sctp_authkeyid val;
struct sctp_association *asoc;
if (!ep->auth_enable)
return -EACCES;
if (optlen != sizeof(struct sctp_authkeyid))
return -EINVAL;
if (copy_from_user(&val, optval, optlen))
return -EFAULT;
asoc = sctp_id2assoc(sk, val.scact_assoc_id);
if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP))
return -EINVAL;
return sctp_auth_del_key_id(ep, asoc, val.scact_keynumber);
}
/*
* 8.1.23 SCTP_AUTO_ASCONF
*
* This option will enable or disable the use of the automatic generation of
* ASCONF chunks to add and delete addresses to an existing association. Note
* that this option has two caveats namely: a) it only affects sockets that
* are bound to all addresses available to the SCTP stack, and b) the system
* administrator may have an overriding control that turns the ASCONF feature
* off no matter what setting the socket option may have.
* This option expects an integer boolean flag, where a non-zero value turns on
* the option, and a zero value turns off the option.
* Note: in this implementation, the socket option overrides the default
* set by sysctl, as the FreeBSD implementation also does.
*/
static int sctp_setsockopt_auto_asconf(struct sock *sk, char __user *optval,
unsigned int optlen)
{
int val;
struct sctp_sock *sp = sctp_sk(sk);
if (optlen < sizeof(int))
return -EINVAL;
if (get_user(val, (int __user *)optval))
return -EFAULT;
if (!sctp_is_ep_boundall(sk) && val)
return -EINVAL;
if ((val && sp->do_auto_asconf) || (!val && !sp->do_auto_asconf))
return 0;
spin_lock_bh(&sock_net(sk)->sctp.addr_wq_lock);
if (val == 0 && sp->do_auto_asconf) {
list_del(&sp->auto_asconf_list);
sp->do_auto_asconf = 0;
} else if (val && !sp->do_auto_asconf) {
list_add_tail(&sp->auto_asconf_list,
&sock_net(sk)->sctp.auto_asconf_splist);
sp->do_auto_asconf = 1;
}
spin_unlock_bh(&sock_net(sk)->sctp.addr_wq_lock);
return 0;
}
/*
* SCTP_PEER_ADDR_THLDS
*
* This option allows us to alter the partially failed threshold for one or all
* transports in an association. See Section 6.1 of:
* http://www.ietf.org/id/draft-nishida-tsvwg-sctp-failover-05.txt
*/
static int sctp_setsockopt_paddr_thresholds(struct sock *sk,
char __user *optval,
unsigned int optlen)
{
struct sctp_paddrthlds val;
struct sctp_transport *trans;
struct sctp_association *asoc;
if (optlen < sizeof(struct sctp_paddrthlds))
return -EINVAL;
if (copy_from_user(&val, (struct sctp_paddrthlds __user *)optval,
sizeof(struct sctp_paddrthlds)))
return -EFAULT;
if (sctp_is_any(sk, (const union sctp_addr *)&val.spt_address)) {
asoc = sctp_id2assoc(sk, val.spt_assoc_id);
if (!asoc)
return -ENOENT;
list_for_each_entry(trans, &asoc->peer.transport_addr_list,
transports) {
if (val.spt_pathmaxrxt)
trans->pathmaxrxt = val.spt_pathmaxrxt;
trans->pf_retrans = val.spt_pathpfthld;
}
if (val.spt_pathmaxrxt)
asoc->pathmaxrxt = val.spt_pathmaxrxt;
asoc->pf_retrans = val.spt_pathpfthld;
} else {
trans = sctp_addr_id2transport(sk, &val.spt_address,
val.spt_assoc_id);
if (!trans)
return -ENOENT;
if (val.spt_pathmaxrxt)
trans->pathmaxrxt = val.spt_pathmaxrxt;
trans->pf_retrans = val.spt_pathpfthld;
}
return 0;
}
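/* Illustrative user-space sketch, not part of this file: mark every path
* of an association "potentially failed" after 2 unacked retransmissions.
* spt_pathmaxrxt is left at 0 so it stays unchanged (pf_retrans, by
* contrast, is always written, per the code above). A wildcard address
* with only the family set should select all transports via sctp_is_any(),
* though that behavior is an assumption here; "sd" and "assoc_id" are
* assumed as well:
*
*   struct sctp_paddrthlds th = {
*       .spt_assoc_id = assoc_id,
*       .spt_pathpfthld = 2,
*   };
*   ((struct sockaddr_in *)&th.spt_address)->sin_family = AF_INET;
*   setsockopt(sd, IPPROTO_SCTP, SCTP_PEER_ADDR_THLDS, &th, sizeof(th));
*/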
static int sctp_setsockopt_recvrcvinfo(struct sock *sk,
char __user *optval,
unsigned int optlen)
{
int val;
if (optlen < sizeof(int))
return -EINVAL;
if (get_user(val, (int __user *) optval))
return -EFAULT;
sctp_sk(sk)->recvrcvinfo = (val == 0) ? 0 : 1;
return 0;
}
static int sctp_setsockopt_recvnxtinfo(struct sock *sk,
char __user *optval,
unsigned int optlen)
{
int val;
if (optlen < sizeof(int))
return -EINVAL;
if (get_user(val, (int __user *) optval))
return -EFAULT;
sctp_sk(sk)->recvnxtinfo = (val == 0) ? 0 : 1;
return 0;
}
static int sctp_setsockopt_pr_supported(struct sock *sk,
char __user *optval,
unsigned int optlen)
{
struct sctp_assoc_value params;
struct sctp_association *asoc;
int retval = -EINVAL;
if (optlen != sizeof(params))
goto out;
if (copy_from_user(&params, optval, optlen)) {
retval = -EFAULT;
goto out;
}
asoc = sctp_id2assoc(sk, params.assoc_id);
if (asoc) {
asoc->prsctp_enable = !!params.assoc_value;
} else if (!params.assoc_id) {
struct sctp_sock *sp = sctp_sk(sk);
sp->ep->prsctp_enable = !!params.assoc_value;
} else {
goto out;
}
retval = 0;
out:
return retval;
}
static int sctp_setsockopt_default_prinfo(struct sock *sk,
char __user *optval,
unsigned int optlen)
{
struct sctp_default_prinfo info;
struct sctp_association *asoc;
int retval = -EINVAL;
if (optlen != sizeof(info))
goto out;
if (copy_from_user(&info, optval, sizeof(info))) {
retval = -EFAULT;
goto out;
}
if (info.pr_policy & ~SCTP_PR_SCTP_MASK)
goto out;
if (info.pr_policy == SCTP_PR_SCTP_NONE)
info.pr_value = 0;
asoc = sctp_id2assoc(sk, info.pr_assoc_id);
if (asoc) {
SCTP_PR_SET_POLICY(asoc->default_flags, info.pr_policy);
asoc->default_timetolive = info.pr_value;
} else if (!info.pr_assoc_id) {
struct sctp_sock *sp = sctp_sk(sk);
SCTP_PR_SET_POLICY(sp->default_flags, info.pr_policy);
sp->default_timetolive = info.pr_value;
} else {
goto out;
}
retval = 0;
out:
return retval;
}
static int sctp_setsockopt_reconfig_supported(struct sock *sk,
char __user *optval,
unsigned int optlen)
{
struct sctp_assoc_value params;
struct sctp_association *asoc;
int retval = -EINVAL;
if (optlen != sizeof(params))
goto out;
if (copy_from_user(&params, optval, optlen)) {
retval = -EFAULT;
goto out;
}
asoc = sctp_id2assoc(sk, params.assoc_id);
if (asoc) {
asoc->reconf_enable = !!params.assoc_value;
} else if (!params.assoc_id) {
struct sctp_sock *sp = sctp_sk(sk);
sp->ep->reconf_enable = !!params.assoc_value;
} else {
goto out;
}
retval = 0;
out:
return retval;
}
static int sctp_setsockopt_enable_strreset(struct sock *sk,
char __user *optval,
unsigned int optlen)
{
struct sctp_assoc_value params;
struct sctp_association *asoc;
int retval = -EINVAL;
if (optlen != sizeof(params))
goto out;
if (copy_from_user(&params, optval, optlen)) {
retval = -EFAULT;
goto out;
}
if (params.assoc_value & (~SCTP_ENABLE_STRRESET_MASK))
goto out;
asoc = sctp_id2assoc(sk, params.assoc_id);
if (asoc) {
asoc->strreset_enable = params.assoc_value;
} else if (!params.assoc_id) {
struct sctp_sock *sp = sctp_sk(sk);
sp->ep->strreset_enable = params.assoc_value;
} else {
goto out;
}
retval = 0;
out:
return retval;
}
static int sctp_setsockopt_reset_streams(struct sock *sk,
char __user *optval,
unsigned int optlen)
{
struct sctp_reset_streams *params;
struct sctp_association *asoc;
int retval = -EINVAL;
if (optlen < sizeof(struct sctp_reset_streams))
return -EINVAL;
params = memdup_user(optval, optlen);
if (IS_ERR(params))
return PTR_ERR(params);
asoc = sctp_id2assoc(sk, params->srs_assoc_id);
if (!asoc)
goto out;
retval = sctp_send_reset_streams(asoc, params);
out:
kfree(params);
return retval;
}
static int sctp_setsockopt_reset_assoc(struct sock *sk,
char __user *optval,
unsigned int optlen)
{
struct sctp_association *asoc;
sctp_assoc_t associd;
int retval = -EINVAL;
if (optlen != sizeof(associd))
goto out;
if (copy_from_user(&associd, optval, optlen)) {
retval = -EFAULT;
goto out;
}
asoc = sctp_id2assoc(sk, associd);
if (!asoc)
goto out;
retval = sctp_send_reset_assoc(asoc);
out:
return retval;
}
static int sctp_setsockopt_add_streams(struct sock *sk,
char __user *optval,
unsigned int optlen)
{
struct sctp_association *asoc;
struct sctp_add_streams params;
int retval = -EINVAL;
if (optlen != sizeof(params))
goto out;
if (copy_from_user(&params, optval, optlen)) {
retval = -EFAULT;
goto out;
}
asoc = sctp_id2assoc(sk, params.sas_assoc_id);
if (!asoc)
goto out;
retval = sctp_send_add_streams(asoc, &params);
out:
return retval;
}
/* API 6.2 setsockopt(), getsockopt()
*
* Applications use setsockopt() and getsockopt() to set or retrieve
* socket options. Socket options are used to change the default
* behavior of socket calls. They are described in Section 7.
*
* The syntax is:
*
* ret = getsockopt(int sd, int level, int optname, void __user *optval,
* int __user *optlen);
* ret = setsockopt(int sd, int level, int optname, const void __user *optval,
* int optlen);
*
* sd - the socket descriptor.
* level - set to IPPROTO_SCTP for all SCTP options.
* optname - the option name.
* optval - the buffer to store the value of the option.
* optlen - the size of the buffer.
*/
static int sctp_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, unsigned int optlen)
{
int retval = 0;
pr_debug("%s: sk:%p, optname:%d\n", __func__, sk, optname);
/* I can hardly begin to describe how wrong this is. This is
* so broken as to be worse than useless. The API draft
* REALLY is NOT helpful here... I am not convinced that the
* semantics of setsockopt() with a level OTHER THAN SOL_SCTP
* are at all well-founded.
*/
if (level != SOL_SCTP) {
struct sctp_af *af = sctp_sk(sk)->pf->af;
retval = af->setsockopt(sk, level, optname, optval, optlen);
goto out_nounlock;
}
lock_sock(sk);
switch (optname) {
case SCTP_SOCKOPT_BINDX_ADD:
/* 'optlen' is the size of the addresses buffer. */
retval = sctp_setsockopt_bindx(sk, (struct sockaddr __user *)optval,
optlen, SCTP_BINDX_ADD_ADDR);
break;
case SCTP_SOCKOPT_BINDX_REM:
/* 'optlen' is the size of the addresses buffer. */
retval = sctp_setsockopt_bindx(sk, (struct sockaddr __user *)optval,
optlen, SCTP_BINDX_REM_ADDR);
break;
case SCTP_SOCKOPT_CONNECTX_OLD:
/* 'optlen' is the size of the addresses buffer. */
retval = sctp_setsockopt_connectx_old(sk,
(struct sockaddr __user *)optval,
optlen);
break;
case SCTP_SOCKOPT_CONNECTX:
/* 'optlen' is the size of the addresses buffer. */
retval = sctp_setsockopt_connectx(sk,
(struct sockaddr __user *)optval,
optlen);
break;
case SCTP_DISABLE_FRAGMENTS:
retval = sctp_setsockopt_disable_fragments(sk, optval, optlen);
break;
case SCTP_EVENTS:
retval = sctp_setsockopt_events(sk, optval, optlen);
break;
case SCTP_AUTOCLOSE:
retval = sctp_setsockopt_autoclose(sk, optval, optlen);
break;
case SCTP_PEER_ADDR_PARAMS:
retval = sctp_setsockopt_peer_addr_params(sk, optval, optlen);
break;
case SCTP_DELAYED_SACK:
retval = sctp_setsockopt_delayed_ack(sk, optval, optlen);
break;
case SCTP_PARTIAL_DELIVERY_POINT:
retval = sctp_setsockopt_partial_delivery_point(sk, optval, optlen);
break;
case SCTP_INITMSG:
retval = sctp_setsockopt_initmsg(sk, optval, optlen);
break;
case SCTP_DEFAULT_SEND_PARAM:
retval = sctp_setsockopt_default_send_param(sk, optval,
optlen);
break;
case SCTP_DEFAULT_SNDINFO:
retval = sctp_setsockopt_default_sndinfo(sk, optval, optlen);
break;
case SCTP_PRIMARY_ADDR:
retval = sctp_setsockopt_primary_addr(sk, optval, optlen);
break;
case SCTP_SET_PEER_PRIMARY_ADDR:
retval = sctp_setsockopt_peer_primary_addr(sk, optval, optlen);
break;
case SCTP_NODELAY:
retval = sctp_setsockopt_nodelay(sk, optval, optlen);
break;
case SCTP_RTOINFO:
retval = sctp_setsockopt_rtoinfo(sk, optval, optlen);
break;
case SCTP_ASSOCINFO:
retval = sctp_setsockopt_associnfo(sk, optval, optlen);
break;
case SCTP_I_WANT_MAPPED_V4_ADDR:
retval = sctp_setsockopt_mappedv4(sk, optval, optlen);
break;
case SCTP_MAXSEG:
retval = sctp_setsockopt_maxseg(sk, optval, optlen);
break;
case SCTP_ADAPTATION_LAYER:
retval = sctp_setsockopt_adaptation_layer(sk, optval, optlen);
break;
case SCTP_CONTEXT:
retval = sctp_setsockopt_context(sk, optval, optlen);
break;
case SCTP_FRAGMENT_INTERLEAVE:
retval = sctp_setsockopt_fragment_interleave(sk, optval, optlen);
break;
case SCTP_MAX_BURST:
retval = sctp_setsockopt_maxburst(sk, optval, optlen);
break;
case SCTP_AUTH_CHUNK:
retval = sctp_setsockopt_auth_chunk(sk, optval, optlen);
break;
case SCTP_HMAC_IDENT:
retval = sctp_setsockopt_hmac_ident(sk, optval, optlen);
break;
case SCTP_AUTH_KEY:
retval = sctp_setsockopt_auth_key(sk, optval, optlen);
break;
case SCTP_AUTH_ACTIVE_KEY:
retval = sctp_setsockopt_active_key(sk, optval, optlen);
break;
case SCTP_AUTH_DELETE_KEY:
retval = sctp_setsockopt_del_key(sk, optval, optlen);
break;
case SCTP_AUTO_ASCONF:
retval = sctp_setsockopt_auto_asconf(sk, optval, optlen);
break;
case SCTP_PEER_ADDR_THLDS:
retval = sctp_setsockopt_paddr_thresholds(sk, optval, optlen);
break;
case SCTP_RECVRCVINFO:
retval = sctp_setsockopt_recvrcvinfo(sk, optval, optlen);
break;
case SCTP_RECVNXTINFO:
retval = sctp_setsockopt_recvnxtinfo(sk, optval, optlen);
break;
case SCTP_PR_SUPPORTED:
retval = sctp_setsockopt_pr_supported(sk, optval, optlen);
break;
case SCTP_DEFAULT_PRINFO:
retval = sctp_setsockopt_default_prinfo(sk, optval, optlen);
break;
case SCTP_RECONFIG_SUPPORTED:
retval = sctp_setsockopt_reconfig_supported(sk, optval, optlen);
break;
case SCTP_ENABLE_STREAM_RESET:
retval = sctp_setsockopt_enable_strreset(sk, optval, optlen);
break;
case SCTP_RESET_STREAMS:
retval = sctp_setsockopt_reset_streams(sk, optval, optlen);
break;
case SCTP_RESET_ASSOC:
retval = sctp_setsockopt_reset_assoc(sk, optval, optlen);
break;
case SCTP_ADD_STREAMS:
retval = sctp_setsockopt_add_streams(sk, optval, optlen);
break;
default:
retval = -ENOPROTOOPT;
break;
}
release_sock(sk);
out_nounlock:
return retval;
}
/* API 3.1.6 connect() - UDP Style Syntax
*
* An application may use the connect() call in the UDP model to initiate an
* association without sending data.
*
* The syntax is:
*
* ret = connect(int sd, const struct sockaddr *nam, socklen_t len);
*
* sd: the socket descriptor to have a new association added to.
*
* nam: the address structure (either struct sockaddr_in or struct
* sockaddr_in6 defined in RFC2553 [7]).
*
* len: the size of the address.
*/
static int sctp_connect(struct sock *sk, struct sockaddr *addr,
int addr_len)
{
int err = 0;
struct sctp_af *af;
lock_sock(sk);
pr_debug("%s: sk:%p, sockaddr:%p, addr_len:%d\n", __func__, sk,
addr, addr_len);
/* Validate addr_len before calling common connect/connectx routine. */
af = sctp_get_af_specific(addr->sa_family);
if (!af || addr_len < af->sockaddr_len) {
err = -EINVAL;
} else {
/* Pass correct addr len to common routine (so it knows there
* is only one address being passed).
*/
err = __sctp_connect(sk, addr, af->sockaddr_len, NULL);
}
release_sock(sk);
return err;
}
/* FIXME: Write comments. */
static int sctp_disconnect(struct sock *sk, int flags)
{
return -EOPNOTSUPP; /* STUB */
}
/* 4.1.4 accept() - TCP Style Syntax
*
* Applications use accept() call to remove an established SCTP
* association from the accept queue of the endpoint. A new socket
* descriptor will be returned from accept() to represent the newly
* formed association.
*/
static struct sock *sctp_accept(struct sock *sk, int flags, int *err, bool kern)
{
struct sctp_sock *sp;
struct sctp_endpoint *ep;
struct sock *newsk = NULL;
struct sctp_association *asoc;
long timeo;
int error = 0;
lock_sock(sk);
sp = sctp_sk(sk);
ep = sp->ep;
if (!sctp_style(sk, TCP)) {
error = -EOPNOTSUPP;
goto out;
}
if (!sctp_sstate(sk, LISTENING)) {
error = -EINVAL;
goto out;
}
timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
error = sctp_wait_for_accept(sk, timeo);
if (error)
goto out;
/* We treat the list of associations on the endpoint as the accept
* queue and pick the first association on the list.
*/
asoc = list_entry(ep->asocs.next, struct sctp_association, asocs);
newsk = sp->pf->create_accept_sk(sk, asoc, kern);
if (!newsk) {
error = -ENOMEM;
goto out;
}
/* Populate the fields of the newsk from the oldsk and migrate the
* asoc to the newsk.
*/
sctp_sock_migrate(sk, newsk, asoc, SCTP_SOCKET_TCP);
out:
release_sock(sk);
*err = error;
return newsk;
}
/* The SCTP ioctl handler. */
static int sctp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
int rc = -ENOTCONN;
lock_sock(sk);
/*
* SEQPACKET-style sockets in LISTENING state are valid, for
* SCTP, so only discard TCP-style sockets in LISTENING state.
*/
if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
goto out;
switch (cmd) {
case SIOCINQ: {
struct sk_buff *skb;
unsigned int amount = 0;
skb = skb_peek(&sk->sk_receive_queue);
if (skb != NULL) {
/*
* We will only return the amount of this packet since
* that is all that will be read.
*/
amount = skb->len;
}
rc = put_user(amount, (int __user *)arg);
break;
}
default:
rc = -ENOIOCTLCMD;
break;
}
out:
release_sock(sk);
return rc;
}
/* This is the function which gets called during socket creation to
* initialize the SCTP-specific portion of the sock.
* The sock structure should already be zero-filled memory.
*/
static int sctp_init_sock(struct sock *sk)
{
struct net *net = sock_net(sk);
struct sctp_sock *sp;
pr_debug("%s: sk:%p\n", __func__, sk);
sp = sctp_sk(sk);
/* Initialize the SCTP per socket area. */
switch (sk->sk_type) {
case SOCK_SEQPACKET:
sp->type = SCTP_SOCKET_UDP;
break;
case SOCK_STREAM:
sp->type = SCTP_SOCKET_TCP;
break;
default:
return -ESOCKTNOSUPPORT;
}
sk->sk_gso_type = SKB_GSO_SCTP;
/* Initialize default send parameters. These parameters can be
* modified with the SCTP_DEFAULT_SEND_PARAM socket option.
*/
sp->default_stream = 0;
sp->default_ppid = 0;
sp->default_flags = 0;
sp->default_context = 0;
sp->default_timetolive = 0;
sp->default_rcv_context = 0;
sp->max_burst = net->sctp.max_burst;
sp->sctp_hmac_alg = net->sctp.sctp_hmac_alg;
/* Initialize default setup parameters. These parameters
* can be modified with the SCTP_INITMSG socket option or
* overridden by the SCTP_INIT CMSG.
*/
sp->initmsg.sinit_num_ostreams = sctp_max_outstreams;
sp->initmsg.sinit_max_instreams = sctp_max_instreams;
sp->initmsg.sinit_max_attempts = net->sctp.max_retrans_init;
sp->initmsg.sinit_max_init_timeo = net->sctp.rto_max;
/* Initialize default RTO related parameters. These parameters can
* be modified for with the SCTP_RTOINFO socket option.
*/
sp->rtoinfo.srto_initial = net->sctp.rto_initial;
sp->rtoinfo.srto_max = net->sctp.rto_max;
sp->rtoinfo.srto_min = net->sctp.rto_min;
/* Initialize default association related parameters. These parameters
* can be modified with the SCTP_ASSOCINFO socket option.
*/
sp->assocparams.sasoc_asocmaxrxt = net->sctp.max_retrans_association;
sp->assocparams.sasoc_number_peer_destinations = 0;
sp->assocparams.sasoc_peer_rwnd = 0;
sp->assocparams.sasoc_local_rwnd = 0;
sp->assocparams.sasoc_cookie_life = net->sctp.valid_cookie_life;
/* Initialize default event subscriptions. By default, all the
* options are off.
*/
memset(&sp->subscribe, 0, sizeof(struct sctp_event_subscribe));
/* Default Peer Address Parameters. These defaults can
* be modified via SCTP_PEER_ADDR_PARAMS
*/
sp->hbinterval = net->sctp.hb_interval;
sp->pathmaxrxt = net->sctp.max_retrans_path;
sp->pathmtu = 0; /* allow default discovery */
sp->sackdelay = net->sctp.sack_timeout;
sp->sackfreq = 2;
sp->param_flags = SPP_HB_ENABLE |
SPP_PMTUD_ENABLE |
SPP_SACKDELAY_ENABLE;
/* If enabled no SCTP message fragmentation will be performed.
* Configure through SCTP_DISABLE_FRAGMENTS socket option.
*/
sp->disable_fragments = 0;
/* Enable Nagle algorithm by default. */
sp->nodelay = 0;
sp->recvrcvinfo = 0;
sp->recvnxtinfo = 0;
/* Enable by default. */
sp->v4mapped = 1;
/* Auto-close idle associations after the configured
* number of seconds. A value of 0 disables this
* feature. Configure through the SCTP_AUTOCLOSE socket option,
* for UDP-style sockets only.
*/
sp->autoclose = 0;
/* User specified fragmentation limit. */
sp->user_frag = 0;
sp->adaptation_ind = 0;
sp->pf = sctp_get_pf_specific(sk->sk_family);
/* Control variables for partial data delivery. */
atomic_set(&sp->pd_mode, 0);
skb_queue_head_init(&sp->pd_lobby);
sp->frag_interleave = 0;
/* Create a per socket endpoint structure. Even if we
* change the data structure relationships, this may still
* be useful for storing pre-connect address information.
*/
sp->ep = sctp_endpoint_new(sk, GFP_KERNEL);
if (!sp->ep)
return -ENOMEM;
sp->hmac = NULL;
sk->sk_destruct = sctp_destruct_sock;
SCTP_DBG_OBJCNT_INC(sock);
local_bh_disable();
percpu_counter_inc(&sctp_sockets_allocated);
sock_prot_inuse_add(net, sk->sk_prot, 1);
/* Nothing can fail after this block, otherwise
* sctp_destroy_sock() will be called without addr_wq_lock held
*/
if (net->sctp.default_auto_asconf) {
spin_lock(&sock_net(sk)->sctp.addr_wq_lock);
list_add_tail(&sp->auto_asconf_list,
&net->sctp.auto_asconf_splist);
sp->do_auto_asconf = 1;
spin_unlock(&sock_net(sk)->sctp.addr_wq_lock);
} else {
sp->do_auto_asconf = 0;
}
local_bh_enable();
return 0;
}
/* Cleanup any SCTP per socket resources. Must be called with
* sock_net(sk)->sctp.addr_wq_lock held if sp->do_auto_asconf is true
*/
static void sctp_destroy_sock(struct sock *sk)
{
struct sctp_sock *sp;
pr_debug("%s: sk:%p\n", __func__, sk);
/* Release our hold on the endpoint. */
sp = sctp_sk(sk);
/* This could happen during socket init, thus we bail out
* early, since the rest of the below is not setup either.
*/
if (sp->ep == NULL)
return;
if (sp->do_auto_asconf) {
sp->do_auto_asconf = 0;
list_del(&sp->auto_asconf_list);
}
sctp_endpoint_free(sp->ep);
local_bh_disable();
percpu_counter_dec(&sctp_sockets_allocated);
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
local_bh_enable();
}
/* Triggered when there are no references on the socket anymore */
static void sctp_destruct_sock(struct sock *sk)
{
struct sctp_sock *sp = sctp_sk(sk);
/* Free up the HMAC transform. */
crypto_free_shash(sp->hmac);
inet_sock_destruct(sk);
}
/* API 4.1.7 shutdown() - TCP Style Syntax
* int shutdown(int socket, int how);
*
* sd - the socket descriptor of the association to be closed.
* how - Specifies the type of shutdown. The values are
* as follows:
* SHUT_RD
* Disables further receive operations. No SCTP
* protocol action is taken.
* SHUT_WR
* Disables further send operations, and initiates
* the SCTP shutdown sequence.
* SHUT_RDWR
* Disables further send and receive operations
* and initiates the SCTP shutdown sequence.
*/
static void sctp_shutdown(struct sock *sk, int how)
{
struct net *net = sock_net(sk);
struct sctp_endpoint *ep;
if (!sctp_style(sk, TCP))
return;
ep = sctp_sk(sk)->ep;
if (how & SEND_SHUTDOWN && !list_empty(&ep->asocs)) {
struct sctp_association *asoc;
sk->sk_state = SCTP_SS_CLOSING;
asoc = list_entry(ep->asocs.next,
struct sctp_association, asocs);
sctp_primitive_SHUTDOWN(net, asoc, NULL);
}
}
int sctp_get_sctp_info(struct sock *sk, struct sctp_association *asoc,
struct sctp_info *info)
{
struct sctp_transport *prim;
struct list_head *pos;
int mask;
memset(info, 0, sizeof(*info));
if (!asoc) {
struct sctp_sock *sp = sctp_sk(sk);
info->sctpi_s_autoclose = sp->autoclose;
info->sctpi_s_adaptation_ind = sp->adaptation_ind;
info->sctpi_s_pd_point = sp->pd_point;
info->sctpi_s_nodelay = sp->nodelay;
info->sctpi_s_disable_fragments = sp->disable_fragments;
info->sctpi_s_v4mapped = sp->v4mapped;
info->sctpi_s_frag_interleave = sp->frag_interleave;
info->sctpi_s_type = sp->type;
return 0;
}
info->sctpi_tag = asoc->c.my_vtag;
info->sctpi_state = asoc->state;
info->sctpi_rwnd = asoc->a_rwnd;
info->sctpi_unackdata = asoc->unack_data;
info->sctpi_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map);
info->sctpi_instrms = asoc->stream.incnt;
info->sctpi_outstrms = asoc->stream.outcnt;
list_for_each(pos, &asoc->base.inqueue.in_chunk_list)
info->sctpi_inqueue++;
list_for_each(pos, &asoc->outqueue.out_chunk_list)
info->sctpi_outqueue++;
info->sctpi_overall_error = asoc->overall_error_count;
info->sctpi_max_burst = asoc->max_burst;
info->sctpi_maxseg = asoc->frag_point;
info->sctpi_peer_rwnd = asoc->peer.rwnd;
info->sctpi_peer_tag = asoc->c.peer_vtag;
mask = asoc->peer.ecn_capable << 1;
mask = (mask | asoc->peer.ipv4_address) << 1;
mask = (mask | asoc->peer.ipv6_address) << 1;
mask = (mask | asoc->peer.hostname_address) << 1;
mask = (mask | asoc->peer.asconf_capable) << 1;
mask = (mask | asoc->peer.prsctp_capable) << 1;
mask = (mask | asoc->peer.auth_capable);
info->sctpi_peer_capable = mask;
mask = asoc->peer.sack_needed << 1;
mask = (mask | asoc->peer.sack_generation) << 1;
mask = (mask | asoc->peer.zero_window_announced);
info->sctpi_peer_sack = mask;
info->sctpi_isacks = asoc->stats.isacks;
info->sctpi_osacks = asoc->stats.osacks;
info->sctpi_opackets = asoc->stats.opackets;
info->sctpi_ipackets = asoc->stats.ipackets;
info->sctpi_rtxchunks = asoc->stats.rtxchunks;
info->sctpi_outofseqtsns = asoc->stats.outofseqtsns;
info->sctpi_idupchunks = asoc->stats.idupchunks;
info->sctpi_gapcnt = asoc->stats.gapcnt;
info->sctpi_ouodchunks = asoc->stats.ouodchunks;
info->sctpi_iuodchunks = asoc->stats.iuodchunks;
info->sctpi_oodchunks = asoc->stats.oodchunks;
info->sctpi_iodchunks = asoc->stats.iodchunks;
info->sctpi_octrlchunks = asoc->stats.octrlchunks;
info->sctpi_ictrlchunks = asoc->stats.ictrlchunks;
prim = asoc->peer.primary_path;
memcpy(&info->sctpi_p_address, &prim->ipaddr, sizeof(prim->ipaddr));
info->sctpi_p_state = prim->state;
info->sctpi_p_cwnd = prim->cwnd;
info->sctpi_p_srtt = prim->srtt;
info->sctpi_p_rto = jiffies_to_msecs(prim->rto);
info->sctpi_p_hbinterval = prim->hbinterval;
info->sctpi_p_pathmaxrxt = prim->pathmaxrxt;
info->sctpi_p_sackdelay = jiffies_to_msecs(prim->sackdelay);
info->sctpi_p_ssthresh = prim->ssthresh;
info->sctpi_p_partial_bytes_acked = prim->partial_bytes_acked;
info->sctpi_p_flight_size = prim->flight_size;
info->sctpi_p_error = prim->error_count;
return 0;
}
EXPORT_SYMBOL_GPL(sctp_get_sctp_info);
/* use callback to avoid exporting the core structure */
int sctp_transport_walk_start(struct rhashtable_iter *iter)
{
int err;
rhltable_walk_enter(&sctp_transport_hashtable, iter);
err = rhashtable_walk_start(iter);
if (err && err != -EAGAIN) {
rhashtable_walk_stop(iter);
rhashtable_walk_exit(iter);
return err;
}
return 0;
}
void sctp_transport_walk_stop(struct rhashtable_iter *iter)
{
rhashtable_walk_stop(iter);
rhashtable_walk_exit(iter);
}
struct sctp_transport *sctp_transport_get_next(struct net *net,
struct rhashtable_iter *iter)
{
struct sctp_transport *t;
t = rhashtable_walk_next(iter);
for (; t; t = rhashtable_walk_next(iter)) {
if (IS_ERR(t)) {
if (PTR_ERR(t) == -EAGAIN)
continue;
break;
}
if (net_eq(sock_net(t->asoc->base.sk), net) &&
t->asoc->peer.primary_path == t)
break;
}
return t;
}
struct sctp_transport *sctp_transport_get_idx(struct net *net,
struct rhashtable_iter *iter,
int pos)
{
void *obj = SEQ_START_TOKEN;
while (pos && (obj = sctp_transport_get_next(net, iter)) &&
!IS_ERR(obj))
pos--;
return obj;
}
int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *),
void *p) {
int err = 0;
int hash = 0;
struct sctp_ep_common *epb;
struct sctp_hashbucket *head;
for (head = sctp_ep_hashtable; hash < sctp_ep_hashsize;
hash++, head++) {
read_lock_bh(&head->lock);
sctp_for_each_hentry(epb, &head->chain) {
err = cb(sctp_ep(epb), p);
if (err)
break;
}
read_unlock_bh(&head->lock);
}
return err;
}
EXPORT_SYMBOL_GPL(sctp_for_each_endpoint);
int sctp_transport_lookup_process(int (*cb)(struct sctp_transport *, void *),
struct net *net,
const union sctp_addr *laddr,
const union sctp_addr *paddr, void *p)
{
struct sctp_transport *transport;
int err;
rcu_read_lock();
transport = sctp_addrs_lookup_transport(net, laddr, paddr);
rcu_read_unlock();
if (!transport)
return -ENOENT;
err = cb(transport, p);
sctp_transport_put(transport);
return err;
}
EXPORT_SYMBOL_GPL(sctp_transport_lookup_process);
int sctp_for_each_transport(int (*cb)(struct sctp_transport *, void *),
int (*cb_done)(struct sctp_transport *, void *),
struct net *net, int *pos, void *p) {
struct rhashtable_iter hti;
struct sctp_transport *tsp;
int ret;
again:
ret = sctp_transport_walk_start(&hti);
if (ret)
return ret;
tsp = sctp_transport_get_idx(net, &hti, *pos + 1);
for (; !IS_ERR_OR_NULL(tsp); tsp = sctp_transport_get_next(net, &hti)) {
if (!sctp_transport_hold(tsp))
continue;
ret = cb(tsp, p);
if (ret)
break;
(*pos)++;
sctp_transport_put(tsp);
}
sctp_transport_walk_stop(&hti);
if (ret) {
if (cb_done && !cb_done(tsp, p)) {
(*pos)++;
sctp_transport_put(tsp);
goto again;
}
sctp_transport_put(tsp);
}
return ret;
}
EXPORT_SYMBOL_GPL(sctp_for_each_transport);
/* 7.2.1 Association Status (SCTP_STATUS)
* Applications can retrieve current status information about an
* association, including association state, peer receiver window size,
* number of unacked data chunks, and number of data chunks pending
* receipt. This information is read-only.
*/
static int sctp_getsockopt_sctp_status(struct sock *sk, int len,
char __user *optval,
int __user *optlen)
{
struct sctp_status status;
struct sctp_association *asoc = NULL;
struct sctp_transport *transport;
sctp_assoc_t associd;
int retval = 0;
if (len < sizeof(status)) {
retval = -EINVAL;
goto out;
}
len = sizeof(status);
if (copy_from_user(&status, optval, len)) {
retval = -EFAULT;
goto out;
}
associd = status.sstat_assoc_id;
asoc = sctp_id2assoc(sk, associd);
if (!asoc) {
retval = -EINVAL;
goto out;
}
transport = asoc->peer.primary_path;
status.sstat_assoc_id = sctp_assoc2id(asoc);
status.sstat_state = sctp_assoc_to_state(asoc);
status.sstat_rwnd = asoc->peer.rwnd;
status.sstat_unackdata = asoc->unack_data;
status.sstat_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map);
status.sstat_instrms = asoc->stream.incnt;
status.sstat_outstrms = asoc->stream.outcnt;
status.sstat_fragmentation_point = asoc->frag_point;
status.sstat_primary.spinfo_assoc_id = sctp_assoc2id(transport->asoc);
memcpy(&status.sstat_primary.spinfo_address, &transport->ipaddr,
transport->af_specific->sockaddr_len);
/* Map ipv4 address into v4-mapped-on-v6 address. */
sctp_get_pf_specific(sk->sk_family)->addr_to_user(sctp_sk(sk),
(union sctp_addr *)&status.sstat_primary.spinfo_address);
status.sstat_primary.spinfo_state = transport->state;
status.sstat_primary.spinfo_cwnd = transport->cwnd;
status.sstat_primary.spinfo_srtt = transport->srtt;
status.sstat_primary.spinfo_rto = jiffies_to_msecs(transport->rto);
status.sstat_primary.spinfo_mtu = transport->pathmtu;
if (status.sstat_primary.spinfo_state == SCTP_UNKNOWN)
status.sstat_primary.spinfo_state = SCTP_ACTIVE;
if (put_user(len, optlen)) {
retval = -EFAULT;
goto out;
}
pr_debug("%s: len:%d, state:%d, rwnd:%d, assoc_id:%d\n",
__func__, len, status.sstat_state, status.sstat_rwnd,
status.sstat_assoc_id);
if (copy_to_user(optval, &status, len)) {
retval = -EFAULT;
goto out;
}
out:
return retval;
}
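/* Illustrative user-space sketch, not part of this file: read the current
* association state and peer receive window. Only sstat_assoc_id is an
* input; the kernel fills in the rest, as the function above shows. "sd"
* and "assoc_id" are assumed:
*
*   struct sctp_status st = { .sstat_assoc_id = assoc_id };
*   socklen_t len = sizeof(st);
*   if (getsockopt(sd, IPPROTO_SCTP, SCTP_STATUS, &st, &len) == 0)
*       printf("state %d rwnd %u\n", st.sstat_state, st.sstat_rwnd);
*/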
/* 7.2.2 Peer Address Information (SCTP_GET_PEER_ADDR_INFO)
*
* Applications can retrieve information about a specific peer address
* of an association, including its reachability state, congestion
* window, and retransmission timer values. This information is
* read-only.
*/
static int sctp_getsockopt_peer_addr_info(struct sock *sk, int len,
char __user *optval,
int __user *optlen)
{
struct sctp_paddrinfo pinfo;
struct sctp_transport *transport;
int retval = 0;
if (len < sizeof(pinfo)) {
retval = -EINVAL;
goto out;
}
len = sizeof(pinfo);
if (copy_from_user(&pinfo, optval, len)) {
retval = -EFAULT;
goto out;
}
transport = sctp_addr_id2transport(sk, &pinfo.spinfo_address,
pinfo.spinfo_assoc_id);
if (!transport)
return -EINVAL;
pinfo.spinfo_assoc_id = sctp_assoc2id(transport->asoc);
pinfo.spinfo_state = transport->state;
pinfo.spinfo_cwnd = transport->cwnd;
pinfo.spinfo_srtt = transport->srtt;
pinfo.spinfo_rto = jiffies_to_msecs(transport->rto);
pinfo.spinfo_mtu = transport->pathmtu;
if (pinfo.spinfo_state == SCTP_UNKNOWN)
pinfo.spinfo_state = SCTP_ACTIVE;
if (put_user(len, optlen)) {
retval = -EFAULT;
goto out;
}
if (copy_to_user(optval, &pinfo, len)) {
retval = -EFAULT;
goto out;
}
out:
return retval;
}
/* 7.1.12 Enable/Disable message fragmentation (SCTP_DISABLE_FRAGMENTS)
*
* This option is an on/off flag. If enabled no SCTP message
* fragmentation will be performed. Instead if a message being sent
* exceeds the current PMTU size, the message will NOT be sent and
* instead an error will be indicated to the user.
*/
static int sctp_getsockopt_disable_fragments(struct sock *sk, int len,
char __user *optval, int __user *optlen)
{
int val;
if (len < sizeof(int))
return -EINVAL;
len = sizeof(int);
val = (sctp_sk(sk)->disable_fragments == 1);
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval, &val, len))
return -EFAULT;
return 0;
}
/* 7.1.15 Set notification and ancillary events (SCTP_EVENTS)
*
* This socket option is used to specify various notifications and
* ancillary data the user wishes to receive.
*/
static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval,
int __user *optlen)
{
if (len == 0)
return -EINVAL;
if (len > sizeof(struct sctp_event_subscribe))
len = sizeof(struct sctp_event_subscribe);
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval, &sctp_sk(sk)->subscribe, len))
return -EFAULT;
return 0;
}
/* 7.1.8 Automatic Close of associations (SCTP_AUTOCLOSE)
*
* This socket option is applicable to the UDP-style socket only. When
* set it will cause associations that are idle for more than the
* specified number of seconds to automatically close. An association
* being idle is defined as an association that has NOT sent or received
* user data. The special value of '0' indicates that no automatic
* close of any associations should be performed. The option expects an
* integer defining the number of seconds of idle time before an
* association is closed.
*/
static int sctp_getsockopt_autoclose(struct sock *sk, int len, char __user *optval, int __user *optlen)
{
/* Applicable to UDP-style socket only */
if (sctp_style(sk, TCP))
return -EOPNOTSUPP;
if (len < sizeof(int))
return -EINVAL;
len = sizeof(int);
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval, &sctp_sk(sk)->autoclose, sizeof(int)))
return -EFAULT;
return 0;
}
/* Helper routine to branch off an association to a new socket. */
int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp)
{
struct sctp_association *asoc = sctp_id2assoc(sk, id);
struct sctp_sock *sp = sctp_sk(sk);
struct socket *sock;
int err = 0;
/* Do not peel off from one netns to another one. */
if (!net_eq(current->nsproxy->net_ns, sock_net(sk)))
return -EINVAL;
if (!asoc)
return -EINVAL;
/* If there is a thread waiting on more sndbuf space for
* sending on this asoc, it cannot be peeled.
*/
if (waitqueue_active(&asoc->wait))
return -EBUSY;
/* An association cannot be branched off from an already peeled-off
* socket, nor is this supported for tcp style sockets.
*/
if (!sctp_style(sk, UDP))
return -EINVAL;
/* Create a new socket. */
err = sock_create(sk->sk_family, SOCK_SEQPACKET, IPPROTO_SCTP, &sock);
if (err < 0)
return err;
sctp_copy_sock(sock->sk, sk, asoc);
/* Make peeled-off sockets more like 1-1 accepted sockets.
* Set the daddr and initialize id to something more random
*/
sp->pf->to_sk_daddr(&asoc->peer.primary_addr, sk);
/* Populate the fields of the newsk from the oldsk and migrate the
* asoc to the newsk.
*/
sctp_sock_migrate(sk, sock->sk, asoc, SCTP_SOCKET_UDP_HIGH_BANDWIDTH);
*sockp = sock;
return err;
}
EXPORT_SYMBOL(sctp_do_peeloff);
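/* Illustrative user-space sketch, not part of this file: branch one
* association off a one-to-many socket into its own descriptor. This is
* what libsctp's sctp_peeloff() wraps; the raw getsockopt form below uses
* the sctp_peeloff_arg_t consumed by the handler that follows. "sd" and
* "assoc_id" are assumed, and use_new_socket() is a placeholder for
* application code:
*
*   sctp_peeloff_arg_t peeloff = { .associd = assoc_id };
*   socklen_t len = sizeof(peeloff);
*   if (getsockopt(sd, IPPROTO_SCTP, SCTP_SOCKOPT_PEELOFF,
*                  &peeloff, &len) == 0)
*       use_new_socket(peeloff.sd);
*/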
static int sctp_getsockopt_peeloff_common(struct sock *sk, sctp_peeloff_arg_t *peeloff,
struct file **newfile, unsigned flags)
{
struct socket *newsock;
int retval;
retval = sctp_do_peeloff(sk, peeloff->associd, &newsock);
if (retval < 0)
goto out;
/* Map the socket to an unused fd that can be returned to the user. */
retval = get_unused_fd_flags(flags & SOCK_CLOEXEC);
if (retval < 0) {
sock_release(newsock);
goto out;
}
*newfile = sock_alloc_file(newsock, 0, NULL);
if (IS_ERR(*newfile)) {
put_unused_fd(retval);
sock_release(newsock);
retval = PTR_ERR(*newfile);
*newfile = NULL;
return retval;
}
pr_debug("%s: sk:%p, newsk:%p, sd:%d\n", __func__, sk, newsock->sk,
retval);
peeloff->sd = retval;
if (flags & SOCK_NONBLOCK)
(*newfile)->f_flags |= O_NONBLOCK;
out:
return retval;
}
static int sctp_getsockopt_peeloff(struct sock *sk, int len, char __user *optval, int __user *optlen)
{
sctp_peeloff_arg_t peeloff;
struct file *newfile = NULL;
int retval = 0;
if (len < sizeof(sctp_peeloff_arg_t))
return -EINVAL;
len = sizeof(sctp_peeloff_arg_t);
if (copy_from_user(&peeloff, optval, len))
return -EFAULT;
retval = sctp_getsockopt_peeloff_common(sk, &peeloff, &newfile, 0);
if (retval < 0)
goto out;
/* Return the fd mapped to the new socket. */
if (put_user(len, optlen)) {
fput(newfile);
put_unused_fd(retval);
return -EFAULT;
}
if (copy_to_user(optval, &peeloff, len)) {
fput(newfile);
put_unused_fd(retval);
return -EFAULT;
}
fd_install(retval, newfile);
out:
return retval;
}
static int sctp_getsockopt_peeloff_flags(struct sock *sk, int len,
char __user *optval, int __user *optlen)
{
sctp_peeloff_flags_arg_t peeloff;
struct file *newfile = NULL;
int retval = 0;
if (len < sizeof(sctp_peeloff_flags_arg_t))
return -EINVAL;
len = sizeof(sctp_peeloff_flags_arg_t);
if (copy_from_user(&peeloff, optval, len))
return -EFAULT;
retval = sctp_getsockopt_peeloff_common(sk, &peeloff.p_arg,
&newfile, peeloff.flags);
if (retval < 0)
goto out;
/* Return the fd mapped to the new socket. */
if (put_user(len, optlen)) {
fput(newfile);
put_unused_fd(retval);
return -EFAULT;
}
if (copy_to_user(optval, &peeloff, len)) {
fput(newfile);
put_unused_fd(retval);
return -EFAULT;
}
fd_install(retval, newfile);
out:
return retval;
}
/* 7.1.13 Peer Address Parameters (SCTP_PEER_ADDR_PARAMS)
*
* Applications can enable or disable heartbeats for any peer address of
* an association, modify an address's heartbeat interval, force a
* heartbeat to be sent immediately, and adjust the address's maximum
* number of retransmissions sent before an address is considered
* unreachable. The following structure is used to access and modify an
* address's parameters:
*
* struct sctp_paddrparams {
* sctp_assoc_t spp_assoc_id;
* struct sockaddr_storage spp_address;
* uint32_t spp_hbinterval;
* uint16_t spp_pathmaxrxt;
* uint32_t spp_pathmtu;
* uint32_t spp_sackdelay;
* uint32_t spp_flags;
* };
*
* spp_assoc_id - (one-to-many style socket) This is filled in by the
* application, and identifies the association for
* this query.
* spp_address - This specifies which address is of interest.
* spp_hbinterval - This contains the value of the heartbeat interval,
* in milliseconds. If a value of zero
* is present in this field then no changes are to
* be made to this parameter.
* spp_pathmaxrxt - This contains the maximum number of
* retransmissions before this address shall be
* considered unreachable. If a value of zero
* is present in this field then no changes are to
* be made to this parameter.
* spp_pathmtu - When Path MTU discovery is disabled the value
* specified here will be the "fixed" path mtu.
* Note that if the spp_address field is empty
* then all associations on this address will
* have this fixed path mtu set upon them.
*
* spp_sackdelay - When delayed sack is enabled, this value specifies
* the number of milliseconds that sacks will be delayed
* for. This value will apply to all addresses of an
* association if the spp_address field is empty. Note
* also, that if delayed sack is enabled and this
* value is set to 0, no change is made to the last
* recorded delayed sack timer value.
*
* spp_flags - These flags are used to control various features
* on an association. The flag field may contain
* zero or more of the following options.
*
* SPP_HB_ENABLE - Enable heartbeats on the
* specified address. Note that if the address
* field is empty all addresses for the association
* have heartbeats enabled upon them.
*
* SPP_HB_DISABLE - Disable heartbeats on the
* specified address. Note that if the address
* field is empty all addresses for the association
* will have their heartbeats disabled. Note also
* that SPP_HB_ENABLE and SPP_HB_DISABLE are
* mutually exclusive; only one of these two should
* be specified. Enabling both fields will have
* undetermined results.
*
* SPP_HB_DEMAND - Request a user initiated heartbeat
* to be made immediately.
*
* SPP_PMTUD_ENABLE - This field will enable PMTU
* discovery upon the specified address. Note that
* if the address field is empty then all addresses
* on the association are affected.
*
* SPP_PMTUD_DISABLE - This field will disable PMTU
* discovery upon the specified address. Note that
* if the address field is empty then all addresses
* on the association are affected. Note also that
* SPP_PMTUD_ENABLE and SPP_PMTUD_DISABLE are mutually
* exclusive. Enabling both will have undetermined
* results.
*
* SPP_SACKDELAY_ENABLE - Setting this flag turns
* on delayed sack. The time specified in spp_sackdelay
* is used to specify the sack delay for this address. Note
* that if spp_address is empty then all addresses will
* enable delayed sack and take on the sack delay
* value specified in spp_sackdelay.
* SPP_SACKDELAY_DISABLE - Setting this flag turns
* off delayed sack. If the spp_address field is blank then
* delayed sack is disabled for the entire association. Note
* also that this field is mutually exclusive to
* SPP_SACKDELAY_ENABLE, setting both will have undefined
* results.
*/
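/* Illustrative userspace sketch (hypothetical fd/assoc_id/peer variables,
 * declarations from <netinet/sctp.h>): reading the heartbeat interval for
 * one peer address. spp_assoc_id and spp_address must be filled in before
 * the call, exactly as the getter below expects.
 *
 *	struct sctp_paddrparams p;
 *	socklen_t len = sizeof(p);
 *
 *	memset(&p, 0, sizeof(p));
 *	p.spp_assoc_id = assoc_id;
 *	memcpy(&p.spp_address, &peer, sizeof(peer));
 *	if (getsockopt(fd, IPPROTO_SCTP, SCTP_PEER_ADDR_PARAMS,
 *		       &p, &len) == 0)
 *		printf("hb interval: %u ms\n", p.spp_hbinterval);
 */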
static int sctp_getsockopt_peer_addr_params(struct sock *sk, int len,
char __user *optval, int __user *optlen)
{
struct sctp_paddrparams params;
struct sctp_transport *trans = NULL;
struct sctp_association *asoc = NULL;
struct sctp_sock *sp = sctp_sk(sk);
if (len < sizeof(struct sctp_paddrparams))
return -EINVAL;
len = sizeof(struct sctp_paddrparams);
if (copy_from_user(&params, optval, len))
return -EFAULT;
/* If an address other than INADDR_ANY is specified, and
* no transport is found, then the request is invalid.
*/
if (!sctp_is_any(sk, (union sctp_addr *)&params.spp_address)) {
trans = sctp_addr_id2transport(sk, &params.spp_address,
params.spp_assoc_id);
if (!trans) {
pr_debug("%s: failed no transport\n", __func__);
return -EINVAL;
}
}
/* Get association, if assoc_id != 0 and the socket is a one
* to many style socket, and an association was not found, then
* the id was invalid.
*/
asoc = sctp_id2assoc(sk, params.spp_assoc_id);
if (!asoc && params.spp_assoc_id && sctp_style(sk, UDP)) {
pr_debug("%s: failed no association\n", __func__);
return -EINVAL;
}
if (trans) {
/* Fetch transport values. */
params.spp_hbinterval = jiffies_to_msecs(trans->hbinterval);
params.spp_pathmtu = trans->pathmtu;
params.spp_pathmaxrxt = trans->pathmaxrxt;
params.spp_sackdelay = jiffies_to_msecs(trans->sackdelay);
/*draft-11 doesn't say what to return in spp_flags*/
params.spp_flags = trans->param_flags;
} else if (asoc) {
/* Fetch association values. */
params.spp_hbinterval = jiffies_to_msecs(asoc->hbinterval);
params.spp_pathmtu = asoc->pathmtu;
params.spp_pathmaxrxt = asoc->pathmaxrxt;
params.spp_sackdelay = jiffies_to_msecs(asoc->sackdelay);
/*draft-11 doesn't say what to return in spp_flags*/
params.spp_flags = asoc->param_flags;
} else {
/* Fetch socket values. */
params.spp_hbinterval = sp->hbinterval;
params.spp_pathmtu = sp->pathmtu;
params.spp_sackdelay = sp->sackdelay;
params.spp_pathmaxrxt = sp->pathmaxrxt;
/*draft-11 doesn't say what to return in spp_flags*/
params.spp_flags = sp->param_flags;
}
if (copy_to_user(optval, &params, len))
return -EFAULT;
if (put_user(len, optlen))
return -EFAULT;
return 0;
}
/*
* 7.1.23. Get or set delayed ack timer (SCTP_DELAYED_SACK)
*
* This option will affect the way delayed acks are performed. This
* option allows you to get or set the delayed ack time, in
* milliseconds. It also allows changing the delayed ack frequency.
* Changing the frequency to 1 disables the delayed sack algorithm. If
* the assoc_id is 0, then this sets or gets the endpoints default
* values. If the assoc_id field is non-zero, then the set or get
* affects the specified association for the one to many model (the
* assoc_id field is ignored by the one to one model). Note that if
* sack_delay or sack_freq are 0 when setting this option, then the
* current values will remain unchanged.
*
* struct sctp_sack_info {
* sctp_assoc_t sack_assoc_id;
* uint32_t sack_delay;
* uint32_t sack_freq;
* };
*
* sack_assoc_id - This parameter indicates which association the user
* is performing an action upon. Note that if this field's value is
* zero then the endpoint's default value is changed (affecting future
* associations only).
*
* sack_delay - This parameter contains the number of milliseconds that
* the user is requesting the delayed ACK timer be set to. Note that
* this value is defined in the standard to be between 200 and 500
* milliseconds.
*
* sack_freq - This parameter contains the number of packets that must
* be received before a sack is sent without waiting for the delay
* timer to expire. The default value for this is 2, setting this
* value to 1 will disable the delayed sack algorithm.
*/
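/* Illustrative userspace sketch (hypothetical fd/assoc_id): querying the
 * delayed SACK settings described above. A reply of sack_delay == 0 with
 * sack_freq == 1 means delayed SACK is disabled, matching the getter
 * below.
 *
 *	struct sctp_sack_info si = { .sack_assoc_id = assoc_id };
 *	socklen_t len = sizeof(si);
 *
 *	if (getsockopt(fd, IPPROTO_SCTP, SCTP_DELAYED_SACK,
 *		       &si, &len) == 0)
 *		printf("delay %u ms, freq %u\n",
 *		       si.sack_delay, si.sack_freq);
 */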
static int sctp_getsockopt_delayed_ack(struct sock *sk, int len,
char __user *optval,
int __user *optlen)
{
struct sctp_sack_info params;
struct sctp_association *asoc = NULL;
struct sctp_sock *sp = sctp_sk(sk);
if (len >= sizeof(struct sctp_sack_info)) {
len = sizeof(struct sctp_sack_info);
if (copy_from_user(&params, optval, len))
return -EFAULT;
} else if (len == sizeof(struct sctp_assoc_value)) {
pr_warn_ratelimited(DEPRECATED
"%s (pid %d) "
"Use of struct sctp_assoc_value in delayed_ack socket option.\n"
"Use struct sctp_sack_info instead\n",
current->comm, task_pid_nr(current));
if (copy_from_user(&params, optval, len))
return -EFAULT;
} else
return -EINVAL;
/* Get association, if sack_assoc_id != 0 and the socket is a one
* to many style socket, and an association was not found, then
* the id was invalid.
*/
asoc = sctp_id2assoc(sk, params.sack_assoc_id);
if (!asoc && params.sack_assoc_id && sctp_style(sk, UDP))
return -EINVAL;
if (asoc) {
/* Fetch association values. */
if (asoc->param_flags & SPP_SACKDELAY_ENABLE) {
params.sack_delay = jiffies_to_msecs(
asoc->sackdelay);
params.sack_freq = asoc->sackfreq;
} else {
params.sack_delay = 0;
params.sack_freq = 1;
}
} else {
/* Fetch socket values. */
if (sp->param_flags & SPP_SACKDELAY_ENABLE) {
params.sack_delay = sp->sackdelay;
params.sack_freq = sp->sackfreq;
} else {
params.sack_delay = 0;
params.sack_freq = 1;
}
}
if (copy_to_user(optval, &params, len))
return -EFAULT;
if (put_user(len, optlen))
return -EFAULT;
return 0;
}
/* 7.1.3 Initialization Parameters (SCTP_INITMSG)
*
* Applications can specify protocol parameters for the default association
* initialization. The option name argument to setsockopt() and getsockopt()
* is SCTP_INITMSG.
*
* Setting initialization parameters is effective only on an unconnected
* socket (for UDP-style sockets only future associations are affected
* by the change). With TCP-style sockets, this option is inherited by
* sockets derived from a listener socket.
*/
static int sctp_getsockopt_initmsg(struct sock *sk, int len, char __user *optval, int __user *optlen)
{
if (len < sizeof(struct sctp_initmsg))
return -EINVAL;
len = sizeof(struct sctp_initmsg);
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval, &sctp_sk(sk)->initmsg, len))
return -EFAULT;
return 0;
}
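/* Illustrative userspace sketch (hypothetical fd): requesting stream
 * counts for future associations before connecting, per the semantics
 * described above.
 *
 *	struct sctp_initmsg im = {
 *		.sinit_num_ostreams = 10,
 *		.sinit_max_instreams = 10,
 *		.sinit_max_attempts = 4,
 *	};
 *
 *	setsockopt(fd, IPPROTO_SCTP, SCTP_INITMSG, &im, sizeof(im));
 */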
static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
char __user *optval, int __user *optlen)
{
struct sctp_association *asoc;
int cnt = 0;
struct sctp_getaddrs getaddrs;
struct sctp_transport *from;
void __user *to;
union sctp_addr temp;
struct sctp_sock *sp = sctp_sk(sk);
int addrlen;
size_t space_left;
int bytes_copied;
if (len < sizeof(struct sctp_getaddrs))
return -EINVAL;
if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs)))
return -EFAULT;
/* For UDP-style sockets, id specifies the association to query. */
asoc = sctp_id2assoc(sk, getaddrs.assoc_id);
if (!asoc)
return -EINVAL;
to = optval + offsetof(struct sctp_getaddrs, addrs);
space_left = len - offsetof(struct sctp_getaddrs, addrs);
list_for_each_entry(from, &asoc->peer.transport_addr_list,
transports) {
memcpy(&temp, &from->ipaddr, sizeof(temp));
addrlen = sctp_get_pf_specific(sk->sk_family)
->addr_to_user(sp, &temp);
if (space_left < addrlen)
return -ENOMEM;
if (copy_to_user(to, &temp, addrlen))
return -EFAULT;
to += addrlen;
cnt++;
space_left -= addrlen;
}
if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num))
return -EFAULT;
bytes_copied = ((char __user *)to) - optval;
if (put_user(bytes_copied, optlen))
return -EFAULT;
return 0;
}
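/* Illustrative userspace sketch: SCTP_GET_PEER_ADDRS is normally reached
 * through the lksctp-tools helpers rather than a raw getsockopt(); fd and
 * assoc_id are hypothetical.
 *
 *	struct sockaddr *addrs;
 *	int n = sctp_getpaddrs(fd, assoc_id, &addrs);
 *	... walk the n packed sockaddrs ...
 *	sctp_freepaddrs(addrs);
 */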
static int sctp_copy_laddrs(struct sock *sk, __u16 port, void *to,
size_t space_left, int *bytes_copied)
{
struct sctp_sockaddr_entry *addr;
union sctp_addr temp;
int cnt = 0;
int addrlen;
struct net *net = sock_net(sk);
rcu_read_lock();
list_for_each_entry_rcu(addr, &net->sctp.local_addr_list, list) {
if (!addr->valid)
continue;
if ((PF_INET == sk->sk_family) &&
(AF_INET6 == addr->a.sa.sa_family))
continue;
if ((PF_INET6 == sk->sk_family) &&
inet_v6_ipv6only(sk) &&
(AF_INET == addr->a.sa.sa_family))
continue;
memcpy(&temp, &addr->a, sizeof(temp));
if (!temp.v4.sin_port)
temp.v4.sin_port = htons(port);
addrlen = sctp_get_pf_specific(sk->sk_family)
->addr_to_user(sctp_sk(sk), &temp);
if (space_left < addrlen) {
cnt = -ENOMEM;
break;
}
memcpy(to, &temp, addrlen);
to += addrlen;
cnt++;
space_left -= addrlen;
*bytes_copied += addrlen;
}
rcu_read_unlock();
return cnt;
}
static int sctp_getsockopt_local_addrs(struct sock *sk, int len,
char __user *optval, int __user *optlen)
{
struct sctp_bind_addr *bp;
struct sctp_association *asoc;
int cnt = 0;
struct sctp_getaddrs getaddrs;
struct sctp_sockaddr_entry *addr;
void __user *to;
union sctp_addr temp;
struct sctp_sock *sp = sctp_sk(sk);
int addrlen;
int err = 0;
size_t space_left;
int bytes_copied = 0;
void *addrs;
void *buf;
if (len < sizeof(struct sctp_getaddrs))
return -EINVAL;
if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs)))
return -EFAULT;
/*
* For UDP-style sockets, id specifies the association to query.
* If the id field is set to the value '0' then the locally bound
* addresses are returned without regard to any particular
* association.
*/
if (0 == getaddrs.assoc_id) {
bp = &sctp_sk(sk)->ep->base.bind_addr;
} else {
asoc = sctp_id2assoc(sk, getaddrs.assoc_id);
if (!asoc)
return -EINVAL;
bp = &asoc->base.bind_addr;
}
to = optval + offsetof(struct sctp_getaddrs, addrs);
space_left = len - offsetof(struct sctp_getaddrs, addrs);
addrs = kmalloc(space_left, GFP_USER | __GFP_NOWARN);
if (!addrs)
return -ENOMEM;
/* If the endpoint is bound to 0.0.0.0 or ::0, get the valid
* addresses from the global local address list.
*/
if (sctp_list_single_entry(&bp->address_list)) {
addr = list_entry(bp->address_list.next,
struct sctp_sockaddr_entry, list);
if (sctp_is_any(sk, &addr->a)) {
cnt = sctp_copy_laddrs(sk, bp->port, addrs,
space_left, &bytes_copied);
if (cnt < 0) {
err = cnt;
goto out;
}
goto copy_getaddrs;
}
}
buf = addrs;
/* Protection on the bound address list is not needed since
* in the socket option context we hold a socket lock and
* thus the bound address list can't change.
*/
list_for_each_entry(addr, &bp->address_list, list) {
memcpy(&temp, &addr->a, sizeof(temp));
addrlen = sctp_get_pf_specific(sk->sk_family)
->addr_to_user(sp, &temp);
if (space_left < addrlen) {
err = -ENOMEM; /*fixme: right error?*/
goto out;
}
memcpy(buf, &temp, addrlen);
buf += addrlen;
bytes_copied += addrlen;
cnt++;
space_left -= addrlen;
}
copy_getaddrs:
if (copy_to_user(to, addrs, bytes_copied)) {
err = -EFAULT;
goto out;
}
if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num)) {
err = -EFAULT;
goto out;
}
if (put_user(bytes_copied, optlen))
err = -EFAULT;
out:
kfree(addrs);
return err;
}
/* 7.1.10 Set Primary Address (SCTP_PRIMARY_ADDR)
*
* Requests that the local SCTP stack use the enclosed peer address as
* the association primary. The enclosed address must be one of the
* association peer's addresses.
*/
static int sctp_getsockopt_primary_addr(struct sock *sk, int len,
char __user *optval, int __user *optlen)
{
struct sctp_prim prim;
struct sctp_association *asoc;
struct sctp_sock *sp = sctp_sk(sk);
if (len < sizeof(struct sctp_prim))
return -EINVAL;
len = sizeof(struct sctp_prim);
if (copy_from_user(&prim, optval, len))
return -EFAULT;
asoc = sctp_id2assoc(sk, prim.ssp_assoc_id);
if (!asoc)
return -EINVAL;
if (!asoc->peer.primary_path)
return -ENOTCONN;
memcpy(&prim.ssp_addr, &asoc->peer.primary_path->ipaddr,
asoc->peer.primary_path->af_specific->sockaddr_len);
sctp_get_pf_specific(sk->sk_family)->addr_to_user(sp,
(union sctp_addr *)&prim.ssp_addr);
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval, &prim, len))
return -EFAULT;
return 0;
}
/*
* 7.1.11 Set Adaptation Layer Indicator (SCTP_ADAPTATION_LAYER)
*
* Requests that the local endpoint set the specified Adaptation Layer
* Indication parameter for all future INIT and INIT-ACK exchanges.
*/
static int sctp_getsockopt_adaptation_layer(struct sock *sk, int len,
char __user *optval, int __user *optlen)
{
struct sctp_setadaptation adaptation;
if (len < sizeof(struct sctp_setadaptation))
return -EINVAL;
len = sizeof(struct sctp_setadaptation);
adaptation.ssb_adaptation_ind = sctp_sk(sk)->adaptation_ind;
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval, &adaptation, len))
return -EFAULT;
return 0;
}
/*
*
* 7.1.14 Set default send parameters (SCTP_DEFAULT_SEND_PARAM)
*
* Applications that wish to use the sendto() system call may wish to
* specify a default set of parameters that would normally be supplied
* through the inclusion of ancillary data. This socket option allows
* such an application to set the default sctp_sndrcvinfo structure.
* The application that wishes to use this socket option simply passes
* in to this call the sctp_sndrcvinfo structure defined in Section
* 5.2.2. The input parameters accepted by this call include
* sinfo_stream, sinfo_flags, sinfo_ppid, sinfo_context,
* sinfo_timetolive. The user must provide the sinfo_assoc_id field in
* this call if the caller is using the UDP model.
*
* For getsockopt, it gets the default sctp_sndrcvinfo structure.
*/
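/* Illustrative userspace sketch (hypothetical fd/assoc_id): fetching the
 * defaults that sendmsg() will use when no ancillary data is supplied.
 *
 *	struct sctp_sndrcvinfo s = { .sinfo_assoc_id = assoc_id };
 *	socklen_t len = sizeof(s);
 *
 *	if (getsockopt(fd, IPPROTO_SCTP, SCTP_DEFAULT_SEND_PARAM,
 *		       &s, &len) == 0)
 *		printf("stream %u, ttl %u ms\n",
 *		       s.sinfo_stream, s.sinfo_timetolive);
 */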
static int sctp_getsockopt_default_send_param(struct sock *sk,
int len, char __user *optval,
int __user *optlen)
{
struct sctp_sock *sp = sctp_sk(sk);
struct sctp_association *asoc;
struct sctp_sndrcvinfo info;
if (len < sizeof(info))
return -EINVAL;
len = sizeof(info);
if (copy_from_user(&info, optval, len))
return -EFAULT;
asoc = sctp_id2assoc(sk, info.sinfo_assoc_id);
if (!asoc && info.sinfo_assoc_id && sctp_style(sk, UDP))
return -EINVAL;
if (asoc) {
info.sinfo_stream = asoc->default_stream;
info.sinfo_flags = asoc->default_flags;
info.sinfo_ppid = asoc->default_ppid;
info.sinfo_context = asoc->default_context;
info.sinfo_timetolive = asoc->default_timetolive;
} else {
info.sinfo_stream = sp->default_stream;
info.sinfo_flags = sp->default_flags;
info.sinfo_ppid = sp->default_ppid;
info.sinfo_context = sp->default_context;
info.sinfo_timetolive = sp->default_timetolive;
}
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval, &info, len))
return -EFAULT;
return 0;
}
/* RFC6458, Section 8.1.31. Set/get Default Send Parameters
* (SCTP_DEFAULT_SNDINFO)
*/
static int sctp_getsockopt_default_sndinfo(struct sock *sk, int len,
char __user *optval,
int __user *optlen)
{
struct sctp_sock *sp = sctp_sk(sk);
struct sctp_association *asoc;
struct sctp_sndinfo info;
if (len < sizeof(info))
return -EINVAL;
len = sizeof(info);
if (copy_from_user(&info, optval, len))
return -EFAULT;
asoc = sctp_id2assoc(sk, info.snd_assoc_id);
if (!asoc && info.snd_assoc_id && sctp_style(sk, UDP))
return -EINVAL;
if (asoc) {
info.snd_sid = asoc->default_stream;
info.snd_flags = asoc->default_flags;
info.snd_ppid = asoc->default_ppid;
info.snd_context = asoc->default_context;
} else {
info.snd_sid = sp->default_stream;
info.snd_flags = sp->default_flags;
info.snd_ppid = sp->default_ppid;
info.snd_context = sp->default_context;
}
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval, &info, len))
return -EFAULT;
return 0;
}
/*
*
* 7.1.5 SCTP_NODELAY
*
* Turn on/off any Nagle-like algorithm. This means that packets are
* generally sent as soon as possible and no unnecessary delays are
* introduced, at the cost of more packets in the network. Expects an
* integer boolean flag.
*/
static int sctp_getsockopt_nodelay(struct sock *sk, int len,
char __user *optval, int __user *optlen)
{
int val;
if (len < sizeof(int))
return -EINVAL;
len = sizeof(int);
val = (sctp_sk(sk)->nodelay == 1);
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval, &val, len))
return -EFAULT;
return 0;
}
/*
*
* 7.1.1 SCTP_RTOINFO
*
* The protocol parameters used to initialize and bound retransmission
* timeout (RTO) are tunable. sctp_rtoinfo structure is used to access
* and modify these parameters.
* All parameters are time values, in milliseconds. A value of 0, when
* modifying the parameters, indicates that the current value should not
* be changed.
*
*/
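/* Illustrative userspace sketch (hypothetical fd/assoc_id): reading the
 * RTO bounds in milliseconds, per the description above.
 *
 *	struct sctp_rtoinfo ri = { .srto_assoc_id = assoc_id };
 *	socklen_t len = sizeof(ri);
 *
 *	if (getsockopt(fd, IPPROTO_SCTP, SCTP_RTOINFO, &ri, &len) == 0)
 *		printf("init %u, min %u, max %u\n",
 *		       ri.srto_initial, ri.srto_min, ri.srto_max);
 */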
static int sctp_getsockopt_rtoinfo(struct sock *sk, int len,
char __user *optval,
int __user *optlen) {
struct sctp_rtoinfo rtoinfo;
struct sctp_association *asoc;
if (len < sizeof (struct sctp_rtoinfo))
return -EINVAL;
len = sizeof(struct sctp_rtoinfo);
if (copy_from_user(&rtoinfo, optval, len))
return -EFAULT;
asoc = sctp_id2assoc(sk, rtoinfo.srto_assoc_id);
if (!asoc && rtoinfo.srto_assoc_id && sctp_style(sk, UDP))
return -EINVAL;
/* Values corresponding to the specific association. */
if (asoc) {
rtoinfo.srto_initial = jiffies_to_msecs(asoc->rto_initial);
rtoinfo.srto_max = jiffies_to_msecs(asoc->rto_max);
rtoinfo.srto_min = jiffies_to_msecs(asoc->rto_min);
} else {
/* Values corresponding to the endpoint. */
struct sctp_sock *sp = sctp_sk(sk);
rtoinfo.srto_initial = sp->rtoinfo.srto_initial;
rtoinfo.srto_max = sp->rtoinfo.srto_max;
rtoinfo.srto_min = sp->rtoinfo.srto_min;
}
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval, &rtoinfo, len))
return -EFAULT;
return 0;
}
/*
*
* 7.1.2 SCTP_ASSOCINFO
*
* This option is used to tune the maximum retransmission attempts
* of the association.
* Returns an error if the new association retransmission value is
* greater than the sum of the retransmission value of the peer.
* See [SCTP] for more information.
*
*/
static int sctp_getsockopt_associnfo(struct sock *sk, int len,
char __user *optval,
int __user *optlen)
{
struct sctp_assocparams assocparams;
struct sctp_association *asoc;
struct list_head *pos;
int cnt = 0;
if (len < sizeof (struct sctp_assocparams))
return -EINVAL;
len = sizeof(struct sctp_assocparams);
if (copy_from_user(&assocparams, optval, len))
return -EFAULT;
asoc = sctp_id2assoc(sk, assocparams.sasoc_assoc_id);
if (!asoc && assocparams.sasoc_assoc_id && sctp_style(sk, UDP))
return -EINVAL;
/* Values corresponding to the specific association */
if (asoc) {
assocparams.sasoc_asocmaxrxt = asoc->max_retrans;
assocparams.sasoc_peer_rwnd = asoc->peer.rwnd;
assocparams.sasoc_local_rwnd = asoc->a_rwnd;
assocparams.sasoc_cookie_life = ktime_to_ms(asoc->cookie_life);
list_for_each(pos, &asoc->peer.transport_addr_list) {
cnt++;
}
assocparams.sasoc_number_peer_destinations = cnt;
} else {
/* Values corresponding to the endpoint */
struct sctp_sock *sp = sctp_sk(sk);
assocparams.sasoc_asocmaxrxt = sp->assocparams.sasoc_asocmaxrxt;
assocparams.sasoc_peer_rwnd = sp->assocparams.sasoc_peer_rwnd;
assocparams.sasoc_local_rwnd = sp->assocparams.sasoc_local_rwnd;
assocparams.sasoc_cookie_life =
sp->assocparams.sasoc_cookie_life;
assocparams.sasoc_number_peer_destinations =
sp->assocparams.
sasoc_number_peer_destinations;
}
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval, &assocparams, len))
return -EFAULT;
return 0;
}
/*
* 7.1.16 Set/clear IPv4 mapped addresses (SCTP_I_WANT_MAPPED_V4_ADDR)
*
* This socket option is a boolean flag which turns on or off mapped V4
* addresses. If this option is turned on and the socket is type
* PF_INET6, then IPv4 addresses will be mapped to V6 representation.
* If this option is turned off, then no mapping will be done of V4
* addresses and a user will receive both PF_INET6 and PF_INET type
* addresses on the socket.
*/
static int sctp_getsockopt_mappedv4(struct sock *sk, int len,
char __user *optval, int __user *optlen)
{
int val;
struct sctp_sock *sp = sctp_sk(sk);
if (len < sizeof(int))
return -EINVAL;
len = sizeof(int);
val = sp->v4mapped;
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval, &val, len))
return -EFAULT;
return 0;
}
/*
* 7.1.29. Set or Get the default context (SCTP_CONTEXT)
* (chapter and verse is quoted at sctp_setsockopt_context())
*/
static int sctp_getsockopt_context(struct sock *sk, int len,
char __user *optval, int __user *optlen)
{
struct sctp_assoc_value params;
struct sctp_sock *sp;
struct sctp_association *asoc;
if (len < sizeof(struct sctp_assoc_value))
return -EINVAL;
len = sizeof(struct sctp_assoc_value);
if (copy_from_user(&params, optval, len))
return -EFAULT;
sp = sctp_sk(sk);
if (params.assoc_id != 0) {
asoc = sctp_id2assoc(sk, params.assoc_id);
if (!asoc)
return -EINVAL;
params.assoc_value = asoc->default_rcv_context;
} else {
params.assoc_value = sp->default_rcv_context;
}
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval, &params, len))
return -EFAULT;
return 0;
}
/*
* 8.1.16. Get or Set the Maximum Fragmentation Size (SCTP_MAXSEG)
* This option will get or set the maximum size to put in any outgoing
* SCTP DATA chunk. If a message is larger than this size it will be
* fragmented by SCTP into the specified size. Note that the underlying
* SCTP implementation may fragment into smaller sized chunks when the
* PMTU of the underlying association is smaller than the value set by
* the user. The default value for this option is '0' which indicates
* the user is NOT limiting fragmentation and only the PMTU will affect
* SCTP's choice of DATA chunk size. Note also that values set larger
* than the maximum size of an IP datagram will effectively let SCTP
* control fragmentation (i.e. the same as setting this option to 0).
*
* The following structure is used to access and modify this parameter:
*
* struct sctp_assoc_value {
* sctp_assoc_t assoc_id;
* uint32_t assoc_value;
* };
*
* assoc_id: This parameter is ignored for one-to-one style sockets.
* For one-to-many style sockets this parameter indicates which
* association the user is performing an action upon. Note that if
* this field's value is zero then the endpoint's default value is
* changed (affecting future associations only).
* assoc_value: This parameter specifies the maximum size in bytes.
*/
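/* Illustrative userspace sketch (hypothetical fd): capping DATA chunk
 * size for all future associations on the endpoint (assoc_id == 0), as
 * described above. A value of 0 would hand fragmentation control back to
 * the PMTU.
 *
 *	struct sctp_assoc_value av = { .assoc_id = 0, .assoc_value = 1200 };
 *
 *	setsockopt(fd, IPPROTO_SCTP, SCTP_MAXSEG, &av, sizeof(av));
 */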
static int sctp_getsockopt_maxseg(struct sock *sk, int len,
char __user *optval, int __user *optlen)
{
struct sctp_assoc_value params;
struct sctp_association *asoc;
if (len == sizeof(int)) {
pr_warn_ratelimited(DEPRECATED
"%s (pid %d) "
"Use of int in maxseg socket option.\n"
"Use struct sctp_assoc_value instead\n",
current->comm, task_pid_nr(current));
params.assoc_id = 0;
} else if (len >= sizeof(struct sctp_assoc_value)) {
len = sizeof(struct sctp_assoc_value);
if (copy_from_user(&params, optval, sizeof(params)))
return -EFAULT;
} else
return -EINVAL;
asoc = sctp_id2assoc(sk, params.assoc_id);
if (!asoc && params.assoc_id && sctp_style(sk, UDP))
return -EINVAL;
if (asoc)
params.assoc_value = asoc->frag_point;
else
params.assoc_value = sctp_sk(sk)->user_frag;
if (put_user(len, optlen))
return -EFAULT;
if (len == sizeof(int)) {
if (copy_to_user(optval, &params.assoc_value, len))
return -EFAULT;
} else {
if (copy_to_user(optval, &params, len))
return -EFAULT;
}
return 0;
}
/*
* 7.1.24. Get or set fragmented interleave (SCTP_FRAGMENT_INTERLEAVE)
* (chapter and verse is quoted at sctp_setsockopt_fragment_interleave())
*/
static int sctp_getsockopt_fragment_interleave(struct sock *sk, int len,
char __user *optval, int __user *optlen)
{
int val;
if (len < sizeof(int))
return -EINVAL;
len = sizeof(int);
val = sctp_sk(sk)->frag_interleave;
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval, &val, len))
return -EFAULT;
return 0;
}
/*
* 7.1.25. Set or Get the sctp partial delivery point
* (chapter and verse is quoted at sctp_setsockopt_partial_delivery_point())
*/
static int sctp_getsockopt_partial_delivery_point(struct sock *sk, int len,
char __user *optval,
int __user *optlen)
{
u32 val;
if (len < sizeof(u32))
return -EINVAL;
len = sizeof(u32);
val = sctp_sk(sk)->pd_point;
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval, &val, len))
return -EFAULT;
return 0;
}
/*
* 7.1.28. Set or Get the maximum burst (SCTP_MAX_BURST)
* (chapter and verse is quoted at sctp_setsockopt_maxburst())
*/
static int sctp_getsockopt_maxburst(struct sock *sk, int len,
char __user *optval,
int __user *optlen)
{
struct sctp_assoc_value params;
struct sctp_sock *sp;
struct sctp_association *asoc;
if (len == sizeof(int)) {
pr_warn_ratelimited(DEPRECATED
"%s (pid %d) "
"Use of int in max_burst socket option.\n"
"Use struct sctp_assoc_value instead\n",
current->comm, task_pid_nr(current));
params.assoc_id = 0;
} else if (len >= sizeof(struct sctp_assoc_value)) {
len = sizeof(struct sctp_assoc_value);
if (copy_from_user(&params, optval, len))
return -EFAULT;
} else
return -EINVAL;
sp = sctp_sk(sk);
if (params.assoc_id != 0) {
asoc = sctp_id2assoc(sk, params.assoc_id);
if (!asoc)
return -EINVAL;
params.assoc_value = asoc->max_burst;
} else
params.assoc_value = sp->max_burst;
if (len == sizeof(int)) {
if (copy_to_user(optval, &params.assoc_value, len))
return -EFAULT;
} else {
if (copy_to_user(optval, &params, len))
return -EFAULT;
}
return 0;
}
static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
char __user *optval, int __user *optlen)
{
struct sctp_endpoint *ep = sctp_sk(sk)->ep;
struct sctp_hmacalgo __user *p = (void __user *)optval;
struct sctp_hmac_algo_param *hmacs;
__u16 data_len = 0;
u32 num_idents;
int i;
if (!ep->auth_enable)
return -EACCES;
hmacs = ep->auth_hmacs_list;
data_len = ntohs(hmacs->param_hdr.length) -
sizeof(struct sctp_paramhdr);
if (len < sizeof(struct sctp_hmacalgo) + data_len)
return -EINVAL;
len = sizeof(struct sctp_hmacalgo) + data_len;
num_idents = data_len / sizeof(u16);
if (put_user(len, optlen))
return -EFAULT;
if (put_user(num_idents, &p->shmac_num_idents))
return -EFAULT;
for (i = 0; i < num_idents; i++) {
__u16 hmacid = ntohs(hmacs->hmac_ids[i]);
if (copy_to_user(&p->shmac_idents[i], &hmacid, sizeof(__u16)))
return -EFAULT;
}
return 0;
}
static int sctp_getsockopt_active_key(struct sock *sk, int len,
char __user *optval, int __user *optlen)
{
struct sctp_endpoint *ep = sctp_sk(sk)->ep;
struct sctp_authkeyid val;
struct sctp_association *asoc;
if (!ep->auth_enable)
return -EACCES;
if (len < sizeof(struct sctp_authkeyid))
return -EINVAL;
if (copy_from_user(&val, optval, sizeof(struct sctp_authkeyid)))
return -EFAULT;
asoc = sctp_id2assoc(sk, val.scact_assoc_id);
if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP))
return -EINVAL;
if (asoc)
val.scact_keynumber = asoc->active_key_id;
else
val.scact_keynumber = ep->active_key_id;
len = sizeof(struct sctp_authkeyid);
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval, &val, len))
return -EFAULT;
return 0;
}
static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len,
char __user *optval, int __user *optlen)
{
struct sctp_endpoint *ep = sctp_sk(sk)->ep;
struct sctp_authchunks __user *p = (void __user *)optval;
struct sctp_authchunks val;
struct sctp_association *asoc;
struct sctp_chunks_param *ch;
u32 num_chunks = 0;
char __user *to;
if (!ep->auth_enable)
return -EACCES;
if (len < sizeof(struct sctp_authchunks))
return -EINVAL;
if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks)))
return -EFAULT;
to = p->gauth_chunks;
asoc = sctp_id2assoc(sk, val.gauth_assoc_id);
if (!asoc)
return -EINVAL;
ch = asoc->peer.peer_chunks;
if (!ch)
goto num;
/* See if the user provided enough room for all the data */
num_chunks = ntohs(ch->param_hdr.length) - sizeof(struct sctp_paramhdr);
if (len < num_chunks)
return -EINVAL;
if (copy_to_user(to, ch->chunks, num_chunks))
return -EFAULT;
num:
len = sizeof(struct sctp_authchunks) + num_chunks;
if (put_user(len, optlen))
return -EFAULT;
if (put_user(num_chunks, &p->gauth_number_of_chunks))
return -EFAULT;
return 0;
}
static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len,
char __user *optval, int __user *optlen)
{
struct sctp_endpoint *ep = sctp_sk(sk)->ep;
struct sctp_authchunks __user *p = (void __user *)optval;
struct sctp_authchunks val;
struct sctp_association *asoc;
struct sctp_chunks_param *ch;
u32 num_chunks = 0;
char __user *to;
if (!ep->auth_enable)
return -EACCES;
if (len < sizeof(struct sctp_authchunks))
return -EINVAL;
if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks)))
return -EFAULT;
to = p->gauth_chunks;
asoc = sctp_id2assoc(sk, val.gauth_assoc_id);
if (!asoc && val.gauth_assoc_id && sctp_style(sk, UDP))
return -EINVAL;
if (asoc)
ch = (struct sctp_chunks_param *)asoc->c.auth_chunks;
else
ch = ep->auth_chunk_list;
if (!ch)
goto num;
num_chunks = ntohs(ch->param_hdr.length) - sizeof(struct sctp_paramhdr);
if (len < sizeof(struct sctp_authchunks) + num_chunks)
return -EINVAL;
if (copy_to_user(to, ch->chunks, num_chunks))
return -EFAULT;
num:
len = sizeof(struct sctp_authchunks) + num_chunks;
if (put_user(len, optlen))
return -EFAULT;
if (put_user(num_chunks, &p->gauth_number_of_chunks))
return -EFAULT;
return 0;
}
/*
* 8.2.5. Get the Current Number of Associations (SCTP_GET_ASSOC_NUMBER)
* This option gets the current number of associations that are attached
* to a one-to-many style socket. The option value is a uint32_t.
*/
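/* Illustrative userspace sketch (hypothetical one-to-many style fd):
 *
 *	uint32_t n;
 *	socklen_t len = sizeof(n);
 *
 *	if (getsockopt(fd, IPPROTO_SCTP, SCTP_GET_ASSOC_NUMBER,
 *		       &n, &len) == 0)
 *		printf("%u associations\n", n);
 */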
static int sctp_getsockopt_assoc_number(struct sock *sk, int len,
char __user *optval, int __user *optlen)
{
struct sctp_sock *sp = sctp_sk(sk);
struct sctp_association *asoc;
u32 val = 0;
if (sctp_style(sk, TCP))
return -EOPNOTSUPP;
if (len < sizeof(u32))
return -EINVAL;
len = sizeof(u32);
list_for_each_entry(asoc, &(sp->ep->asocs), asocs) {
val++;
}
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval, &val, len))
return -EFAULT;
return 0;
}
/*
* 8.1.23 SCTP_AUTO_ASCONF
* See the corresponding setsockopt entry as description
*/
static int sctp_getsockopt_auto_asconf(struct sock *sk, int len,
char __user *optval, int __user *optlen)
{
int val = 0;
if (len < sizeof(int))
return -EINVAL;
len = sizeof(int);
if (sctp_sk(sk)->do_auto_asconf && sctp_is_ep_boundall(sk))
val = 1;
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval, &val, len))
return -EFAULT;
return 0;
}
/*
* 8.2.6. Get the Current Identifiers of Associations
* (SCTP_GET_ASSOC_ID_LIST)
*
* This option gets the current list of SCTP association identifiers of
* the SCTP associations handled by a one-to-many style socket.
*/
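/* Illustrative userspace sketch (hypothetical fd, with n obtained via
 * SCTP_GET_ASSOC_NUMBER): the caller sizes the buffer for n ids, which is
 * exactly what the length checks below enforce.
 *
 *	socklen_t len = sizeof(struct sctp_assoc_ids) +
 *			n * sizeof(sctp_assoc_t);
 *	struct sctp_assoc_ids *ids = malloc(len);
 *
 *	if (ids && getsockopt(fd, IPPROTO_SCTP, SCTP_GET_ASSOC_ID_LIST,
 *			      ids, &len) == 0)
 *		... use ids->gaids_assoc_id[0 .. gaids_number_of_ids - 1] ...
 *	free(ids);
 */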
static int sctp_getsockopt_assoc_ids(struct sock *sk, int len,
char __user *optval, int __user *optlen)
{
struct sctp_sock *sp = sctp_sk(sk);
struct sctp_association *asoc;
struct sctp_assoc_ids *ids;
u32 num = 0;
if (sctp_style(sk, TCP))
return -EOPNOTSUPP;
if (len < sizeof(struct sctp_assoc_ids))
return -EINVAL;
list_for_each_entry(asoc, &(sp->ep->asocs), asocs) {
num++;
}
if (len < sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num)
return -EINVAL;
len = sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num;
ids = kmalloc(len, GFP_USER | __GFP_NOWARN);
if (unlikely(!ids))
return -ENOMEM;
ids->gaids_number_of_ids = num;
num = 0;
list_for_each_entry(asoc, &(sp->ep->asocs), asocs) {
ids->gaids_assoc_id[num++] = asoc->assoc_id;
}
if (put_user(len, optlen) || copy_to_user(optval, ids, len)) {
kfree(ids);
return -EFAULT;
}
kfree(ids);
return 0;
}
/*
* SCTP_PEER_ADDR_THLDS
*
* This option allows us to fetch the partially failed threshold for one or all
* transports in an association. See Section 6.1 of:
* http://www.ietf.org/id/draft-nishida-tsvwg-sctp-failover-05.txt
*/
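/* Illustrative userspace sketch (hypothetical fd/assoc_id/peer): fetching
 * the thresholds for a single transport, mirroring the non-wildcard
 * branch of the getter below.
 *
 *	struct sctp_paddrthlds th = { .spt_assoc_id = assoc_id };
 *	socklen_t len = sizeof(th);
 *
 *	memcpy(&th.spt_address, &peer, sizeof(peer));
 *	if (getsockopt(fd, IPPROTO_SCTP, SCTP_PEER_ADDR_THLDS,
 *		       &th, &len) == 0)
 *		printf("pf %u, max rxt %u\n",
 *		       th.spt_pathpfthld, th.spt_pathmaxrxt);
 */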
static int sctp_getsockopt_paddr_thresholds(struct sock *sk,
char __user *optval,
int len,
int __user *optlen)
{
struct sctp_paddrthlds val;
struct sctp_transport *trans;
struct sctp_association *asoc;
if (len < sizeof(struct sctp_paddrthlds))
return -EINVAL;
len = sizeof(struct sctp_paddrthlds);
if (copy_from_user(&val, (struct sctp_paddrthlds __user *)optval, len))
return -EFAULT;
if (sctp_is_any(sk, (const union sctp_addr *)&val.spt_address)) {
asoc = sctp_id2assoc(sk, val.spt_assoc_id);
if (!asoc)
return -ENOENT;
val.spt_pathpfthld = asoc->pf_retrans;
val.spt_pathmaxrxt = asoc->pathmaxrxt;
} else {
trans = sctp_addr_id2transport(sk, &val.spt_address,
val.spt_assoc_id);
if (!trans)
return -ENOENT;
val.spt_pathmaxrxt = trans->pathmaxrxt;
val.spt_pathpfthld = trans->pf_retrans;
}
if (put_user(len, optlen) || copy_to_user(optval, &val, len))
return -EFAULT;
return 0;
}
/*
* SCTP_GET_ASSOC_STATS
*
* This option retrieves local per endpoint statistics. It is modeled
* after OpenSolaris' implementation.
*/
static int sctp_getsockopt_assoc_stats(struct sock *sk, int len,
char __user *optval,
int __user *optlen)
{
struct sctp_assoc_stats sas;
struct sctp_association *asoc = NULL;
/* User must provide at least the assoc id */
if (len < sizeof(sctp_assoc_t))
return -EINVAL;
/* Allow the struct to grow and fill in as much as possible */
len = min_t(size_t, len, sizeof(sas));
if (copy_from_user(&sas, optval, len))
return -EFAULT;
asoc = sctp_id2assoc(sk, sas.sas_assoc_id);
if (!asoc)
return -EINVAL;
sas.sas_rtxchunks = asoc->stats.rtxchunks;
sas.sas_gapcnt = asoc->stats.gapcnt;
sas.sas_outofseqtsns = asoc->stats.outofseqtsns;
sas.sas_osacks = asoc->stats.osacks;
sas.sas_isacks = asoc->stats.isacks;
sas.sas_octrlchunks = asoc->stats.octrlchunks;
sas.sas_ictrlchunks = asoc->stats.ictrlchunks;
sas.sas_oodchunks = asoc->stats.oodchunks;
sas.sas_iodchunks = asoc->stats.iodchunks;
sas.sas_ouodchunks = asoc->stats.ouodchunks;
sas.sas_iuodchunks = asoc->stats.iuodchunks;
sas.sas_idupchunks = asoc->stats.idupchunks;
sas.sas_opackets = asoc->stats.opackets;
sas.sas_ipackets = asoc->stats.ipackets;
/* New high max rto observed, will return 0 if not a single
* RTO update took place. obs_rto_ipaddr will be bogus
* in such a case
*/
sas.sas_maxrto = asoc->stats.max_obs_rto;
memcpy(&sas.sas_obs_rto_ipaddr, &asoc->stats.obs_rto_ipaddr,
sizeof(struct sockaddr_storage));
/* Mark beginning of a new observation period */
asoc->stats.max_obs_rto = asoc->rto_min;
if (put_user(len, optlen))
return -EFAULT;
pr_debug("%s: len:%d, assoc_id:%d\n", __func__, len, sas.sas_assoc_id);
if (copy_to_user(optval, &sas, len))
return -EFAULT;
return 0;
}
static int sctp_getsockopt_recvrcvinfo(struct sock *sk, int len,
char __user *optval,
int __user *optlen)
{
int val = 0;
if (len < sizeof(int))
return -EINVAL;
len = sizeof(int);
if (sctp_sk(sk)->recvrcvinfo)
val = 1;
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval, &val, len))
return -EFAULT;
return 0;
}
static int sctp_getsockopt_recvnxtinfo(struct sock *sk, int len,
char __user *optval,
int __user *optlen)
{
int val = 0;
if (len < sizeof(int))
return -EINVAL;
len = sizeof(int);
if (sctp_sk(sk)->recvnxtinfo)
val = 1;
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval, &val, len))
return -EFAULT;
return 0;
}
static int sctp_getsockopt_pr_supported(struct sock *sk, int len,
char __user *optval,
int __user *optlen)
{
struct sctp_assoc_value params;
struct sctp_association *asoc;
int retval = -EFAULT;
if (len < sizeof(params)) {
retval = -EINVAL;
goto out;
}
len = sizeof(params);
if (copy_from_user(&params, optval, len))
goto out;
asoc = sctp_id2assoc(sk, params.assoc_id);
if (asoc) {
params.assoc_value = asoc->prsctp_enable;
} else if (!params.assoc_id) {
struct sctp_sock *sp = sctp_sk(sk);
params.assoc_value = sp->ep->prsctp_enable;
} else {
retval = -EINVAL;
goto out;
}
if (put_user(len, optlen))
goto out;
if (copy_to_user(optval, &params, len))
goto out;
retval = 0;
out:
return retval;
}
static int sctp_getsockopt_default_prinfo(struct sock *sk, int len,
char __user *optval,
int __user *optlen)
{
struct sctp_default_prinfo info;
struct sctp_association *asoc;
int retval = -EFAULT;
if (len < sizeof(info)) {
retval = -EINVAL;
goto out;
}
len = sizeof(info);
if (copy_from_user(&info, optval, len))
goto out;
asoc = sctp_id2assoc(sk, info.pr_assoc_id);
if (asoc) {
info.pr_policy = SCTP_PR_POLICY(asoc->default_flags);
info.pr_value = asoc->default_timetolive;
} else if (!info.pr_assoc_id) {
struct sctp_sock *sp = sctp_sk(sk);
info.pr_policy = SCTP_PR_POLICY(sp->default_flags);
info.pr_value = sp->default_timetolive;
} else {
retval = -EINVAL;
goto out;
}
if (put_user(len, optlen))
goto out;
if (copy_to_user(optval, &info, len))
goto out;
retval = 0;
out:
return retval;
}
static int sctp_getsockopt_pr_assocstatus(struct sock *sk, int len,
char __user *optval,
int __user *optlen)
{
struct sctp_prstatus params;
struct sctp_association *asoc;
int policy;
int retval = -EINVAL;
if (len < sizeof(params))
goto out;
len = sizeof(params);
if (copy_from_user(&params, optval, len)) {
retval = -EFAULT;
goto out;
}
policy = params.sprstat_policy;
if (policy & ~SCTP_PR_SCTP_MASK)
goto out;
asoc = sctp_id2assoc(sk, params.sprstat_assoc_id);
if (!asoc)
goto out;
if (policy == SCTP_PR_SCTP_NONE) {
params.sprstat_abandoned_unsent = 0;
params.sprstat_abandoned_sent = 0;
for (policy = 0; policy <= SCTP_PR_INDEX(MAX); policy++) {
params.sprstat_abandoned_unsent +=
asoc->abandoned_unsent[policy];
params.sprstat_abandoned_sent +=
asoc->abandoned_sent[policy];
}
} else {
params.sprstat_abandoned_unsent =
asoc->abandoned_unsent[__SCTP_PR_INDEX(policy)];
params.sprstat_abandoned_sent =
asoc->abandoned_sent[__SCTP_PR_INDEX(policy)];
}
if (put_user(len, optlen)) {
retval = -EFAULT;
goto out;
}
if (copy_to_user(optval, &params, len)) {
retval = -EFAULT;
goto out;
}
retval = 0;
out:
return retval;
}
static int sctp_getsockopt_pr_streamstatus(struct sock *sk, int len,
char __user *optval,
int __user *optlen)
{
struct sctp_stream_out *streamout;
struct sctp_association *asoc;
struct sctp_prstatus params;
int retval = -EINVAL;
int policy;
if (len < sizeof(params))
goto out;
len = sizeof(params);
if (copy_from_user(&params, optval, len)) {
retval = -EFAULT;
goto out;
}
policy = params.sprstat_policy;
if (policy & ~SCTP_PR_SCTP_MASK)
goto out;
asoc = sctp_id2assoc(sk, params.sprstat_assoc_id);
if (!asoc || params.sprstat_sid >= asoc->stream.outcnt)
goto out;
streamout = &asoc->stream.out[params.sprstat_sid];
if (policy == SCTP_PR_SCTP_NONE) {
params.sprstat_abandoned_unsent = 0;
params.sprstat_abandoned_sent = 0;
for (policy = 0; policy <= SCTP_PR_INDEX(MAX); policy++) {
params.sprstat_abandoned_unsent +=
streamout->abandoned_unsent[policy];
params.sprstat_abandoned_sent +=
streamout->abandoned_sent[policy];
}
} else {
params.sprstat_abandoned_unsent =
streamout->abandoned_unsent[__SCTP_PR_INDEX(policy)];
params.sprstat_abandoned_sent =
streamout->abandoned_sent[__SCTP_PR_INDEX(policy)];
}
if (put_user(len, optlen) || copy_to_user(optval, &params, len)) {
retval = -EFAULT;
goto out;
}
retval = 0;
out:
return retval;
}
static int sctp_getsockopt_reconfig_supported(struct sock *sk, int len,
char __user *optval,
int __user *optlen)
{
struct sctp_assoc_value params;
struct sctp_association *asoc;
int retval = -EFAULT;
if (len < sizeof(params)) {
retval = -EINVAL;
goto out;
}
len = sizeof(params);
if (copy_from_user(&params, optval, len))
goto out;
asoc = sctp_id2assoc(sk, params.assoc_id);
if (asoc) {
params.assoc_value = asoc->reconf_enable;
} else if (!params.assoc_id) {
struct sctp_sock *sp = sctp_sk(sk);
params.assoc_value = sp->ep->reconf_enable;
} else {
retval = -EINVAL;
goto out;
}
if (put_user(len, optlen))
goto out;
if (copy_to_user(optval, &params, len))
goto out;
retval = 0;
out:
return retval;
}
static int sctp_getsockopt_enable_strreset(struct sock *sk, int len,
char __user *optval,
int __user *optlen)
{
struct sctp_assoc_value params;
struct sctp_association *asoc;
int retval = -EFAULT;
if (len < sizeof(params)) {
retval = -EINVAL;
goto out;
}
len = sizeof(params);
if (copy_from_user(&params, optval, len))
goto out;
asoc = sctp_id2assoc(sk, params.assoc_id);
if (asoc) {
params.assoc_value = asoc->strreset_enable;
} else if (!params.assoc_id) {
struct sctp_sock *sp = sctp_sk(sk);
params.assoc_value = sp->ep->strreset_enable;
} else {
retval = -EINVAL;
goto out;
}
if (put_user(len, optlen))
goto out;
if (copy_to_user(optval, &params, len))
goto out;
retval = 0;
out:
return retval;
}
static int sctp_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
int retval = 0;
int len;
pr_debug("%s: sk:%p, optname:%d\n", __func__, sk, optname);
/* I can hardly begin to describe how wrong this is. This is
* so broken as to be worse than useless. The API draft
* REALLY is NOT helpful here... I am not convinced that the
* semantics of getsockopt() with a level OTHER THAN SOL_SCTP
* are at all well-founded.
*/
if (level != SOL_SCTP) {
struct sctp_af *af = sctp_sk(sk)->pf->af;
retval = af->getsockopt(sk, level, optname, optval, optlen);
return retval;
}
if (get_user(len, optlen))
return -EFAULT;
if (len < 0)
return -EINVAL;
lock_sock(sk);
switch (optname) {
case SCTP_STATUS:
retval = sctp_getsockopt_sctp_status(sk, len, optval, optlen);
break;
case SCTP_DISABLE_FRAGMENTS:
retval = sctp_getsockopt_disable_fragments(sk, len, optval,
optlen);
break;
case SCTP_EVENTS:
retval = sctp_getsockopt_events(sk, len, optval, optlen);
break;
case SCTP_AUTOCLOSE:
retval = sctp_getsockopt_autoclose(sk, len, optval, optlen);
break;
case SCTP_SOCKOPT_PEELOFF:
retval = sctp_getsockopt_peeloff(sk, len, optval, optlen);
break;
case SCTP_SOCKOPT_PEELOFF_FLAGS:
retval = sctp_getsockopt_peeloff_flags(sk, len, optval, optlen);
break;
case SCTP_PEER_ADDR_PARAMS:
retval = sctp_getsockopt_peer_addr_params(sk, len, optval,
optlen);
break;
case SCTP_DELAYED_SACK:
retval = sctp_getsockopt_delayed_ack(sk, len, optval,
optlen);
break;
case SCTP_INITMSG:
retval = sctp_getsockopt_initmsg(sk, len, optval, optlen);
break;
case SCTP_GET_PEER_ADDRS:
retval = sctp_getsockopt_peer_addrs(sk, len, optval,
optlen);
break;
case SCTP_GET_LOCAL_ADDRS:
retval = sctp_getsockopt_local_addrs(sk, len, optval,
optlen);
break;
case SCTP_SOCKOPT_CONNECTX3:
retval = sctp_getsockopt_connectx3(sk, len, optval, optlen);
break;
case SCTP_DEFAULT_SEND_PARAM:
retval = sctp_getsockopt_default_send_param(sk, len,
optval, optlen);
break;
case SCTP_DEFAULT_SNDINFO:
retval = sctp_getsockopt_default_sndinfo(sk, len,
optval, optlen);
break;
case SCTP_PRIMARY_ADDR:
retval = sctp_getsockopt_primary_addr(sk, len, optval, optlen);
break;
case SCTP_NODELAY:
retval = sctp_getsockopt_nodelay(sk, len, optval, optlen);
break;
case SCTP_RTOINFO:
retval = sctp_getsockopt_rtoinfo(sk, len, optval, optlen);
break;
case SCTP_ASSOCINFO:
retval = sctp_getsockopt_associnfo(sk, len, optval, optlen);
break;
case SCTP_I_WANT_MAPPED_V4_ADDR:
retval = sctp_getsockopt_mappedv4(sk, len, optval, optlen);
break;
case SCTP_MAXSEG:
retval = sctp_getsockopt_maxseg(sk, len, optval, optlen);
break;
case SCTP_GET_PEER_ADDR_INFO:
retval = sctp_getsockopt_peer_addr_info(sk, len, optval,
optlen);
break;
case SCTP_ADAPTATION_LAYER:
retval = sctp_getsockopt_adaptation_layer(sk, len, optval,
optlen);
break;
case SCTP_CONTEXT:
retval = sctp_getsockopt_context(sk, len, optval, optlen);
break;
case SCTP_FRAGMENT_INTERLEAVE:
retval = sctp_getsockopt_fragment_interleave(sk, len, optval,
optlen);
break;
case SCTP_PARTIAL_DELIVERY_POINT:
retval = sctp_getsockopt_partial_delivery_point(sk, len, optval,
optlen);
break;
case SCTP_MAX_BURST:
retval = sctp_getsockopt_maxburst(sk, len, optval, optlen);
break;
case SCTP_AUTH_KEY:
case SCTP_AUTH_CHUNK:
case SCTP_AUTH_DELETE_KEY:
retval = -EOPNOTSUPP;
break;
case SCTP_HMAC_IDENT:
retval = sctp_getsockopt_hmac_ident(sk, len, optval, optlen);
break;
case SCTP_AUTH_ACTIVE_KEY:
retval = sctp_getsockopt_active_key(sk, len, optval, optlen);
break;
case SCTP_PEER_AUTH_CHUNKS:
retval = sctp_getsockopt_peer_auth_chunks(sk, len, optval,
optlen);
break;
case SCTP_LOCAL_AUTH_CHUNKS:
retval = sctp_getsockopt_local_auth_chunks(sk, len, optval,
optlen);
break;
case SCTP_GET_ASSOC_NUMBER:
retval = sctp_getsockopt_assoc_number(sk, len, optval, optlen);
break;
case SCTP_GET_ASSOC_ID_LIST:
retval = sctp_getsockopt_assoc_ids(sk, len, optval, optlen);
break;
case SCTP_AUTO_ASCONF:
retval = sctp_getsockopt_auto_asconf(sk, len, optval, optlen);
break;
case SCTP_PEER_ADDR_THLDS:
retval = sctp_getsockopt_paddr_thresholds(sk, optval, len, optlen);
break;
case SCTP_GET_ASSOC_STATS:
retval = sctp_getsockopt_assoc_stats(sk, len, optval, optlen);
break;
case SCTP_RECVRCVINFO:
retval = sctp_getsockopt_recvrcvinfo(sk, len, optval, optlen);
break;
case SCTP_RECVNXTINFO:
retval = sctp_getsockopt_recvnxtinfo(sk, len, optval, optlen);
break;
case SCTP_PR_SUPPORTED:
retval = sctp_getsockopt_pr_supported(sk, len, optval, optlen);
break;
case SCTP_DEFAULT_PRINFO:
retval = sctp_getsockopt_default_prinfo(sk, len, optval,
optlen);
break;
case SCTP_PR_ASSOC_STATUS:
retval = sctp_getsockopt_pr_assocstatus(sk, len, optval,
optlen);
break;
case SCTP_PR_STREAM_STATUS:
retval = sctp_getsockopt_pr_streamstatus(sk, len, optval,
optlen);
break;
case SCTP_RECONFIG_SUPPORTED:
retval = sctp_getsockopt_reconfig_supported(sk, len, optval,
optlen);
break;
case SCTP_ENABLE_STREAM_RESET:
retval = sctp_getsockopt_enable_strreset(sk, len, optval,
optlen);
break;
default:
retval = -ENOPROTOOPT;
break;
}
release_sock(sk);
return retval;
}
static int sctp_hash(struct sock *sk)
{
/* STUB */
return 0;
}
static void sctp_unhash(struct sock *sk)
{
/* STUB */
}
/* Check if port is acceptable. Possibly find first available port.
*
* The port hash table lives in the 'global' SCTP protocol storage
* (returned by struct sctp_protocol *sctp_get_protocol()). The hash
* table is an array of 4096 lists (sctp_bind_hashbucket). Each
* list's number is the port number hashed out, so, as you would
* expect from a hash function, all the ports in a given list hash
* out to the same list number. Each list holds a set of ports, with
* a link to the socket (struct sock) that uses each port, the port
* number and a fastreuse flag (FIXME: NPI ipg).
*/
static struct sctp_bind_bucket *sctp_bucket_create(
struct sctp_bind_hashbucket *head, struct net *, unsigned short snum);
static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
{
struct sctp_bind_hashbucket *head; /* hash list */
struct sctp_bind_bucket *pp;
unsigned short snum;
int ret;
snum = ntohs(addr->v4.sin_port);
pr_debug("%s: begins, snum:%d\n", __func__, snum);
local_bh_disable();
if (snum == 0) {
/* Search for an available port. */
int low, high, remaining, index;
unsigned int rover;
struct net *net = sock_net(sk);
inet_get_local_port_range(net, &low, &high);
remaining = (high - low) + 1;
rover = prandom_u32() % remaining + low;
do {
rover++;
if ((rover < low) || (rover > high))
rover = low;
if (inet_is_local_reserved_port(net, rover))
continue;
index = sctp_phashfn(sock_net(sk), rover);
head = &sctp_port_hashtable[index];
spin_lock(&head->lock);
sctp_for_each_hentry(pp, &head->chain)
if ((pp->port == rover) &&
net_eq(sock_net(sk), pp->net))
goto next;
break;
next:
spin_unlock(&head->lock);
} while (--remaining > 0);
/* Exhausted local port range during search? */
ret = 1;
if (remaining <= 0)
goto fail;
/* OK, here is the one we will use. HEAD (the port
* hash table list entry) is non-NULL and we hold its
* lock.
*/
snum = rover;
} else {
/* We are given a specific port number; we verify
* that it is not being used. If it is used, we will
* exhaust the search in the hash list corresponding
* to the port number (snum) - we detect that with the
* port iterator, pp being NULL.
*/
head = &sctp_port_hashtable[sctp_phashfn(sock_net(sk), snum)];
spin_lock(&head->lock);
sctp_for_each_hentry(pp, &head->chain) {
if ((pp->port == snum) && net_eq(pp->net, sock_net(sk)))
goto pp_found;
}
}
pp = NULL;
goto pp_not_found;
pp_found:
if (!hlist_empty(&pp->owner)) {
/* We had a port hash table hit - there is an
* available port (pp != NULL) and it is being
* used by other socket (pp->owner not empty); that other
* socket is going to be sk2.
*/
int reuse = sk->sk_reuse;
struct sock *sk2;
pr_debug("%s: found a possible match\n", __func__);
if (pp->fastreuse && sk->sk_reuse &&
sk->sk_state != SCTP_SS_LISTENING)
goto success;
/* Run through the list of sockets bound to the port
* (pp->port) [via the pointers bind_next and
* bind_pprev in the struct sock *sk2 (pp->sk)]. On each one,
* we get the endpoint they describe and run through
* the endpoint's list of IP (v4 or v6) addresses,
* comparing each of the addresses with the address of
* the socket sk. If we find a match, then that means
* that this port/socket (sk) combination is already
* in an endpoint.
*/
sk_for_each_bound(sk2, &pp->owner) {
struct sctp_endpoint *ep2;
ep2 = sctp_sk(sk2)->ep;
if (sk == sk2 ||
(reuse && sk2->sk_reuse &&
sk2->sk_state != SCTP_SS_LISTENING))
continue;
if (sctp_bind_addr_conflict(&ep2->base.bind_addr, addr,
sctp_sk(sk2), sctp_sk(sk))) {
ret = (long)sk2;
goto fail_unlock;
}
}
pr_debug("%s: found a match\n", __func__);
}
pp_not_found:
/* If there was a hash table miss, create a new port. */
ret = 1;
if (!pp && !(pp = sctp_bucket_create(head, sock_net(sk), snum)))
goto fail_unlock;
/* In either case (hit or miss), make sure fastreuse is 1 only
* if sk->sk_reuse is too (that is, if the caller requested
* SO_REUSEADDR on this socket -sk-).
*/
if (hlist_empty(&pp->owner)) {
if (sk->sk_reuse && sk->sk_state != SCTP_SS_LISTENING)
pp->fastreuse = 1;
else
pp->fastreuse = 0;
} else if (pp->fastreuse &&
(!sk->sk_reuse || sk->sk_state == SCTP_SS_LISTENING))
pp->fastreuse = 0;
/* We are set, so fill up all the data in the hash table
* entry, tie the socket list information with the rest of the
* sockets. FIXME: Blurry, NPI (ipg).
*/
success:
if (!sctp_sk(sk)->bind_hash) {
inet_sk(sk)->inet_num = snum;
sk_add_bind_node(sk, &pp->owner);
sctp_sk(sk)->bind_hash = pp;
}
ret = 0;
fail_unlock:
spin_unlock(&head->lock);
fail:
local_bh_enable();
return ret;
}
/* Assign a 'snum' port to the socket. If snum == 0, an ephemeral
* port is requested.
*/
static int sctp_get_port(struct sock *sk, unsigned short snum)
{
union sctp_addr addr;
struct sctp_af *af = sctp_sk(sk)->pf->af;
/* Set up a dummy address struct from the sk. */
af->from_sk(&addr, sk);
addr.v4.sin_port = htons(snum);
/* Note: sk->sk_num gets filled in if ephemeral port request. */
return !!sctp_get_port_local(sk, &addr);
}
/*
* Move a socket to LISTENING state.
*/
static int sctp_listen_start(struct sock *sk, int backlog)
{
struct sctp_sock *sp = sctp_sk(sk);
struct sctp_endpoint *ep = sp->ep;
struct crypto_shash *tfm = NULL;
char alg[32];
/* Allocate HMAC for generating cookie. */
if (!sp->hmac && sp->sctp_hmac_alg) {
sprintf(alg, "hmac(%s)", sp->sctp_hmac_alg);
tfm = crypto_alloc_shash(alg, 0, 0);
if (IS_ERR(tfm)) {
net_info_ratelimited("failed to load transform for %s: %ld\n",
sp->sctp_hmac_alg, PTR_ERR(tfm));
return -ENOSYS;
}
sctp_sk(sk)->hmac = tfm;
}
/*
* If a bind() or sctp_bindx() is not called prior to a listen()
* call that allows new associations to be accepted, the system
* picks an ephemeral port and will choose an address set equivalent
* to binding with a wildcard address.
*
* This is not currently spelled out in the SCTP sockets
* extensions draft, but follows the practice as seen in TCP
* sockets.
*
*/
sk->sk_state = SCTP_SS_LISTENING;
if (!ep->base.bind_addr.port) {
if (sctp_autobind(sk))
return -EAGAIN;
} else {
if (sctp_get_port(sk, inet_sk(sk)->inet_num)) {
sk->sk_state = SCTP_SS_CLOSED;
return -EADDRINUSE;
}
}
sk->sk_max_ack_backlog = backlog;
sctp_hash_endpoint(ep);
return 0;
}
/*
* 4.1.3 / 5.1.3 listen()
*
* By default, new associations are not accepted for UDP style sockets.
* An application uses listen() to mark a socket as being able to
* accept new associations.
*
* On TCP style sockets, applications use listen() to ready the SCTP
* endpoint for accepting inbound associations.
*
* On both types of endpoints a backlog of '0' disables listening.
*
* Move a socket to LISTENING state.
*/
int sctp_inet_listen(struct socket *sock, int backlog)
{
struct sock *sk = sock->sk;
struct sctp_endpoint *ep = sctp_sk(sk)->ep;
int err = -EINVAL;
if (unlikely(backlog < 0))
return err;
lock_sock(sk);
/* Peeled-off sockets are not allowed to listen(). */
if (sctp_style(sk, UDP_HIGH_BANDWIDTH))
goto out;
if (sock->state != SS_UNCONNECTED)
goto out;
if (!sctp_sstate(sk, LISTENING) && !sctp_sstate(sk, CLOSED))
goto out;
/* If backlog is zero, disable listening. */
if (!backlog) {
if (sctp_sstate(sk, CLOSED))
goto out;
err = 0;
sctp_unhash_endpoint(ep);
sk->sk_state = SCTP_SS_CLOSED;
if (sk->sk_reuse)
sctp_sk(sk)->bind_hash->fastreuse = 1;
goto out;
}
/* If we are already listening, just update the backlog */
if (sctp_sstate(sk, LISTENING))
sk->sk_max_ack_backlog = backlog;
else {
err = sctp_listen_start(sk, backlog);
if (err)
goto out;
}
err = 0;
out:
release_sock(sk);
return err;
}
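/* Illustrative userspace sketch: per the semantics above, listen() both
 * enables and disables acceptance of new associations (fd hypothetical).
 *
 *	listen(fd, 8);		enable new associations
 *	...
 *	listen(fd, 0);		stop accepting new associations
 */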
/*
* This function is modeled on the current datagram_poll() and the
* tcp_poll(). Note that, based on these implementations, we don't
* lock the socket in this function, even though it seems that,
* ideally, locking or some other mechanisms can be used to ensure
* the integrity of the counters (sndbuf and wmem_alloc) used
* in this place. We assume that we don't need locks either until proven
* otherwise.
*
* Another thing to note is that we include the Async I/O support
* here, again, by modeling the current TCP/UDP code. We don't have
* a good way to test with it yet.
*/
unsigned int sctp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
struct sock *sk = sock->sk;
struct sctp_sock *sp = sctp_sk(sk);
unsigned int mask;
poll_wait(file, sk_sleep(sk), wait);
sock_rps_record_flow(sk);
/* A TCP-style listening socket becomes readable when the accept queue
* is not empty.
*/
if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
return (!list_empty(&sp->ep->asocs)) ?
(POLLIN | POLLRDNORM) : 0;
mask = 0;
/* Are there any exceptional events? */
if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
mask |= POLLERR |
(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);
if (sk->sk_shutdown & RCV_SHUTDOWN)
mask |= POLLRDHUP | POLLIN | POLLRDNORM;
if (sk->sk_shutdown == SHUTDOWN_MASK)
mask |= POLLHUP;
/* Is it readable? Reconsider this code with TCP-style support. */
if (!skb_queue_empty(&sk->sk_receive_queue))
mask |= POLLIN | POLLRDNORM;
/* The association is either gone or not ready. */
if (!sctp_style(sk, UDP) && sctp_sstate(sk, CLOSED))
return mask;
/* Is it writable? */
if (sctp_writeable(sk)) {
mask |= POLLOUT | POLLWRNORM;
} else {
sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
/*
* Since the socket is not locked, the buffer
* might be made available after the writeable check and
* before the bit is set. This could cause a lost I/O
* signal. tcp_poll() has a race breaker for this race
* condition. Based on their implementation, we put
* in the following code to cover it as well.
*/
if (sctp_writeable(sk))
mask |= POLLOUT | POLLWRNORM;
}
return mask;
}
/********************************************************************
* 2nd Level Abstractions
********************************************************************/
static struct sctp_bind_bucket *sctp_bucket_create(
struct sctp_bind_hashbucket *head, struct net *net, unsigned short snum)
{
struct sctp_bind_bucket *pp;
pp = kmem_cache_alloc(sctp_bucket_cachep, GFP_ATOMIC);
if (pp) {
SCTP_DBG_OBJCNT_INC(bind_bucket);
pp->port = snum;
pp->fastreuse = 0;
INIT_HLIST_HEAD(&pp->owner);
pp->net = net;
hlist_add_head(&pp->node, &head->chain);
}
return pp;
}
/* Caller must hold hashbucket lock for this tb with local BH disabled */
static void sctp_bucket_destroy(struct sctp_bind_bucket *pp)
{
if (pp && hlist_empty(&pp->owner)) {
__hlist_del(&pp->node);
kmem_cache_free(sctp_bucket_cachep, pp);
SCTP_DBG_OBJCNT_DEC(bind_bucket);
}
}
/* Release this socket's reference to a local port. */
static inline void __sctp_put_port(struct sock *sk)
{
struct sctp_bind_hashbucket *head =
&sctp_port_hashtable[sctp_phashfn(sock_net(sk),
inet_sk(sk)->inet_num)];
struct sctp_bind_bucket *pp;
spin_lock(&head->lock);
pp = sctp_sk(sk)->bind_hash;
__sk_del_bind_node(sk);
sctp_sk(sk)->bind_hash = NULL;
inet_sk(sk)->inet_num = 0;
sctp_bucket_destroy(pp);
spin_unlock(&head->lock);
}
void sctp_put_port(struct sock *sk)
{
local_bh_disable();
__sctp_put_port(sk);
local_bh_enable();
}
/*
* The system picks an ephemeral port and chooses an address set equivalent
* to binding with a wildcard address.
* One of those addresses will be the primary address for the association.
* This automatically enables the multihoming capability of SCTP.
*/
static int sctp_autobind(struct sock *sk)
{
union sctp_addr autoaddr;
struct sctp_af *af;
__be16 port;
/* Initialize a local sockaddr structure to INADDR_ANY. */
af = sctp_sk(sk)->pf->af;
port = htons(inet_sk(sk)->inet_num);
af->inaddr_any(&autoaddr, port);
return sctp_do_bind(sk, &autoaddr, af->sockaddr_len);
}
/* Parse out IPPROTO_SCTP CMSG headers. Perform only minimal validation.
*
* From RFC 2292
* 4.2 The cmsghdr Structure
*
* When ancillary data is sent or received, any number of ancillary data
* objects can be specified by the msg_control and msg_controllen members of
* the msghdr structure, because each object is preceded by
* a cmsghdr structure defining the object's length (the cmsg_len member).
* Historically Berkeley-derived implementations have passed only one object
* at a time, but this API allows multiple objects to be
* passed in a single call to sendmsg() or recvmsg(). The following example
* shows two ancillary data objects in a control buffer.
*
* |<--------------------------- msg_controllen -------------------------->|
* | |
*
* |<----- ancillary data object ----->|<----- ancillary data object ----->|
*
* |<---------- CMSG_SPACE() --------->|<---------- CMSG_SPACE() --------->|
* | | |
*
* |<---------- cmsg_len ---------->| |<--------- cmsg_len ----------->| |
*
* |<--------- CMSG_LEN() --------->| |<-------- CMSG_LEN() ---------->| |
* | | | | |
*
* +-----+-----+-----+--+-----------+--+-----+-----+-----+--+-----------+--+
* |cmsg_|cmsg_|cmsg_|XX| |XX|cmsg_|cmsg_|cmsg_|XX| |XX|
*
* |len |level|type |XX|cmsg_data[]|XX|len |level|type |XX|cmsg_data[]|XX|
*
* +-----+-----+-----+--+-----------+--+-----+-----+-----+--+-----------+--+
* ^
* |
*
* msg_control
* points here
*/
static int sctp_msghdr_parse(const struct msghdr *msg, struct sctp_cmsgs *cmsgs)
{
struct msghdr *my_msg = (struct msghdr *)msg;
struct cmsghdr *cmsg;
for_each_cmsghdr(cmsg, my_msg) {
if (!CMSG_OK(my_msg, cmsg))
return -EINVAL;
/* Should we parse this header or ignore? */
if (cmsg->cmsg_level != IPPROTO_SCTP)
continue;
/* Strictly check lengths following example in SCM code. */
switch (cmsg->cmsg_type) {
case SCTP_INIT:
/* SCTP Socket API Extension
* 5.3.1 SCTP Initiation Structure (SCTP_INIT)
*
* This cmsghdr structure provides information for
* initializing new SCTP associations with sendmsg().
* The SCTP_INITMSG socket option uses this same data
* structure. This structure is not used for
* recvmsg().
*
* cmsg_level cmsg_type cmsg_data[]
* ------------ ------------ ----------------------
* IPPROTO_SCTP SCTP_INIT struct sctp_initmsg
*/
if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct sctp_initmsg)))
return -EINVAL;
cmsgs->init = CMSG_DATA(cmsg);
break;
case SCTP_SNDRCV:
/* SCTP Socket API Extension
* 5.3.2 SCTP Header Information Structure(SCTP_SNDRCV)
*
* This cmsghdr structure specifies SCTP options for
* sendmsg() and describes SCTP header information
* about a received message through recvmsg().
*
* cmsg_level cmsg_type cmsg_data[]
* ------------ ------------ ----------------------
* IPPROTO_SCTP SCTP_SNDRCV struct sctp_sndrcvinfo
*/
if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct sctp_sndrcvinfo)))
return -EINVAL;
cmsgs->srinfo = CMSG_DATA(cmsg);
if (cmsgs->srinfo->sinfo_flags &
~(SCTP_UNORDERED | SCTP_ADDR_OVER |
SCTP_SACK_IMMEDIATELY | SCTP_PR_SCTP_MASK |
SCTP_ABORT | SCTP_EOF))
return -EINVAL;
break;
case SCTP_SNDINFO:
/* SCTP Socket API Extension
* 5.3.4 SCTP Send Information Structure (SCTP_SNDINFO)
*
* This cmsghdr structure specifies SCTP options for
* sendmsg(). This structure and SCTP_RCVINFO replace
* SCTP_SNDRCV, which has been deprecated.
*
* cmsg_level cmsg_type cmsg_data[]
* ------------ ------------ ---------------------
* IPPROTO_SCTP SCTP_SNDINFO struct sctp_sndinfo
*/
if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct sctp_sndinfo)))
return -EINVAL;
cmsgs->sinfo = CMSG_DATA(cmsg);
if (cmsgs->sinfo->snd_flags &
~(SCTP_UNORDERED | SCTP_ADDR_OVER |
SCTP_SACK_IMMEDIATELY | SCTP_PR_SCTP_MASK |
SCTP_ABORT | SCTP_EOF))
return -EINVAL;
break;
default:
return -EINVAL;
}
}
return 0;
}
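/* An illustrative user-space sketch (not from this file; the payload fields
 * are left to the caller) of building the SCTP_SNDINFO ancillary object that
 * the parser above accepts. Because of the strict length check, cmsg_len
 * must be exactly CMSG_LEN(sizeof(struct sctp_sndinfo)). Error handling is
 * elided.
 *
 *	char buf[CMSG_SPACE(sizeof(struct sctp_sndinfo))] = { 0 };
 *	struct msghdr msg = { .msg_control = buf,
 *			      .msg_controllen = sizeof(buf) };
 *	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *	cmsg->cmsg_level = IPPROTO_SCTP;
 *	cmsg->cmsg_type = SCTP_SNDINFO;
 *	cmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndinfo));
 *	// fill ((struct sctp_sndinfo *)CMSG_DATA(cmsg)), add the data
 *	// iovec, then call sendmsg(fd, &msg, 0)
 */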
/*
* Wait for a packet.
* Note: This function is the same function as in core/datagram.c
* with a few modifications to make lksctp work.
*/
static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p)
{
int error;
DEFINE_WAIT(wait);
prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
/* Socket errors? */
error = sock_error(sk);
if (error)
goto out;
if (!skb_queue_empty(&sk->sk_receive_queue))
goto ready;
/* Socket shut down? */
if (sk->sk_shutdown & RCV_SHUTDOWN)
goto out;
/* Sequenced packets can come disconnected. If so, we report the
* problem.
*/
error = -ENOTCONN;
/* Is there a good reason to think that we may receive some data? */
if (list_empty(&sctp_sk(sk)->ep->asocs) && !sctp_sstate(sk, LISTENING))
goto out;
/* Handle signals. */
if (signal_pending(current))
goto interrupted;
/* Let another process have a go, since we are going to sleep
* anyway. Note: this may cause odd behavior if the message
* does not fit in the user's buffer, but this seems to be the
* only way to honor MSG_DONTWAIT realistically.
*/
release_sock(sk);
*timeo_p = schedule_timeout(*timeo_p);
lock_sock(sk);
ready:
finish_wait(sk_sleep(sk), &wait);
return 0;
interrupted:
error = sock_intr_errno(*timeo_p);
out:
finish_wait(sk_sleep(sk), &wait);
*err = error;
return error;
}
/* Receive a datagram.
* Note: This is pretty much the same routine as in core/datagram.c
* with a few changes to make lksctp work.
*/
struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags,
int noblock, int *err)
{
int error;
struct sk_buff *skb;
long timeo;
timeo = sock_rcvtimeo(sk, noblock);
pr_debug("%s: timeo:%ld, max:%ld\n", __func__, timeo,
MAX_SCHEDULE_TIMEOUT);
do {
/* Again, only user-level code calls this function,
* so nothing at interrupt level
* will suddenly eat the receive_queue.
*
* Look at current nfs client by the way...
* However, this function was correct in any case. 8)
*/
if (flags & MSG_PEEK) {
skb = skb_peek(&sk->sk_receive_queue);
if (skb)
refcount_inc(&skb->users);
} else {
skb = __skb_dequeue(&sk->sk_receive_queue);
}
if (skb)
return skb;
/* Caller is allowed not to check sk->sk_err before calling. */
error = sock_error(sk);
if (error)
goto no_packet;
if (sk->sk_shutdown & RCV_SHUTDOWN)
break;
if (sk_can_busy_loop(sk)) {
sk_busy_loop(sk, noblock);
if (!skb_queue_empty(&sk->sk_receive_queue))
continue;
}
/* User doesn't want to wait. */
error = -EAGAIN;
if (!timeo)
goto no_packet;
} while (sctp_wait_for_packet(sk, err, &timeo) == 0);
return NULL;
no_packet:
*err = error;
return NULL;
}
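/* An illustrative user-space sketch (not from this file) of the two branches
 * above: MSG_PEEK maps to skb_peek() plus a reference bump, so the same
 * datagram is returned again by the next plain receive. Error handling is
 * elided.
 *
 *	char tmp[64];
 *	recv(fd, tmp, sizeof(tmp), MSG_PEEK);	// message stays queued
 *	recv(fd, tmp, sizeof(tmp), 0);		// now it is dequeued
 */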
/* If sndbuf has changed, wake up per association sndbuf waiters. */
static void __sctp_write_space(struct sctp_association *asoc)
{
struct sock *sk = asoc->base.sk;
if (sctp_wspace(asoc) <= 0)
return;
if (waitqueue_active(&asoc->wait))
wake_up_interruptible(&asoc->wait);
if (sctp_writeable(sk)) {
struct socket_wq *wq;
rcu_read_lock();
wq = rcu_dereference(sk->sk_wq);
if (wq) {
if (waitqueue_active(&wq->wait))
wake_up_interruptible(&wq->wait);
/* Note that we try to include the Async I/O support
* here by modeling it on the current TCP/UDP code.
* We have not tested with it yet.
*/
if (!(sk->sk_shutdown & SEND_SHUTDOWN))
sock_wake_async(wq, SOCK_WAKE_SPACE, POLL_OUT);
}
rcu_read_unlock();
}
}
static void sctp_wake_up_waiters(struct sock *sk,
struct sctp_association *asoc)
{
struct sctp_association *tmp = asoc;
/* We do accounting for the sndbuf space per association,
* so we only need to wake our own association.
*/
if (asoc->ep->sndbuf_policy)
return __sctp_write_space(asoc);
/* If association goes down and is just flushing its
* outq, then just normally notify others.
*/
if (asoc->base.dead)
return sctp_write_space(sk);
/* Accounting for the sndbuf space is per socket, so we
* need to wake up others, try to be fair and in case of
* other associations, let them have a go first instead
* of just doing a sctp_write_space() call.
*
* Note that we reach sctp_wake_up_waiters() only when
* associations free up queued chunks, thus we are under
* lock and the list of associations on a socket is
* guaranteed not to change.
*/
for (tmp = list_next_entry(tmp, asocs); 1;
tmp = list_next_entry(tmp, asocs)) {
/* Manually skip the head element. */
if (&tmp->asocs == &((sctp_sk(sk))->ep->asocs))
continue;
/* Wake up association. */
__sctp_write_space(tmp);
/* We've reached the end. */
if (tmp == asoc)
break;
}
}
/* Do accounting for the sndbuf space.
* Decrement the used sndbuf space of the corresponding association by the
* data size which was just transmitted(freed).
*/
static void sctp_wfree(struct sk_buff *skb)
{
struct sctp_chunk *chunk = skb_shinfo(skb)->destructor_arg;
struct sctp_association *asoc = chunk->asoc;
struct sock *sk = asoc->base.sk;
asoc->sndbuf_used -= SCTP_DATA_SNDSIZE(chunk) +
sizeof(struct sk_buff) +
sizeof(struct sctp_chunk);
WARN_ON(refcount_sub_and_test(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc));
/*
* This undoes what is done via sctp_set_owner_w and sk_mem_charge
*/
sk->sk_wmem_queued -= skb->truesize;
sk_mem_uncharge(sk, skb->truesize);
sock_wfree(skb);
sctp_wake_up_waiters(sk, asoc);
sctp_association_put(asoc);
}
/* Do accounting for the receive space on the socket.
* Accounting for the association is done in ulpevent.c
* We set this as a destructor for the cloned data skbs so that
* accounting is done at the correct time.
*/
void sctp_sock_rfree(struct sk_buff *skb)
{
struct sock *sk = skb->sk;
struct sctp_ulpevent *event = sctp_skb2event(skb);
atomic_sub(event->rmem_len, &sk->sk_rmem_alloc);
/*
* Mimic the behavior of sock_rfree
*/
sk_mem_uncharge(sk, event->rmem_len);
}
/* Helper function to wait for space in the sndbuf. */
static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
size_t msg_len)
{
struct sock *sk = asoc->base.sk;
int err = 0;
long current_timeo = *timeo_p;
DEFINE_WAIT(wait);
pr_debug("%s: asoc:%p, timeo:%ld, msg_len:%zu\n", __func__, asoc,
*timeo_p, msg_len);
/* Increment the association's refcnt. */
sctp_association_hold(asoc);
/* Wait on the association specific sndbuf space. */
for (;;) {
prepare_to_wait_exclusive(&asoc->wait, &wait,
TASK_INTERRUPTIBLE);
if (!*timeo_p)
goto do_nonblock;
if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING ||
asoc->base.dead)
goto do_error;
if (signal_pending(current))
goto do_interrupted;
if (msg_len <= sctp_wspace(asoc))
break;
/* Let another process have a go, since we are going
* to sleep anyway.
*/
release_sock(sk);
current_timeo = schedule_timeout(current_timeo);
lock_sock(sk);
*timeo_p = current_timeo;
}
out:
finish_wait(&asoc->wait, &wait);
/* Release the association's refcnt. */
sctp_association_put(asoc);
return err;
do_error:
err = -EPIPE;
goto out;
do_interrupted:
err = sock_intr_errno(*timeo_p);
goto out;
do_nonblock:
err = -EAGAIN;
goto out;
}
void sctp_data_ready(struct sock *sk)
{
struct socket_wq *wq;
rcu_read_lock();
wq = rcu_dereference(sk->sk_wq);
if (skwq_has_sleeper(wq))
wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
POLLRDNORM | POLLRDBAND);
sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
rcu_read_unlock();
}
/* If socket sndbuf has changed, wake up all per association waiters. */
void sctp_write_space(struct sock *sk)
{
struct sctp_association *asoc;
/* Wake up the tasks in each wait queue. */
list_for_each_entry(asoc, &((sctp_sk(sk))->ep->asocs), asocs) {
__sctp_write_space(asoc);
}
}
/* Is there any sndbuf space available on the socket?
*
* Note that sk_wmem_alloc is the sum of the send buffers on all of the
* associations on the same socket. For a UDP-style socket with
* multiple associations, it is possible for it to be "unwriteable"
* prematurely. I assume that this is acceptable because
* a premature "unwriteable" is better than an accidental "writeable" which
* would cause an unwanted block under certain circumstances. For the 1-1
* UDP-style sockets or TCP-style sockets, this code should work.
* - Daisy
*/
static int sctp_writeable(struct sock *sk)
{
int amt = 0;
amt = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
if (amt < 0)
amt = 0;
return amt;
}
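/* A worked example of the arithmetic above (the numbers are hypothetical):
 * with sk_sndbuf == 212992 and sk_wmem_alloc == 210000, amt == 2992 and the
 * socket still counts as writable. Once queued data reaches sk_sndbuf, the
 * difference goes non-positive, amt is clamped to 0, and sctp_poll() stops
 * reporting POLLOUT | POLLWRNORM.
 */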
/* Wait for an association to go into ESTABLISHED state. If timeout is 0,
* returns immediately with EINPROGRESS.
*/
static int sctp_wait_for_connect(struct sctp_association *asoc, long *timeo_p)
{
struct sock *sk = asoc->base.sk;
int err = 0;
long current_timeo = *timeo_p;
DEFINE_WAIT(wait);
pr_debug("%s: asoc:%p, timeo:%ld\n", __func__, asoc, *timeo_p);
/* Increment the association's refcnt. */
sctp_association_hold(asoc);
for (;;) {
prepare_to_wait_exclusive(&asoc->wait, &wait,
TASK_INTERRUPTIBLE);
if (!*timeo_p)
goto do_nonblock;
if (sk->sk_shutdown & RCV_SHUTDOWN)
break;
if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING ||
asoc->base.dead)
goto do_error;
if (signal_pending(current))
goto do_interrupted;
if (sctp_state(asoc, ESTABLISHED))
break;
/* Let another process have a go, since we are going
* to sleep anyway.
*/
release_sock(sk);
current_timeo = schedule_timeout(current_timeo);
lock_sock(sk);
*timeo_p = current_timeo;
}
out:
finish_wait(&asoc->wait, &wait);
/* Release the association's refcnt. */
sctp_association_put(asoc);
return err;
do_error:
if (asoc->init_err_counter + 1 > asoc->max_init_attempts)
err = -ETIMEDOUT;
else
err = -ECONNREFUSED;
goto out;
do_interrupted:
err = sock_intr_errno(*timeo_p);
goto out;
do_nonblock:
err = -EINPROGRESS;
goto out;
}
static int sctp_wait_for_accept(struct sock *sk, long timeo)
{
struct sctp_endpoint *ep;
int err = 0;
DEFINE_WAIT(wait);
ep = sctp_sk(sk)->ep;
for (;;) {
prepare_to_wait_exclusive(sk_sleep(sk), &wait,
TASK_INTERRUPTIBLE);
if (list_empty(&ep->asocs)) {
release_sock(sk);
timeo = schedule_timeout(timeo);
lock_sock(sk);
}
err = -EINVAL;
if (!sctp_sstate(sk, LISTENING))
break;
err = 0;
if (!list_empty(&ep->asocs))
break;
err = sock_intr_errno(timeo);
if (signal_pending(current))
break;
err = -EAGAIN;
if (!timeo)
break;
}
finish_wait(sk_sleep(sk), &wait);
return err;
}
static void sctp_wait_for_close(struct sock *sk, long timeout)
{
DEFINE_WAIT(wait);
do {
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
if (list_empty(&sctp_sk(sk)->ep->asocs))
break;
release_sock(sk);
timeout = schedule_timeout(timeout);
lock_sock(sk);
} while (!signal_pending(current) && timeout);
finish_wait(sk_sleep(sk), &wait);
}
static void sctp_skb_set_owner_r_frag(struct sk_buff *skb, struct sock *sk)
{
struct sk_buff *frag;
if (!skb->data_len)
goto done;
/* Don't forget the fragments. */
skb_walk_frags(skb, frag)
sctp_skb_set_owner_r_frag(frag, sk);
done:
sctp_skb_set_owner_r(skb, sk);
}
void sctp_copy_sock(struct sock *newsk, struct sock *sk,
struct sctp_association *asoc)
{
struct inet_sock *inet = inet_sk(sk);
struct inet_sock *newinet;
newsk->sk_type = sk->sk_type;
newsk->sk_bound_dev_if = sk->sk_bound_dev_if;
newsk->sk_flags = sk->sk_flags;
newsk->sk_tsflags = sk->sk_tsflags;
newsk->sk_no_check_tx = sk->sk_no_check_tx;
newsk->sk_no_check_rx = sk->sk_no_check_rx;
newsk->sk_reuse = sk->sk_reuse;
newsk->sk_shutdown = sk->sk_shutdown;
newsk->sk_destruct = sctp_destruct_sock;
newsk->sk_family = sk->sk_family;
newsk->sk_protocol = IPPROTO_SCTP;
newsk->sk_backlog_rcv = sk->sk_prot->backlog_rcv;
newsk->sk_sndbuf = sk->sk_sndbuf;
newsk->sk_rcvbuf = sk->sk_rcvbuf;
newsk->sk_lingertime = sk->sk_lingertime;
newsk->sk_rcvtimeo = sk->sk_rcvtimeo;
newsk->sk_sndtimeo = sk->sk_sndtimeo;
newsk->sk_rxhash = sk->sk_rxhash;
newinet = inet_sk(newsk);
/* Initialize sk's sport, dport, rcv_saddr and daddr for
* getsockname() and getpeername()
*/
newinet->inet_sport = inet->inet_sport;
newinet->inet_saddr = inet->inet_saddr;
newinet->inet_rcv_saddr = inet->inet_rcv_saddr;
newinet->inet_dport = htons(asoc->peer.port);
newinet->pmtudisc = inet->pmtudisc;
newinet->inet_id = asoc->next_tsn ^ jiffies;
newinet->uc_ttl = inet->uc_ttl;
newinet->mc_loop = 1;
newinet->mc_ttl = 1;
newinet->mc_index = 0;
newinet->mc_list = NULL;
if (newsk->sk_flags & SK_FLAGS_TIMESTAMP)
net_enable_timestamp();
security_sk_clone(sk, newsk);
}
static inline void sctp_copy_descendant(struct sock *sk_to,
const struct sock *sk_from)
{
int ancestor_size = sizeof(struct inet_sock) +
sizeof(struct sctp_sock) -
offsetof(struct sctp_sock, auto_asconf_list);
if (sk_from->sk_family == PF_INET6)
ancestor_size += sizeof(struct ipv6_pinfo);
__inet_sk_copy_descendant(sk_to, sk_from, ancestor_size);
}
/* Populate the fields of the newsk from the oldsk and migrate the assoc
* and its messages to the newsk.
*/
static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
struct sctp_association *assoc,
enum sctp_socket_type type)
{
struct sctp_sock *oldsp = sctp_sk(oldsk);
struct sctp_sock *newsp = sctp_sk(newsk);
struct sctp_bind_bucket *pp; /* hash list port iterator */
struct sctp_endpoint *newep = newsp->ep;
struct sk_buff *skb, *tmp;
struct sctp_ulpevent *event;
struct sctp_bind_hashbucket *head;
/* Migrate socket buffer sizes and all the socket level options to the
* new socket.
*/
newsk->sk_sndbuf = oldsk->sk_sndbuf;
newsk->sk_rcvbuf = oldsk->sk_rcvbuf;
/* Brute force copy old sctp opt. */
sctp_copy_descendant(newsk, oldsk);
/* Restore the ep value that was overwritten with the above structure
* copy.
*/
newsp->ep = newep;
newsp->hmac = NULL;
/* Hook this new socket in to the bind_hash list. */
head = &sctp_port_hashtable[sctp_phashfn(sock_net(oldsk),
inet_sk(oldsk)->inet_num)];
spin_lock_bh(&head->lock);
pp = sctp_sk(oldsk)->bind_hash;
sk_add_bind_node(newsk, &pp->owner);
sctp_sk(newsk)->bind_hash = pp;
inet_sk(newsk)->inet_num = inet_sk(oldsk)->inet_num;
spin_unlock_bh(&head->lock);
/* Copy the bind_addr list from the original endpoint to the new
* endpoint so that we can handle restarts properly
*/
sctp_bind_addr_dup(&newsp->ep->base.bind_addr,
&oldsp->ep->base.bind_addr, GFP_KERNEL);
/* Move any messages in the old socket's receive queue that are for the
* peeled off association to the new socket's receive queue.
*/
sctp_skb_for_each(skb, &oldsk->sk_receive_queue, tmp) {
event = sctp_skb2event(skb);
if (event->asoc == assoc) {
__skb_unlink(skb, &oldsk->sk_receive_queue);
__skb_queue_tail(&newsk->sk_receive_queue, skb);
sctp_skb_set_owner_r_frag(skb, newsk);
}
}
/* Clean up any messages pending delivery due to partial
* delivery. Three cases:
* 1) No partial deliver; no work.
* 2) Peeling off partial delivery; keep pd_lobby in new pd_lobby.
* 3) Peeling off non-partial delivery; move pd_lobby to receive_queue.
*/
skb_queue_head_init(&newsp->pd_lobby);
atomic_set(&sctp_sk(newsk)->pd_mode, assoc->ulpq.pd_mode);
if (atomic_read(&sctp_sk(oldsk)->pd_mode)) {
struct sk_buff_head *queue;
/* Decide which queue to move pd_lobby skbs to. */
if (assoc->ulpq.pd_mode) {
queue = &newsp->pd_lobby;
} else
queue = &newsk->sk_receive_queue;
/* Walk through the pd_lobby, looking for skbs that
* need to be moved to the new socket.
*/
sctp_skb_for_each(skb, &oldsp->pd_lobby, tmp) {
event = sctp_skb2event(skb);
if (event->asoc == assoc) {
__skb_unlink(skb, &oldsp->pd_lobby);
__skb_queue_tail(queue, skb);
sctp_skb_set_owner_r_frag(skb, newsk);
}
}
/* Clear up any skbs waiting for the partial
* delivery to finish.
*/
if (assoc->ulpq.pd_mode)
sctp_clear_pd(oldsk, NULL);
}
sctp_skb_for_each(skb, &assoc->ulpq.reasm, tmp)
sctp_skb_set_owner_r_frag(skb, newsk);
sctp_skb_for_each(skb, &assoc->ulpq.lobby, tmp)
sctp_skb_set_owner_r_frag(skb, newsk);
/* Set the type of socket to indicate that it is peeled off from the
* original UDP-style socket or created with the accept() call on a
* TCP-style socket.
*/
newsp->type = type;
/* Mark the new socket "in-use" by the user so that any packets
* that may arrive on the association after we've moved it are
* queued to the backlog. This prevents a potential race between
* backlog processing on the old socket and new-packet processing
* on the new socket.
*
* The caller has just allocated newsk so we can guarantee that other
* paths won't try to lock it and then oldsk.
*/
lock_sock_nested(newsk, SINGLE_DEPTH_NESTING);
sctp_assoc_migrate(assoc, newsk);
/* If the association on the newsk is already closed before accept()
* is called, set RCV_SHUTDOWN flag.
*/
if (sctp_state(assoc, CLOSED) && sctp_style(newsk, TCP)) {
newsk->sk_state = SCTP_SS_CLOSED;
newsk->sk_shutdown |= RCV_SHUTDOWN;
} else {
newsk->sk_state = SCTP_SS_ESTABLISHED;
}
release_sock(newsk);
}
/* This proto struct describes the ULP interface for SCTP. */
struct proto sctp_prot = {
.name = "SCTP",
.owner = THIS_MODULE,
.close = sctp_close,
.connect = sctp_connect,
.disconnect = sctp_disconnect,
.accept = sctp_accept,
.ioctl = sctp_ioctl,
.init = sctp_init_sock,
.destroy = sctp_destroy_sock,
.shutdown = sctp_shutdown,
.setsockopt = sctp_setsockopt,
.getsockopt = sctp_getsockopt,
.sendmsg = sctp_sendmsg,
.recvmsg = sctp_recvmsg,
.bind = sctp_bind,
.backlog_rcv = sctp_backlog_rcv,
.hash = sctp_hash,
.unhash = sctp_unhash,
.get_port = sctp_get_port,
.obj_size = sizeof(struct sctp_sock),
.sysctl_mem = sysctl_sctp_mem,
.sysctl_rmem = sysctl_sctp_rmem,
.sysctl_wmem = sysctl_sctp_wmem,
.memory_pressure = &sctp_memory_pressure,
.enter_memory_pressure = sctp_enter_memory_pressure,
.memory_allocated = &sctp_memory_allocated,
.sockets_allocated = &sctp_sockets_allocated,
};
#if IS_ENABLED(CONFIG_IPV6)
#include <net/transp_v6.h>
static void sctp_v6_destroy_sock(struct sock *sk)
{
sctp_destroy_sock(sk);
inet6_destroy_sock(sk);
}
struct proto sctpv6_prot = {
.name = "SCTPv6",
.owner = THIS_MODULE,
.close = sctp_close,
.connect = sctp_connect,
.disconnect = sctp_disconnect,
.accept = sctp_accept,
.ioctl = sctp_ioctl,
.init = sctp_init_sock,
.destroy = sctp_v6_destroy_sock,
.shutdown = sctp_shutdown,
.setsockopt = sctp_setsockopt,
.getsockopt = sctp_getsockopt,
.sendmsg = sctp_sendmsg,
.recvmsg = sctp_recvmsg,
.bind = sctp_bind,
.backlog_rcv = sctp_backlog_rcv,
.hash = sctp_hash,
.unhash = sctp_unhash,
.get_port = sctp_get_port,
.obj_size = sizeof(struct sctp6_sock),
.sysctl_mem = sysctl_sctp_mem,
.sysctl_rmem = sysctl_sctp_rmem,
.sysctl_wmem = sysctl_sctp_wmem,
.memory_pressure = &sctp_memory_pressure,
.enter_memory_pressure = sctp_enter_memory_pressure,
.memory_allocated = &sctp_memory_allocated,
.sockets_allocated = &sctp_sockets_allocated,
};
#endif /* IS_ENABLED(CONFIG_IPV6) */
| ./CrossVul/dataset_final_sorted/CWE-416/c/good_2838_0 |
crossvul-cpp_data_good_2437_0 | /*
* common UDP/RAW code
* Linux INET implementation
*
* Authors:
* Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/types.h>
#include <linux/module.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <net/ip.h>
#include <net/sock.h>
#include <net/route.h>
#include <net/tcp_states.h>
int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
struct inet_sock *inet = inet_sk(sk);
struct sockaddr_in *usin = (struct sockaddr_in *) uaddr;
struct flowi4 *fl4;
struct rtable *rt;
__be32 saddr;
int oif;
int err;
if (addr_len < sizeof(*usin))
return -EINVAL;
if (usin->sin_family != AF_INET)
return -EAFNOSUPPORT;
sk_dst_reset(sk);
lock_sock(sk);
oif = sk->sk_bound_dev_if;
saddr = inet->inet_saddr;
if (ipv4_is_multicast(usin->sin_addr.s_addr)) {
if (!oif)
oif = inet->mc_index;
if (!saddr)
saddr = inet->mc_addr;
}
fl4 = &inet->cork.fl.u.ip4;
rt = ip_route_connect(fl4, usin->sin_addr.s_addr, saddr,
RT_CONN_FLAGS(sk), oif,
sk->sk_protocol,
inet->inet_sport, usin->sin_port, sk);
if (IS_ERR(rt)) {
err = PTR_ERR(rt);
if (err == -ENETUNREACH)
IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
goto out;
}
if ((rt->rt_flags & RTCF_BROADCAST) && !sock_flag(sk, SOCK_BROADCAST)) {
ip_rt_put(rt);
err = -EACCES;
goto out;
}
if (!inet->inet_saddr)
inet->inet_saddr = fl4->saddr; /* Update source address */
if (!inet->inet_rcv_saddr) {
inet->inet_rcv_saddr = fl4->saddr;
if (sk->sk_prot->rehash)
sk->sk_prot->rehash(sk);
}
inet->inet_daddr = fl4->daddr;
inet->inet_dport = usin->sin_port;
sk->sk_state = TCP_ESTABLISHED;
inet->inet_id = jiffies;
sk_dst_set(sk, &rt->dst);
err = 0;
out:
release_sock(sk);
return err;
}
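/* An illustrative user-space sketch (not from this file; the address and
 * port are hypothetical) of the path above: connect(2) on a datagram socket
 * pins the destination, so plain send(2) works afterwards and routing
 * errors are reported back. Error handling is elided.
 *
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *	struct sockaddr_in dst = { .sin_family = AF_INET,
 *				   .sin_port = htons(53) };
 *	inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);
 *	connect(fd, (struct sockaddr *)&dst, sizeof(dst));
 *	send(fd, "x", 1, 0);	// no per-call destination needed
 */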
EXPORT_SYMBOL(ip4_datagram_connect);
/* Because UDP xmit path can manipulate sk_dst_cache without holding
* socket lock, we need to use sk_dst_set() here,
* even if we own the socket lock.
*/
void ip4_datagram_release_cb(struct sock *sk)
{
const struct inet_sock *inet = inet_sk(sk);
const struct ip_options_rcu *inet_opt;
__be32 daddr = inet->inet_daddr;
struct dst_entry *dst;
struct flowi4 fl4;
struct rtable *rt;
rcu_read_lock();
dst = __sk_dst_get(sk);
if (!dst || !dst->obsolete || dst->ops->check(dst, 0)) {
rcu_read_unlock();
return;
}
inet_opt = rcu_dereference(inet->inet_opt);
if (inet_opt && inet_opt->opt.srr)
daddr = inet_opt->opt.faddr;
rt = ip_route_output_ports(sock_net(sk), &fl4, sk, daddr,
inet->inet_saddr, inet->inet_dport,
inet->inet_sport, sk->sk_protocol,
RT_CONN_FLAGS(sk), sk->sk_bound_dev_if);
dst = !IS_ERR(rt) ? &rt->dst : NULL;
sk_dst_set(sk, dst);
rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(ip4_datagram_release_cb);
| ./CrossVul/dataset_final_sorted/CWE-416/c/good_2437_0 |
crossvul-cpp_data_good_1390_0 | /* libcomps - C alternative to yum.comps library
* Copyright (C) 2013 Jindrich Luza
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
* USA
*/
#include "comps_mradix.h"
#include <stdio.h>
void comps_mrtree_data_destroy(COMPS_MRTreeData * rtd) {
free(rtd->key);
comps_hslist_destroy(&rtd->data);
comps_hslist_destroy(&rtd->subnodes);
free(rtd);
}
inline void comps_mrtree_data_destroy_v(void * rtd) {
comps_mrtree_data_destroy((COMPS_MRTreeData*)rtd);
}
COMPS_MRTreeData * comps_mrtree_data_create_n(COMPS_MRTree * tree, char * key,
size_t keylen, void * data) {
COMPS_MRTreeData * rtd;
if ((rtd = malloc(sizeof(*rtd))) == NULL)
return NULL;
if ((rtd->key = malloc(sizeof(char) * (keylen+1))) == NULL) {
free(rtd);
return NULL;
}
memcpy(rtd->key, key, sizeof(char)*keylen);
rtd->key[keylen] = 0;
rtd->is_leaf = 1;
rtd->data = comps_hslist_create();
comps_hslist_init(rtd->data, NULL, tree->data_cloner,
tree->data_destructor);
if (data)
comps_hslist_append(rtd->data, data, 0);
rtd->subnodes = comps_hslist_create();
comps_hslist_init(rtd->subnodes, NULL,
NULL,
&comps_mrtree_data_destroy_v);
return rtd;
}
COMPS_MRTreeData * comps_mrtree_data_create(COMPS_MRTree * tree,
char * key, void * data) {
return comps_mrtree_data_create_n(tree, key, strlen(key), data);
}
COMPS_MRTree * comps_mrtree_create(void* (*data_constructor)(void*),
void* (*data_cloner)(void*),
void (*data_destructor)(void*)) {
COMPS_MRTree *ret;
if ((ret = malloc(sizeof(COMPS_MRTree))) == NULL)
return NULL;
ret->subnodes = comps_hslist_create();
comps_hslist_init(ret->subnodes, NULL, NULL, &comps_mrtree_data_destroy_v);
if (ret->subnodes == NULL) {
free(ret);
return NULL;
}
ret->data_constructor = data_constructor;
ret->data_cloner = data_cloner;
ret->data_destructor = data_destructor;
return ret;
}
void comps_mrtree_destroy(COMPS_MRTree * rt) {
comps_hslist_destroy(&(rt->subnodes));
free(rt);
}
void comps_mrtree_print(COMPS_HSList * hl, unsigned deep) {
COMPS_HSListItem * it;
for (it = hl->first; it != NULL; it=it->next) {
printf("%d %s\n",deep, (((COMPS_MRTreeData*)it->data)->key));
comps_mrtree_print(((COMPS_MRTreeData*)it->data)->subnodes, deep+1);
}
}
void comps_mrtree_values_walk(COMPS_MRTree * rt, void* udata,
void (*walk_f)(void*, void*)) {
COMPS_HSList *tmplist, *tmp_subnodes;
COMPS_HSListItem *it, *it2;
tmplist = comps_hslist_create();
comps_hslist_init(tmplist, NULL, NULL, NULL);
comps_hslist_append(tmplist, rt->subnodes, 0);
while (tmplist->first != NULL) {
it = tmplist->first;
comps_hslist_remove(tmplist, tmplist->first);
tmp_subnodes = (COMPS_HSList*)it->data;
free(it);
for (it = tmp_subnodes->first; it != NULL; it=it->next) {
if (((COMPS_MRTreeData*)it->data)->subnodes->first) {
comps_hslist_append(tmplist,
((COMPS_MRTreeData*)it->data)->subnodes, 0);
}
for (it2 = (COMPS_HSListItem*)((COMPS_MRTreeData*)it->data)->data->first;
it2 != NULL; it2 = it2->next) {
walk_f(udata, it2->data);
}
}
}
comps_hslist_destroy(&tmplist);
}
COMPS_MRTree * comps_mrtree_clone(COMPS_MRTree * rt) {
COMPS_HSList * to_clone, *tmplist, *new_subnodes;
COMPS_MRTree * ret;
COMPS_HSListItem *it, *it2;
COMPS_MRTreeData *rtdata;
COMPS_HSList *new_data_list;
to_clone = comps_hslist_create();
comps_hslist_init(to_clone, NULL, NULL, NULL);
ret = comps_mrtree_create(rt->data_constructor, rt->data_cloner,
rt->data_destructor);
for (it = rt->subnodes->first; it != NULL; it = it->next) {
rtdata = comps_mrtree_data_create(rt,
((COMPS_MRTreeData*)it->data)->key, NULL);
new_data_list = comps_hslist_clone(((COMPS_MRTreeData*)it->data)->data);
comps_hslist_destroy(&rtdata->data);
comps_hslist_destroy(&rtdata->subnodes);
rtdata->subnodes = ((COMPS_MRTreeData*)it->data)->subnodes;
rtdata->data = new_data_list;
comps_hslist_append(ret->subnodes, rtdata, 0);
comps_hslist_append(to_clone, rtdata, 0);
}
while (to_clone->first) {
it2 = to_clone->first;
tmplist = ((COMPS_MRTreeData*)it2->data)->subnodes;
comps_hslist_remove(to_clone, to_clone->first);
new_subnodes = comps_hslist_create();
comps_hslist_init(new_subnodes, NULL, NULL, &comps_mrtree_data_destroy_v);
for (it = tmplist->first; it != NULL; it = it->next) {
rtdata = comps_mrtree_data_create(rt,
((COMPS_MRTreeData*)it->data)->key, NULL);
new_data_list = comps_hslist_clone(((COMPS_MRTreeData*)it->data)->data);
comps_hslist_destroy(&rtdata->subnodes);
comps_hslist_destroy(&rtdata->data);
rtdata->subnodes = ((COMPS_MRTreeData*)it->data)->subnodes;
rtdata->data = new_data_list;
comps_hslist_append(new_subnodes, rtdata, 0);
comps_hslist_append(to_clone, rtdata, 0);
}
((COMPS_MRTreeData*)it2->data)->subnodes = new_subnodes;
free(it2);
}
comps_hslist_destroy(&to_clone);
return ret;
}
void comps_mrtree_unite(COMPS_MRTree *rt1, COMPS_MRTree *rt2) {
COMPS_HSList *tmplist, *tmp_subnodes;
COMPS_HSListItem *it, *it2;
struct Pair {
COMPS_HSList * subnodes;
char * key;
} *pair, *parent_pair;
pair = malloc(sizeof(struct Pair));
pair->subnodes = rt2->subnodes;
pair->key = NULL;
tmplist = comps_hslist_create();
comps_hslist_init(tmplist, NULL, NULL, &free);
comps_hslist_append(tmplist, pair, 0);
while (tmplist->first != NULL) {
it = tmplist->first;
comps_hslist_remove(tmplist, tmplist->first);
tmp_subnodes = ((struct Pair*)it->data)->subnodes;
parent_pair = (struct Pair*) it->data;
free(it);
for (it = tmp_subnodes->first; it != NULL; it=it->next) {
pair = malloc(sizeof(struct Pair));
pair->subnodes = ((COMPS_MRTreeData*)it->data)->subnodes;
if (parent_pair->key != NULL) {
pair->key =
malloc(sizeof(char)
* (strlen(((COMPS_MRTreeData*)it->data)->key)
+ strlen(parent_pair->key) + 1));
memcpy(pair->key, parent_pair->key,
sizeof(char) * strlen(parent_pair->key));
memcpy(pair->key+strlen(parent_pair->key),
((COMPS_MRTreeData*)it->data)->key,
sizeof(char)*(strlen(((COMPS_MRTreeData*)it->data)->key)+1));
} else {
pair->key = malloc(sizeof(char)*
(strlen(((COMPS_MRTreeData*)it->data)->key) +
1));
memcpy(pair->key, ((COMPS_MRTreeData*)it->data)->key,
sizeof(char)*(strlen(((COMPS_MRTreeData*)it->data)->key)+1));
}
/* current node has data */
if (((COMPS_MRTreeData*)it->data)->data->first != NULL) {
for (it2 = ((COMPS_MRTreeData*)it->data)->data->first;
it2 != NULL; it2 = it2->next) {
comps_mrtree_set(rt1, pair->key, it2->data);
}
if (((COMPS_MRTreeData*)it->data)->subnodes->first) {
comps_hslist_append(tmplist, pair, 0);
} else {
free(pair->key);
free(pair);
}
/* current node hasn't data */
} else {
if (((COMPS_MRTreeData*)it->data)->subnodes->first) {
comps_hslist_append(tmplist, pair, 0);
} else {
free(pair->key);
free(pair);
}
}
}
free(parent_pair->key);
free(parent_pair);
}
comps_hslist_destroy(&tmplist);
}
COMPS_HSList* comps_mrtree_keys(COMPS_MRTree * rt) {
COMPS_HSList *tmplist, *tmp_subnodes, *ret;
COMPS_HSListItem *it;
struct Pair {
COMPS_HSList * subnodes;
char * key;
char added;
} *pair, *parent_pair;
pair = malloc(sizeof(struct Pair));
pair->subnodes = rt->subnodes;
pair->key = NULL;
pair->added = 0;
tmplist = comps_hslist_create();
comps_hslist_init(tmplist, NULL, NULL, &free);
ret = comps_hslist_create();
comps_hslist_init(ret, NULL, NULL, &free);
comps_hslist_append(tmplist, pair, 0);
while (tmplist->first != NULL) {
it = tmplist->first;
comps_hslist_remove(tmplist, tmplist->first);
tmp_subnodes = ((struct Pair*)it->data)->subnodes;
parent_pair = (struct Pair*) it->data;
free(it);
for (it = tmp_subnodes->first; it != NULL; it=it->next) {
pair = malloc(sizeof(struct Pair));
pair->subnodes = ((COMPS_MRTreeData*)it->data)->subnodes;
pair->added = 0;
if (parent_pair->key != NULL) {
pair->key =
malloc(sizeof(char)
* (strlen(((COMPS_MRTreeData*)it->data)->key)
+ strlen(parent_pair->key) + 1));
memcpy(pair->key, parent_pair->key,
sizeof(char) * strlen(parent_pair->key));
memcpy(pair->key+strlen(parent_pair->key),
((COMPS_MRTreeData*)it->data)->key,
sizeof(char)*(strlen(((COMPS_MRTreeData*)it->data)->key)+1));
} else {
pair->key = malloc(sizeof(char)*
(strlen(((COMPS_MRTreeData*)it->data)->key) +
1));
memcpy(pair->key, ((COMPS_MRTreeData*)it->data)->key,
sizeof(char)*(strlen(((COMPS_MRTreeData*)it->data)->key)+1));
}
/* current node has data */
if (((COMPS_MRTreeData*)it->data)->data->first != NULL) {
//printf("data not null for |%s|\n", pair->key);
comps_hslist_append(ret, pair->key, 0);
pair->added = 1;
if (((COMPS_MRTreeData*)it->data)->subnodes->first != NULL) {
// printf("subnodes found\b");
comps_hslist_append(tmplist, pair, 0);
} else {
free(pair);
}
/* current node hasn't data */
} else {
if (((COMPS_MRTreeData*)it->data)->subnodes->first) {
comps_hslist_append(tmplist, pair, 0);
} else {
free(pair->key);
free(pair);
}
}
}
if (parent_pair->added == 0)
free(parent_pair->key);
free(parent_pair);
}
comps_hslist_destroy(&tmplist);
return ret;
}
void __comps_mrtree_set(COMPS_MRTree * rt, char * key, size_t len, void * data)
{
static COMPS_HSListItem *it;
COMPS_HSList *subnodes;
COMPS_MRTreeData *rtd;
static COMPS_MRTreeData *rtdata;
size_t _len, offset=0;
unsigned x, found = 0;
void *ndata;
char ended;
if (rt->subnodes == NULL)
return;
if (rt->data_constructor)
ndata = rt->data_constructor(data);
else
ndata = data;
subnodes = rt->subnodes;
while (offset != len)
{
found = 0;
for (it = subnodes->first; it != NULL; it=it->next) {
if (((COMPS_MRTreeData*)it->data)->key[0] == key[offset]) {
found = 1;
break;
}
}
if (!found) { // not found in subnodes; create new subnode
rtd = comps_mrtree_data_create(rt, key+offset, ndata);
comps_hslist_append(subnodes, rtd, 0);
return;
} else {
rtdata = (COMPS_MRTreeData*)it->data;
ended = 0;
for (x=1; ;x++) {
if (rtdata->key[x] == 0) ended += 1;
if (x == len - offset) ended += 2;
if (ended != 0) break;
if (key[offset+x] != rtdata->key[x]) break;
}
if (ended == 3) { //keys equals; append new data
comps_hslist_append(rtdata->data, ndata, 0);
return;
} else if (ended == 2) { //global key ends first; make global leaf
comps_hslist_remove(subnodes, it);
it->next = NULL;
rtd = comps_mrtree_data_create(rt, key+offset, ndata);
comps_hslist_append(subnodes, rtd, 0);
((COMPS_MRTreeData*)subnodes->last->data)->subnodes->last = it;
((COMPS_MRTreeData*)subnodes->last->data)->subnodes->first = it;
_len = strlen(key + offset);
memmove(rtdata->key,rtdata->key+_len, strlen(rtdata->key) - _len);
rtdata->key[strlen(rtdata->key) - _len] = 0;
rtdata->key = realloc(rtdata->key,
sizeof(char)* (strlen(rtdata->key)+1));
return;
} else if (ended == 1) { //local key ends first; go deeper
subnodes = rtdata->subnodes;
offset += x;
} else { /* keys differ */
void *tmpdata = rtdata->data;
COMPS_HSList *tmpnodes = rtdata->subnodes;
int cmpret = strcmp(key+offset+x, rtdata->key+x);
rtdata->subnodes = comps_hslist_create();
comps_hslist_init(rtdata->subnodes, NULL, NULL,
&comps_mrtree_data_destroy_v);
rtdata->data = NULL;
if (cmpret > 0) {
rtd = comps_mrtree_data_create(rt, rtdata->key+x, tmpdata);
comps_hslist_destroy(&rtd->subnodes);
rtd->subnodes = tmpnodes;
comps_hslist_append(rtdata->subnodes, rtd, 0);
rtd = comps_mrtree_data_create(rt, key+offset+x, ndata);
comps_hslist_append(rtdata->subnodes, rtd, 0);
} else {
rtd = comps_mrtree_data_create(rt, key+offset+x, ndata);
comps_hslist_append(rtdata->subnodes, rtd, 0);
rtd = comps_mrtree_data_create(rt, rtdata->key+x, tmpdata);
comps_hslist_destroy(&rtd->subnodes);
rtd->subnodes = tmpnodes;
comps_hslist_append(rtdata->subnodes, rtd, 0);
}
rtdata->key = realloc(rtdata->key, sizeof(char)*(x+1));
rtdata->key[x] = 0;
return;
}
}
}
}
void comps_mrtree_set(COMPS_MRTree * rt, char * key, void * data)
{
__comps_mrtree_set(rt, key, strlen(key), data);
}
void comps_mrtree_set_n(COMPS_MRTree * rt, char * key, size_t len, void * data)
{
__comps_mrtree_set(rt, key, len, data);
}
COMPS_HSList * comps_mrtree_get(COMPS_MRTree * rt, const char * key) {
COMPS_HSList * subnodes;
COMPS_HSListItem * it = NULL;
COMPS_MRTreeData * rtdata;
unsigned int offset, len, x;
char found, ended;
len = strlen(key);
offset = 0;
subnodes = rt->subnodes;
while (offset != len) {
found = 0;
for (it = subnodes->first; it != NULL; it=it->next) {
if (((COMPS_MRTreeData*)it->data)->key[0] == key[offset]) {
found = 1;
break;
}
}
if (!found)
return NULL;
rtdata = (COMPS_MRTreeData*)it->data;
for (x=1; ;x++) {
ended=0;
if (rtdata->key[x] == 0) ended += 1;
if (x == len - offset) ended += 2;
if (ended != 0) break;
if (key[offset+x] != rtdata->key[x]) break;
}
if (ended == 3) return rtdata->data;
else if (ended == 1) offset+=x;
else return NULL;
subnodes = ((COMPS_MRTreeData*)it->data)->subnodes;
}
if (it)
return ((COMPS_MRTreeData*)it->data)->data;
else return NULL;
}
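/* An illustrative usage sketch (not from this file; some_ptr1/some_ptr2 are
 * hypothetical caller data) of the multi-value API above. Passing NULL for
 * the constructor, cloner and destructor stores raw pointers without
 * copying or freeing them.
 *
 *	COMPS_MRTree *t = comps_mrtree_create(NULL, NULL, NULL);
 *	comps_mrtree_set(t, "core", some_ptr1);
 *	comps_mrtree_set(t, "core", some_ptr2);	// second value, same key
 *	COMPS_HSList *vals = comps_mrtree_get(t, "core"); // holds both
 *	comps_mrtree_destroy(t);
 */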
COMPS_HSList ** comps_mrtree_getp(COMPS_MRTree * rt, const char * key) {
COMPS_HSList * subnodes;
COMPS_HSListItem * it = NULL;
COMPS_MRTreeData * rtdata;
unsigned int offset, len, x;
char found, ended;
len = strlen(key);
offset = 0;
subnodes = rt->subnodes;
while (offset != len) {
found = 0;
for (it = subnodes->first; it != NULL; it=it->next) {
if (((COMPS_MRTreeData*)it->data)->key[0] == key[offset]) {
found = 1;
break;
}
}
if (!found)
return NULL;
rtdata = (COMPS_MRTreeData*)it->data;
for (x=1; ;x++) {
ended=0;
if (rtdata->key[x] == 0) ended += 1;
if (x == len - offset) ended += 2;
if (ended != 0) break;
if (key[offset+x] != rtdata->key[x]) break;
}
if (ended == 3) return &rtdata->data;
else if (ended == 1) offset+=x;
else return NULL;
subnodes = ((COMPS_MRTreeData*)it->data)->subnodes;
}
if (it)
return &((COMPS_MRTreeData*)it->data)->data;
else return NULL;
}
void comps_mrtree_unset(COMPS_MRTree * rt, const char * key) {
COMPS_HSList * subnodes;
COMPS_HSListItem * it;
COMPS_MRTreeData * rtdata;
unsigned int offset, len, x;
char found, ended;
COMPS_HSList * path;
struct Relation {
COMPS_HSList * parent_nodes;
COMPS_HSListItem * child_it;
} *relation;
path = comps_hslist_create();
comps_hslist_init(path, NULL, NULL, &free);
len = strlen(key);
offset = 0;
subnodes = rt->subnodes;
while (offset != len) {
found = 0;
for (it = subnodes->first; it != NULL; it=it->next) {
if (((COMPS_MRTreeData*)it->data)->key[0] == key[offset]) {
found = 1;
break;
}
}
if (!found) {
comps_hslist_destroy(&path);
return;
}
rtdata = (COMPS_MRTreeData*)it->data;
for (x=1; ;x++) {
ended=0;
if (rtdata->key[x] == 0) ended += 1;
if (x == len - offset) ended += 2;
if (ended != 0) break;
if (key[offset+x] != rtdata->key[x]) break;
}
if (ended == 3) {
/* remove node from tree only if there's no descendant */
if (rtdata->subnodes->last == NULL) {
printf("removing all\n");
comps_hslist_remove(subnodes, it);
comps_mrtree_data_destroy(rtdata);
free(it);
}
else {
printf("removing data only\n");
comps_hslist_clear(rtdata->data);
rtdata->is_leaf = 0;
}
if (path->last == NULL) {
comps_hslist_destroy(&path);
return;
}
rtdata = (COMPS_MRTreeData*)
((struct Relation*)path->last->data)->child_it->data;
/* remove all predecessors of the deleted node (recursively) that have no children */
while (rtdata->subnodes->last == NULL) {
printf("removing '%s'\n", rtdata->key);
comps_mrtree_data_destroy(rtdata);
comps_hslist_remove(
((struct Relation*)path->last->data)->parent_nodes,
((struct Relation*)path->last->data)->child_it);
free(((struct Relation*)path->last->data)->child_it);
it = path->last;
comps_hslist_remove(path, path->last);
free(it);
rtdata = (COMPS_MRTreeData*)
((struct Relation*)path->last->data)->child_it->data;
}
comps_hslist_destroy(&path);
return;
}
else if (ended == 1) offset+=x;
else {
comps_hslist_destroy(&path);
return;
}
if ((relation = malloc(sizeof(struct Relation))) == NULL) {
comps_hslist_destroy(&path);
return;
}
subnodes = ((COMPS_MRTreeData*)it->data)->subnodes;
relation->parent_nodes = subnodes;
relation->child_it = it;
comps_hslist_append(path, (void*)relation, 0);
}
comps_hslist_destroy(&path);
return;
}
void comps_mrtree_clear(COMPS_MRTree * rt) {
COMPS_HSListItem *it, *oldit;
if (rt == NULL) return;
if (rt->subnodes == NULL) return;
oldit = rt->subnodes->first;
it = (oldit)?oldit->next:NULL;
for (;it != NULL; it=it->next) {
if (rt->subnodes->data_destructor != NULL)
rt->subnodes->data_destructor(oldit->data);
free(oldit);
oldit = it;
}
if (oldit) {
if (rt->subnodes->data_destructor != NULL)
rt->subnodes->data_destructor(oldit->data);
free(oldit);
}
}
| ./CrossVul/dataset_final_sorted/CWE-416/c/good_1390_0 |
crossvul-cpp_data_good_5021_5 | /*
* Extension Header handling for IPv6
* Linux INET6 implementation
*
* Authors:
* Pedro Roque <roque@di.fc.ul.pt>
* Andi Kleen <ak@muc.de>
* Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
/* Changes:
* yoshfuji : ensure not to overrun while parsing
* tlv options.
* Mitsuru KANDA @USAGI and: Remove ipv6_parse_exthdrs().
* YOSHIFUJI Hideaki @USAGI Register inbound extension header
* handlers as inet6_protocol{}.
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/icmpv6.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ipv6.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/rawv6.h>
#include <net/ndisc.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#if IS_ENABLED(CONFIG_IPV6_MIP6)
#include <net/xfrm.h>
#endif
#include <linux/uaccess.h>
/*
* Parsing tlv encoded headers.
*
* Parsing function "func" returns true if parsing succeeded
* and false if it failed.
* It MUST NOT touch skb->h.
*/
struct tlvtype_proc {
int type;
bool (*func)(struct sk_buff *skb, int offset);
};
/*********************
Generic functions
*********************/
/* An unknown option is detected, decide what to do */
static bool ip6_tlvopt_unknown(struct sk_buff *skb, int optoff)
{
switch ((skb_network_header(skb)[optoff] & 0xC0) >> 6) {
case 0: /* ignore */
return true;
case 1: /* drop packet */
break;
case 3: /* Send ICMP if not a multicast address and drop packet */
/* Actually, it is a redundant check. icmp_send
will recheck in any case.
*/
if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr))
break;
case 2: /* send ICMP PARM PROB regardless and drop packet */
icmpv6_param_prob(skb, ICMPV6_UNK_OPTION, optoff);
return false;
}
kfree_skb(skb);
return false;
}
/* Parse tlv encoded option header (hop-by-hop or destination) */
static bool ip6_parse_tlv(const struct tlvtype_proc *procs, struct sk_buff *skb)
{
const struct tlvtype_proc *curr;
const unsigned char *nh = skb_network_header(skb);
int off = skb_network_header_len(skb);
int len = (skb_transport_header(skb)[1] + 1) << 3;
int padlen = 0;
if (skb_transport_offset(skb) + len > skb_headlen(skb))
goto bad;
off += 2;
len -= 2;
while (len > 0) {
int optlen = nh[off + 1] + 2;
int i;
switch (nh[off]) {
case IPV6_TLV_PAD1:
optlen = 1;
padlen++;
if (padlen > 7)
goto bad;
break;
case IPV6_TLV_PADN:
/* RFC 2460 states that the purpose of PadN is
* to align the containing header to multiples
* of 8. 7 is therefore the highest valid value.
* See also RFC 4942, Section 2.1.9.5.
*/
padlen += optlen;
if (padlen > 7)
goto bad;
/* RFC 4942 recommends receiving hosts to
* actively check PadN payload to contain
* only zeroes.
*/
for (i = 2; i < optlen; i++) {
if (nh[off + i] != 0)
goto bad;
}
break;
default: /* Other TLV code so scan list */
if (optlen > len)
goto bad;
for (curr = procs; curr->type >= 0; curr++) {
if (curr->type == nh[off]) {
/* type specific length/alignment
checks will be performed in the
func(). */
if (curr->func(skb, off) == false)
return false;
break;
}
}
if (curr->type < 0) {
if (ip6_tlvopt_unknown(skb, off) == 0)
return false;
}
padlen = 0;
break;
}
off += optlen;
len -= optlen;
}
if (len == 0)
return true;
bad:
kfree_skb(skb);
return false;
}
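/* A worked example of the walk above, using the classic MLD hop-by-hop
 * options area "05 02 00 00 01 00": a Router Alert option (type 0x05,
 * length 2, value 0) is handed to its handler, then a two-byte PadN
 * (type 0x01, length 0) brings len to 0 and the header is accepted.
 * A PadN run longer than 7 bytes, or a non-zero pad byte, takes the
 * "bad" path instead.
 */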
/*****************************
Destination options header.
*****************************/
#if IS_ENABLED(CONFIG_IPV6_MIP6)
static bool ipv6_dest_hao(struct sk_buff *skb, int optoff)
{
struct ipv6_destopt_hao *hao;
struct inet6_skb_parm *opt = IP6CB(skb);
struct ipv6hdr *ipv6h = ipv6_hdr(skb);
struct in6_addr tmp_addr;
int ret;
if (opt->dsthao) {
net_dbg_ratelimited("hao duplicated\n");
goto discard;
}
opt->dsthao = opt->dst1;
opt->dst1 = 0;
hao = (struct ipv6_destopt_hao *)(skb_network_header(skb) + optoff);
if (hao->length != 16) {
net_dbg_ratelimited("hao invalid option length = %d\n",
hao->length);
goto discard;
}
if (!(ipv6_addr_type(&hao->addr) & IPV6_ADDR_UNICAST)) {
net_dbg_ratelimited("hao is not a unicast addr: %pI6\n",
&hao->addr);
goto discard;
}
ret = xfrm6_input_addr(skb, (xfrm_address_t *)&ipv6h->daddr,
(xfrm_address_t *)&hao->addr, IPPROTO_DSTOPTS);
if (unlikely(ret < 0))
goto discard;
if (skb_cloned(skb)) {
if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
goto discard;
/* update all variables used below to point into the copied skbuff */
hao = (struct ipv6_destopt_hao *)(skb_network_header(skb) +
optoff);
ipv6h = ipv6_hdr(skb);
}
if (skb->ip_summed == CHECKSUM_COMPLETE)
skb->ip_summed = CHECKSUM_NONE;
tmp_addr = ipv6h->saddr;
ipv6h->saddr = hao->addr;
hao->addr = tmp_addr;
if (skb->tstamp.tv64 == 0)
__net_timestamp(skb);
return true;
discard:
kfree_skb(skb);
return false;
}
#endif
static const struct tlvtype_proc tlvprocdestopt_lst[] = {
#if IS_ENABLED(CONFIG_IPV6_MIP6)
{
.type = IPV6_TLV_HAO,
.func = ipv6_dest_hao,
},
#endif
{-1, NULL}
};
static int ipv6_destopt_rcv(struct sk_buff *skb)
{
struct inet6_skb_parm *opt = IP6CB(skb);
#if IS_ENABLED(CONFIG_IPV6_MIP6)
__u16 dstbuf;
#endif
struct dst_entry *dst = skb_dst(skb);
if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) ||
!pskb_may_pull(skb, (skb_transport_offset(skb) +
((skb_transport_header(skb)[1] + 1) << 3)))) {
IP6_INC_STATS_BH(dev_net(dst->dev), ip6_dst_idev(dst),
IPSTATS_MIB_INHDRERRORS);
kfree_skb(skb);
return -1;
}
opt->lastopt = opt->dst1 = skb_network_header_len(skb);
#if IS_ENABLED(CONFIG_IPV6_MIP6)
dstbuf = opt->dst1;
#endif
if (ip6_parse_tlv(tlvprocdestopt_lst, skb)) {
skb->transport_header += (skb_transport_header(skb)[1] + 1) << 3;
opt = IP6CB(skb);
#if IS_ENABLED(CONFIG_IPV6_MIP6)
opt->nhoff = dstbuf;
#else
opt->nhoff = opt->dst1;
#endif
return 1;
}
IP6_INC_STATS_BH(dev_net(dst->dev),
ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS);
return -1;
}
/********************************
Routing header.
********************************/
/* called with rcu_read_lock() */
static int ipv6_rthdr_rcv(struct sk_buff *skb)
{
struct inet6_skb_parm *opt = IP6CB(skb);
struct in6_addr *addr = NULL;
struct in6_addr daddr;
struct inet6_dev *idev;
int n, i;
struct ipv6_rt_hdr *hdr;
struct rt0_hdr *rthdr;
struct net *net = dev_net(skb->dev);
int accept_source_route = net->ipv6.devconf_all->accept_source_route;
idev = __in6_dev_get(skb->dev);
if (idev && accept_source_route > idev->cnf.accept_source_route)
accept_source_route = idev->cnf.accept_source_route;
if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) ||
!pskb_may_pull(skb, (skb_transport_offset(skb) +
((skb_transport_header(skb)[1] + 1) << 3)))) {
IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_INHDRERRORS);
kfree_skb(skb);
return -1;
}
hdr = (struct ipv6_rt_hdr *)skb_transport_header(skb);
if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) ||
skb->pkt_type != PACKET_HOST) {
IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_INADDRERRORS);
kfree_skb(skb);
return -1;
}
looped_back:
if (hdr->segments_left == 0) {
switch (hdr->type) {
#if IS_ENABLED(CONFIG_IPV6_MIP6)
case IPV6_SRCRT_TYPE_2:
/* Silently discard type 2 header unless it was
* processed by this node
*/
if (!addr) {
IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_INADDRERRORS);
kfree_skb(skb);
return -1;
}
break;
#endif
default:
break;
}
opt->lastopt = opt->srcrt = skb_network_header_len(skb);
skb->transport_header += (hdr->hdrlen + 1) << 3;
opt->dst0 = opt->dst1;
opt->dst1 = 0;
opt->nhoff = (&hdr->nexthdr) - skb_network_header(skb);
return 1;
}
switch (hdr->type) {
#if IS_ENABLED(CONFIG_IPV6_MIP6)
case IPV6_SRCRT_TYPE_2:
if (accept_source_route < 0)
goto unknown_rh;
/* Silently discard invalid RTH type 2 */
if (hdr->hdrlen != 2 || hdr->segments_left != 1) {
IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_INHDRERRORS);
kfree_skb(skb);
return -1;
}
break;
#endif
default:
goto unknown_rh;
}
/*
* This is the routing header forwarding algorithm from
* RFC 2460, page 16.
*/
n = hdr->hdrlen >> 1;
if (hdr->segments_left > n) {
IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_INHDRERRORS);
icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
((&hdr->segments_left) -
skb_network_header(skb)));
return -1;
}
/* We are about to mangle packet header. Be careful!
Do not damage packets queued somewhere.
*/
if (skb_cloned(skb)) {
/* the copy is a forwarded packet */
if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_OUTDISCARDS);
kfree_skb(skb);
return -1;
}
hdr = (struct ipv6_rt_hdr *)skb_transport_header(skb);
}
if (skb->ip_summed == CHECKSUM_COMPLETE)
skb->ip_summed = CHECKSUM_NONE;
i = n - --hdr->segments_left;
rthdr = (struct rt0_hdr *) hdr;
addr = rthdr->addr;
addr += i - 1;
switch (hdr->type) {
#if IS_ENABLED(CONFIG_IPV6_MIP6)
case IPV6_SRCRT_TYPE_2:
if (xfrm6_input_addr(skb, (xfrm_address_t *)addr,
(xfrm_address_t *)&ipv6_hdr(skb)->saddr,
IPPROTO_ROUTING) < 0) {
IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_INADDRERRORS);
kfree_skb(skb);
return -1;
}
if (!ipv6_chk_home_addr(dev_net(skb_dst(skb)->dev), addr)) {
IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_INADDRERRORS);
kfree_skb(skb);
return -1;
}
break;
#endif
default:
break;
}
if (ipv6_addr_is_multicast(addr)) {
IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_INADDRERRORS);
kfree_skb(skb);
return -1;
}
daddr = *addr;
*addr = ipv6_hdr(skb)->daddr;
ipv6_hdr(skb)->daddr = daddr;
skb_dst_drop(skb);
ip6_route_input(skb);
if (skb_dst(skb)->error) {
skb_push(skb, skb->data - skb_network_header(skb));
dst_input(skb);
return -1;
}
if (skb_dst(skb)->dev->flags&IFF_LOOPBACK) {
if (ipv6_hdr(skb)->hop_limit <= 1) {
IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_INHDRERRORS);
icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT,
0);
kfree_skb(skb);
return -1;
}
ipv6_hdr(skb)->hop_limit--;
goto looped_back;
}
skb_push(skb, skb->data - skb_network_header(skb));
dst_input(skb);
return -1;
unknown_rh:
IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INHDRERRORS);
icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
(&hdr->type) - skb_network_header(skb));
return -1;
}
static const struct inet6_protocol rthdr_protocol = {
.handler = ipv6_rthdr_rcv,
.flags = INET6_PROTO_NOPOLICY,
};
static const struct inet6_protocol destopt_protocol = {
.handler = ipv6_destopt_rcv,
.flags = INET6_PROTO_NOPOLICY,
};
static const struct inet6_protocol nodata_protocol = {
.handler = dst_discard,
.flags = INET6_PROTO_NOPOLICY,
};
int __init ipv6_exthdrs_init(void)
{
int ret;
ret = inet6_add_protocol(&rthdr_protocol, IPPROTO_ROUTING);
if (ret)
goto out;
ret = inet6_add_protocol(&destopt_protocol, IPPROTO_DSTOPTS);
if (ret)
goto out_rthdr;
ret = inet6_add_protocol(&nodata_protocol, IPPROTO_NONE);
if (ret)
goto out_destopt;
out:
return ret;
out_destopt:
inet6_del_protocol(&destopt_protocol, IPPROTO_DSTOPTS);
out_rthdr:
inet6_del_protocol(&rthdr_protocol, IPPROTO_ROUTING);
goto out;
};
void ipv6_exthdrs_exit(void)
{
inet6_del_protocol(&nodata_protocol, IPPROTO_NONE);
inet6_del_protocol(&destopt_protocol, IPPROTO_DSTOPTS);
inet6_del_protocol(&rthdr_protocol, IPPROTO_ROUTING);
}
/**********************************
Hop-by-hop options.
**********************************/
/*
* Note: we cannot rely on skb_dst(skb) before we assign it in ip6_route_input().
*/
static inline struct inet6_dev *ipv6_skb_idev(struct sk_buff *skb)
{
return skb_dst(skb) ? ip6_dst_idev(skb_dst(skb)) : __in6_dev_get(skb->dev);
}
static inline struct net *ipv6_skb_net(struct sk_buff *skb)
{
return skb_dst(skb) ? dev_net(skb_dst(skb)->dev) : dev_net(skb->dev);
}
/* Router Alert as of RFC 2711 */
static bool ipv6_hop_ra(struct sk_buff *skb, int optoff)
{
const unsigned char *nh = skb_network_header(skb);
if (nh[optoff + 1] == 2) {
IP6CB(skb)->flags |= IP6SKB_ROUTERALERT;
memcpy(&IP6CB(skb)->ra, nh + optoff + 2, sizeof(IP6CB(skb)->ra));
return true;
}
net_dbg_ratelimited("ipv6_hop_ra: wrong RA length %d\n",
nh[optoff + 1]);
kfree_skb(skb);
return false;
}
/* Jumbo payload */
static bool ipv6_hop_jumbo(struct sk_buff *skb, int optoff)
{
const unsigned char *nh = skb_network_header(skb);
struct net *net = ipv6_skb_net(skb);
u32 pkt_len;
if (nh[optoff + 1] != 4 || (optoff & 3) != 2) {
net_dbg_ratelimited("ipv6_hop_jumbo: wrong jumbo opt length/alignment %d\n",
nh[optoff+1]);
IP6_INC_STATS_BH(net, ipv6_skb_idev(skb),
IPSTATS_MIB_INHDRERRORS);
goto drop;
}
pkt_len = ntohl(*(__be32 *)(nh + optoff + 2));
if (pkt_len <= IPV6_MAXPLEN) {
IP6_INC_STATS_BH(net, ipv6_skb_idev(skb),
IPSTATS_MIB_INHDRERRORS);
icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff+2);
return false;
}
if (ipv6_hdr(skb)->payload_len) {
IP6_INC_STATS_BH(net, ipv6_skb_idev(skb),
IPSTATS_MIB_INHDRERRORS);
icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff);
return false;
}
if (pkt_len > skb->len - sizeof(struct ipv6hdr)) {
IP6_INC_STATS_BH(net, ipv6_skb_idev(skb),
IPSTATS_MIB_INTRUNCATEDPKTS);
goto drop;
}
if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr)))
goto drop;
return true;
drop:
kfree_skb(skb);
return false;
}
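/* A worked example of the checks above (the bytes are hypothetical): a
 * jumbo option "c2 04 00 01 00 00" at an offset with (optoff & 3) == 2
 * carries pkt_len 0x00010000 (65536), which passes the "> IPV6_MAXPLEN"
 * test. It is accepted only if the fixed header's payload_len is 0 and the
 * skb really holds 65536 bytes past the IPv6 header; anything shorter is
 * counted as truncated and dropped.
 */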
static const struct tlvtype_proc tlvprochopopt_lst[] = {
{
.type = IPV6_TLV_ROUTERALERT,
.func = ipv6_hop_ra,
},
{
.type = IPV6_TLV_JUMBO,
.func = ipv6_hop_jumbo,
},
{ -1, }
};
int ipv6_parse_hopopts(struct sk_buff *skb)
{
struct inet6_skb_parm *opt = IP6CB(skb);
/*
* skb_network_header(skb) is equal to skb->data, and
* skb_network_header_len(skb) is always equal to
* sizeof(struct ipv6hdr) by definition of
* hop-by-hop options.
*/
if (!pskb_may_pull(skb, sizeof(struct ipv6hdr) + 8) ||
!pskb_may_pull(skb, (sizeof(struct ipv6hdr) +
((skb_transport_header(skb)[1] + 1) << 3)))) {
kfree_skb(skb);
return -1;
}
opt->flags |= IP6SKB_HOPBYHOP;
if (ip6_parse_tlv(tlvprochopopt_lst, skb)) {
skb->transport_header += (skb_transport_header(skb)[1] + 1) << 3;
opt = IP6CB(skb);
opt->nhoff = sizeof(struct ipv6hdr);
return 1;
}
return -1;
}
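/*
 * Illustrative note on the length math above: the hop-by-hop header
 * stores its length in 8-octet units, not counting the first 8
 * octets, so skb_transport_header(skb)[1] == 1 describes a
 * (1 + 1) << 3 == 16 byte header. The first pskb_may_pull() makes
 * that length byte itself readable before the second call uses it.
 */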
/*
* Creating outbound headers.
*
* "build" functions work when skb is filled from head to tail (datagram)
* "push" functions work when headers are added from tail to head (tcp)
*
* In both cases we assume that the caller reserved enough room
* for headers.
*/
static void ipv6_push_rthdr(struct sk_buff *skb, u8 *proto,
struct ipv6_rt_hdr *opt,
struct in6_addr **addr_p)
{
struct rt0_hdr *phdr, *ihdr;
int hops;
ihdr = (struct rt0_hdr *) opt;
phdr = (struct rt0_hdr *) skb_push(skb, (ihdr->rt_hdr.hdrlen + 1) << 3);
memcpy(phdr, ihdr, sizeof(struct rt0_hdr));
hops = ihdr->rt_hdr.hdrlen >> 1;
if (hops > 1)
memcpy(phdr->addr, ihdr->addr + 1,
(hops - 1) * sizeof(struct in6_addr));
phdr->addr[hops - 1] = **addr_p;
*addr_p = ihdr->addr;
phdr->rt_hdr.nexthdr = *proto;
*proto = NEXTHDR_ROUTING;
}
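/*
 * Worked example (illustrative, not from the original source): for a
 * routing header listing the intermediate hops [A, B] while *addr_p
 * holds the final destination D, the code above emits an address
 * vector of [B, D] and rewrites *addr_p to A, so the caller puts A
 * in the wire daddr and D ends up as the last segment of the route.
 */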
static void ipv6_push_exthdr(struct sk_buff *skb, u8 *proto, u8 type, struct ipv6_opt_hdr *opt)
{
struct ipv6_opt_hdr *h = (struct ipv6_opt_hdr *)skb_push(skb, ipv6_optlen(opt));
memcpy(h, opt, ipv6_optlen(opt));
h->nexthdr = *proto;
*proto = type;
}
void ipv6_push_nfrag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt,
u8 *proto,
struct in6_addr **daddr)
{
if (opt->srcrt) {
ipv6_push_rthdr(skb, proto, opt->srcrt, daddr);
/*
* IPV6_RTHDRDSTOPTS is ignored
* unless IPV6_RTHDR is set (RFC3542).
*/
if (opt->dst0opt)
ipv6_push_exthdr(skb, proto, NEXTHDR_DEST, opt->dst0opt);
}
if (opt->hopopt)
ipv6_push_exthdr(skb, proto, NEXTHDR_HOP, opt->hopopt);
}
EXPORT_SYMBOL(ipv6_push_nfrag_opts);
void ipv6_push_frag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt, u8 *proto)
{
if (opt->dst1opt)
ipv6_push_exthdr(skb, proto, NEXTHDR_DEST, opt->dst1opt);
}
struct ipv6_txoptions *
ipv6_dup_options(struct sock *sk, struct ipv6_txoptions *opt)
{
struct ipv6_txoptions *opt2;
opt2 = sock_kmalloc(sk, opt->tot_len, GFP_ATOMIC);
if (opt2) {
long dif = (char *)opt2 - (char *)opt;
memcpy(opt2, opt, opt->tot_len);
if (opt2->hopopt)
*((char **)&opt2->hopopt) += dif;
if (opt2->dst0opt)
*((char **)&opt2->dst0opt) += dif;
if (opt2->dst1opt)
*((char **)&opt2->dst1opt) += dif;
if (opt2->srcrt)
*((char **)&opt2->srcrt) += dif;
atomic_set(&opt2->refcnt, 1);
}
return opt2;
}
EXPORT_SYMBOL_GPL(ipv6_dup_options);
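/*
 * Illustrative sketch of the pointer fixup in ipv6_dup_options()
 * (not part of the original source): the sub-option pointers of
 * *opt point inside the same allocation, so after the flat memcpy()
 * they still reference the old block. Adding dif rebases them:
 * if opt->hopopt == (char *)opt + 40, then after the fixup
 * opt2->hopopt == (char *)opt2 + 40.
 */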
static int ipv6_renew_option(void *ohdr,
struct ipv6_opt_hdr __user *newopt, int newoptlen,
int inherit,
struct ipv6_opt_hdr **hdr,
char **p)
{
if (inherit) {
if (ohdr) {
memcpy(*p, ohdr, ipv6_optlen((struct ipv6_opt_hdr *)ohdr));
*hdr = (struct ipv6_opt_hdr *)*p;
*p += CMSG_ALIGN(ipv6_optlen(*hdr));
}
} else {
if (newopt) {
if (copy_from_user(*p, newopt, newoptlen))
return -EFAULT;
*hdr = (struct ipv6_opt_hdr *)*p;
if (ipv6_optlen(*hdr) > newoptlen)
return -EINVAL;
*p += CMSG_ALIGN(newoptlen);
}
}
return 0;
}
struct ipv6_txoptions *
ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt,
int newtype,
struct ipv6_opt_hdr __user *newopt, int newoptlen)
{
int tot_len = 0;
char *p;
struct ipv6_txoptions *opt2;
int err;
if (opt) {
if (newtype != IPV6_HOPOPTS && opt->hopopt)
tot_len += CMSG_ALIGN(ipv6_optlen(opt->hopopt));
if (newtype != IPV6_RTHDRDSTOPTS && opt->dst0opt)
tot_len += CMSG_ALIGN(ipv6_optlen(opt->dst0opt));
if (newtype != IPV6_RTHDR && opt->srcrt)
tot_len += CMSG_ALIGN(ipv6_optlen(opt->srcrt));
if (newtype != IPV6_DSTOPTS && opt->dst1opt)
tot_len += CMSG_ALIGN(ipv6_optlen(opt->dst1opt));
}
if (newopt && newoptlen)
tot_len += CMSG_ALIGN(newoptlen);
if (!tot_len)
return NULL;
tot_len += sizeof(*opt2);
opt2 = sock_kmalloc(sk, tot_len, GFP_ATOMIC);
if (!opt2)
return ERR_PTR(-ENOBUFS);
memset(opt2, 0, tot_len);
atomic_set(&opt2->refcnt, 1);
opt2->tot_len = tot_len;
p = (char *)(opt2 + 1);
err = ipv6_renew_option(opt ? opt->hopopt : NULL, newopt, newoptlen,
newtype != IPV6_HOPOPTS,
&opt2->hopopt, &p);
if (err)
goto out;
err = ipv6_renew_option(opt ? opt->dst0opt : NULL, newopt, newoptlen,
newtype != IPV6_RTHDRDSTOPTS,
&opt2->dst0opt, &p);
if (err)
goto out;
err = ipv6_renew_option(opt ? opt->srcrt : NULL, newopt, newoptlen,
newtype != IPV6_RTHDR,
(struct ipv6_opt_hdr **)&opt2->srcrt, &p);
if (err)
goto out;
err = ipv6_renew_option(opt ? opt->dst1opt : NULL, newopt, newoptlen,
newtype != IPV6_DSTOPTS,
&opt2->dst1opt, &p);
if (err)
goto out;
opt2->opt_nflen = (opt2->hopopt ? ipv6_optlen(opt2->hopopt) : 0) +
(opt2->dst0opt ? ipv6_optlen(opt2->dst0opt) : 0) +
(opt2->srcrt ? ipv6_optlen(opt2->srcrt) : 0);
opt2->opt_flen = (opt2->dst1opt ? ipv6_optlen(opt2->dst1opt) : 0);
return opt2;
out:
sock_kfree_s(sk, opt2, opt2->tot_len);
return ERR_PTR(err);
}
struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space,
struct ipv6_txoptions *opt)
{
/*
* ignore the dest before srcrt unless srcrt is being included.
* --yoshfuji
*/
if (opt && opt->dst0opt && !opt->srcrt) {
if (opt_space != opt) {
memcpy(opt_space, opt, sizeof(*opt_space));
opt = opt_space;
}
opt->opt_nflen -= ipv6_optlen(opt->dst0opt);
opt->dst0opt = NULL;
}
return opt;
}
EXPORT_SYMBOL_GPL(ipv6_fixup_options);
/**
* fl6_update_dst - update flowi destination address with info given
* by srcrt option, if any.
*
* @fl6: flowi6 for which daddr is to be updated
* @opt: struct ipv6_txoptions in which to look for srcrt opt
* @orig: copy of original daddr address if modified
*
* Returns NULL if there are no txoptions or no srcrt; otherwise returns
* @orig, with the initial value of fl6->daddr stored in it.
*/
struct in6_addr *fl6_update_dst(struct flowi6 *fl6,
const struct ipv6_txoptions *opt,
struct in6_addr *orig)
{
if (!opt || !opt->srcrt)
return NULL;
*orig = fl6->daddr;
fl6->daddr = *((struct rt0_hdr *)opt->srcrt)->addr;
return orig;
}
EXPORT_SYMBOL_GPL(fl6_update_dst);
| ./CrossVul/dataset_final_sorted/CWE-416/c/good_5021_5 |
crossvul-cpp_data_bad_4007_0 | /*
* Budget Fair Queueing (BFQ) I/O scheduler.
*
* Based on ideas and code from CFQ:
* Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
*
* Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
* Paolo Valente <paolo.valente@unimore.it>
*
* Copyright (C) 2010 Paolo Valente <paolo.valente@unimore.it>
* Arianna Avanzini <avanzini@google.com>
*
* Copyright (C) 2017 Paolo Valente <paolo.valente@linaro.org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* BFQ is a proportional-share I/O scheduler, with some extra
* low-latency capabilities. BFQ also supports full hierarchical
* scheduling through cgroups. Next paragraphs provide an introduction
* on BFQ inner workings. Details on BFQ benefits, usage and
* limitations can be found in Documentation/block/bfq-iosched.txt.
*
* BFQ is a proportional-share storage-I/O scheduling algorithm based
* on the slice-by-slice service scheme of CFQ. But BFQ assigns
* budgets, measured in number of sectors, to processes instead of
* time slices. The device is not granted to the in-service process
* for a given time slice, but until it has exhausted its assigned
* budget. This change from the time to the service domain enables BFQ
* to distribute the device throughput among processes as desired,
* without any distortion due to throughput fluctuations, or to device
* internal queueing. BFQ uses an ad hoc internal scheduler, called
* B-WF2Q+, to schedule processes according to their budgets. More
* precisely, BFQ schedules queues associated with processes. Each
* process/queue is assigned a user-configurable weight, and B-WF2Q+
* guarantees that each queue receives a fraction of the throughput
* proportional to its weight. Thanks to the accurate policy of
* B-WF2Q+, BFQ can afford to assign high budgets to I/O-bound
* processes issuing sequential requests (to boost the throughput),
* and yet guarantee a low latency to interactive and soft real-time
* applications.
*
* In particular, to provide these low-latency guarantees, BFQ
* explicitly privileges the I/O of two classes of time-sensitive
* applications: interactive and soft real-time. This feature enables
* BFQ to provide applications in these classes with a very low
* latency. Finally, BFQ also features additional heuristics for
* preserving both a low latency and a high throughput on NCQ-capable,
* rotational or flash-based devices, and to get the job done quickly
* for applications consisting of many I/O-bound processes.
*
* BFQ is described in [1], where a reference to the initial, more
* theoretical paper on BFQ can also be found. In the latter paper the
* interested reader can find full details on the main algorithm, as
* well as formulas for the guarantees and formal proofs of all the
* properties.
* With respect to the version of BFQ presented in these papers, this
* implementation adds a few more heuristics, such as the one that
* guarantees a low latency to soft real-time applications, and a
* hierarchical extension based on H-WF2Q+.
*
* B-WF2Q+ is based on WF2Q+, which is described in [2], together with
* H-WF2Q+, while the augmented tree used here to implement B-WF2Q+
* with O(log N) complexity derives from the one introduced with EEVDF
* in [3].
*
* [1] P. Valente, A. Avanzini, "Evolution of the BFQ Storage I/O
* Scheduler", Proceedings of the First Workshop on Mobile System
* Technologies (MST-2015), May 2015.
* http://algogroup.unimore.it/people/paolo/disk_sched/mst-2015.pdf
*
* [2] Jon C.R. Bennett and H. Zhang, "Hierarchical Packet Fair Queueing
* Algorithms", IEEE/ACM Transactions on Networking, 5(5):675-689,
* Oct 1997.
*
* http://www.cs.cmu.edu/~hzhang/papers/TON-97-Oct.ps.gz
*
* [3] I. Stoica and H. Abdel-Wahab, "Earliest Eligible Virtual Deadline
* First: A Flexible and Accurate Mechanism for Proportional Share
* Resource Allocation", technical report.
*
* http://www.cs.berkeley.edu/~istoica/papers/eevdf-tr-95.pdf
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/ktime.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>
#include <linux/sbitmap.h>
#include <linux/delay.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"
#include "blk-mq-sched.h"
#include <linux/blktrace_api.h>
#include <linux/hrtimer.h>
#include <linux/blk-cgroup.h>
#define BFQ_IOPRIO_CLASSES 3
#define BFQ_CL_IDLE_TIMEOUT (HZ/5)
#define BFQ_MIN_WEIGHT 1
#define BFQ_MAX_WEIGHT 1000
#define BFQ_WEIGHT_CONVERSION_COEFF 10
#define BFQ_DEFAULT_QUEUE_IOPRIO 4
#define BFQ_DEFAULT_GRP_WEIGHT 10
#define BFQ_DEFAULT_GRP_IOPRIO 0
#define BFQ_DEFAULT_GRP_CLASS IOPRIO_CLASS_BE
struct bfq_entity;
/**
* struct bfq_service_tree - per ioprio_class service tree.
*
* Each service tree represents a B-WF2Q+ scheduler on its own. Each
* ioprio_class has its own independent scheduler, and so its own
* bfq_service_tree. All the fields are protected by the queue lock
* of the containing bfqd.
*/
struct bfq_service_tree {
/* tree for active entities (i.e., those backlogged) */
struct rb_root active;
/* tree for idle entities (i.e., not backlogged, with V <= F_i)*/
struct rb_root idle;
/* idle entity with minimum F_i */
struct bfq_entity *first_idle;
/* idle entity with maximum F_i */
struct bfq_entity *last_idle;
/* scheduler virtual time */
u64 vtime;
/* scheduler weight sum; active and idle entities contribute to it */
unsigned long wsum;
};
/**
* struct bfq_sched_data - multi-class scheduler.
*
* bfq_sched_data is the basic scheduler queue. It supports three
* ioprio_classes, and can be used either as a toplevel queue or as
* an intermediate queue on a hierarchical setup.
* @next_in_service points to the active entity of the sched_data
* service trees that will be scheduled next.
*
* The supported ioprio_classes are the same as in CFQ, in descending
* priority order, IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE.
* Requests from higher priority queues are served before all the
* requests from lower priority queues; among queues of the same
* class, service is distributed according to B-WF2Q+.
* All the fields are protected by the queue lock of the containing bfqd.
*/
struct bfq_sched_data {
/* entity in service */
struct bfq_entity *in_service_entity;
/* head-of-the-line entity in the scheduler */
struct bfq_entity *next_in_service;
/* array of service trees, one per ioprio_class */
struct bfq_service_tree service_tree[BFQ_IOPRIO_CLASSES];
};
/**
* struct bfq_entity - schedulable entity.
*
* A bfq_entity is used to represent a bfq_queue (leaf node in the upper
* level scheduler). Each entity belongs to the sched_data of the parent
* group hierarchy. Non-leaf entities have also their own sched_data,
* stored in @my_sched_data.
*
* Each entity stores independently its priority values; this would
* allow different weights on different devices, but this
* functionality is not exported to userspace for now. Priorities and
* weights are updated lazily, first storing the new values into the
* new_* fields, then setting the @prio_changed flag. As soon as
* there is a transition in the entity state that allows the priority
* update to take place the effective and the requested priority
* values are synchronized.
*
* The weight value is calculated from the ioprio to export the same
* interface as CFQ. When dealing with ``well-behaved'' queues (i.e.,
* queues that do not spend too much time to consume their budget
* and have true sequential behavior, and when there are no external
* factors breaking anticipation) the relative weights at each level
* of the hierarchy should be guaranteed. All the fields are
* protected by the queue lock of the containing bfqd.
*/
struct bfq_entity {
/* service_tree member */
struct rb_node rb_node;
/*
* flag, true if the entity is on a tree (either the active or
* the idle one of its service_tree).
*/
int on_st;
/* B-WF2Q+ start and finish timestamps [sectors/weight] */
u64 start, finish;
/* tree the entity is enqueued into; %NULL if not on a tree */
struct rb_root *tree;
/*
* minimum start time of the (active) subtree rooted at this
* entity; used for O(log N) lookups into active trees
*/
u64 min_start;
/* amount of service received during the last service slot */
int service;
/* budget, used also to calculate F_i: F_i = S_i + @budget / @weight */
int budget;
/* weight of the queue */
int weight;
/* next weight if a change is in progress */
int new_weight;
/* original weight, used to implement weight boosting */
int orig_weight;
/* parent entity, for hierarchical scheduling */
struct bfq_entity *parent;
/*
* For non-leaf nodes in the hierarchy, the associated
* scheduler queue, %NULL on leaf nodes.
*/
struct bfq_sched_data *my_sched_data;
/* the scheduler queue this entity belongs to */
struct bfq_sched_data *sched_data;
/* flag, set to request a weight, ioprio or ioprio_class change */
int prio_changed;
};
/**
* struct bfq_ttime - per process thinktime stats.
*/
struct bfq_ttime {
/* completion time of the last request */
u64 last_end_request;
/* total process thinktime */
u64 ttime_total;
/* number of thinktime samples */
unsigned long ttime_samples;
/* average process thinktime */
u64 ttime_mean;
};
/**
* struct bfq_queue - leaf schedulable entity.
*
* A bfq_queue is a leaf request queue; it can be associated with an
* io_context or more, if it is async.
*/
struct bfq_queue {
/* reference counter */
int ref;
/* parent bfq_data */
struct bfq_data *bfqd;
/* current ioprio and ioprio class */
unsigned short ioprio, ioprio_class;
/* next ioprio and ioprio class if a change is in progress */
unsigned short new_ioprio, new_ioprio_class;
/* sorted list of pending requests */
struct rb_root sort_list;
/* if fifo isn't expired, next request to serve */
struct request *next_rq;
/* number of sync and async requests queued */
int queued[2];
/* number of requests currently allocated */
int allocated;
/* number of pending metadata requests */
int meta_pending;
/* fifo list of requests in sort_list */
struct list_head fifo;
/* entity representing this queue in the scheduler */
struct bfq_entity entity;
/* maximum budget allowed from the feedback mechanism */
int max_budget;
/* budget expiration (in jiffies) */
unsigned long budget_timeout;
/* number of requests on the dispatch list or inside driver */
int dispatched;
/* status flags */
unsigned long flags;
/* node for active/idle bfqq list inside parent bfqd */
struct list_head bfqq_list;
/* associated @bfq_ttime struct */
struct bfq_ttime ttime;
/* bit vector: a 1 for each seeky requests in history */
u32 seek_history;
/* position of the last request enqueued */
sector_t last_request_pos;
/* Number of consecutive pairs of request completion and
* arrival, such that the queue becomes idle after the
* completion, but the next request arrives within an idle
* time slice; used only if the queue's IO_bound flag has been
* cleared.
*/
unsigned int requests_within_timer;
/* pid of the process owning the queue, used for logging purposes */
pid_t pid;
};
/**
* struct bfq_io_cq - per (request_queue, io_context) structure.
*/
struct bfq_io_cq {
/* associated io_cq structure */
struct io_cq icq; /* must be the first member */
/* array of two process queues, the sync and the async */
struct bfq_queue *bfqq[2];
/* per (request_queue, blkcg) ioprio */
int ioprio;
};
/**
* struct bfq_data - per-device data structure.
*
* All the fields are protected by @lock.
*/
struct bfq_data {
/* device request queue */
struct request_queue *queue;
/* dispatch queue */
struct list_head dispatch;
/* root @bfq_sched_data for the device */
struct bfq_sched_data sched_data;
/*
* Number of bfq_queues containing requests (including the
* queue in service, even if it is idling).
*/
int busy_queues;
/* number of queued requests */
int queued;
/* number of requests dispatched and waiting for completion */
int rq_in_driver;
/*
* Maximum number of requests in driver in the last
* @hw_tag_samples completed requests.
*/
int max_rq_in_driver;
/* number of samples used to calculate hw_tag */
int hw_tag_samples;
/* flag set to one if the driver is showing a queueing behavior */
int hw_tag;
/* number of budgets assigned */
int budgets_assigned;
/*
* Timer set when idling (waiting) for the next request from
* the queue in service.
*/
struct hrtimer idle_slice_timer;
/* bfq_queue in service */
struct bfq_queue *in_service_queue;
/* bfq_io_cq (bic) associated with the @in_service_queue */
struct bfq_io_cq *in_service_bic;
/* on-disk position of the last served request */
sector_t last_position;
/* beginning of the last budget */
ktime_t last_budget_start;
/* beginning of the last idle slice */
ktime_t last_idling_start;
/* number of samples used to calculate @peak_rate */
int peak_rate_samples;
/*
* Peak read/write rate, observed during the service of a
* budget [BFQ_RATE_SHIFT * sectors/usec]. The value is
* left-shifted by BFQ_RATE_SHIFT to increase precision in
* fixed-point calculations.
*/
u64 peak_rate;
/* maximum budget allotted to a bfq_queue before rescheduling */
int bfq_max_budget;
/* list of all the bfq_queues active on the device */
struct list_head active_list;
/* list of all the bfq_queues idle on the device */
struct list_head idle_list;
/*
* Timeout for async/sync requests; when it fires, requests
* are served in fifo order.
*/
u64 bfq_fifo_expire[2];
/* weight of backward seeks wrt forward ones */
unsigned int bfq_back_penalty;
/* maximum allowed backward seek */
unsigned int bfq_back_max;
/* maximum idling time */
u32 bfq_slice_idle;
/* last time CLASS_IDLE was served */
u64 bfq_class_idle_last_service;
/* user-configured max budget value (0 for auto-tuning) */
int bfq_user_max_budget;
/*
* Timeout for bfq_queues to consume their budget; used to
* prevent seeky queues from imposing long latencies to
* sequential or quasi-sequential ones (this also implies that
* seeky queues cannot receive guarantees in the service
* domain; after a timeout they are charged for the time they
* have been in service, to preserve fairness among them, but
* without service-domain guarantees).
*/
unsigned int bfq_timeout;
/*
* Number of consecutive requests that must be issued within
* the idle time slice to set again idling to a queue which
* was marked as non-I/O-bound (see the definition of the
* IO_bound flag for further details).
*/
unsigned int bfq_requests_within_timer;
/*
* Force device idling whenever needed to provide accurate
* service guarantees, without caring about throughput
* issues. CAVEAT: this may even increase latencies, in case
* of useless idling for processes that did stop doing I/O.
*/
bool strict_guarantees;
/* fallback dummy bfqq for extreme OOM conditions */
struct bfq_queue oom_bfqq;
spinlock_t lock;
/*
* bic associated with the task issuing current bio for
* merging. This and the next field are used as a support to
* be able to perform the bic lookup, needed by bio-merge
* functions, before the scheduler lock is taken, and thus
* avoid taking the request-queue lock while the scheduler
* lock is being held.
*/
struct bfq_io_cq *bio_bic;
/* bfqq associated with the task issuing current bio for merging */
struct bfq_queue *bio_bfqq;
};
enum bfqq_state_flags {
BFQQF_busy = 0, /* has requests or is in service */
BFQQF_wait_request, /* waiting for a request */
BFQQF_non_blocking_wait_rq, /*
* waiting for a request
* without idling the device
*/
BFQQF_fifo_expire, /* FIFO checked in this slice */
BFQQF_idle_window, /* slice idling enabled */
BFQQF_sync, /* synchronous queue */
BFQQF_budget_new, /* no completion with this budget */
BFQQF_IO_bound, /*
* bfqq has timed-out at least once
* having consumed at most 2/10 of
* its budget
*/
};
#define BFQ_BFQQ_FNS(name) \
static void bfq_mark_bfqq_##name(struct bfq_queue *bfqq) \
{ \
__set_bit(BFQQF_##name, &(bfqq)->flags); \
} \
static void bfq_clear_bfqq_##name(struct bfq_queue *bfqq) \
{ \
__clear_bit(BFQQF_##name, &(bfqq)->flags); \
} \
static int bfq_bfqq_##name(const struct bfq_queue *bfqq) \
{ \
return test_bit(BFQQF_##name, &(bfqq)->flags); \
}
BFQ_BFQQ_FNS(busy);
BFQ_BFQQ_FNS(wait_request);
BFQ_BFQQ_FNS(non_blocking_wait_rq);
BFQ_BFQQ_FNS(fifo_expire);
BFQ_BFQQ_FNS(idle_window);
BFQ_BFQQ_FNS(sync);
BFQ_BFQQ_FNS(budget_new);
BFQ_BFQQ_FNS(IO_bound);
#undef BFQ_BFQQ_FNS
/* Logging facilities. */
#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) \
blk_add_trace_msg((bfqd)->queue, "bfq%d " fmt, (bfqq)->pid, ##args)
#define bfq_log(bfqd, fmt, args...) \
blk_add_trace_msg((bfqd)->queue, "bfq " fmt, ##args)
/* Expiration reasons. */
enum bfqq_expiration {
BFQQE_TOO_IDLE = 0, /*
* queue has been idling for
* too long
*/
BFQQE_BUDGET_TIMEOUT, /* budget took too long to be used */
BFQQE_BUDGET_EXHAUSTED, /* budget consumed */
BFQQE_NO_MORE_REQUESTS, /* the queue has no more requests */
BFQQE_PREEMPTED /* preemption in progress */
};
static struct bfq_queue *bfq_entity_to_bfqq(struct bfq_entity *entity);
static struct bfq_service_tree *
bfq_entity_service_tree(struct bfq_entity *entity)
{
struct bfq_sched_data *sched_data = entity->sched_data;
struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
unsigned int idx = bfqq ? bfqq->ioprio_class - 1 :
BFQ_DEFAULT_GRP_CLASS - 1;
return sched_data->service_tree + idx;
}
static struct bfq_queue *bic_to_bfqq(struct bfq_io_cq *bic, bool is_sync)
{
return bic->bfqq[is_sync];
}
static void bic_set_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq,
bool is_sync)
{
bic->bfqq[is_sync] = bfqq;
}
static struct bfq_data *bic_to_bfqd(struct bfq_io_cq *bic)
{
return bic->icq.q->elevator->elevator_data;
}
static void bfq_check_ioprio_change(struct bfq_io_cq *bic, struct bio *bio);
static void bfq_put_queue(struct bfq_queue *bfqq);
static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
struct bio *bio, bool is_sync,
struct bfq_io_cq *bic);
static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq);
/*
* Array of async queues for all the processes, one queue
* per ioprio value per ioprio_class.
*/
struct bfq_queue *async_bfqq[2][IOPRIO_BE_NR];
/* Async queue for the idle class (ioprio is ignored) */
struct bfq_queue *async_idle_bfqq;
/* Expiration time of sync (0) and async (1) requests, in ns. */
static const u64 bfq_fifo_expire[2] = { NSEC_PER_SEC / 4, NSEC_PER_SEC / 8 };
/* Maximum backwards seek (magic number lifted from CFQ), in KiB. */
static const int bfq_back_max = 16 * 1024;
/* Penalty of a backwards seek, in number of sectors. */
static const int bfq_back_penalty = 2;
/* Idling period duration, in ns. */
static u64 bfq_slice_idle = NSEC_PER_SEC / 125;
/* Minimum number of assigned budgets for which stats are safe to compute. */
static const int bfq_stats_min_budgets = 194;
/* Default maximum budget values, in sectors and number of requests. */
static const int bfq_default_max_budget = 16 * 1024;
/* Default timeout values, in jiffies, approximating CFQ defaults. */
static const int bfq_timeout = HZ / 8;
static struct kmem_cache *bfq_pool;
/* Below this threshold (in ms), we consider thinktime immediate. */
#define BFQ_MIN_TT (2 * NSEC_PER_MSEC)
/* hw_tag detection: parallel requests threshold and min samples needed. */
#define BFQ_HW_QUEUE_THRESHOLD 4
#define BFQ_HW_QUEUE_SAMPLES 32
#define BFQQ_SEEK_THR (sector_t)(8 * 100)
#define BFQQ_SECT_THR_NONROT (sector_t)(2 * 32)
#define BFQQ_CLOSE_THR (sector_t)(8 * 1024)
#define BFQQ_SEEKY(bfqq) (hweight32(bfqq->seek_history) > 32/8)
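/*
 * Illustrative reading of BFQQ_SEEKY() (not from the original
 * source): seek_history works as a 32-bit shift register with one
 * bit per sampled request, so a queue is considered seeky as soon
 * as more than 32/8 == 4 of its last 32 samples were seeky.
 */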
/* Budget feedback step. */
#define BFQ_BUDGET_STEP 128
/* Min samples used for peak rate estimation (for autotuning). */
#define BFQ_PEAK_RATE_SAMPLES 32
/* Shift used for peak rate fixed precision calculations. */
#define BFQ_RATE_SHIFT 16
#define BFQ_SERVICE_TREE_INIT ((struct bfq_service_tree) \
{ RB_ROOT, RB_ROOT, NULL, NULL, 0, 0 })
#define RQ_BIC(rq) ((struct bfq_io_cq *) (rq)->elv.priv[0])
#define RQ_BFQQ(rq) ((rq)->elv.priv[1])
/**
* icq_to_bic - convert iocontext queue structure to bfq_io_cq.
* @icq: the iocontext queue.
*/
static struct bfq_io_cq *icq_to_bic(struct io_cq *icq)
{
/* bic->icq is the first member, %NULL will convert to %NULL */
return container_of(icq, struct bfq_io_cq, icq);
}
/**
* bfq_bic_lookup - search into @ioc a bic associated to @bfqd.
* @bfqd: the lookup key.
* @ioc: the io_context of the process doing I/O.
* @q: the request queue.
*/
static struct bfq_io_cq *bfq_bic_lookup(struct bfq_data *bfqd,
struct io_context *ioc,
struct request_queue *q)
{
if (ioc) {
unsigned long flags;
struct bfq_io_cq *icq;
spin_lock_irqsave(q->queue_lock, flags);
icq = icq_to_bic(ioc_lookup_icq(ioc, q));
spin_unlock_irqrestore(q->queue_lock, flags);
return icq;
}
return NULL;
}
/*
* The next two macros are just fake loops for the moment. They will
* become true loops in the cgroups-enabled variant of the code. Such
* a variant, in its turn, will be introduced by the next commit.
*/
#define for_each_entity(entity) \
for (; entity ; entity = NULL)
#define for_each_entity_safe(entity, parent) \
for (parent = NULL; entity ; entity = parent)
static int bfq_update_next_in_service(struct bfq_sched_data *sd)
{
return 0;
}
static void bfq_check_next_in_service(struct bfq_sched_data *sd,
struct bfq_entity *entity)
{
}
static void bfq_update_budget(struct bfq_entity *next_in_service)
{
}
/*
* Shift for timestamp calculations. This actually limits the maximum
* service allowed in one timestamp delta (small shift values increase it),
* the maximum total weight that can be used for the queues in the system
* (big shift values increase it), and the period of virtual time
* wraparounds.
*/
#define WFQ_SERVICE_SHIFT 22
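/*
 * Worked numbers for the shift above (illustrative): bfq_delta()
 * maps a service of S sectors at weight w to a virtual-time advance
 * of S * 2^22 / w, so 64-bit timestamps wrap only after about 2^42
 * sectors (~2 PiB) of weight-1 service, and bfq_gt() keeps the
 * comparison correct across a wrap via its signed subtraction.
 */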
/**
* bfq_gt - compare two timestamps.
* @a: first ts.
* @b: second ts.
*
* Return @a > @b, dealing with wrapping correctly.
*/
static int bfq_gt(u64 a, u64 b)
{
return (s64)(a - b) > 0;
}
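/*
 * Illustrative wraparound case (not from the original source): with
 * a == 1 and b == ULLONG_MAX, (s64)(a - b) == 2 > 0, so bfq_gt()
 * still reports a > b after a virtual-time wrap even though a is
 * numerically smaller; this holds as long as the two timestamps are
 * less than 2^63 apart.
 */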
static struct bfq_queue *bfq_entity_to_bfqq(struct bfq_entity *entity)
{
struct bfq_queue *bfqq = NULL;
if (!entity->my_sched_data)
bfqq = container_of(entity, struct bfq_queue, entity);
return bfqq;
}
/**
* bfq_delta - map service into the virtual time domain.
* @service: amount of service.
* @weight: scale factor (weight of an entity or weight sum).
*/
static u64 bfq_delta(unsigned long service, unsigned long weight)
{
u64 d = (u64)service << WFQ_SERVICE_SHIFT;
do_div(d, weight);
return d;
}
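/*
 * Illustrative numbers for bfq_delta() (not from the original
 * source): serving 8 sectors at weight 100 advances virtual time by
 * 8 * 2^22 / 100 ~= 335544, while the same service at weight 200
 * advances it by half as much; entities with higher weights thus
 * see their timestamps grow more slowly and are selected more often.
 */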
/**
* bfq_calc_finish - assign the finish time to an entity.
* @entity: the entity to act upon.
* @service: the service to be charged to the entity.
*/
static void bfq_calc_finish(struct bfq_entity *entity, unsigned long service)
{
struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
entity->finish = entity->start +
bfq_delta(service, entity->weight);
if (bfqq) {
bfq_log_bfqq(bfqq->bfqd, bfqq,
"calc_finish: serv %lu, w %d",
service, entity->weight);
bfq_log_bfqq(bfqq->bfqd, bfqq,
"calc_finish: start %llu, finish %llu, delta %llu",
entity->start, entity->finish,
bfq_delta(service, entity->weight));
}
}
/**
* bfq_entity_of - get an entity from a node.
* @node: the node field of the entity.
*
* Convert a node pointer to the corresponding entity. This is used only
* to simplify the logic of some functions and not as the generic
* conversion mechanism because, e.g., in the tree walking functions,
* the check for a %NULL value would be redundant.
*/
static struct bfq_entity *bfq_entity_of(struct rb_node *node)
{
struct bfq_entity *entity = NULL;
if (node)
entity = rb_entry(node, struct bfq_entity, rb_node);
return entity;
}
/**
* bfq_extract - remove an entity from a tree.
* @root: the tree root.
* @entity: the entity to remove.
*/
static void bfq_extract(struct rb_root *root, struct bfq_entity *entity)
{
entity->tree = NULL;
rb_erase(&entity->rb_node, root);
}
/**
* bfq_idle_extract - extract an entity from the idle tree.
* @st: the service tree of the owning @entity.
* @entity: the entity being removed.
*/
static void bfq_idle_extract(struct bfq_service_tree *st,
struct bfq_entity *entity)
{
struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
struct rb_node *next;
if (entity == st->first_idle) {
next = rb_next(&entity->rb_node);
st->first_idle = bfq_entity_of(next);
}
if (entity == st->last_idle) {
next = rb_prev(&entity->rb_node);
st->last_idle = bfq_entity_of(next);
}
bfq_extract(&st->idle, entity);
if (bfqq)
list_del(&bfqq->bfqq_list);
}
/**
* bfq_insert - generic tree insertion.
* @root: tree root.
* @entity: entity to insert.
*
* This is used for the idle and the active tree, since they are both
* ordered by finish time.
*/
static void bfq_insert(struct rb_root *root, struct bfq_entity *entity)
{
struct bfq_entity *entry;
struct rb_node **node = &root->rb_node;
struct rb_node *parent = NULL;
while (*node) {
parent = *node;
entry = rb_entry(parent, struct bfq_entity, rb_node);
if (bfq_gt(entry->finish, entity->finish))
node = &parent->rb_left;
else
node = &parent->rb_right;
}
rb_link_node(&entity->rb_node, parent, node);
rb_insert_color(&entity->rb_node, root);
entity->tree = root;
}
/**
* bfq_update_min - update the min_start field of an entity.
* @entity: the entity to update.
* @node: one of its children.
*
* This function is called when @entity may store an invalid value for
* min_start due to updates to the active tree. The function assumes
* that the subtree rooted at @node (which may be its left or its right
* child) has a valid min_start value.
*/
static void bfq_update_min(struct bfq_entity *entity, struct rb_node *node)
{
struct bfq_entity *child;
if (node) {
child = rb_entry(node, struct bfq_entity, rb_node);
if (bfq_gt(entity->min_start, child->min_start))
entity->min_start = child->min_start;
}
}
/**
* bfq_update_active_node - recalculate min_start.
* @node: the node to update.
*
* @node may have changed position or one of its children may have moved,
* this function updates its min_start value. The left and right subtrees
* are assumed to hold a correct min_start value.
*/
static void bfq_update_active_node(struct rb_node *node)
{
struct bfq_entity *entity = rb_entry(node, struct bfq_entity, rb_node);
entity->min_start = entity->start;
bfq_update_min(entity, node->rb_right);
bfq_update_min(entity, node->rb_left);
}
/**
* bfq_update_active_tree - update min_start for the whole active tree.
* @node: the starting node.
*
* @node must be the deepest modified node after an update. This function
* updates its min_start using the values held by its children, assuming
* that they did not change, and then updates all the nodes that may have
* changed in the path to the root. The only nodes that may have changed
* are the ones in the path or their siblings.
*/
static void bfq_update_active_tree(struct rb_node *node)
{
struct rb_node *parent;
up:
bfq_update_active_node(node);
parent = rb_parent(node);
if (!parent)
return;
if (node == parent->rb_left && parent->rb_right)
bfq_update_active_node(parent->rb_right);
else if (parent->rb_left)
bfq_update_active_node(parent->rb_left);
node = parent;
goto up;
}
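/*
 * Illustrative sketch of the propagation above (not from the
 * original source): in an active tree such as
 *
 *          (S=20)          with min_start 5
 *          /    \
 *      (S=5)    (S=25)     with min_start 5 and 25
 *
 * the root caches min_start == min(20, 5, 25) == 5, so the lookup in
 * bfq_first_active_entity() can decide whether a whole subtree may
 * contain an eligible entity by one comparison against st->vtime.
 */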
/**
* bfq_active_insert - insert an entity in the active tree of its
* group/device.
* @st: the service tree of the entity.
* @entity: the entity being inserted.
*
* The active tree is ordered by finish time, but an extra key is kept
* for each node, containing the minimum value for the start times of
* its children (and the node itself), so it's possible to search for
* the eligible node with the lowest finish time in logarithmic time.
*/
static void bfq_active_insert(struct bfq_service_tree *st,
struct bfq_entity *entity)
{
struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
struct rb_node *node = &entity->rb_node;
bfq_insert(&st->active, entity);
if (node->rb_left)
node = node->rb_left;
else if (node->rb_right)
node = node->rb_right;
bfq_update_active_tree(node);
if (bfqq)
list_add(&bfqq->bfqq_list, &bfqq->bfqd->active_list);
}
/**
* bfq_ioprio_to_weight - calc a weight from an ioprio.
* @ioprio: the ioprio value to convert.
*/
static unsigned short bfq_ioprio_to_weight(int ioprio)
{
return (IOPRIO_BE_NR - ioprio) * BFQ_WEIGHT_CONVERSION_COEFF;
}
/**
* bfq_weight_to_ioprio - calc an ioprio from a weight.
* @weight: the weight value to convert.
*
* To preserve as much as possible the old only-ioprio user interface,
* 0 is used as an escape ioprio value for weights (numerically) equal
* to or larger than IOPRIO_BE_NR * BFQ_WEIGHT_CONVERSION_COEFF.
*/
static unsigned short bfq_weight_to_ioprio(int weight)
{
return max_t(int, 0,
IOPRIO_BE_NR * BFQ_WEIGHT_CONVERSION_COEFF - weight);
}
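/*
 * Illustrative numbers for the two helpers above: with
 * IOPRIO_BE_NR == 8 and BFQ_WEIGHT_CONVERSION_COEFF == 10, ioprio 0
 * (the highest priority) maps to weight 80 and ioprio 7 to weight
 * 10, while bfq_weight_to_ioprio() clamps every weight of 80 or
 * more to the escape value 0 described in its comment.
 */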
static void bfq_get_entity(struct bfq_entity *entity)
{
struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
if (bfqq) {
bfqq->ref++;
bfq_log_bfqq(bfqq->bfqd, bfqq, "get_entity: %p %d",
bfqq, bfqq->ref);
}
}
/**
* bfq_find_deepest - find the deepest node that an extraction can modify.
* @node: the node being removed.
*
* Do the first step of an extraction in an rb tree, looking for the
* node that will replace @node, and returning the deepest node that
* the following modifications to the tree can touch. If @node is the
* last node in the tree return %NULL.
*/
static struct rb_node *bfq_find_deepest(struct rb_node *node)
{
struct rb_node *deepest;
if (!node->rb_right && !node->rb_left)
deepest = rb_parent(node);
else if (!node->rb_right)
deepest = node->rb_left;
else if (!node->rb_left)
deepest = node->rb_right;
else {
deepest = rb_next(node);
if (deepest->rb_right)
deepest = deepest->rb_right;
else if (rb_parent(deepest) != node)
deepest = rb_parent(deepest);
}
return deepest;
}
/**
* bfq_active_extract - remove an entity from the active tree.
* @st: the service_tree containing the tree.
* @entity: the entity being removed.
*/
static void bfq_active_extract(struct bfq_service_tree *st,
struct bfq_entity *entity)
{
struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
struct rb_node *node;
node = bfq_find_deepest(&entity->rb_node);
bfq_extract(&st->active, entity);
if (node)
bfq_update_active_tree(node);
if (bfqq)
list_del(&bfqq->bfqq_list);
}
/**
* bfq_idle_insert - insert an entity into the idle tree.
* @st: the service tree containing the tree.
* @entity: the entity to insert.
*/
static void bfq_idle_insert(struct bfq_service_tree *st,
struct bfq_entity *entity)
{
struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
struct bfq_entity *first_idle = st->first_idle;
struct bfq_entity *last_idle = st->last_idle;
if (!first_idle || bfq_gt(first_idle->finish, entity->finish))
st->first_idle = entity;
if (!last_idle || bfq_gt(entity->finish, last_idle->finish))
st->last_idle = entity;
bfq_insert(&st->idle, entity);
if (bfqq)
list_add(&bfqq->bfqq_list, &bfqq->bfqd->idle_list);
}
/**
* bfq_forget_entity - do not consider entity any longer for scheduling
* @st: the service tree.
* @entity: the entity being removed.
* @is_in_service: true if entity is currently the in-service entity.
*
* Forget everything about @entity. In addition, if entity represents
* a queue, and the latter is not in service, then release the service
* reference to the queue (the one taken through bfq_get_entity). In
* fact, in this case, there is really no more service reference to
* the queue, as the latter is also outside any service tree. If,
* instead, the queue is in service, then __bfq_bfqd_reset_in_service
* will take care of putting the reference when the queue finally
* stops being served.
*/
static void bfq_forget_entity(struct bfq_service_tree *st,
struct bfq_entity *entity,
bool is_in_service)
{
struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
entity->on_st = 0;
st->wsum -= entity->weight;
if (bfqq && !is_in_service)
bfq_put_queue(bfqq);
}
/**
* bfq_put_idle_entity - release the idle tree ref of an entity.
* @st: service tree for the entity.
* @entity: the entity being released.
*/
static void bfq_put_idle_entity(struct bfq_service_tree *st,
struct bfq_entity *entity)
{
bfq_idle_extract(st, entity);
bfq_forget_entity(st, entity,
entity == entity->sched_data->in_service_entity);
}
/**
* bfq_forget_idle - update the idle tree if necessary.
* @st: the service tree to act upon.
*
* To preserve the global O(log N) complexity we only remove one entry here;
* as the idle tree will not grow indefinitely this can be done safely.
*/
static void bfq_forget_idle(struct bfq_service_tree *st)
{
struct bfq_entity *first_idle = st->first_idle;
struct bfq_entity *last_idle = st->last_idle;
if (RB_EMPTY_ROOT(&st->active) && last_idle &&
!bfq_gt(last_idle->finish, st->vtime)) {
/*
* Forget the whole idle tree, increasing the vtime past
* the last finish time of idle entities.
*/
st->vtime = last_idle->finish;
}
if (first_idle && !bfq_gt(first_idle->finish, st->vtime))
bfq_put_idle_entity(st, first_idle);
}
static struct bfq_service_tree *
__bfq_entity_update_weight_prio(struct bfq_service_tree *old_st,
struct bfq_entity *entity)
{
struct bfq_service_tree *new_st = old_st;
if (entity->prio_changed) {
struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
unsigned short prev_weight, new_weight;
struct bfq_data *bfqd = NULL;
if (bfqq)
bfqd = bfqq->bfqd;
old_st->wsum -= entity->weight;
if (entity->new_weight != entity->orig_weight) {
if (entity->new_weight < BFQ_MIN_WEIGHT ||
entity->new_weight > BFQ_MAX_WEIGHT) {
pr_crit("update_weight_prio: new_weight %d\n",
entity->new_weight);
if (entity->new_weight < BFQ_MIN_WEIGHT)
entity->new_weight = BFQ_MIN_WEIGHT;
else
entity->new_weight = BFQ_MAX_WEIGHT;
}
entity->orig_weight = entity->new_weight;
if (bfqq)
bfqq->ioprio =
bfq_weight_to_ioprio(entity->orig_weight);
}
if (bfqq)
bfqq->ioprio_class = bfqq->new_ioprio_class;
entity->prio_changed = 0;
/*
* NOTE: here we may be changing the weight too early,
* this will cause unfairness. The correct approach
* would have required additional complexity to defer
* weight changes to the proper time instants (i.e.,
* when entity->finish <= old_st->vtime).
*/
new_st = bfq_entity_service_tree(entity);
prev_weight = entity->weight;
new_weight = entity->orig_weight;
entity->weight = new_weight;
new_st->wsum += entity->weight;
if (new_st != old_st)
entity->start = new_st->vtime;
}
return new_st;
}
/**
* bfq_bfqq_served - update the scheduler status after selection for
* service.
* @bfqq: the queue being served.
* @served: bytes to transfer.
*
* NOTE: this can be optimized, as the timestamps of upper level entities
* are synchronized every time a new bfqq is selected for service. For now,
* we keep it this way to better check consistency.
*/
static void bfq_bfqq_served(struct bfq_queue *bfqq, int served)
{
struct bfq_entity *entity = &bfqq->entity;
struct bfq_service_tree *st;
for_each_entity(entity) {
st = bfq_entity_service_tree(entity);
entity->service += served;
st->vtime += bfq_delta(served, st->wsum);
bfq_forget_idle(st);
}
bfq_log_bfqq(bfqq->bfqd, bfqq, "bfqq_served %d secs", served);
}
/**
* bfq_bfqq_charge_full_budget - set the service to the entity budget.
* @bfqq: the queue that needs a service update.
*
* When it's not possible to be fair in the service domain, because
* a queue is not consuming its budget fast enough (the meaning of
* fast depends on the timeout parameter), we charge it a full
* budget. In this way we should obtain a sort of time-domain
* fairness among all the seeky/slow queues.
*/
static void bfq_bfqq_charge_full_budget(struct bfq_queue *bfqq)
{
struct bfq_entity *entity = &bfqq->entity;
bfq_log_bfqq(bfqq->bfqd, bfqq, "charge_full_budget");
bfq_bfqq_served(bfqq, entity->budget - entity->service);
}
/**
* __bfq_activate_entity - activate an entity.
* @entity: the entity being activated.
* @non_blocking_wait_rq: true if this entity was waiting for a request
*
* Called whenever an entity is activated, i.e., it is not active and one
* of its children receives a new request, or has to be reactivated due to
* budget exhaustion. It uses the current budget of the entity (and the
* service received if @entity is active) of the queue to calculate its
* timestamps.
*/
static void __bfq_activate_entity(struct bfq_entity *entity,
bool non_blocking_wait_rq)
{
struct bfq_sched_data *sd = entity->sched_data;
struct bfq_service_tree *st = bfq_entity_service_tree(entity);
bool backshifted = false;
if (entity == sd->in_service_entity) {
/*
* If we are requeueing the current entity we have
* to take care of not charging to it service it has
* not received.
*/
bfq_calc_finish(entity, entity->service);
entity->start = entity->finish;
sd->in_service_entity = NULL;
} else if (entity->tree == &st->active) {
/*
* Requeueing an entity due to a change of some
* next_in_service entity below it. We reuse the
* old start time.
*/
bfq_active_extract(st, entity);
} else {
unsigned long long min_vstart;
/* See comments on bfq_bfqq_update_budg_for_activation */
if (non_blocking_wait_rq && bfq_gt(st->vtime, entity->finish)) {
backshifted = true;
min_vstart = entity->finish;
} else
min_vstart = st->vtime;
if (entity->tree == &st->idle) {
/*
* Must be on the idle tree, bfq_idle_extract() will
* check for that.
*/
bfq_idle_extract(st, entity);
entity->start = bfq_gt(min_vstart, entity->finish) ?
min_vstart : entity->finish;
} else {
/*
* The finish time of the entity may be invalid, and
* it is in the past for sure, otherwise the queue
* would have been on the idle tree.
*/
entity->start = min_vstart;
st->wsum += entity->weight;
/*
* entity is about to be inserted into a service tree,
* and then set in service: get a reference to make
* sure entity does not disappear until it is no
* longer in service or scheduled for service.
*/
bfq_get_entity(entity);
entity->on_st = 1;
}
}
st = __bfq_entity_update_weight_prio(st, entity);
bfq_calc_finish(entity, entity->budget);
/*
* If some queues enjoy backshifting for a while, then their
* (virtual) finish timestamps may happen to become lower and
* lower than the system virtual time. In particular, if
* these queues often happen to be idle for short time
* periods, and during such time periods other queues with
* higher timestamps happen to be busy, then the backshifted
* timestamps of the former queues can become much lower than
* the system virtual time. In fact, to serve the queues with
* higher timestamps while the ones with lower timestamps are
* idle, the system virtual time may be pushed-up to much
* higher values than the finish timestamps of the idle
* queues. As a consequence, the finish timestamps of all new
* or newly activated queues may end up being much larger than
* those of lucky queues with backshifted timestamps. The
* latter queues may then monopolize the device for a lot of
* time. This would simply break service guarantees.
*
* To reduce this problem, push up a little bit the
* backshifted timestamps of the queue associated with this
* entity (only a queue can happen to have the backshifted
* flag set): just enough to let the finish timestamp of the
* queue be equal to the current value of the system virtual
* time. This may introduce a little unfairness among queues
* with backshifted timestamps, but it does not break
* worst-case fairness guarantees.
*/
if (backshifted && bfq_gt(st->vtime, entity->finish)) {
unsigned long delta = st->vtime - entity->finish;
entity->start += delta;
entity->finish += delta;
}
bfq_active_insert(st, entity);
}
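/*
 * Numeric sketch of the push-up above (illustrative): if st->vtime
 * has grown to 1000 while a backshifted queue still has start == 100
 * and finish == 300, then delta == 700 and the timestamps become
 * start == 800 and finish == 1000; the queue keeps its service
 * interval but can no longer monopolize the device with an
 * arbitrarily old finish time.
 */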
/**
* bfq_activate_entity - activate an entity and its ancestors if necessary.
* @entity: the entity to activate.
* @non_blocking_wait_rq: true if this entity was waiting for a request
*
* Activate @entity and all the entities on the path from it to the root.
*/
static void bfq_activate_entity(struct bfq_entity *entity,
bool non_blocking_wait_rq)
{
struct bfq_sched_data *sd;
for_each_entity(entity) {
__bfq_activate_entity(entity, non_blocking_wait_rq);
sd = entity->sched_data;
if (!bfq_update_next_in_service(sd))
/*
* No need to propagate the activation to the
* upper entities, as they will be updated when
* the in-service entity is rescheduled.
*/
break;
}
}
/**
* __bfq_deactivate_entity - deactivate an entity from its service tree.
* @entity: the entity to deactivate.
* @requeue: if false, the entity will not be put into the idle tree.
*
* Deactivate an entity, independently from its previous state. If the
* entity was not on a service tree just return, otherwise if it is on
* any scheduler tree, extract it from that tree, and if necessary
* and if the caller specified @requeue, put it on the idle tree.
*
* Return %1 if the caller should update the entity hierarchy, i.e.,
* if the entity was in service or if it was the next_in_service for
* its sched_data; return %0 otherwise.
*/
static int __bfq_deactivate_entity(struct bfq_entity *entity, int requeue)
{
struct bfq_sched_data *sd = entity->sched_data;
struct bfq_service_tree *st = bfq_entity_service_tree(entity);
int is_in_service = entity == sd->in_service_entity;
int ret = 0;
if (!entity->on_st)
return 0;
if (is_in_service) {
bfq_calc_finish(entity, entity->service);
sd->in_service_entity = NULL;
} else if (entity->tree == &st->active)
bfq_active_extract(st, entity);
else if (entity->tree == &st->idle)
bfq_idle_extract(st, entity);
if (is_in_service || sd->next_in_service == entity)
ret = bfq_update_next_in_service(sd);
if (!requeue || !bfq_gt(entity->finish, st->vtime))
bfq_forget_entity(st, entity, is_in_service);
else
bfq_idle_insert(st, entity);
return ret;
}
/**
* bfq_deactivate_entity - deactivate an entity.
* @entity: the entity to deactivate.
* @requeue: true if the entity can be put on the idle tree
*/
static void bfq_deactivate_entity(struct bfq_entity *entity, int requeue)
{
struct bfq_sched_data *sd;
struct bfq_entity *parent = NULL;
for_each_entity_safe(entity, parent) {
sd = entity->sched_data;
if (!__bfq_deactivate_entity(entity, requeue))
/*
* The parent entity is still backlogged, and
* we don't need to update it as it is still
* in service.
*/
break;
if (sd->next_in_service)
/*
* The parent entity is still backlogged and
* the budgets on the path towards the root
* need to be updated.
*/
goto update;
/*
* If we get here, then the parent is no more backlogged and
* we want to propagate the deactivation upwards.
*/
requeue = 1;
}
return;
update:
entity = parent;
for_each_entity(entity) {
__bfq_activate_entity(entity, false);
sd = entity->sched_data;
if (!bfq_update_next_in_service(sd))
break;
}
}
/**
* bfq_update_vtime - update vtime if necessary.
* @st: the service tree to act upon.
*
* If necessary update the service tree vtime to have at least one
* eligible entity, skipping to its start time. Assumes that the
* active tree of the device is not empty.
*
* NOTE: this hierarchical implementation updates vtimes quite often,
* we may end up with reactivated processes getting timestamps after a
* vtime skip done because we needed a ->first_active entity on some
* intermediate node.
*/
static void bfq_update_vtime(struct bfq_service_tree *st)
{
struct bfq_entity *entry;
struct rb_node *node = st->active.rb_node;
entry = rb_entry(node, struct bfq_entity, rb_node);
if (bfq_gt(entry->min_start, st->vtime)) {
st->vtime = entry->min_start;
bfq_forget_idle(st);
}
}
/**
* bfq_first_active_entity - find the eligible entity with
* the smallest finish time
* @st: the service tree to select from.
*
* This function searches for the first schedulable entity, starting from
* the root of the tree and going left whenever the left subtree contains
* at least one eligible (start <= vtime) entity. The path on
* the right is followed only if a) the left subtree contains no eligible
* entities and b) no eligible entity has been found yet.
*/
static struct bfq_entity *bfq_first_active_entity(struct bfq_service_tree *st)
{
struct bfq_entity *entry, *first = NULL;
struct rb_node *node = st->active.rb_node;
while (node) {
entry = rb_entry(node, struct bfq_entity, rb_node);
left:
if (!bfq_gt(entry->start, st->vtime))
first = entry;
if (node->rb_left) {
entry = rb_entry(node->rb_left,
struct bfq_entity, rb_node);
if (!bfq_gt(entry->min_start, st->vtime)) {
node = node->rb_left;
goto left;
}
}
if (first)
break;
node = node->rb_right;
}
return first;
}
/**
* __bfq_lookup_next_entity - return the first eligible entity in @st.
* @st: the service tree.
*
* Update the virtual time in @st and return the first eligible entity
* it contains.
*/
static struct bfq_entity *__bfq_lookup_next_entity(struct bfq_service_tree *st,
bool force)
{
struct bfq_entity *entity, *new_next_in_service = NULL;
if (RB_EMPTY_ROOT(&st->active))
return NULL;
bfq_update_vtime(st);
entity = bfq_first_active_entity(st);
/*
* If the chosen entity does not match with the sched_data's
* next_in_service and we are forcedly serving the IDLE priority
* class tree, bubble up budget update.
*/
if (unlikely(force && entity != entity->sched_data->next_in_service)) {
new_next_in_service = entity;
for_each_entity(new_next_in_service)
bfq_update_budget(new_next_in_service);
}
return entity;
}
/**
* bfq_lookup_next_entity - return the first eligible entity in @sd.
* @sd: the sched_data.
* @extract: if true the returned entity will be also extracted from @sd.
*
* NOTE: since we cache the next_in_service entity at each level of the
* hierarchy, the complexity of the lookup can be decreased with
* absolutely no effort by just returning the cached next_in_service value;
* we prefer to do full lookups to test the consistency of the data
* structures.
*/
static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd,
int extract,
struct bfq_data *bfqd)
{
struct bfq_service_tree *st = sd->service_tree;
struct bfq_entity *entity;
int i = 0;
/*
* Choose from idle class, if needed to guarantee a minimum
* bandwidth to this class. This should also mitigate
* priority-inversion problems in case a low priority task is
* holding file system resources.
*/
if (bfqd &&
jiffies - bfqd->bfq_class_idle_last_service >
BFQ_CL_IDLE_TIMEOUT) {
entity = __bfq_lookup_next_entity(st + BFQ_IOPRIO_CLASSES - 1,
true);
if (entity) {
i = BFQ_IOPRIO_CLASSES - 1;
bfqd->bfq_class_idle_last_service = jiffies;
sd->next_in_service = entity;
}
}
for (; i < BFQ_IOPRIO_CLASSES; i++) {
entity = __bfq_lookup_next_entity(st + i, false);
if (entity) {
if (extract) {
bfq_check_next_in_service(sd, entity);
bfq_active_extract(st + i, entity);
sd->in_service_entity = entity;
sd->next_in_service = NULL;
}
break;
}
}
return entity;
}
static bool next_queue_may_preempt(struct bfq_data *bfqd)
{
struct bfq_sched_data *sd = &bfqd->sched_data;
return sd->next_in_service != sd->in_service_entity;
}
/*
* Get next queue for service.
*/
static struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd)
{
struct bfq_entity *entity = NULL;
struct bfq_sched_data *sd;
struct bfq_queue *bfqq;
if (bfqd->busy_queues == 0)
return NULL;
sd = &bfqd->sched_data;
for (; sd ; sd = entity->my_sched_data) {
entity = bfq_lookup_next_entity(sd, 1, bfqd);
entity->service = 0;
}
bfqq = bfq_entity_to_bfqq(entity);
return bfqq;
}
static void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd)
{
struct bfq_queue *in_serv_bfqq = bfqd->in_service_queue;
struct bfq_entity *in_serv_entity = &in_serv_bfqq->entity;
if (bfqd->in_service_bic) {
put_io_context(bfqd->in_service_bic->icq.ioc);
bfqd->in_service_bic = NULL;
}
bfq_clear_bfqq_wait_request(in_serv_bfqq);
hrtimer_try_to_cancel(&bfqd->idle_slice_timer);
bfqd->in_service_queue = NULL;
/*
* in_serv_entity is no longer in service, so, if it is in no
* service tree either, then release the service reference to
* the queue it represents (taken with bfq_get_entity).
*/
if (!in_serv_entity->on_st)
bfq_put_queue(in_serv_bfqq);
}
static void bfq_deactivate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
int requeue)
{
struct bfq_entity *entity = &bfqq->entity;
bfq_deactivate_entity(entity, requeue);
}
static void bfq_activate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
{
struct bfq_entity *entity = &bfqq->entity;
bfq_activate_entity(entity, bfq_bfqq_non_blocking_wait_rq(bfqq));
bfq_clear_bfqq_non_blocking_wait_rq(bfqq);
}
/*
* Called when the bfqq no longer has requests pending, remove it from
* the service tree.
*/
static void bfq_del_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq,
int requeue)
{
bfq_log_bfqq(bfqd, bfqq, "del from busy");
bfq_clear_bfqq_busy(bfqq);
bfqd->busy_queues--;
bfq_deactivate_bfqq(bfqd, bfqq, requeue);
}
/*
* Called when an inactive queue receives a new request.
*/
static void bfq_add_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq)
{
bfq_log_bfqq(bfqd, bfqq, "add to busy");
bfq_activate_bfqq(bfqd, bfqq);
bfq_mark_bfqq_busy(bfqq);
bfqd->busy_queues++;
}
static void bfq_init_entity(struct bfq_entity *entity)
{
struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
entity->weight = entity->new_weight;
entity->orig_weight = entity->new_weight;
bfqq->ioprio = bfqq->new_ioprio;
bfqq->ioprio_class = bfqq->new_ioprio_class;
entity->sched_data = &bfqq->bfqd->sched_data;
}
#define bfq_class_idle(bfqq) ((bfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
#define bfq_class_rt(bfqq) ((bfqq)->ioprio_class == IOPRIO_CLASS_RT)
#define bfq_sample_valid(samples) ((samples) > 80)
/*
* Schedule a run of the queue if there are requests pending and nothing
* already in the driver will restart queueing.
*/
static void bfq_schedule_dispatch(struct bfq_data *bfqd)
{
if (bfqd->queued != 0) {
bfq_log(bfqd, "schedule dispatch");
blk_mq_run_hw_queues(bfqd->queue, true);
}
}
/*
* Lifted from AS - choose which of rq1 and rq2 is best served now.
* We choose the request that is closest to the head right now. Distance
* behind the head is penalized and only allowed to a certain extent.
*/
static struct request *bfq_choose_req(struct bfq_data *bfqd,
struct request *rq1,
struct request *rq2,
sector_t last)
{
sector_t s1, s2, d1 = 0, d2 = 0;
unsigned long back_max;
#define BFQ_RQ1_WRAP 0x01 /* request 1 wraps */
#define BFQ_RQ2_WRAP 0x02 /* request 2 wraps */
unsigned int wrap = 0; /* bit mask: requests behind the disk head? */
if (!rq1 || rq1 == rq2)
return rq2;
if (!rq2)
return rq1;
if (rq_is_sync(rq1) && !rq_is_sync(rq2))
return rq1;
else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
return rq2;
if ((rq1->cmd_flags & REQ_META) && !(rq2->cmd_flags & REQ_META))
return rq1;
else if ((rq2->cmd_flags & REQ_META) && !(rq1->cmd_flags & REQ_META))
return rq2;
s1 = blk_rq_pos(rq1);
s2 = blk_rq_pos(rq2);
/*
* By definition, 1KiB is 2 sectors.
*/
back_max = bfqd->bfq_back_max * 2;
/*
* Strict one way elevator _except_ in the case where we allow
* short backward seeks which are biased as twice the cost of a
* similar forward seek.
*/
if (s1 >= last)
d1 = s1 - last;
else if (s1 + back_max >= last)
d1 = (last - s1) * bfqd->bfq_back_penalty;
else
wrap |= BFQ_RQ1_WRAP;
if (s2 >= last)
d2 = s2 - last;
else if (s2 + back_max >= last)
d2 = (last - s2) * bfqd->bfq_back_penalty;
else
wrap |= BFQ_RQ2_WRAP;
/* Found required data */
/*
* By doing switch() on the bit mask "wrap" we avoid having to
* check two variables for all permutations: --> faster!
*/
switch (wrap) {
case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
if (d1 < d2)
return rq1;
else if (d2 < d1)
return rq2;
if (s1 >= s2)
return rq1;
else
return rq2;
case BFQ_RQ2_WRAP:
return rq1;
case BFQ_RQ1_WRAP:
return rq2;
case BFQ_RQ1_WRAP|BFQ_RQ2_WRAP: /* both rqs wrapped */
default:
/*
* Since both rqs are wrapped,
* start with the one that's further behind head
* (--> only *one* back seek required),
* since back seek takes more time than forward.
*/
if (s1 <= s2)
return rq1;
else
return rq2;
}
}
/*
* Return expired entry, or NULL to just start from scratch in rbtree.
*/
static struct request *bfq_check_fifo(struct bfq_queue *bfqq,
struct request *last)
{
struct request *rq;
if (bfq_bfqq_fifo_expire(bfqq))
return NULL;
bfq_mark_bfqq_fifo_expire(bfqq);
rq = rq_entry_fifo(bfqq->fifo.next);
if (rq == last || ktime_get_ns() < rq->fifo_time)
return NULL;
bfq_log_bfqq(bfqq->bfqd, bfqq, "check_fifo: returned %p", rq);
return rq;
}
static struct request *bfq_find_next_rq(struct bfq_data *bfqd,
struct bfq_queue *bfqq,
struct request *last)
{
struct rb_node *rbnext = rb_next(&last->rb_node);
struct rb_node *rbprev = rb_prev(&last->rb_node);
struct request *next, *prev = NULL;
/* Follow expired path, else get first next available. */
next = bfq_check_fifo(bfqq, last);
if (next)
return next;
if (rbprev)
prev = rb_entry_rq(rbprev);
if (rbnext)
next = rb_entry_rq(rbnext);
else {
rbnext = rb_first(&bfqq->sort_list);
if (rbnext && rbnext != &last->rb_node)
next = rb_entry_rq(rbnext);
}
return bfq_choose_req(bfqd, next, prev, blk_rq_pos(last));
}
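/*
 * Service is charged in sectors: dispatching a request consumes
 * exactly its size from the budget of the queue it belongs to.
 */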
static unsigned long bfq_serv_to_charge(struct request *rq,
struct bfq_queue *bfqq)
{
return blk_rq_sectors(rq);
}
/**
* bfq_updated_next_req - update the queue after a new next_rq selection.
* @bfqd: the device data the queue belongs to.
* @bfqq: the queue to update.
*
* If the first request of a queue changes we make sure that the queue
* has enough budget to serve at least its first request (if the
* request has grown). We do this because if the queue has not enough
* budget for its first request, it has to go through two dispatch
* rounds to actually get it dispatched.
*/
static void bfq_updated_next_req(struct bfq_data *bfqd,
struct bfq_queue *bfqq)
{
struct bfq_entity *entity = &bfqq->entity;
struct request *next_rq = bfqq->next_rq;
unsigned long new_budget;
if (!next_rq)
return;
if (bfqq == bfqd->in_service_queue)
/*
* In order not to break guarantees, budgets cannot be
* changed after an entity has been selected.
*/
return;
new_budget = max_t(unsigned long, bfqq->max_budget,
bfq_serv_to_charge(next_rq, bfqq));
if (entity->budget != new_budget) {
entity->budget = new_budget;
bfq_log_bfqq(bfqd, bfqq, "updated next rq: new budget %lu",
new_budget);
bfq_activate_bfqq(bfqd, bfqq);
}
}
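/* Budget still available to bfqq in its current service slot. */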
static int bfq_bfqq_budget_left(struct bfq_queue *bfqq)
{
struct bfq_entity *entity = &bfqq->entity;
return entity->budget - entity->service;
}
/*
* If enough samples have been computed, return the current max budget
* stored in bfqd, which is dynamically updated according to the
* estimated disk peak rate; otherwise return the default max budget
*/
static int bfq_max_budget(struct bfq_data *bfqd)
{
if (bfqd->budgets_assigned < bfq_stats_min_budgets)
return bfq_default_max_budget;
else
return bfqd->bfq_max_budget;
}
/*
* Return min budget, which is a fraction of the current or default
* max budget (trying with 1/32)
*/
static int bfq_min_budget(struct bfq_data *bfqd)
{
if (bfqd->budgets_assigned < bfq_stats_min_budgets)
return bfq_default_max_budget / 32;
else
return bfqd->bfq_max_budget / 32;
}
static void bfq_bfqq_expire(struct bfq_data *bfqd,
struct bfq_queue *bfqq,
bool compensate,
enum bfqq_expiration reason);
/*
* The next function, invoked after the input queue bfqq switches from
* idle to busy, updates the budget of bfqq. The function also tells
* whether the in-service queue should be expired, by returning
* true. The purpose of expiring the in-service queue is to give bfqq
* the chance to possibly preempt the in-service queue, and the reason
* for preempting the in-service queue is to achieve the following
* goal: guarantee to bfqq its reserved bandwidth even if bfqq has
* expired because it has remained idle.
*
* In particular, bfqq may have expired for one of the following two
* reasons:
*
* - BFQQE_NO_MORE_REQUESTS bfqq did not enjoy any device idling
* and did not make it to issue a new request before its last
* request was served;
*
* - BFQQE_TOO_IDLE bfqq did enjoy device idling, but did not issue
* a new request before the expiration of the idling-time.
*
* Even if bfqq has expired for one of the above reasons, the process
* associated with the queue may be however issuing requests greedily,
* and thus be sensitive to the bandwidth it receives (bfqq may have
* remained idle for other reasons: CPU high load, bfqq not enjoying
* idling, I/O throttling somewhere in the path from the process to
* the I/O scheduler, ...). But if, after every expiration for one of
* the above two reasons, bfqq has to wait for the service of at least
* one full budget of another queue before being served again, then
* bfqq is likely to get a much lower bandwidth or resource time than
* its reserved ones. To address this issue, two countermeasures need
* to be taken.
*
* First, the budget and the timestamps of bfqq need to be updated in
* a special way on bfqq reactivation: they need to be updated as if
* bfqq did not remain idle and did not expire. In fact, if they are
* computed as if bfqq expired and remained idle until reactivation,
* then the process associated with bfqq is treated as if, instead of
* being greedy, it stopped issuing requests when bfqq remained idle,
* and restarts issuing requests only on this reactivation. In other
* words, the scheduler does not help the process recover the "service
* hole" between bfqq expiration and reactivation. As a consequence,
* the process receives a lower bandwidth than its reserved one. In
* contrast, to recover this hole, the budget must be updated as if
* bfqq was not expired at all before this reactivation, i.e., it must
* be set to the value of the remaining budget when bfqq was
* expired. Along the same line, timestamps need to be assigned the
* value they had the last time bfqq was selected for service, i.e.,
* before last expiration. Thus timestamps need to be back-shifted
* with respect to their normal computation (see [1] for more details
* on this tricky aspect).
*
* Secondly, to allow the process to recover the hole, the in-service
* queue must be expired too, to give bfqq the chance to preempt it
* immediately. In fact, if bfqq has to wait for a full budget of the
* in-service queue to be completed, then it may become impossible to
* let the process recover the hole, even if the back-shifted
* timestamps of bfqq are lower than those of the in-service queue. If
* this happens for most or all of the holes, then the process may not
* receive its reserved bandwidth. In this respect, it is worth noting
 * that, since the service of outstanding requests is not preemptible, a
* little fraction of the holes may however be unrecoverable, thereby
* causing a little loss of bandwidth.
*
* The last important point is detecting whether bfqq does need this
* bandwidth recovery. In this respect, the next function deems the
* process associated with bfqq greedy, and thus allows it to recover
* the hole, if: 1) the process is waiting for the arrival of a new
* request (which implies that bfqq expired for one of the above two
* reasons), and 2) such a request has arrived soon. The first
* condition is controlled through the flag non_blocking_wait_rq,
* while the second through the flag arrived_in_time. If both
* conditions hold, then the function computes the budget in the
* above-described special way, and signals that the in-service queue
* should be expired. Timestamp back-shifting is done later in
* __bfq_activate_entity.
*/
static bool bfq_bfqq_update_budg_for_activation(struct bfq_data *bfqd,
struct bfq_queue *bfqq,
bool arrived_in_time)
{
struct bfq_entity *entity = &bfqq->entity;
if (bfq_bfqq_non_blocking_wait_rq(bfqq) && arrived_in_time) {
/*
* We do not clear the flag non_blocking_wait_rq here, as
* the latter is used in bfq_activate_bfqq to signal
* that timestamps need to be back-shifted (and is
* cleared right after).
*/
/*
* In next assignment we rely on that either
* entity->service or entity->budget are not updated
* on expiration if bfqq is empty (see
* __bfq_bfqq_recalc_budget). Thus both quantities
* remain unchanged after such an expiration, and the
* following statement therefore assigns to
* entity->budget the remaining budget on such an
* expiration. For clarity, entity->service is not
* updated on expiration in any case, and, in normal
* operation, is reset only when bfqq is selected for
* service (see bfq_get_next_queue).
*/
entity->budget = min_t(unsigned long,
bfq_bfqq_budget_left(bfqq),
bfqq->max_budget);
return true;
}
entity->budget = max_t(unsigned long, bfqq->max_budget,
bfq_serv_to_charge(bfqq->next_rq, bfqq));
bfq_clear_bfqq_non_blocking_wait_rq(bfqq);
return false;
}
static void bfq_bfqq_handle_idle_busy_switch(struct bfq_data *bfqd,
struct bfq_queue *bfqq,
struct request *rq)
{
bool bfqq_wants_to_preempt,
/*
* See the comments on
* bfq_bfqq_update_budg_for_activation for
* details on the usage of the next variable.
*/
arrived_in_time = ktime_get_ns() <=
bfqq->ttime.last_end_request +
bfqd->bfq_slice_idle * 3;
/*
* Update budget and check whether bfqq may want to preempt
* the in-service queue.
*/
bfqq_wants_to_preempt =
bfq_bfqq_update_budg_for_activation(bfqd, bfqq,
arrived_in_time);
if (!bfq_bfqq_IO_bound(bfqq)) {
if (arrived_in_time) {
bfqq->requests_within_timer++;
if (bfqq->requests_within_timer >=
bfqd->bfq_requests_within_timer)
bfq_mark_bfqq_IO_bound(bfqq);
} else
bfqq->requests_within_timer = 0;
}
bfq_add_bfqq_busy(bfqd, bfqq);
/*
* Expire in-service queue only if preemption may be needed
* for guarantees. In this respect, the function
* next_queue_may_preempt just checks a simple, necessary
* condition, and not a sufficient condition based on
* timestamps. In fact, for the latter condition to be
* evaluated, timestamps would need first to be updated, and
* this operation is quite costly (see the comments on the
* function bfq_bfqq_update_budg_for_activation).
*/
if (bfqd->in_service_queue && bfqq_wants_to_preempt &&
next_queue_may_preempt(bfqd))
bfq_bfqq_expire(bfqd, bfqd->in_service_queue,
false, BFQQE_PREEMPTED);
}
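/*
 * Add rq to the dispatch bookkeeping and to bfqq's sort_list, update
 * the next-request choice, and handle a possible idle-to-busy switch.
 */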
static void bfq_add_request(struct request *rq)
{
struct bfq_queue *bfqq = RQ_BFQQ(rq);
struct bfq_data *bfqd = bfqq->bfqd;
struct request *next_rq, *prev;
bfq_log_bfqq(bfqd, bfqq, "add_request %d", rq_is_sync(rq));
bfqq->queued[rq_is_sync(rq)]++;
bfqd->queued++;
elv_rb_add(&bfqq->sort_list, rq);
/*
* Check if this request is a better next-serve candidate.
*/
prev = bfqq->next_rq;
next_rq = bfq_choose_req(bfqd, bfqq->next_rq, rq, bfqd->last_position);
bfqq->next_rq = next_rq;
if (!bfq_bfqq_busy(bfqq)) /* switching to busy ... */
bfq_bfqq_handle_idle_busy_switch(bfqd, bfqq, rq);
else if (prev != bfqq->next_rq)
bfq_updated_next_req(bfqd, bfqq);
}
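/*
 * Look in bfqd->bio_bfqq for a queued request that starts exactly
 * where @bio ends, i.e., a candidate for a front merge.
 */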
static struct request *bfq_find_rq_fmerge(struct bfq_data *bfqd,
struct bio *bio,
struct request_queue *q)
{
struct bfq_queue *bfqq = bfqd->bio_bfqq;
if (bfqq)
return elv_rb_find(&bfqq->sort_list, bio_end_sector(bio));
return NULL;
}
#if 0 /* Still not clear if we can do without next two functions */
static void bfq_activate_request(struct request_queue *q, struct request *rq)
{
struct bfq_data *bfqd = q->elevator->elevator_data;
bfqd->rq_in_driver++;
bfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
bfq_log(bfqd, "activate_request: new bfqd->last_position %llu",
(unsigned long long)bfqd->last_position);
}
static void bfq_deactivate_request(struct request_queue *q, struct request *rq)
{
struct bfq_data *bfqd = q->elevator->elevator_data;
bfqd->rq_in_driver--;
}
#endif
static void bfq_remove_request(struct request_queue *q,
struct request *rq)
{
struct bfq_queue *bfqq = RQ_BFQQ(rq);
struct bfq_data *bfqd = bfqq->bfqd;
const int sync = rq_is_sync(rq);
if (bfqq->next_rq == rq) {
bfqq->next_rq = bfq_find_next_rq(bfqd, bfqq, rq);
bfq_updated_next_req(bfqd, bfqq);
}
if (rq->queuelist.prev != &rq->queuelist)
list_del_init(&rq->queuelist);
bfqq->queued[sync]--;
bfqd->queued--;
elv_rb_del(&bfqq->sort_list, rq);
elv_rqhash_del(q, rq);
if (q->last_merge == rq)
q->last_merge = NULL;
if (RB_EMPTY_ROOT(&bfqq->sort_list)) {
bfqq->next_rq = NULL;
if (bfq_bfqq_busy(bfqq) && bfqq != bfqd->in_service_queue) {
bfq_del_bfqq_busy(bfqd, bfqq, 1);
/*
* bfqq emptied. In normal operation, when
* bfqq is empty, bfqq->entity.service and
* bfqq->entity.budget must contain,
* respectively, the service received and the
* budget used last time bfqq emptied. These
* facts do not hold in this case, as at least
* this last removal occurred while bfqq is
* not in service. To avoid inconsistencies,
* reset both bfqq->entity.service and
* bfqq->entity.budget, if bfqq has still a
* process that may issue I/O requests to it.
*/
bfqq->entity.budget = bfqq->entity.service = 0;
}
}
if (rq->cmd_flags & REQ_META)
bfqq->meta_pending--;
}
static bool bfq_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio)
{
struct request_queue *q = hctx->queue;
struct bfq_data *bfqd = q->elevator->elevator_data;
struct request *free = NULL;
/*
* bfq_bic_lookup grabs the queue_lock: invoke it now and
* store its return value for later use, to avoid nesting
* queue_lock inside the bfqd->lock. We assume that the bic
* returned by bfq_bic_lookup does not go away before
* bfqd->lock is taken.
*/
struct bfq_io_cq *bic = bfq_bic_lookup(bfqd, current->io_context, q);
bool ret;
spin_lock_irq(&bfqd->lock);
if (bic)
bfqd->bio_bfqq = bic_to_bfqq(bic, op_is_sync(bio->bi_opf));
else
bfqd->bio_bfqq = NULL;
bfqd->bio_bic = bic;
ret = blk_mq_sched_try_merge(q, bio, &free);
if (free)
blk_mq_free_request(free);
spin_unlock_irq(&bfqd->lock);
return ret;
}
static int bfq_request_merge(struct request_queue *q, struct request **req,
struct bio *bio)
{
struct bfq_data *bfqd = q->elevator->elevator_data;
struct request *__rq;
__rq = bfq_find_rq_fmerge(bfqd, bio, q);
if (__rq && elv_bio_merge_ok(__rq, bio)) {
*req = __rq;
return ELEVATOR_FRONT_MERGE;
}
return ELEVATOR_NO_MERGE;
}
static void bfq_request_merged(struct request_queue *q, struct request *req,
enum elv_merge type)
{
if (type == ELEVATOR_FRONT_MERGE &&
rb_prev(&req->rb_node) &&
blk_rq_pos(req) <
blk_rq_pos(container_of(rb_prev(&req->rb_node),
struct request, rb_node))) {
struct bfq_queue *bfqq = RQ_BFQQ(req);
struct bfq_data *bfqd = bfqq->bfqd;
struct request *prev, *next_rq;
/* Reposition request in its sort_list */
elv_rb_del(&bfqq->sort_list, req);
elv_rb_add(&bfqq->sort_list, req);
/* Choose next request to be served for bfqq */
prev = bfqq->next_rq;
next_rq = bfq_choose_req(bfqd, bfqq->next_rq, req,
bfqd->last_position);
bfqq->next_rq = next_rq;
/*
* If next_rq changes, update the queue's budget to fit
* the new request.
*/
if (prev != bfqq->next_rq)
bfq_updated_next_req(bfqd, bfqq);
}
}
static void bfq_requests_merged(struct request_queue *q, struct request *rq,
struct request *next)
{
struct bfq_queue *bfqq = RQ_BFQQ(rq), *next_bfqq = RQ_BFQQ(next);
if (!RB_EMPTY_NODE(&rq->rb_node))
return;
spin_lock_irq(&bfqq->bfqd->lock);
/*
* If next and rq belong to the same bfq_queue and next is older
* than rq, then reposition rq in the fifo (by substituting next
* with rq). Otherwise, if next and rq belong to different
* bfq_queues, never reposition rq: in fact, we would have to
* reposition it with respect to next's position in its own fifo,
* which would most certainly be too expensive with respect to
* the benefits.
*/
if (bfqq == next_bfqq &&
!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
next->fifo_time < rq->fifo_time) {
list_del_init(&rq->queuelist);
list_replace_init(&next->queuelist, &rq->queuelist);
rq->fifo_time = next->fifo_time;
}
if (bfqq->next_rq == next)
bfqq->next_rq = rq;
bfq_remove_request(q, next);
spin_unlock_irq(&bfqq->bfqd->lock);
}
static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
struct bio *bio)
{
struct bfq_data *bfqd = q->elevator->elevator_data;
bool is_sync = op_is_sync(bio->bi_opf);
struct bfq_queue *bfqq = bfqd->bio_bfqq;
/*
* Disallow merge of a sync bio into an async request.
*/
if (is_sync && !rq_is_sync(rq))
return false;
/*
* Lookup the bfqq that this bio will be queued with. Allow
* merge only if rq is queued there.
*/
if (!bfqq)
return false;
return bfqq == RQ_BFQQ(rq);
}
static void __bfq_set_in_service_queue(struct bfq_data *bfqd,
struct bfq_queue *bfqq)
{
if (bfqq) {
bfq_mark_bfqq_budget_new(bfqq);
bfq_clear_bfqq_fifo_expire(bfqq);
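		/*
		 * 7/8-weighted moving average that saturates at 256.
		 * Starting from 0, it stays below the 194 threshold
		 * used in bfq_default_budget for about the first ten
		 * updates (0, 32, 60, 84, 105, 123, ..., 186 with
		 * integer division).
		 */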
bfqd->budgets_assigned = (bfqd->budgets_assigned * 7 + 256) / 8;
bfq_log_bfqq(bfqd, bfqq,
"set_in_service_queue, cur-budget = %d",
bfqq->entity.budget);
}
bfqd->in_service_queue = bfqq;
}
/*
* Get and set a new queue for service.
*/
static struct bfq_queue *bfq_set_in_service_queue(struct bfq_data *bfqd)
{
struct bfq_queue *bfqq = bfq_get_next_queue(bfqd);
__bfq_set_in_service_queue(bfqd, bfqq);
return bfqq;
}
/*
* bfq_default_budget - return the default budget for @bfqq on @bfqd.
* @bfqd: the device descriptor.
* @bfqq: the queue to consider.
*
* We use 3/4 of the @bfqd maximum budget as the default value
* for the max_budget field of the queues. This lets the feedback
 * mechanism start from some middle ground, then the behavior
* of the process will drive the heuristics towards high values, if
* it behaves as a greedy sequential reader, or towards small values
* if it shows a more intermittent behavior.
*/
static unsigned long bfq_default_budget(struct bfq_data *bfqd,
struct bfq_queue *bfqq)
{
unsigned long budget;
/*
* When we need an estimate of the peak rate we need to avoid
* to give budgets that are too short due to previous
* measurements. So, in the first 10 assignments use a
* ``safe'' budget value. For such first assignment the value
* of bfqd->budgets_assigned happens to be lower than 194.
* See __bfq_set_in_service_queue for the formula by which
* this field is computed.
*/
if (bfqd->budgets_assigned < 194 && bfqd->bfq_user_max_budget == 0)
budget = bfq_default_max_budget;
else
budget = bfqd->bfq_max_budget;
return budget - budget / 4;
}
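/*
 * Arm the device-idling timer for the in-service queue: wait at most
 * bfq_slice_idle (only BFQ_MIN_TT if the queue is seeky) for a new
 * request before giving up on the queue.
 */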
static void bfq_arm_slice_timer(struct bfq_data *bfqd)
{
struct bfq_queue *bfqq = bfqd->in_service_queue;
struct bfq_io_cq *bic;
u32 sl;
/* Processes have exited, don't wait. */
bic = bfqd->in_service_bic;
if (!bic || atomic_read(&bic->icq.ioc->active_ref) == 0)
return;
bfq_mark_bfqq_wait_request(bfqq);
/*
* We don't want to idle for seeks, but we do want to allow
* fair distribution of slice time for a process doing back-to-back
 * seeks. So allow a little bit of time for it to submit a new rq.
*/
sl = bfqd->bfq_slice_idle;
/*
* Grant only minimum idle time if the queue is seeky.
*/
if (BFQQ_SEEKY(bfqq))
sl = min_t(u64, sl, BFQ_MIN_TT);
bfqd->last_idling_start = ktime_get();
hrtimer_start(&bfqd->idle_slice_timer, ns_to_ktime(sl),
HRTIMER_MODE_REL);
}
/*
* Set the maximum time for the in-service queue to consume its
* budget. This prevents seeky processes from lowering the disk
* throughput (always guaranteed with a time slice scheme as in CFQ).
*/
static void bfq_set_budget_timeout(struct bfq_data *bfqd)
{
struct bfq_queue *bfqq = bfqd->in_service_queue;
unsigned int timeout_coeff = bfqq->entity.weight /
bfqq->entity.orig_weight;
bfqd->last_budget_start = ktime_get();
bfq_clear_bfqq_budget_new(bfqq);
bfqq->budget_timeout = jiffies +
bfqd->bfq_timeout * timeout_coeff;
bfq_log_bfqq(bfqd, bfqq, "set budget_timeout %u",
jiffies_to_msecs(bfqd->bfq_timeout * timeout_coeff));
}
/*
* Remove request from internal lists.
*/
static void bfq_dispatch_remove(struct request_queue *q, struct request *rq)
{
struct bfq_queue *bfqq = RQ_BFQQ(rq);
/*
* For consistency, the next instruction should have been
* executed after removing the request from the queue and
* dispatching it. We execute instead this instruction before
* bfq_remove_request() (and hence introduce a temporary
* inconsistency), for efficiency. In fact, should this
* dispatch occur for a non in-service bfqq, this anticipated
* increment prevents two counters related to bfqq->dispatched
* from risking to be, first, uselessly decremented, and then
* incremented again when the (new) value of bfqq->dispatched
* happens to be taken into account.
*/
bfqq->dispatched++;
bfq_remove_request(q, rq);
}
static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
{
__bfq_bfqd_reset_in_service(bfqd);
if (RB_EMPTY_ROOT(&bfqq->sort_list))
bfq_del_bfqq_busy(bfqd, bfqq, 1);
else
bfq_activate_bfqq(bfqd, bfqq);
}
/**
* __bfq_bfqq_recalc_budget - try to adapt the budget to the @bfqq behavior.
* @bfqd: device data.
* @bfqq: queue to update.
* @reason: reason for expiration.
*
* Handle the feedback on @bfqq budget at queue expiration.
* See the body for detailed comments.
*/
static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd,
struct bfq_queue *bfqq,
enum bfqq_expiration reason)
{
struct request *next_rq;
int budget, min_budget;
budget = bfqq->max_budget;
min_budget = bfq_min_budget(bfqd);
bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last budg %d, budg left %d",
bfqq->entity.budget, bfq_bfqq_budget_left(bfqq));
bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last max_budg %d, min budg %d",
budget, bfq_min_budget(bfqd));
bfq_log_bfqq(bfqd, bfqq, "recalc_budg: sync %d, seeky %d",
bfq_bfqq_sync(bfqq), BFQQ_SEEKY(bfqd->in_service_queue));
if (bfq_bfqq_sync(bfqq)) {
switch (reason) {
/*
* Caveat: in all the following cases we trade latency
* for throughput.
*/
case BFQQE_TOO_IDLE:
if (budget > min_budget + BFQ_BUDGET_STEP)
budget -= BFQ_BUDGET_STEP;
else
budget = min_budget;
break;
case BFQQE_BUDGET_TIMEOUT:
budget = bfq_default_budget(bfqd, bfqq);
break;
case BFQQE_BUDGET_EXHAUSTED:
/*
* The process still has backlog, and did not
* let either the budget timeout or the disk
* idling timeout expire. Hence it is not
* seeky, has a short thinktime and may be
* happy with a higher budget too. So
* definitely increase the budget of this good
* candidate to boost the disk throughput.
*/
budget = min(budget + 8 * BFQ_BUDGET_STEP,
bfqd->bfq_max_budget);
break;
case BFQQE_NO_MORE_REQUESTS:
/*
* For queues that expire for this reason, it
* is particularly important to keep the
* budget close to the actual service they
* need. Doing so reduces the timestamp
* misalignment problem described in the
* comments in the body of
* __bfq_activate_entity. In fact, suppose
* that a queue systematically expires for
* BFQQE_NO_MORE_REQUESTS and presents a
* new request in time to enjoy timestamp
* back-shifting. The larger the budget of the
* queue is with respect to the service the
* queue actually requests in each service
* slot, the more times the queue can be
* reactivated with the same virtual finish
* time. It follows that, even if this finish
* time is pushed to the system virtual time
* to reduce the consequent timestamp
* misalignment, the queue unjustly enjoys for
* many re-activations a lower finish time
* than all newly activated queues.
*
* The service needed by bfqq is measured
* quite precisely by bfqq->entity.service.
* Since bfqq does not enjoy device idling,
* bfqq->entity.service is equal to the number
* of sectors that the process associated with
* bfqq requested to read/write before waiting
* for request completions, or blocking for
* other reasons.
*/
budget = max_t(int, bfqq->entity.service, min_budget);
break;
default:
return;
}
} else {
/*
 * Async queues always get the maximum possible
* budget, as for them we do not care about latency
* (in addition, their ability to dispatch is limited
* by the charging factor).
*/
budget = bfqd->bfq_max_budget;
}
bfqq->max_budget = budget;
if (bfqd->budgets_assigned >= bfq_stats_min_budgets &&
!bfqd->bfq_user_max_budget)
bfqq->max_budget = min(bfqq->max_budget, bfqd->bfq_max_budget);
/*
* If there is still backlog, then assign a new budget, making
* sure that it is large enough for the next request. Since
* the finish time of bfqq must be kept in sync with the
* budget, be sure to call __bfq_bfqq_expire() *after* this
* update.
*
* If there is no backlog, then no need to update the budget;
* it will be updated on the arrival of a new request.
*/
next_rq = bfqq->next_rq;
if (next_rq)
bfqq->entity.budget = max_t(unsigned long, bfqq->max_budget,
bfq_serv_to_charge(next_rq, bfqq));
bfq_log_bfqq(bfqd, bfqq, "head sect: %u, new budget %d",
next_rq ? blk_rq_sectors(next_rq) : 0,
bfqq->entity.budget);
}
static unsigned long bfq_calc_max_budget(u64 peak_rate, u64 timeout)
{
unsigned long max_budget;
/*
* The max_budget calculated when autotuning is equal to the
* amount of sectors transferred in timeout at the estimated
* peak rate. To get this value, peak_rate is, first,
* multiplied by 1000, because timeout is measured in ms,
* while peak_rate is measured in sectors/usecs. Then the
* result of this multiplication is right-shifted by
* BFQ_RATE_SHIFT, because peak_rate is equal to the value of
* the peak rate left-shifted by BFQ_RATE_SHIFT.
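	 *
	 * Purely illustrative example (numbers assumed): a device doing
	 * ~0.2 sectors/usec (~100 MiB/s) with a timeout of 125 ms would
	 * get about 0.2 * 1000 * 125 = 25000 sectors (~12 MiB).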
*/
max_budget = (unsigned long)(peak_rate * 1000 *
timeout >> BFQ_RATE_SHIFT);
return max_budget;
}
/*
* In addition to updating the peak rate, checks whether the process
* is "slow", and returns 1 if so. This slow flag is used, in addition
* to the budget timeout, to reduce the amount of service provided to
* seeky processes, and hence reduce their chances to lower the
* throughput. See the code for more details.
*/
static bool bfq_update_peak_rate(struct bfq_data *bfqd, struct bfq_queue *bfqq,
bool compensate)
{
u64 bw, usecs, expected, timeout;
ktime_t delta;
int update = 0;
if (!bfq_bfqq_sync(bfqq) || bfq_bfqq_budget_new(bfqq))
return false;
if (compensate)
delta = bfqd->last_idling_start;
else
delta = ktime_get();
delta = ktime_sub(delta, bfqd->last_budget_start);
usecs = ktime_to_us(delta);
/* don't use too short time intervals */
if (usecs < 1000)
return false;
/*
* Calculate the bandwidth for the last slice. We use a 64 bit
* value to store the peak rate, in sectors per usec in fixed
* point math. We do so to have enough precision in the estimate
* and to avoid overflows.
*/
bw = (u64)bfqq->entity.service << BFQ_RATE_SHIFT;
do_div(bw, (unsigned long)usecs);
timeout = jiffies_to_msecs(bfqd->bfq_timeout);
/*
* Use only long (> 20ms) intervals to filter out spikes for
* the peak rate estimation.
*/
if (usecs > 20000) {
if (bw > bfqd->peak_rate) {
bfqd->peak_rate = bw;
update = 1;
bfq_log(bfqd, "new peak_rate=%llu", bw);
}
update |= bfqd->peak_rate_samples == BFQ_PEAK_RATE_SAMPLES - 1;
if (bfqd->peak_rate_samples < BFQ_PEAK_RATE_SAMPLES)
bfqd->peak_rate_samples++;
if (bfqd->peak_rate_samples == BFQ_PEAK_RATE_SAMPLES &&
update && bfqd->bfq_user_max_budget == 0) {
bfqd->bfq_max_budget =
bfq_calc_max_budget(bfqd->peak_rate,
timeout);
bfq_log(bfqd, "new max_budget=%d",
bfqd->bfq_max_budget);
}
}
/*
* A process is considered ``slow'' (i.e., seeky, so that we
* cannot treat it fairly in the service domain, as it would
* slow down too much the other processes) if, when a slice
* ends for whatever reason, it has received service at a
* rate that would not be high enough to complete the budget
* before the budget timeout expiration.
*/
expected = bw * 1000 * timeout >> BFQ_RATE_SHIFT;
/*
* Caveat: processes doing IO in the slower disk zones will
* tend to be slow(er) even if not seeky. And the estimated
* peak rate will actually be an average over the disk
* surface. Hence, to not be too harsh with unlucky processes,
* we keep a budget/3 margin of safety before declaring a
* process slow.
*/
return expected > (4 * bfqq->entity.budget) / 3;
}
/*
* Return the farthest past time instant according to jiffies
* macros.
*/
static unsigned long bfq_smallest_from_now(void)
{
return jiffies - MAX_JIFFY_OFFSET;
}
/**
* bfq_bfqq_expire - expire a queue.
* @bfqd: device owning the queue.
* @bfqq: the queue to expire.
* @compensate: if true, compensate for the time spent idling.
* @reason: the reason causing the expiration.
 *
* If the process associated with the queue is slow (i.e., seeky), or
* in case of budget timeout, or, finally, if it is async, we
* artificially charge it an entire budget (independently of the
* actual service it received). As a consequence, the queue will get
* higher timestamps than the correct ones upon reactivation, and
* hence it will be rescheduled as if it had received more service
* than what it actually received. In the end, this class of processes
* will receive less service in proportion to how slowly they consume
* their budgets (and hence how seriously they tend to lower the
* throughput).
*
* In contrast, when a queue expires because it has been idling for
* too much or because it exhausted its budget, we do not touch the
* amount of service it has received. Hence when the queue will be
* reactivated and its timestamps updated, the latter will be in sync
* with the actual service received by the queue until expiration.
*
* Charging a full budget to the first type of queues and the exact
* service to the others has the effect of using the WF2Q+ policy to
* schedule the former on a timeslice basis, without violating the
* service domain guarantees of the latter.
*/
static void bfq_bfqq_expire(struct bfq_data *bfqd,
struct bfq_queue *bfqq,
bool compensate,
enum bfqq_expiration reason)
{
bool slow;
int ref;
/*
* Update device peak rate for autotuning and check whether the
* process is slow (see bfq_update_peak_rate).
*/
slow = bfq_update_peak_rate(bfqd, bfqq, compensate);
/*
* As above explained, 'punish' slow (i.e., seeky), timed-out
* and async queues, to favor sequential sync workloads.
*/
if (slow || reason == BFQQE_BUDGET_TIMEOUT)
bfq_bfqq_charge_full_budget(bfqq);
if (reason == BFQQE_TOO_IDLE &&
bfqq->entity.service <= 2 * bfqq->entity.budget / 10)
bfq_clear_bfqq_IO_bound(bfqq);
bfq_log_bfqq(bfqd, bfqq,
"expire (%d, slow %d, num_disp %d, idle_win %d)", reason,
slow, bfqq->dispatched, bfq_bfqq_idle_window(bfqq));
/*
* Increase, decrease or leave budget unchanged according to
* reason.
*/
__bfq_bfqq_recalc_budget(bfqd, bfqq, reason);
ref = bfqq->ref;
__bfq_bfqq_expire(bfqd, bfqq);
/* mark bfqq as waiting a request only if a bic still points to it */
if (ref > 1 && !bfq_bfqq_busy(bfqq) &&
reason != BFQQE_BUDGET_TIMEOUT &&
reason != BFQQE_BUDGET_EXHAUSTED)
bfq_mark_bfqq_non_blocking_wait_rq(bfqq);
}
/*
* Budget timeout is not implemented through a dedicated timer, but
* just checked on request arrivals and completions, as well as on
* idle timer expirations.
*/
static bool bfq_bfqq_budget_timeout(struct bfq_queue *bfqq)
{
if (bfq_bfqq_budget_new(bfqq) ||
time_is_after_jiffies(bfqq->budget_timeout))
return false;
return true;
}
/*
* If we expire a queue that is actively waiting (i.e., with the
* device idled) for the arrival of a new request, then we may incur
* the timestamp misalignment problem described in the body of the
* function __bfq_activate_entity. Hence we return true only if this
* condition does not hold, or if the queue is slow enough to deserve
* only to be kicked off for preserving a high throughput.
*/
static bool bfq_may_expire_for_budg_timeout(struct bfq_queue *bfqq)
{
bfq_log_bfqq(bfqq->bfqd, bfqq,
"may_budget_timeout: wait_request %d left %d timeout %d",
bfq_bfqq_wait_request(bfqq),
bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3,
bfq_bfqq_budget_timeout(bfqq));
return (!bfq_bfqq_wait_request(bfqq) ||
bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3)
&&
bfq_bfqq_budget_timeout(bfqq);
}
/*
* For a queue that becomes empty, device idling is allowed only if
* this function returns true for the queue. And this function returns
* true only if idling is beneficial for throughput.
*/
static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq)
{
struct bfq_data *bfqd = bfqq->bfqd;
bool idling_boosts_thr;
if (bfqd->strict_guarantees)
return true;
/*
* The value of the next variable is computed considering that
* idling is usually beneficial for the throughput if:
* (a) the device is not NCQ-capable, or
* (b) regardless of the presence of NCQ, the request pattern
* for bfqq is I/O-bound (possible throughput losses
* caused by granting idling to seeky queues are mitigated
* by the fact that, in all scenarios where boosting
* throughput is the best thing to do, i.e., in all
* symmetric scenarios, only a minimal idle time is
* allowed to seeky queues).
*/
idling_boosts_thr = !bfqd->hw_tag || bfq_bfqq_IO_bound(bfqq);
/*
* We have now the components we need to compute the return
* value of the function, which is true only if both the
* following conditions hold:
 * 1) bfqq is sync, because idling makes sense only for sync queues;
* 2) idling boosts the throughput.
*/
return bfq_bfqq_sync(bfqq) && idling_boosts_thr;
}
/*
* If the in-service queue is empty but the function bfq_bfqq_may_idle
* returns true, then:
* 1) the queue must remain in service and cannot be expired, and
* 2) the device must be idled to wait for the possible arrival of a new
* request for the queue.
* See the comments on the function bfq_bfqq_may_idle for the reasons
* why performing device idling is the best choice to boost the throughput
* and preserve service guarantees when bfq_bfqq_may_idle itself
* returns true.
*/
static bool bfq_bfqq_must_idle(struct bfq_queue *bfqq)
{
struct bfq_data *bfqd = bfqq->bfqd;
return RB_EMPTY_ROOT(&bfqq->sort_list) && bfqd->bfq_slice_idle != 0 &&
bfq_bfqq_may_idle(bfqq);
}
/*
* Select a queue for service. If we have a current queue in service,
* check whether to continue servicing it, or retrieve and set a new one.
*/
static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
{
struct bfq_queue *bfqq;
struct request *next_rq;
enum bfqq_expiration reason = BFQQE_BUDGET_TIMEOUT;
bfqq = bfqd->in_service_queue;
if (!bfqq)
goto new_queue;
bfq_log_bfqq(bfqd, bfqq, "select_queue: already in-service queue");
if (bfq_may_expire_for_budg_timeout(bfqq) &&
!bfq_bfqq_wait_request(bfqq) &&
!bfq_bfqq_must_idle(bfqq))
goto expire;
check_queue:
/*
* This loop is rarely executed more than once. Even when it
* happens, it is much more convenient to re-execute this loop
* than to return NULL and trigger a new dispatch to get a
* request served.
*/
next_rq = bfqq->next_rq;
/*
* If bfqq has requests queued and it has enough budget left to
* serve them, keep the queue, otherwise expire it.
*/
if (next_rq) {
if (bfq_serv_to_charge(next_rq, bfqq) >
bfq_bfqq_budget_left(bfqq)) {
/*
* Expire the queue for budget exhaustion,
* which makes sure that the next budget is
* enough to serve the next request, even if
* it comes from the fifo expired path.
*/
reason = BFQQE_BUDGET_EXHAUSTED;
goto expire;
} else {
/*
* The idle timer may be pending because we may
* not disable disk idling even when a new request
* arrives.
*/
if (bfq_bfqq_wait_request(bfqq)) {
/*
* If we get here: 1) at least a new request
* has arrived but we have not disabled the
* timer because the request was too small,
* 2) then the block layer has unplugged
* the device, causing the dispatch to be
* invoked.
*
* Since the device is unplugged, now the
* requests are probably large enough to
* provide a reasonable throughput.
* So we disable idling.
*/
bfq_clear_bfqq_wait_request(bfqq);
hrtimer_try_to_cancel(&bfqd->idle_slice_timer);
}
goto keep_queue;
}
}
/*
* No requests pending. However, if the in-service queue is idling
* for a new request, or has requests waiting for a completion and
* may idle after their completion, then keep it anyway.
*/
if (bfq_bfqq_wait_request(bfqq) ||
(bfqq->dispatched != 0 && bfq_bfqq_may_idle(bfqq))) {
bfqq = NULL;
goto keep_queue;
}
reason = BFQQE_NO_MORE_REQUESTS;
expire:
bfq_bfqq_expire(bfqd, bfqq, false, reason);
new_queue:
bfqq = bfq_set_in_service_queue(bfqd);
if (bfqq) {
bfq_log_bfqq(bfqd, bfqq, "select_queue: checking new queue");
goto check_queue;
}
keep_queue:
if (bfqq)
bfq_log_bfqq(bfqd, bfqq, "select_queue: returned this queue");
else
bfq_log(bfqd, "select_queue: no queue returned");
return bfqq;
}
/*
* Dispatch next request from bfqq.
*/
static struct request *bfq_dispatch_rq_from_bfqq(struct bfq_data *bfqd,
struct bfq_queue *bfqq)
{
struct request *rq = bfqq->next_rq;
unsigned long service_to_charge;
service_to_charge = bfq_serv_to_charge(rq, bfqq);
bfq_bfqq_served(bfqq, service_to_charge);
bfq_dispatch_remove(bfqd->queue, rq);
if (!bfqd->in_service_bic) {
atomic_long_inc(&RQ_BIC(rq)->icq.ioc->refcount);
bfqd->in_service_bic = RQ_BIC(rq);
}
/*
* Expire bfqq, pretending that its budget expired, if bfqq
* belongs to CLASS_IDLE and other queues are waiting for
* service.
*/
if (bfqd->busy_queues > 1 && bfq_class_idle(bfqq))
goto expire;
return rq;
expire:
bfq_bfqq_expire(bfqd, bfqq, false, BFQQE_BUDGET_EXHAUSTED);
return rq;
}
static bool bfq_has_work(struct blk_mq_hw_ctx *hctx)
{
struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
/*
* Avoiding lock: a race on bfqd->busy_queues should cause at
* most a call to dispatch for nothing
*/
return !list_empty_careful(&bfqd->dispatch) ||
bfqd->busy_queues > 0;
}
static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
{
struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
struct request *rq = NULL;
struct bfq_queue *bfqq = NULL;
if (!list_empty(&bfqd->dispatch)) {
rq = list_first_entry(&bfqd->dispatch, struct request,
queuelist);
list_del_init(&rq->queuelist);
bfqq = RQ_BFQQ(rq);
if (bfqq) {
/*
* Increment counters here, because this
* dispatch does not follow the standard
* dispatch flow (where counters are
* incremented)
*/
bfqq->dispatched++;
goto inc_in_driver_start_rq;
}
/*
* We exploit the put_rq_private hook to decrement
* rq_in_driver, but put_rq_private will not be
* invoked on this request. So, to avoid unbalance,
* just start this request, without incrementing
* rq_in_driver. As a negative consequence,
* rq_in_driver is deceptively lower than it should be
* while this request is in service. This may cause
* bfq_schedule_dispatch to be invoked uselessly.
*
* As for implementing an exact solution, the
* put_request hook, if defined, is probably invoked
* also on this request. So, by exploiting this hook,
* we could 1) increment rq_in_driver here, and 2)
* decrement it in put_request. Such a solution would
		 * let the value of the counter always be accurate,
		 * but it would entail using an extra interface
		 * function. This cost seems higher than the benefit,
		 * since the frequency of non-elevator-private
		 * requests is very low.
*/
goto start_rq;
}
bfq_log(bfqd, "dispatch requests: %d busy queues", bfqd->busy_queues);
if (bfqd->busy_queues == 0)
goto exit;
/*
* Force device to serve one request at a time if
* strict_guarantees is true. Forcing this service scheme is
* currently the ONLY way to guarantee that the request
* service order enforced by the scheduler is respected by a
* queueing device. Otherwise the device is free even to make
* some unlucky request wait for as long as the device
* wishes.
*
	 * Of course, serving one request at a time may cause loss of
* throughput.
*/
if (bfqd->strict_guarantees && bfqd->rq_in_driver > 0)
goto exit;
bfqq = bfq_select_queue(bfqd);
if (!bfqq)
goto exit;
rq = bfq_dispatch_rq_from_bfqq(bfqd, bfqq);
if (rq) {
inc_in_driver_start_rq:
bfqd->rq_in_driver++;
start_rq:
rq->rq_flags |= RQF_STARTED;
}
exit:
return rq;
}
static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
{
struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
struct request *rq;
spin_lock_irq(&bfqd->lock);
rq = __bfq_dispatch_request(hctx);
spin_unlock_irq(&bfqd->lock);
return rq;
}
/*
* Task holds one reference to the queue, dropped when task exits. Each rq
* in-flight on this queue also holds a reference, dropped when rq is freed.
*
* Scheduler lock must be held here. Recall not to use bfqq after calling
* this function on it.
*/
static void bfq_put_queue(struct bfq_queue *bfqq)
{
if (bfqq->bfqd)
bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p %d",
bfqq, bfqq->ref);
bfqq->ref--;
if (bfqq->ref)
return;
kmem_cache_free(bfq_pool, bfqq);
}
static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
{
if (bfqq == bfqd->in_service_queue) {
__bfq_bfqq_expire(bfqd, bfqq);
bfq_schedule_dispatch(bfqd);
}
bfq_log_bfqq(bfqd, bfqq, "exit_bfqq: %p, %d", bfqq, bfqq->ref);
bfq_put_queue(bfqq); /* release process reference */
}
static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync)
{
struct bfq_queue *bfqq = bic_to_bfqq(bic, is_sync);
struct bfq_data *bfqd;
if (bfqq)
bfqd = bfqq->bfqd; /* NULL if scheduler already exited */
if (bfqq && bfqd) {
unsigned long flags;
spin_lock_irqsave(&bfqd->lock, flags);
bfq_exit_bfqq(bfqd, bfqq);
bic_set_bfqq(bic, NULL, is_sync);
		spin_unlock_irqrestore(&bfqd->lock, flags);
}
}
static void bfq_exit_icq(struct io_cq *icq)
{
struct bfq_io_cq *bic = icq_to_bic(icq);
bfq_exit_icq_bfqq(bic, true);
bfq_exit_icq_bfqq(bic, false);
}
/*
* Update the entity prio values; note that the new values will not
* be used until the next (re)activation.
*/
static void
bfq_set_next_ioprio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic)
{
struct task_struct *tsk = current;
int ioprio_class;
struct bfq_data *bfqd = bfqq->bfqd;
if (!bfqd)
return;
ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio);
switch (ioprio_class) {
default:
dev_err(bfqq->bfqd->queue->backing_dev_info->dev,
"bfq: bad prio class %d\n", ioprio_class);
case IOPRIO_CLASS_NONE:
/*
* No prio set, inherit CPU scheduling settings.
*/
bfqq->new_ioprio = task_nice_ioprio(tsk);
bfqq->new_ioprio_class = task_nice_ioclass(tsk);
break;
case IOPRIO_CLASS_RT:
bfqq->new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
bfqq->new_ioprio_class = IOPRIO_CLASS_RT;
break;
case IOPRIO_CLASS_BE:
bfqq->new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
bfqq->new_ioprio_class = IOPRIO_CLASS_BE;
break;
case IOPRIO_CLASS_IDLE:
bfqq->new_ioprio_class = IOPRIO_CLASS_IDLE;
bfqq->new_ioprio = 7;
bfq_clear_bfqq_idle_window(bfqq);
break;
}
if (bfqq->new_ioprio >= IOPRIO_BE_NR) {
pr_crit("bfq_set_next_ioprio_data: new_ioprio %d\n",
bfqq->new_ioprio);
		bfqq->new_ioprio = IOPRIO_BE_NR - 1;
}
bfqq->entity.new_weight = bfq_ioprio_to_weight(bfqq->new_ioprio);
bfqq->entity.prio_changed = 1;
}
static void bfq_check_ioprio_change(struct bfq_io_cq *bic, struct bio *bio)
{
struct bfq_data *bfqd = bic_to_bfqd(bic);
struct bfq_queue *bfqq;
int ioprio = bic->icq.ioc->ioprio;
/*
* This condition may trigger on a newly created bic, be sure to
* drop the lock before returning.
*/
if (unlikely(!bfqd) || likely(bic->ioprio == ioprio))
return;
bic->ioprio = ioprio;
bfqq = bic_to_bfqq(bic, false);
if (bfqq) {
/* release process reference on this queue */
bfq_put_queue(bfqq);
bfqq = bfq_get_queue(bfqd, bio, BLK_RW_ASYNC, bic);
bic_set_bfqq(bic, bfqq, false);
}
bfqq = bic_to_bfqq(bic, true);
if (bfqq)
bfq_set_next_ioprio_data(bfqq, bic);
}
static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
struct bfq_io_cq *bic, pid_t pid, int is_sync)
{
RB_CLEAR_NODE(&bfqq->entity.rb_node);
INIT_LIST_HEAD(&bfqq->fifo);
bfqq->ref = 0;
bfqq->bfqd = bfqd;
if (bic)
bfq_set_next_ioprio_data(bfqq, bic);
if (is_sync) {
if (!bfq_class_idle(bfqq))
bfq_mark_bfqq_idle_window(bfqq);
bfq_mark_bfqq_sync(bfqq);
} else
bfq_clear_bfqq_sync(bfqq);
	/*
	 * Pretend the last request completed just now, so that the
	 * queue's first real request appears to arrive "in time" (see
	 * bfq_bfqq_handle_idle_busy_switch).
	 */
	bfqq->ttime.last_end_request = ktime_get_ns() + 1;
bfq_mark_bfqq_IO_bound(bfqq);
bfqq->pid = pid;
/* Tentative initial value to trade off between thr and lat */
bfqq->max_budget = bfq_default_budget(bfqd, bfqq);
bfqq->budget_timeout = bfq_smallest_from_now();
/* first request is almost certainly seeky */
bfqq->seek_history = 1;
}
static struct bfq_queue **bfq_async_queue_prio(struct bfq_data *bfqd,
int ioprio_class, int ioprio)
{
switch (ioprio_class) {
case IOPRIO_CLASS_RT:
return &async_bfqq[0][ioprio];
case IOPRIO_CLASS_NONE:
ioprio = IOPRIO_NORM;
/* fall through */
case IOPRIO_CLASS_BE:
return &async_bfqq[1][ioprio];
case IOPRIO_CLASS_IDLE:
return &async_idle_bfqq;
default:
return NULL;
}
}
static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
struct bio *bio, bool is_sync,
struct bfq_io_cq *bic)
{
const int ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
const int ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio);
struct bfq_queue **async_bfqq = NULL;
struct bfq_queue *bfqq;
rcu_read_lock();
if (!is_sync) {
async_bfqq = bfq_async_queue_prio(bfqd, ioprio_class,
ioprio);
bfqq = *async_bfqq;
if (bfqq)
goto out;
}
bfqq = kmem_cache_alloc_node(bfq_pool,
GFP_NOWAIT | __GFP_ZERO | __GFP_NOWARN,
bfqd->queue->node);
if (bfqq) {
bfq_init_bfqq(bfqd, bfqq, bic, current->pid,
is_sync);
bfq_init_entity(&bfqq->entity);
bfq_log_bfqq(bfqd, bfqq, "allocated");
} else {
bfqq = &bfqd->oom_bfqq;
bfq_log_bfqq(bfqd, bfqq, "using oom bfqq");
goto out;
}
/*
* Pin the queue now that it's allocated, scheduler exit will
* prune it.
*/
if (async_bfqq) {
bfqq->ref++;
bfq_log_bfqq(bfqd, bfqq,
"get_queue, bfqq not in async: %p, %d",
bfqq, bfqq->ref);
*async_bfqq = bfqq;
}
out:
bfqq->ref++; /* get a process reference to this queue */
bfq_log_bfqq(bfqd, bfqq, "get_queue, at end: %p, %d", bfqq, bfqq->ref);
rcu_read_unlock();
return bfqq;
}
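/*
 * Update the think time of the process owning bfqq, i.e., the time
 * between the completion of its last request and the arrival of the
 * next one, as a 7/8 decaying average capped at twice the idle slice.
 */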
static void bfq_update_io_thinktime(struct bfq_data *bfqd,
struct bfq_queue *bfqq)
{
struct bfq_ttime *ttime = &bfqq->ttime;
u64 elapsed = ktime_get_ns() - bfqq->ttime.last_end_request;
elapsed = min_t(u64, elapsed, 2ULL * bfqd->bfq_slice_idle);
ttime->ttime_samples = (7*bfqq->ttime.ttime_samples + 256) / 8;
ttime->ttime_total = div_u64(7*ttime->ttime_total + 256*elapsed, 8);
ttime->ttime_mean = div64_ul(ttime->ttime_total + 128,
ttime->ttime_samples);
}
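/*
 * Shift a new sample into bfqq's seek history bitmap: a set bit
 * records a request issued far (> BFQQ_SEEK_THR) from the previous
 * one; on non-rotational devices only small distant requests are
 * counted as seeky.
 */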
static void
bfq_update_io_seektime(struct bfq_data *bfqd, struct bfq_queue *bfqq,
struct request *rq)
{
sector_t sdist = 0;
if (bfqq->last_request_pos) {
if (bfqq->last_request_pos < blk_rq_pos(rq))
sdist = blk_rq_pos(rq) - bfqq->last_request_pos;
else
sdist = bfqq->last_request_pos - blk_rq_pos(rq);
}
bfqq->seek_history <<= 1;
bfqq->seek_history |= sdist > BFQQ_SEEK_THR &&
(!blk_queue_nonrot(bfqd->queue) ||
blk_rq_sectors(rq) < BFQQ_SECT_THR_NONROT);
}
/*
* Disable idle window if the process thinks too long or seeks so much that
* it doesn't matter.
*/
static void bfq_update_idle_window(struct bfq_data *bfqd,
struct bfq_queue *bfqq,
struct bfq_io_cq *bic)
{
int enable_idle;
/* Don't idle for async or idle io prio class. */
if (!bfq_bfqq_sync(bfqq) || bfq_class_idle(bfqq))
return;
enable_idle = bfq_bfqq_idle_window(bfqq);
if (atomic_read(&bic->icq.ioc->active_ref) == 0 ||
bfqd->bfq_slice_idle == 0 ||
(bfqd->hw_tag && BFQQ_SEEKY(bfqq)))
enable_idle = 0;
else if (bfq_sample_valid(bfqq->ttime.ttime_samples)) {
if (bfqq->ttime.ttime_mean > bfqd->bfq_slice_idle)
enable_idle = 0;
else
enable_idle = 1;
}
bfq_log_bfqq(bfqd, bfqq, "update_idle_window: enable_idle %d",
enable_idle);
if (enable_idle)
bfq_mark_bfqq_idle_window(bfqq);
else
bfq_clear_bfqq_idle_window(bfqq);
}
/*
* Called when a new fs request (rq) is added to bfqq. Check if there's
* something we should do about it.
*/
static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
struct request *rq)
{
struct bfq_io_cq *bic = RQ_BIC(rq);
if (rq->cmd_flags & REQ_META)
bfqq->meta_pending++;
bfq_update_io_thinktime(bfqd, bfqq);
bfq_update_io_seektime(bfqd, bfqq, rq);
if (bfqq->entity.service > bfq_max_budget(bfqd) / 8 ||
!BFQQ_SEEKY(bfqq))
bfq_update_idle_window(bfqd, bfqq, bic);
bfq_log_bfqq(bfqd, bfqq,
"rq_enqueued: idle_window=%d (seeky %d)",
bfq_bfqq_idle_window(bfqq), BFQQ_SEEKY(bfqq));
bfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
if (bfqq == bfqd->in_service_queue && bfq_bfqq_wait_request(bfqq)) {
bool small_req = bfqq->queued[rq_is_sync(rq)] == 1 &&
blk_rq_sectors(rq) < 32;
bool budget_timeout = bfq_bfqq_budget_timeout(bfqq);
/*
* There is just this request queued: if the request
* is small and the queue is not to be expired, then
* just exit.
*
* In this way, if the device is being idled to wait
* for a new request from the in-service queue, we
* avoid unplugging the device and committing the
* device to serve just a small request. On the
* contrary, we wait for the block layer to decide
* when to unplug the device: hopefully, new requests
* will be merged to this one quickly, then the device
* will be unplugged and larger requests will be
* dispatched.
*/
if (small_req && !budget_timeout)
return;
/*
* A large enough request arrived, or the queue is to
* be expired: in both cases disk idling is to be
* stopped, so clear wait_request flag and reset
* timer.
*/
bfq_clear_bfqq_wait_request(bfqq);
hrtimer_try_to_cancel(&bfqd->idle_slice_timer);
/*
* The queue is not empty, because a new request just
* arrived. Hence we can safely expire the queue, in
* case of budget timeout, without risking that the
* timestamps of the queue are not updated correctly.
* See [1] for more details.
*/
if (budget_timeout)
bfq_bfqq_expire(bfqd, bfqq, false,
BFQQE_BUDGET_TIMEOUT);
}
}
static void __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
{
struct bfq_queue *bfqq = RQ_BFQQ(rq);
bfq_add_request(rq);
rq->fifo_time = ktime_get_ns() + bfqd->bfq_fifo_expire[rq_is_sync(rq)];
list_add_tail(&rq->queuelist, &bfqq->fifo);
bfq_rq_enqueued(bfqd, bfqq, rq);
}
static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
bool at_head)
{
struct request_queue *q = hctx->queue;
struct bfq_data *bfqd = q->elevator->elevator_data;
spin_lock_irq(&bfqd->lock);
if (blk_mq_sched_try_insert_merge(q, rq)) {
spin_unlock_irq(&bfqd->lock);
return;
}
spin_unlock_irq(&bfqd->lock);
blk_mq_sched_request_inserted(rq);
spin_lock_irq(&bfqd->lock);
if (at_head || blk_rq_is_passthrough(rq)) {
if (at_head)
list_add(&rq->queuelist, &bfqd->dispatch);
else
list_add_tail(&rq->queuelist, &bfqd->dispatch);
} else {
__bfq_insert_request(bfqd, rq);
if (rq_mergeable(rq)) {
elv_rqhash_add(q, rq);
if (!q->last_merge)
q->last_merge = rq;
}
}
spin_unlock_irq(&bfqd->lock);
}
static void bfq_insert_requests(struct blk_mq_hw_ctx *hctx,
struct list_head *list, bool at_head)
{
while (!list_empty(list)) {
struct request *rq;
rq = list_first_entry(list, struct request, queuelist);
list_del_init(&rq->queuelist);
bfq_insert_request(hctx, rq, at_head);
}
}
static void bfq_update_hw_tag(struct bfq_data *bfqd)
{
bfqd->max_rq_in_driver = max_t(int, bfqd->max_rq_in_driver,
bfqd->rq_in_driver);
if (bfqd->hw_tag == 1)
return;
/*
* This sample is valid if the number of outstanding requests
* is large enough to allow a queueing behavior. Note that the
* sum is not exact, as it's not taking into account deactivated
* requests.
*/
if (bfqd->rq_in_driver + bfqd->queued < BFQ_HW_QUEUE_THRESHOLD)
return;
if (bfqd->hw_tag_samples++ < BFQ_HW_QUEUE_SAMPLES)
return;
bfqd->hw_tag = bfqd->max_rq_in_driver > BFQ_HW_QUEUE_THRESHOLD;
bfqd->max_rq_in_driver = 0;
bfqd->hw_tag_samples = 0;
}
static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd)
{
bfq_update_hw_tag(bfqd);
bfqd->rq_in_driver--;
bfqq->dispatched--;
bfqq->ttime.last_end_request = ktime_get_ns();
/*
* If this is the in-service queue, check if it needs to be expired,
* or if we want to idle in case it has no pending requests.
*/
if (bfqd->in_service_queue == bfqq) {
if (bfq_bfqq_budget_new(bfqq))
bfq_set_budget_timeout(bfqd);
if (bfq_bfqq_must_idle(bfqq)) {
bfq_arm_slice_timer(bfqd);
return;
} else if (bfq_may_expire_for_budg_timeout(bfqq))
bfq_bfqq_expire(bfqd, bfqq, false,
BFQQE_BUDGET_TIMEOUT);
else if (RB_EMPTY_ROOT(&bfqq->sort_list) &&
(bfqq->dispatched == 0 ||
!bfq_bfqq_may_idle(bfqq)))
bfq_bfqq_expire(bfqd, bfqq, false,
BFQQE_NO_MORE_REQUESTS);
}
}
static void bfq_put_rq_priv_body(struct bfq_queue *bfqq)
{
bfqq->allocated--;
bfq_put_queue(bfqq);
}
static void bfq_put_rq_private(struct request_queue *q, struct request *rq)
{
struct bfq_queue *bfqq = RQ_BFQQ(rq);
struct bfq_data *bfqd = bfqq->bfqd;
if (likely(rq->rq_flags & RQF_STARTED)) {
unsigned long flags;
spin_lock_irqsave(&bfqd->lock, flags);
bfq_completed_request(bfqq, bfqd);
bfq_put_rq_priv_body(bfqq);
spin_unlock_irqrestore(&bfqd->lock, flags);
} else {
/*
* Request rq may be still/already in the scheduler,
* in which case we need to remove it. And we cannot
* defer such a check and removal, to avoid
* inconsistencies in the time interval from the end
* of this function to the start of the deferred work.
* This situation seems to occur only in process
* context, as a consequence of a merge. In the
* current version of the code, this implies that the
* lock is held.
*/
if (!RB_EMPTY_NODE(&rq->rb_node))
bfq_remove_request(q, rq);
bfq_put_rq_priv_body(bfqq);
}
rq->elv.priv[0] = NULL;
rq->elv.priv[1] = NULL;
}
/*
* Allocate bfq data structures associated with this request.
*/
static int bfq_get_rq_private(struct request_queue *q, struct request *rq,
struct bio *bio)
{
struct bfq_data *bfqd = q->elevator->elevator_data;
struct bfq_io_cq *bic = icq_to_bic(rq->elv.icq);
const int is_sync = rq_is_sync(rq);
struct bfq_queue *bfqq;
spin_lock_irq(&bfqd->lock);
	if (!bic)
		goto queue_fail;
	bfq_check_ioprio_change(bic, bio);
bfqq = bic_to_bfqq(bic, is_sync);
if (!bfqq || bfqq == &bfqd->oom_bfqq) {
if (bfqq)
bfq_put_queue(bfqq);
bfqq = bfq_get_queue(bfqd, bio, is_sync, bic);
bic_set_bfqq(bic, bfqq, is_sync);
}
bfqq->allocated++;
bfqq->ref++;
bfq_log_bfqq(bfqd, bfqq, "get_request %p: bfqq %p, %d",
rq, bfqq, bfqq->ref);
rq->elv.priv[0] = bic;
rq->elv.priv[1] = bfqq;
spin_unlock_irq(&bfqd->lock);
return 0;
queue_fail:
spin_unlock_irq(&bfqd->lock);
return 1;
}
static void bfq_idle_slice_timer_body(struct bfq_queue *bfqq)
{
struct bfq_data *bfqd = bfqq->bfqd;
enum bfqq_expiration reason;
unsigned long flags;
spin_lock_irqsave(&bfqd->lock, flags);
bfq_clear_bfqq_wait_request(bfqq);
if (bfqq != bfqd->in_service_queue) {
spin_unlock_irqrestore(&bfqd->lock, flags);
return;
}
if (bfq_bfqq_budget_timeout(bfqq))
/*
* Also here the queue can be safely expired
* for budget timeout without wasting
* guarantees
*/
reason = BFQQE_BUDGET_TIMEOUT;
else if (bfqq->queued[0] == 0 && bfqq->queued[1] == 0)
/*
* The queue may not be empty upon timer expiration,
* because we may not disable the timer when the
* first request of the in-service queue arrives
* during disk idling.
*/
reason = BFQQE_TOO_IDLE;
else
goto schedule_dispatch;
bfq_bfqq_expire(bfqd, bfqq, true, reason);
schedule_dispatch:
spin_unlock_irqrestore(&bfqd->lock, flags);
bfq_schedule_dispatch(bfqd);
}
/*
* Handler of the expiration of the timer running if the in-service queue
* is idling inside its time slice.
*/
static enum hrtimer_restart bfq_idle_slice_timer(struct hrtimer *timer)
{
struct bfq_data *bfqd = container_of(timer, struct bfq_data,
idle_slice_timer);
struct bfq_queue *bfqq = bfqd->in_service_queue;
/*
* Theoretical race here: the in-service queue can be NULL or
* different from the queue that was idling if a new request
* arrives for the current queue and there is a full dispatch
* cycle that changes the in-service queue. This can hardly
* happen, but in the worst case we just expire a queue too
* early.
*/
if (bfqq)
bfq_idle_slice_timer_body(bfqq);
return HRTIMER_NORESTART;
}
static void __bfq_put_async_bfqq(struct bfq_data *bfqd,
struct bfq_queue **bfqq_ptr)
{
struct bfq_queue *bfqq = *bfqq_ptr;
bfq_log(bfqd, "put_async_bfqq: %p", bfqq);
if (bfqq) {
bfq_log_bfqq(bfqd, bfqq, "put_async_bfqq: putting %p, %d",
bfqq, bfqq->ref);
bfq_put_queue(bfqq);
*bfqq_ptr = NULL;
}
}
/*
* Release the extra reference of the async queues as the device
* goes away.
*/
static void bfq_put_async_queues(struct bfq_data *bfqd)
{
int i, j;
for (i = 0; i < 2; i++)
for (j = 0; j < IOPRIO_BE_NR; j++)
__bfq_put_async_bfqq(bfqd, &async_bfqq[i][j]);
__bfq_put_async_bfqq(bfqd, &async_idle_bfqq);
}
static void bfq_exit_queue(struct elevator_queue *e)
{
struct bfq_data *bfqd = e->elevator_data;
struct bfq_queue *bfqq, *n;
hrtimer_cancel(&bfqd->idle_slice_timer);
spin_lock_irq(&bfqd->lock);
list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list)
bfq_deactivate_bfqq(bfqd, bfqq, false);
bfq_put_async_queues(bfqd);
spin_unlock_irq(&bfqd->lock);
hrtimer_cancel(&bfqd->idle_slice_timer);
kfree(bfqd);
}
static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
{
struct bfq_data *bfqd;
struct elevator_queue *eq;
int i;
eq = elevator_alloc(q, e);
if (!eq)
return -ENOMEM;
bfqd = kzalloc_node(sizeof(*bfqd), GFP_KERNEL, q->node);
if (!bfqd) {
kobject_put(&eq->kobj);
return -ENOMEM;
}
eq->elevator_data = bfqd;
/*
	 * Our fallback bfqq if bfq_get_queue() runs into OOM issues.
* Grab a permanent reference to it, so that the normal code flow
* will not attempt to free it.
*/
bfq_init_bfqq(bfqd, &bfqd->oom_bfqq, NULL, 1, 0);
bfqd->oom_bfqq.ref++;
bfqd->oom_bfqq.new_ioprio = BFQ_DEFAULT_QUEUE_IOPRIO;
bfqd->oom_bfqq.new_ioprio_class = IOPRIO_CLASS_BE;
bfqd->oom_bfqq.entity.new_weight =
bfq_ioprio_to_weight(bfqd->oom_bfqq.new_ioprio);
/*
* Trigger weight initialization, according to ioprio, at the
* oom_bfqq's first activation. The oom_bfqq's ioprio and ioprio
* class won't be changed any more.
*/
bfqd->oom_bfqq.entity.prio_changed = 1;
bfqd->queue = q;
for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
bfqd->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;
hrtimer_init(&bfqd->idle_slice_timer, CLOCK_MONOTONIC,
HRTIMER_MODE_REL);
bfqd->idle_slice_timer.function = bfq_idle_slice_timer;
INIT_LIST_HEAD(&bfqd->active_list);
INIT_LIST_HEAD(&bfqd->idle_list);
bfqd->hw_tag = -1;
bfqd->bfq_max_budget = bfq_default_max_budget;
bfqd->bfq_fifo_expire[0] = bfq_fifo_expire[0];
bfqd->bfq_fifo_expire[1] = bfq_fifo_expire[1];
bfqd->bfq_back_max = bfq_back_max;
bfqd->bfq_back_penalty = bfq_back_penalty;
bfqd->bfq_slice_idle = bfq_slice_idle;
bfqd->bfq_class_idle_last_service = 0;
bfqd->bfq_timeout = bfq_timeout;
bfqd->bfq_requests_within_timer = 120;
spin_lock_init(&bfqd->lock);
INIT_LIST_HEAD(&bfqd->dispatch);
q->elevator = eq;
return 0;
}
static void bfq_slab_kill(void)
{
kmem_cache_destroy(bfq_pool);
}
static int __init bfq_slab_setup(void)
{
bfq_pool = KMEM_CACHE(bfq_queue, 0);
if (!bfq_pool)
return -ENOMEM;
return 0;
}
static ssize_t bfq_var_show(unsigned int var, char *page)
{
return sprintf(page, "%u\n", var);
}
static ssize_t bfq_var_store(unsigned long *var, const char *page,
size_t count)
{
unsigned long new_val;
int ret = kstrtoul(page, 10, &new_val);
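/* Update the value only on a successful parse; the write is consumed either way. */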
if (ret == 0)
*var = new_val;
return count;
}
#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
static ssize_t __FUNC(struct elevator_queue *e, char *page) \
{ \
struct bfq_data *bfqd = e->elevator_data; \
u64 __data = __VAR; \
if (__CONV == 1) \
__data = jiffies_to_msecs(__data); \
else if (__CONV == 2) \
__data = div_u64(__data, NSEC_PER_MSEC); \
return bfq_var_show(__data, (page)); \
}
SHOW_FUNCTION(bfq_fifo_expire_sync_show, bfqd->bfq_fifo_expire[1], 2);
SHOW_FUNCTION(bfq_fifo_expire_async_show, bfqd->bfq_fifo_expire[0], 2);
SHOW_FUNCTION(bfq_back_seek_max_show, bfqd->bfq_back_max, 0);
SHOW_FUNCTION(bfq_back_seek_penalty_show, bfqd->bfq_back_penalty, 0);
SHOW_FUNCTION(bfq_slice_idle_show, bfqd->bfq_slice_idle, 2);
SHOW_FUNCTION(bfq_max_budget_show, bfqd->bfq_user_max_budget, 0);
SHOW_FUNCTION(bfq_timeout_sync_show, bfqd->bfq_timeout, 1);
SHOW_FUNCTION(bfq_strict_guarantees_show, bfqd->strict_guarantees, 0);
#undef SHOW_FUNCTION
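/*
 * For illustration, SHOW_FUNCTION(bfq_timeout_sync_show, bfqd->bfq_timeout, 1)
 * above generates a sysfs read handler equivalent to:
 *
 *	static ssize_t bfq_timeout_sync_show(struct elevator_queue *e, char *page)
 *	{
 *		struct bfq_data *bfqd = e->elevator_data;
 *
 *		return bfq_var_show(jiffies_to_msecs(bfqd->bfq_timeout), page);
 *	}
 *
 * The STORE_FUNCTION macros below generate the matching write handlers,
 * clamping the parsed value to [MIN, MAX] before storing it.
 */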
#define USEC_SHOW_FUNCTION(__FUNC, __VAR) \
static ssize_t __FUNC(struct elevator_queue *e, char *page) \
{ \
struct bfq_data *bfqd = e->elevator_data; \
u64 __data = __VAR; \
__data = div_u64(__data, NSEC_PER_USEC); \
return bfq_var_show(__data, (page)); \
}
USEC_SHOW_FUNCTION(bfq_slice_idle_us_show, bfqd->bfq_slice_idle);
#undef USEC_SHOW_FUNCTION
#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
static ssize_t \
__FUNC(struct elevator_queue *e, const char *page, size_t count) \
{ \
struct bfq_data *bfqd = e->elevator_data; \
unsigned long uninitialized_var(__data); \
int ret = bfq_var_store(&__data, (page), count); \
if (__data < (MIN)) \
__data = (MIN); \
else if (__data > (MAX)) \
__data = (MAX); \
if (__CONV == 1) \
*(__PTR) = msecs_to_jiffies(__data); \
else if (__CONV == 2) \
*(__PTR) = (u64)__data * NSEC_PER_MSEC; \
else \
*(__PTR) = __data; \
return ret; \
}
STORE_FUNCTION(bfq_fifo_expire_sync_store, &bfqd->bfq_fifo_expire[1], 1,
INT_MAX, 2);
STORE_FUNCTION(bfq_fifo_expire_async_store, &bfqd->bfq_fifo_expire[0], 1,
INT_MAX, 2);
STORE_FUNCTION(bfq_back_seek_max_store, &bfqd->bfq_back_max, 0, INT_MAX, 0);
STORE_FUNCTION(bfq_back_seek_penalty_store, &bfqd->bfq_back_penalty, 1,
INT_MAX, 0);
STORE_FUNCTION(bfq_slice_idle_store, &bfqd->bfq_slice_idle, 0, INT_MAX, 2);
#undef STORE_FUNCTION
#define USEC_STORE_FUNCTION(__FUNC, __PTR, MIN, MAX) \
static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)\
{ \
struct bfq_data *bfqd = e->elevator_data; \
unsigned long uninitialized_var(__data); \
int ret = bfq_var_store(&__data, (page), count); \
if (__data < (MIN)) \
__data = (MIN); \
else if (__data > (MAX)) \
__data = (MAX); \
*(__PTR) = (u64)__data * NSEC_PER_USEC; \
return ret; \
}
USEC_STORE_FUNCTION(bfq_slice_idle_us_store, &bfqd->bfq_slice_idle, 0,
UINT_MAX);
#undef USEC_STORE_FUNCTION
static unsigned long bfq_estimated_max_budget(struct bfq_data *bfqd)
{
u64 timeout = jiffies_to_msecs(bfqd->bfq_timeout);
if (bfqd->peak_rate_samples >= BFQ_PEAK_RATE_SAMPLES)
return bfq_calc_max_budget(bfqd->peak_rate, timeout);
else
return bfq_default_max_budget;
}
static ssize_t bfq_max_budget_store(struct elevator_queue *e,
const char *page, size_t count)
{
struct bfq_data *bfqd = e->elevator_data;
unsigned long uninitialized_var(__data);
int ret = bfq_var_store(&__data, (page), count);
if (__data == 0)
bfqd->bfq_max_budget = bfq_estimated_max_budget(bfqd);
else {
if (__data > INT_MAX)
__data = INT_MAX;
bfqd->bfq_max_budget = __data;
}
bfqd->bfq_user_max_budget = __data;
return ret;
}
/*
 * The name is kept for compatibility with the corresponding cfq
 * parameter, but this timeout is used for both sync and async requests.
 */
static ssize_t bfq_timeout_sync_store(struct elevator_queue *e,
const char *page, size_t count)
{
struct bfq_data *bfqd = e->elevator_data;
unsigned long uninitialized_var(__data);
int ret = bfq_var_store(&__data, (page), count);
if (__data < 1)
__data = 1;
else if (__data > INT_MAX)
__data = INT_MAX;
bfqd->bfq_timeout = msecs_to_jiffies(__data);
if (bfqd->bfq_user_max_budget == 0)
bfqd->bfq_max_budget = bfq_estimated_max_budget(bfqd);
return ret;
}
static ssize_t bfq_strict_guarantees_store(struct elevator_queue *e,
const char *page, size_t count)
{
struct bfq_data *bfqd = e->elevator_data;
unsigned long uninitialized_var(__data);
int ret = bfq_var_store(&__data, (page), count);
if (__data > 1)
__data = 1;
if (!bfqd->strict_guarantees && __data == 1
&& bfqd->bfq_slice_idle < 8 * NSEC_PER_MSEC)
bfqd->bfq_slice_idle = 8 * NSEC_PER_MSEC;
bfqd->strict_guarantees = __data;
return ret;
}
#define BFQ_ATTR(name) \
__ATTR(name, 0644, bfq_##name##_show, bfq_##name##_store)
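/*
 * For example, BFQ_ATTR(max_budget) expands to
 * __ATTR(max_budget, 0644, bfq_max_budget_show, bfq_max_budget_store),
 * exposing the pair of handlers above as a read-write sysfs attribute.
 */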
static struct elv_fs_entry bfq_attrs[] = {
BFQ_ATTR(fifo_expire_sync),
BFQ_ATTR(fifo_expire_async),
BFQ_ATTR(back_seek_max),
BFQ_ATTR(back_seek_penalty),
BFQ_ATTR(slice_idle),
BFQ_ATTR(slice_idle_us),
BFQ_ATTR(max_budget),
BFQ_ATTR(timeout_sync),
BFQ_ATTR(strict_guarantees),
__ATTR_NULL
};
static struct elevator_type iosched_bfq_mq = {
.ops.mq = {
.get_rq_priv = bfq_get_rq_private,
.put_rq_priv = bfq_put_rq_private,
.exit_icq = bfq_exit_icq,
.insert_requests = bfq_insert_requests,
.dispatch_request = bfq_dispatch_request,
.next_request = elv_rb_latter_request,
.former_request = elv_rb_former_request,
.allow_merge = bfq_allow_bio_merge,
.bio_merge = bfq_bio_merge,
.request_merge = bfq_request_merge,
.requests_merged = bfq_requests_merged,
.request_merged = bfq_request_merged,
.has_work = bfq_has_work,
.init_sched = bfq_init_queue,
.exit_sched = bfq_exit_queue,
},
.uses_mq = true,
.icq_size = sizeof(struct bfq_io_cq),
.icq_align = __alignof__(struct bfq_io_cq),
.elevator_attrs = bfq_attrs,
.elevator_name = "bfq",
.elevator_owner = THIS_MODULE,
};
static int __init bfq_init(void)
{
int ret;
ret = -ENOMEM;
if (bfq_slab_setup())
goto err_pol_unreg;
ret = elv_register(&iosched_bfq_mq);
if (ret)
goto err_pol_unreg;
return 0;
err_pol_unreg:
return ret;
}
static void __exit bfq_exit(void)
{
elv_unregister(&iosched_bfq_mq);
bfq_slab_kill();
}
module_init(bfq_init);
module_exit(bfq_exit);
MODULE_AUTHOR("Paolo Valente");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MQ Budget Fair Queueing I/O Scheduler");
| ./CrossVul/dataset_final_sorted/CWE-416/c/bad_4007_0 |
crossvul-cpp_data_good_820_6 | #include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/memremap.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/sched/signal.h>
#include <linux/rwsem.h>
#include <linux/hugetlb.h>
#include <linux/migrate.h>
#include <linux/mm_inline.h>
#include <linux/sched/mm.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include "internal.h"
struct follow_page_context {
struct dev_pagemap *pgmap;
unsigned int page_mask;
};
static struct page *no_page_table(struct vm_area_struct *vma,
unsigned int flags)
{
/*
* When core dumping an enormous anonymous area that nobody
* has touched so far, we don't want to allocate unnecessary pages or
* page tables. Return error instead of NULL to skip handle_mm_fault,
* then get_dump_page() will return NULL to leave a hole in the dump.
* But we can only make this optimization where a hole would surely
* be zero-filled if handle_mm_fault() actually did handle it.
*/
if ((flags & FOLL_DUMP) && (!vma->vm_ops || !vma->vm_ops->fault))
return ERR_PTR(-EFAULT);
return NULL;
}
static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
pte_t *pte, unsigned int flags)
{
/* No page to get reference */
if (flags & FOLL_GET)
return -EFAULT;
if (flags & FOLL_TOUCH) {
pte_t entry = *pte;
if (flags & FOLL_WRITE)
entry = pte_mkdirty(entry);
entry = pte_mkyoung(entry);
if (!pte_same(*pte, entry)) {
set_pte_at(vma->vm_mm, address, pte, entry);
update_mmu_cache(vma, address, pte);
}
}
/* Proper page table entry exists, but no corresponding struct page */
return -EEXIST;
}
/*
* FOLL_FORCE can write to even unwritable pte's, but only
* after we've gone through a COW cycle and they are dirty.
*/
static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
{
return pte_write(pte) ||
((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
}
static struct page *follow_page_pte(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmd, unsigned int flags,
struct dev_pagemap **pgmap)
{
struct mm_struct *mm = vma->vm_mm;
struct page *page;
spinlock_t *ptl;
pte_t *ptep, pte;
retry:
if (unlikely(pmd_bad(*pmd)))
return no_page_table(vma, flags);
ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
pte = *ptep;
if (!pte_present(pte)) {
swp_entry_t entry;
/*
* KSM's break_ksm() relies upon recognizing a ksm page
* even while it is being migrated, so for that case we
* need migration_entry_wait().
*/
if (likely(!(flags & FOLL_MIGRATION)))
goto no_page;
if (pte_none(pte))
goto no_page;
entry = pte_to_swp_entry(pte);
if (!is_migration_entry(entry))
goto no_page;
pte_unmap_unlock(ptep, ptl);
migration_entry_wait(mm, pmd, address);
goto retry;
}
if ((flags & FOLL_NUMA) && pte_protnone(pte))
goto no_page;
if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags)) {
pte_unmap_unlock(ptep, ptl);
return NULL;
}
page = vm_normal_page(vma, address, pte);
if (!page && pte_devmap(pte) && (flags & FOLL_GET)) {
/*
* Only return device mapping pages in the FOLL_GET case since
* they are only valid while holding the pgmap reference.
*/
*pgmap = get_dev_pagemap(pte_pfn(pte), *pgmap);
if (*pgmap)
page = pte_page(pte);
else
goto no_page;
} else if (unlikely(!page)) {
if (flags & FOLL_DUMP) {
/* Avoid special (like zero) pages in core dumps */
page = ERR_PTR(-EFAULT);
goto out;
}
if (is_zero_pfn(pte_pfn(pte))) {
page = pte_page(pte);
} else {
int ret;
ret = follow_pfn_pte(vma, address, ptep, flags);
page = ERR_PTR(ret);
goto out;
}
}
if (flags & FOLL_SPLIT && PageTransCompound(page)) {
int ret;
get_page(page);
pte_unmap_unlock(ptep, ptl);
lock_page(page);
ret = split_huge_page(page);
unlock_page(page);
put_page(page);
if (ret)
return ERR_PTR(ret);
goto retry;
}
if (flags & FOLL_GET) {
if (unlikely(!try_get_page(page))) {
page = ERR_PTR(-ENOMEM);
goto out;
}
}
if (flags & FOLL_TOUCH) {
if ((flags & FOLL_WRITE) &&
!pte_dirty(pte) && !PageDirty(page))
set_page_dirty(page);
/*
* pte_mkyoung() would be more correct here, but atomic care
* is needed to avoid losing the dirty bit: it is easier to use
* mark_page_accessed().
*/
mark_page_accessed(page);
}
if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
/* Do not mlock pte-mapped THP */
if (PageTransCompound(page))
goto out;
/*
* The preliminary mapping check is mainly to avoid the
* pointless overhead of lock_page on the ZERO_PAGE
* which might bounce very badly if there is contention.
*
* If the page is already locked, we don't need to
* handle it now - vmscan will handle it later if and
* when it attempts to reclaim the page.
*/
if (page->mapping && trylock_page(page)) {
lru_add_drain(); /* push cached pages to LRU */
/*
* Because we lock page here, and migration is
* blocked by the pte's page reference, and we
* know the page is still mapped, we don't even
* need to check for file-cache page truncation.
*/
mlock_vma_page(page);
unlock_page(page);
}
}
out:
pte_unmap_unlock(ptep, ptl);
return page;
no_page:
pte_unmap_unlock(ptep, ptl);
if (!pte_none(pte))
return NULL;
return no_page_table(vma, flags);
}
static struct page *follow_pmd_mask(struct vm_area_struct *vma,
unsigned long address, pud_t *pudp,
unsigned int flags,
struct follow_page_context *ctx)
{
pmd_t *pmd, pmdval;
spinlock_t *ptl;
struct page *page;
struct mm_struct *mm = vma->vm_mm;
pmd = pmd_offset(pudp, address);
/*
* The READ_ONCE() will stabilize the pmdval in a register or
* on the stack so that it will stop changing under the code.
*/
pmdval = READ_ONCE(*pmd);
if (pmd_none(pmdval))
return no_page_table(vma, flags);
if (pmd_huge(pmdval) && vma->vm_flags & VM_HUGETLB) {
page = follow_huge_pmd(mm, address, pmd, flags);
if (page)
return page;
return no_page_table(vma, flags);
}
if (is_hugepd(__hugepd(pmd_val(pmdval)))) {
page = follow_huge_pd(vma, address,
__hugepd(pmd_val(pmdval)), flags,
PMD_SHIFT);
if (page)
return page;
return no_page_table(vma, flags);
}
retry:
if (!pmd_present(pmdval)) {
if (likely(!(flags & FOLL_MIGRATION)))
return no_page_table(vma, flags);
VM_BUG_ON(thp_migration_supported() &&
!is_pmd_migration_entry(pmdval));
if (is_pmd_migration_entry(pmdval))
pmd_migration_entry_wait(mm, pmd);
pmdval = READ_ONCE(*pmd);
/*
* MADV_DONTNEED may convert the pmd to null because
* mmap_sem is held in read mode
*/
if (pmd_none(pmdval))
return no_page_table(vma, flags);
goto retry;
}
if (pmd_devmap(pmdval)) {
ptl = pmd_lock(mm, pmd);
page = follow_devmap_pmd(vma, address, pmd, flags, &ctx->pgmap);
spin_unlock(ptl);
if (page)
return page;
}
if (likely(!pmd_trans_huge(pmdval)))
return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
if ((flags & FOLL_NUMA) && pmd_protnone(pmdval))
return no_page_table(vma, flags);
retry_locked:
ptl = pmd_lock(mm, pmd);
if (unlikely(pmd_none(*pmd))) {
spin_unlock(ptl);
return no_page_table(vma, flags);
}
if (unlikely(!pmd_present(*pmd))) {
spin_unlock(ptl);
if (likely(!(flags & FOLL_MIGRATION)))
return no_page_table(vma, flags);
pmd_migration_entry_wait(mm, pmd);
goto retry_locked;
}
if (unlikely(!pmd_trans_huge(*pmd))) {
spin_unlock(ptl);
return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
}
if (flags & FOLL_SPLIT) {
int ret;
page = pmd_page(*pmd);
if (is_huge_zero_page(page)) {
spin_unlock(ptl);
ret = 0;
split_huge_pmd(vma, pmd, address);
if (pmd_trans_unstable(pmd))
ret = -EBUSY;
} else {
if (unlikely(!try_get_page(page))) {
spin_unlock(ptl);
return ERR_PTR(-ENOMEM);
}
spin_unlock(ptl);
lock_page(page);
ret = split_huge_page(page);
unlock_page(page);
put_page(page);
if (pmd_none(*pmd))
return no_page_table(vma, flags);
}
return ret ? ERR_PTR(ret) :
follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
}
page = follow_trans_huge_pmd(vma, address, pmd, flags);
spin_unlock(ptl);
ctx->page_mask = HPAGE_PMD_NR - 1;
return page;
}
static struct page *follow_pud_mask(struct vm_area_struct *vma,
unsigned long address, p4d_t *p4dp,
unsigned int flags,
struct follow_page_context *ctx)
{
pud_t *pud;
spinlock_t *ptl;
struct page *page;
struct mm_struct *mm = vma->vm_mm;
pud = pud_offset(p4dp, address);
if (pud_none(*pud))
return no_page_table(vma, flags);
if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {
page = follow_huge_pud(mm, address, pud, flags);
if (page)
return page;
return no_page_table(vma, flags);
}
if (is_hugepd(__hugepd(pud_val(*pud)))) {
page = follow_huge_pd(vma, address,
__hugepd(pud_val(*pud)), flags,
PUD_SHIFT);
if (page)
return page;
return no_page_table(vma, flags);
}
if (pud_devmap(*pud)) {
ptl = pud_lock(mm, pud);
page = follow_devmap_pud(vma, address, pud, flags, &ctx->pgmap);
spin_unlock(ptl);
if (page)
return page;
}
if (unlikely(pud_bad(*pud)))
return no_page_table(vma, flags);
return follow_pmd_mask(vma, address, pud, flags, ctx);
}
static struct page *follow_p4d_mask(struct vm_area_struct *vma,
unsigned long address, pgd_t *pgdp,
unsigned int flags,
struct follow_page_context *ctx)
{
p4d_t *p4d;
struct page *page;
p4d = p4d_offset(pgdp, address);
if (p4d_none(*p4d))
return no_page_table(vma, flags);
BUILD_BUG_ON(p4d_huge(*p4d));
if (unlikely(p4d_bad(*p4d)))
return no_page_table(vma, flags);
if (is_hugepd(__hugepd(p4d_val(*p4d)))) {
page = follow_huge_pd(vma, address,
__hugepd(p4d_val(*p4d)), flags,
P4D_SHIFT);
if (page)
return page;
return no_page_table(vma, flags);
}
return follow_pud_mask(vma, address, p4d, flags, ctx);
}
/**
* follow_page_mask - look up a page descriptor from a user-virtual address
* @vma: vm_area_struct mapping @address
* @address: virtual address to look up
* @flags: flags modifying lookup behaviour
* @ctx: contains dev_pagemap for %ZONE_DEVICE memory pinning and a
* pointer to output page_mask
*
* @flags can have FOLL_ flags set, defined in <linux/mm.h>
*
* When getting pages from ZONE_DEVICE memory, the @ctx->pgmap caches
* the device's dev_pagemap metadata to avoid repeating expensive lookups.
*
* On output, the @ctx->page_mask is set according to the size of the page.
*
* Return: the mapped (struct page *), %NULL if no mapping exists, or
* an error pointer if there is a mapping to something not represented
* by a page descriptor (see also vm_normal_page()).
*/
struct page *follow_page_mask(struct vm_area_struct *vma,
unsigned long address, unsigned int flags,
struct follow_page_context *ctx)
{
pgd_t *pgd;
struct page *page;
struct mm_struct *mm = vma->vm_mm;
ctx->page_mask = 0;
/* make this handle hugepd */
page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
if (!IS_ERR(page)) {
BUG_ON(flags & FOLL_GET);
return page;
}
pgd = pgd_offset(mm, address);
if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
return no_page_table(vma, flags);
if (pgd_huge(*pgd)) {
page = follow_huge_pgd(mm, address, pgd, flags);
if (page)
return page;
return no_page_table(vma, flags);
}
if (is_hugepd(__hugepd(pgd_val(*pgd)))) {
page = follow_huge_pd(vma, address,
__hugepd(pgd_val(*pgd)), flags,
PGDIR_SHIFT);
if (page)
return page;
return no_page_table(vma, flags);
}
return follow_p4d_mask(vma, address, pgd, flags, ctx);
}
struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
unsigned int foll_flags)
{
struct follow_page_context ctx = { NULL };
struct page *page;
page = follow_page_mask(vma, address, foll_flags, &ctx);
if (ctx.pgmap)
put_dev_pagemap(ctx.pgmap);
return page;
}
static int get_gate_page(struct mm_struct *mm, unsigned long address,
unsigned int gup_flags, struct vm_area_struct **vma,
struct page **page)
{
pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
int ret = -EFAULT;
/* user gate pages are read-only */
if (gup_flags & FOLL_WRITE)
return -EFAULT;
if (address > TASK_SIZE)
pgd = pgd_offset_k(address);
else
pgd = pgd_offset_gate(mm, address);
BUG_ON(pgd_none(*pgd));
p4d = p4d_offset(pgd, address);
BUG_ON(p4d_none(*p4d));
pud = pud_offset(p4d, address);
BUG_ON(pud_none(*pud));
pmd = pmd_offset(pud, address);
if (!pmd_present(*pmd))
return -EFAULT;
VM_BUG_ON(pmd_trans_huge(*pmd));
pte = pte_offset_map(pmd, address);
if (pte_none(*pte))
goto unmap;
*vma = get_gate_vma(mm);
if (!page)
goto out;
*page = vm_normal_page(*vma, address, *pte);
if (!*page) {
if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(*pte)))
goto unmap;
*page = pte_page(*pte);
/*
* This should never happen (a device public page in the gate
* area).
*/
if (is_device_public_page(*page))
goto unmap;
}
if (unlikely(!try_get_page(*page))) {
ret = -ENOMEM;
goto unmap;
}
out:
ret = 0;
unmap:
pte_unmap(pte);
return ret;
}
/*
* mmap_sem must be held on entry. If @nonblocking != NULL and
* *@flags does not include FOLL_NOWAIT, the mmap_sem may be released.
* If it is, *@nonblocking will be set to 0 and -EBUSY returned.
*/
static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
unsigned long address, unsigned int *flags, int *nonblocking)
{
unsigned int fault_flags = 0;
vm_fault_t ret;
/* mlock all present pages, but do not fault in new pages */
if ((*flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK)
return -ENOENT;
if (*flags & FOLL_WRITE)
fault_flags |= FAULT_FLAG_WRITE;
if (*flags & FOLL_REMOTE)
fault_flags |= FAULT_FLAG_REMOTE;
if (nonblocking)
fault_flags |= FAULT_FLAG_ALLOW_RETRY;
if (*flags & FOLL_NOWAIT)
fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
if (*flags & FOLL_TRIED) {
VM_WARN_ON_ONCE(fault_flags & FAULT_FLAG_ALLOW_RETRY);
fault_flags |= FAULT_FLAG_TRIED;
}
ret = handle_mm_fault(vma, address, fault_flags);
if (ret & VM_FAULT_ERROR) {
int err = vm_fault_to_errno(ret, *flags);
if (err)
return err;
BUG();
}
if (tsk) {
if (ret & VM_FAULT_MAJOR)
tsk->maj_flt++;
else
tsk->min_flt++;
}
if (ret & VM_FAULT_RETRY) {
if (nonblocking && !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
*nonblocking = 0;
return -EBUSY;
}
/*
* The VM_FAULT_WRITE bit tells us that do_wp_page has broken COW when
* necessary, even if maybe_mkwrite decided not to set pte_write. We
* can thus safely do subsequent page lookups as if they were reads.
* But only do so when looping for pte_write is futile: in some cases
* userspace may also be wanting to write to the gotten user page,
* which a read fault here might prevent (a readonly page might get
* reCOWed by userspace write).
*/
if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE))
*flags |= FOLL_COW;
return 0;
}
static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
{
vm_flags_t vm_flags = vma->vm_flags;
int write = (gup_flags & FOLL_WRITE);
int foreign = (gup_flags & FOLL_REMOTE);
if (vm_flags & (VM_IO | VM_PFNMAP))
return -EFAULT;
if (gup_flags & FOLL_ANON && !vma_is_anonymous(vma))
return -EFAULT;
if (write) {
if (!(vm_flags & VM_WRITE)) {
if (!(gup_flags & FOLL_FORCE))
return -EFAULT;
/*
* We used to let the write,force case do COW in a
* VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could
* set a breakpoint in a read-only mapping of an
* executable, without corrupting the file (yet only
* when that file had been opened for writing!).
* Anon pages in shared mappings are surprising: now
* just reject it.
*/
if (!is_cow_mapping(vm_flags))
return -EFAULT;
}
} else if (!(vm_flags & VM_READ)) {
if (!(gup_flags & FOLL_FORCE))
return -EFAULT;
/*
* Is there actually any vma we can reach here which does not
* have VM_MAYREAD set?
*/
if (!(vm_flags & VM_MAYREAD))
return -EFAULT;
}
/*
* gups are always data accesses, not instruction
* fetches, so execute=false here
*/
if (!arch_vma_access_permitted(vma, write, false, foreign))
return -EFAULT;
return 0;
}
/**
* __get_user_pages() - pin user pages in memory
* @tsk: task_struct of target task
* @mm: mm_struct of target mm
* @start: starting user address
* @nr_pages: number of pages from start to pin
* @gup_flags: flags modifying pin behaviour
* @pages: array that receives pointers to the pages pinned.
* Should be at least nr_pages long. Or NULL, if caller
* only intends to ensure the pages are faulted in.
* @vmas: array of pointers to vmas corresponding to each page.
* Or NULL if the caller does not require them.
* @nonblocking: whether waiting for disk IO or mmap_sem contention
*
* Returns number of pages pinned. This may be fewer than the number
* requested. If nr_pages is 0 or negative, returns 0. If no pages
* were pinned, returns -errno. Each page returned must be released
* with a put_page() call when it is finished with. vmas will only
* remain valid while mmap_sem is held.
*
* Must be called with mmap_sem held. It may be released. See below.
*
* __get_user_pages walks a process's page tables and takes a reference to
* each struct page that each user address corresponds to at a given
* instant. That is, it takes the page that would be accessed if a user
* thread accesses the given user virtual address at that instant.
*
* This does not guarantee that the page exists in the user mappings when
* __get_user_pages returns, and there may even be a completely different
* page there in some cases (eg. if mmapped pagecache has been invalidated
* and subsequently refaulted). However, it does guarantee that the page
* won't be freed completely. And mostly callers simply care that the page
* contains data that was valid *at some point in time*. Typically, an IO
* or similar operation cannot guarantee anything stronger anyway because
* locks can't be held over the syscall boundary.
*
* If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If
* the page is written to, set_page_dirty (or set_page_dirty_lock, as
* appropriate) must be called after the page is finished with, and
* before put_page is called.
*
* If @nonblocking != NULL, __get_user_pages will not wait for disk IO
* or mmap_sem contention, and if waiting is needed to pin all pages,
* *@nonblocking will be set to 0. Further, if @gup_flags does not
* include FOLL_NOWAIT, the mmap_sem will be released via up_read() in
* this case.
*
* A caller using such a combination of @nonblocking and @gup_flags
* must therefore hold the mmap_sem for reading only, and recognize
* when it's been released. Otherwise, it must be held for either
* reading or writing and will not be released.
*
* In most cases, get_user_pages or get_user_pages_fast should be used
* instead of __get_user_pages. __get_user_pages should be used only if
* you need some special @gup_flags.
*/
static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
unsigned int gup_flags, struct page **pages,
struct vm_area_struct **vmas, int *nonblocking)
{
long ret = 0, i = 0;
struct vm_area_struct *vma = NULL;
struct follow_page_context ctx = { NULL };
if (!nr_pages)
return 0;
VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));
/*
* If FOLL_FORCE is set then do not force a full fault as the hinting
* fault information is unrelated to the reference behaviour of a task
* using the address space
*/
if (!(gup_flags & FOLL_FORCE))
gup_flags |= FOLL_NUMA;
do {
struct page *page;
unsigned int foll_flags = gup_flags;
unsigned int page_increm;
/* first iteration, or we crossed the vma boundary */
if (!vma || start >= vma->vm_end) {
vma = find_extend_vma(mm, start);
if (!vma && in_gate_area(mm, start)) {
ret = get_gate_page(mm, start & PAGE_MASK,
gup_flags, &vma,
pages ? &pages[i] : NULL);
if (ret)
goto out;
ctx.page_mask = 0;
goto next_page;
}
if (!vma || check_vma_flags(vma, gup_flags)) {
ret = -EFAULT;
goto out;
}
if (is_vm_hugetlb_page(vma)) {
i = follow_hugetlb_page(mm, vma, pages, vmas,
&start, &nr_pages, i,
gup_flags, nonblocking);
continue;
}
}
retry:
/*
* If we have a pending SIGKILL, don't keep faulting pages and
* potentially allocating memory.
*/
if (fatal_signal_pending(current)) {
ret = -ERESTARTSYS;
goto out;
}
cond_resched();
page = follow_page_mask(vma, start, foll_flags, &ctx);
if (!page) {
ret = faultin_page(tsk, vma, start, &foll_flags,
nonblocking);
switch (ret) {
case 0:
goto retry;
case -EBUSY:
ret = 0;
/* FALLTHRU */
case -EFAULT:
case -ENOMEM:
case -EHWPOISON:
goto out;
case -ENOENT:
goto next_page;
}
BUG();
} else if (PTR_ERR(page) == -EEXIST) {
/*
* Proper page table entry exists, but no corresponding
* struct page.
*/
goto next_page;
} else if (IS_ERR(page)) {
ret = PTR_ERR(page);
goto out;
}
if (pages) {
pages[i] = page;
flush_anon_page(vma, page, start);
flush_dcache_page(page);
ctx.page_mask = 0;
}
next_page:
if (vmas) {
vmas[i] = vma;
ctx.page_mask = 0;
}
page_increm = 1 + (~(start >> PAGE_SHIFT) & ctx.page_mask);
if (page_increm > nr_pages)
page_increm = nr_pages;
i += page_increm;
start += page_increm * PAGE_SIZE;
nr_pages -= page_increm;
} while (nr_pages);
out:
if (ctx.pgmap)
put_dev_pagemap(ctx.pgmap);
return i ? i : ret;
}
static bool vma_permits_fault(struct vm_area_struct *vma,
unsigned int fault_flags)
{
bool write = !!(fault_flags & FAULT_FLAG_WRITE);
bool foreign = !!(fault_flags & FAULT_FLAG_REMOTE);
vm_flags_t vm_flags = write ? VM_WRITE : VM_READ;
if (!(vm_flags & vma->vm_flags))
return false;
/*
* The architecture might have a hardware protection
* mechanism other than read/write that can deny access.
*
* gup always represents data access, not instruction
* fetches, so execute=false here:
*/
if (!arch_vma_access_permitted(vma, write, false, foreign))
return false;
return true;
}
/*
* fixup_user_fault() - manually resolve a user page fault
* @tsk: the task_struct to use for page fault accounting, or
* NULL if faults are not to be recorded.
* @mm: mm_struct of target mm
* @address: user address
* @fault_flags:flags to pass down to handle_mm_fault()
* @unlocked: did we unlock the mmap_sem while retrying, maybe NULL if caller
* does not allow retry
*
* This is meant to be called in the specific scenario where for locking reasons
* we try to access user memory in atomic context (within a pagefault_disable()
* section), this returns -EFAULT, and we want to resolve the user fault before
* trying again.
*
* Typically this is meant to be used by the futex code.
*
* The main difference with get_user_pages() is that this function will
* unconditionally call handle_mm_fault() which will in turn perform all the
* necessary SW fixup of the dirty and young bits in the PTE, while
* get_user_pages() only guarantees to update these in the struct page.
*
* This is important for some architectures where those bits also gate the
* access permission to the page because they are maintained in software. On
* such architectures, gup() will not be enough to make a subsequent access
* succeed.
*
* This function will not return with an unlocked mmap_sem, so it does not
* have the same semantics w.r.t. @mm->mmap_sem as filemap_fault() does.
*/
int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
unsigned long address, unsigned int fault_flags,
bool *unlocked)
{
struct vm_area_struct *vma;
vm_fault_t ret, major = 0;
if (unlocked)
fault_flags |= FAULT_FLAG_ALLOW_RETRY;
retry:
vma = find_extend_vma(mm, address);
if (!vma || address < vma->vm_start)
return -EFAULT;
if (!vma_permits_fault(vma, fault_flags))
return -EFAULT;
ret = handle_mm_fault(vma, address, fault_flags);
major |= ret & VM_FAULT_MAJOR;
if (ret & VM_FAULT_ERROR) {
int err = vm_fault_to_errno(ret, 0);
if (err)
return err;
BUG();
}
if (ret & VM_FAULT_RETRY) {
down_read(&mm->mmap_sem);
if (!(fault_flags & FAULT_FLAG_TRIED)) {
*unlocked = true;
fault_flags &= ~FAULT_FLAG_ALLOW_RETRY;
fault_flags |= FAULT_FLAG_TRIED;
goto retry;
}
}
if (tsk) {
if (major)
tsk->maj_flt++;
else
tsk->min_flt++;
}
return 0;
}
EXPORT_SYMBOL_GPL(fixup_user_fault);
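/*
 * Illustrative sketch of the intended usage (hypothetical caller, modelled
 * on the futex code): after an atomic user access fails with -EFAULT under
 * pagefault_disable(), fault the page in and retry the atomic access:
 *
 *	bool unlocked = false;
 *	int ret;
 *
 *	down_read(&mm->mmap_sem);
 *	ret = fixup_user_fault(current, mm, uaddr, FAULT_FLAG_WRITE,
 *			       &unlocked);
 *	up_read(&mm->mmap_sem);
 *	if (!ret)
 *		goto retry;
 *
 * where "retry" repeats the pagefault_disable()d access, which should now
 * succeed.
 */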
static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
struct mm_struct *mm,
unsigned long start,
unsigned long nr_pages,
struct page **pages,
struct vm_area_struct **vmas,
int *locked,
unsigned int flags)
{
long ret, pages_done;
bool lock_dropped;
if (locked) {
/* if VM_FAULT_RETRY can be returned, vmas become invalid */
BUG_ON(vmas);
/* check caller initialized locked */
BUG_ON(*locked != 1);
}
if (pages)
flags |= FOLL_GET;
pages_done = 0;
lock_dropped = false;
for (;;) {
ret = __get_user_pages(tsk, mm, start, nr_pages, flags, pages,
vmas, locked);
if (!locked)
/* VM_FAULT_RETRY couldn't trigger, bypass */
return ret;
/* VM_FAULT_RETRY cannot return errors */
if (!*locked) {
BUG_ON(ret < 0);
BUG_ON(ret >= nr_pages);
}
if (!pages)
/* If it's a prefault don't insist harder */
return ret;
if (ret > 0) {
nr_pages -= ret;
pages_done += ret;
if (!nr_pages)
break;
}
if (*locked) {
/*
* VM_FAULT_RETRY didn't trigger or it was a
* FOLL_NOWAIT.
*/
if (!pages_done)
pages_done = ret;
break;
}
/* VM_FAULT_RETRY triggered, so seek to the faulting offset */
pages += ret;
start += ret << PAGE_SHIFT;
/*
* Repeat on the address that fired VM_FAULT_RETRY
* without FAULT_FLAG_ALLOW_RETRY but with
* FAULT_FLAG_TRIED.
*/
*locked = 1;
lock_dropped = true;
down_read(&mm->mmap_sem);
ret = __get_user_pages(tsk, mm, start, 1, flags | FOLL_TRIED,
pages, NULL, NULL);
if (ret != 1) {
BUG_ON(ret > 1);
if (!pages_done)
pages_done = ret;
break;
}
nr_pages--;
pages_done++;
if (!nr_pages)
break;
pages++;
start += PAGE_SIZE;
}
if (lock_dropped && *locked) {
/*
* We must let the caller know we temporarily dropped the lock
* and so the critical section protected by it was lost.
*/
up_read(&mm->mmap_sem);
*locked = 0;
}
return pages_done;
}
/*
* We can leverage the VM_FAULT_RETRY functionality in the page fault
* paths better by using either get_user_pages_locked() or
* get_user_pages_unlocked().
*
* get_user_pages_locked() is suitable to replace the form:
*
* down_read(&mm->mmap_sem);
* do_something()
* get_user_pages(tsk, mm, ..., pages, NULL);
* up_read(&mm->mmap_sem);
*
* to:
*
* int locked = 1;
* down_read(&mm->mmap_sem);
* do_something()
* get_user_pages_locked(tsk, mm, ..., pages, &locked);
* if (locked)
* up_read(&mm->mmap_sem);
*/
long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
unsigned int gup_flags, struct page **pages,
int *locked)
{
return __get_user_pages_locked(current, current->mm, start, nr_pages,
pages, NULL, locked,
gup_flags | FOLL_TOUCH);
}
EXPORT_SYMBOL(get_user_pages_locked);
/*
* get_user_pages_unlocked() is suitable to replace the form:
*
* down_read(&mm->mmap_sem);
* get_user_pages(tsk, mm, ..., pages, NULL);
* up_read(&mm->mmap_sem);
*
* with:
*
* get_user_pages_unlocked(tsk, mm, ..., pages);
*
* It is functionally equivalent to get_user_pages_fast so
* get_user_pages_fast should be used instead if specific gup_flags
* (e.g. FOLL_FORCE) are not required.
*/
long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
struct page **pages, unsigned int gup_flags)
{
struct mm_struct *mm = current->mm;
int locked = 1;
long ret;
down_read(&mm->mmap_sem);
ret = __get_user_pages_locked(current, mm, start, nr_pages, pages, NULL,
&locked, gup_flags | FOLL_TOUCH);
if (locked)
up_read(&mm->mmap_sem);
return ret;
}
EXPORT_SYMBOL(get_user_pages_unlocked);
/*
* get_user_pages_remote() - pin user pages in memory
* @tsk: the task_struct to use for page fault accounting, or
* NULL if faults are not to be recorded.
* @mm: mm_struct of target mm
* @start: starting user address
* @nr_pages: number of pages from start to pin
* @gup_flags: flags modifying lookup behaviour
* @pages: array that receives pointers to the pages pinned.
* Should be at least nr_pages long. Or NULL, if caller
* only intends to ensure the pages are faulted in.
* @vmas: array of pointers to vmas corresponding to each page.
* Or NULL if the caller does not require them.
* @locked: pointer to lock flag indicating whether lock is held and
* subsequently whether VM_FAULT_RETRY functionality can be
* utilised. Lock must initially be held.
*
* Returns number of pages pinned. This may be fewer than the number
* requested. If nr_pages is 0 or negative, returns 0. If no pages
* were pinned, returns -errno. Each page returned must be released
* with a put_page() call when it is finished with. vmas will only
* remain valid while mmap_sem is held.
*
* Must be called with mmap_sem held for read or write.
*
* get_user_pages walks a process's page tables and takes a reference to
* each struct page that each user address corresponds to at a given
* instant. That is, it takes the page that would be accessed if a user
* thread accesses the given user virtual address at that instant.
*
* This does not guarantee that the page exists in the user mappings when
* get_user_pages returns, and there may even be a completely different
* page there in some cases (eg. if mmapped pagecache has been invalidated
* and subsequently refaulted). However, it does guarantee that the page
* won't be freed completely. And mostly callers simply care that the page
* contains data that was valid *at some point in time*. Typically, an IO
* or similar operation cannot guarantee anything stronger anyway because
* locks can't be held over the syscall boundary.
*
* If gup_flags & FOLL_WRITE == 0, the page must not be written to. If the page
* is written to, set_page_dirty (or set_page_dirty_lock, as appropriate) must
* be called after the page is finished with, and before put_page is called.
*
* get_user_pages is typically used for fewer-copy IO operations, to get a
* handle on the memory by some means other than accesses via the user virtual
* addresses. The pages may be submitted for DMA to devices or accessed via
* their kernel linear mapping (via the kmap APIs). Care should be taken to
* use the correct cache flushing APIs.
*
* See also get_user_pages_fast, for performance critical applications.
*
* get_user_pages should be phased out in favor of
* get_user_pages_locked|unlocked or get_user_pages_fast. Nothing
* should use get_user_pages because it cannot pass
* FAULT_FLAG_ALLOW_RETRY to handle_mm_fault.
*/
long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
unsigned int gup_flags, struct page **pages,
struct vm_area_struct **vmas, int *locked)
{
return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas,
locked,
gup_flags | FOLL_TOUCH | FOLL_REMOTE);
}
EXPORT_SYMBOL(get_user_pages_remote);
/*
* This is the same as get_user_pages_remote(), just with a
* less-flexible calling convention where we assume that the task
* and mm being operated on are the current task's and don't allow
* passing of a locked parameter. We also obviously don't pass
* FOLL_REMOTE in here.
*/
long get_user_pages(unsigned long start, unsigned long nr_pages,
unsigned int gup_flags, struct page **pages,
struct vm_area_struct **vmas)
{
return __get_user_pages_locked(current, current->mm, start, nr_pages,
pages, vmas, NULL,
gup_flags | FOLL_TOUCH);
}
EXPORT_SYMBOL(get_user_pages);
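/*
 * Illustrative sketch of the pin/use/release contract documented above
 * (hypothetical caller, not taken from this file):
 *
 *	npages = get_user_pages(start, nr, FOLL_WRITE, pages, NULL);
 *	if (npages < 0)
 *		return npages;
 *	for (i = 0; i < npages; i++) {
 *		... read or write the page contents ...
 *		set_page_dirty_lock(pages[i]);
 *		put_page(pages[i]);
 *	}
 */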
#if defined(CONFIG_FS_DAX) || defined(CONFIG_CMA)
#ifdef CONFIG_FS_DAX
static bool check_dax_vmas(struct vm_area_struct **vmas, long nr_pages)
{
long i;
struct vm_area_struct *vma_prev = NULL;
for (i = 0; i < nr_pages; i++) {
struct vm_area_struct *vma = vmas[i];
if (vma == vma_prev)
continue;
vma_prev = vma;
if (vma_is_fsdax(vma))
return true;
}
return false;
}
#else
static inline bool check_dax_vmas(struct vm_area_struct **vmas, long nr_pages)
{
return false;
}
#endif
#ifdef CONFIG_CMA
static struct page *new_non_cma_page(struct page *page, unsigned long private)
{
/*
* We want to make sure we allocate the new page from the same node
* as the source page.
*/
int nid = page_to_nid(page);
/*
* Trying to allocate a page for migration; ignore allocation-failure
* warnings. We don't force __GFP_THISNODE here because this node is
* the one holding the CMA reservation, and in some cases such nodes
* have very little non-movable memory available for allocation.
*/
gfp_t gfp_mask = GFP_USER | __GFP_NOWARN;
if (PageHighMem(page))
gfp_mask |= __GFP_HIGHMEM;
#ifdef CONFIG_HUGETLB_PAGE
if (PageHuge(page)) {
struct hstate *h = page_hstate(page);
/*
* We don't want to dequeue from the pool because pool pages will
* mostly be from the CMA region.
*/
return alloc_migrate_huge_page(h, gfp_mask, nid, NULL);
}
#endif
if (PageTransHuge(page)) {
struct page *thp;
/*
* ignore allocation failure warnings
*/
gfp_t thp_gfpmask = GFP_TRANSHUGE | __GFP_NOWARN;
/*
* Remove the movable mask so that we don't allocate from
* CMA area again.
*/
thp_gfpmask &= ~__GFP_MOVABLE;
thp = __alloc_pages_node(nid, thp_gfpmask, HPAGE_PMD_ORDER);
if (!thp)
return NULL;
prep_transhuge_page(thp);
return thp;
}
return __alloc_pages_node(nid, gfp_mask, 0);
}
static long check_and_migrate_cma_pages(unsigned long start, long nr_pages,
unsigned int gup_flags,
struct page **pages,
struct vm_area_struct **vmas)
{
long i;
bool drain_allow = true;
bool migrate_allow = true;
LIST_HEAD(cma_page_list);
check_again:
for (i = 0; i < nr_pages; i++) {
/*
* If we get a page from the CMA zone, since we are going to
* be pinning these entries, we might as well move them out
* of the CMA zone if possible.
*/
if (is_migrate_cma_page(pages[i])) {
struct page *head = compound_head(pages[i]);
if (PageHuge(head)) {
isolate_huge_page(head, &cma_page_list);
} else {
if (!PageLRU(head) && drain_allow) {
lru_add_drain_all();
drain_allow = false;
}
if (!isolate_lru_page(head)) {
list_add_tail(&head->lru, &cma_page_list);
mod_node_page_state(page_pgdat(head),
NR_ISOLATED_ANON +
page_is_file_cache(head),
hpage_nr_pages(head));
}
}
}
}
if (!list_empty(&cma_page_list)) {
/*
* drop the above get_user_pages reference.
*/
for (i = 0; i < nr_pages; i++)
put_page(pages[i]);
if (migrate_pages(&cma_page_list, new_non_cma_page,
NULL, 0, MIGRATE_SYNC, MR_CONTIG_RANGE)) {
/*
* some of the pages failed migration. Do get_user_pages
* without migration.
*/
migrate_allow = false;
if (!list_empty(&cma_page_list))
putback_movable_pages(&cma_page_list);
}
/*
* We migrated all the pages; try to take the page references again,
* migrating any new CMA pages that we failed to isolate earlier.
*/
nr_pages = get_user_pages(start, nr_pages, gup_flags, pages, vmas);
if ((nr_pages > 0) && migrate_allow) {
drain_allow = true;
goto check_again;
}
}
return nr_pages;
}
#else
static inline long check_and_migrate_cma_pages(unsigned long start, long nr_pages,
unsigned int gup_flags,
struct page **pages,
struct vm_area_struct **vmas)
{
return nr_pages;
}
#endif
/*
* This is the same as get_user_pages() in that it assumes we are
* operating on the current task's mm, but it goes further to validate
* that the vmas associated with the address range are suitable for
* longterm elevated page reference counts. For example, filesystem-dax
* mappings are subject to the lifetime enforced by the filesystem and
* we need guarantees that longterm users like RDMA and V4L2 only
* establish mappings that have a kernel enforced revocation mechanism.
*
* "longterm" == userspace controlled elevated page count lifetime.
* Contrast this to iov_iter_get_pages() usages which are transient.
*/
long get_user_pages_longterm(unsigned long start, unsigned long nr_pages,
unsigned int gup_flags, struct page **pages,
struct vm_area_struct **vmas_arg)
{
struct vm_area_struct **vmas = vmas_arg;
unsigned long flags;
long rc, i;
if (!pages)
return -EINVAL;
if (!vmas) {
vmas = kcalloc(nr_pages, sizeof(struct vm_area_struct *),
GFP_KERNEL);
if (!vmas)
return -ENOMEM;
}
flags = memalloc_nocma_save();
rc = get_user_pages(start, nr_pages, gup_flags, pages, vmas);
memalloc_nocma_restore(flags);
if (rc < 0)
goto out;
if (check_dax_vmas(vmas, rc)) {
for (i = 0; i < rc; i++)
put_page(pages[i]);
rc = -EOPNOTSUPP;
goto out;
}
rc = check_and_migrate_cma_pages(start, rc, gup_flags, pages, vmas);
out:
if (vmas != vmas_arg)
kfree(vmas);
return rc;
}
EXPORT_SYMBOL(get_user_pages_longterm);
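/*
 * Illustrative sketch (hypothetical RDMA-style caller): a longterm user
 * pins once up front, holds the references for the lifetime of its
 * mapping, and releases them only on teardown:
 *
 *	rc = get_user_pages_longterm(start, npages, FOLL_WRITE, pages, NULL);
 *	if (rc < 0)
 *		return rc;	(-EOPNOTSUPP here means e.g. an fs-dax vma)
 *	...
 *	for (i = 0; i < rc; i++)
 *		put_page(pages[i]);
 */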
#endif /* CONFIG_FS_DAX || CONFIG_CMA */
/**
* populate_vma_page_range() - populate a range of pages in the vma.
* @vma: target vma
* @start: start address
* @end: end address
* @nonblocking: if non-NULL, the mmap_sem may be released on retry (see below)
*
* This takes care of mlocking the pages too if VM_LOCKED is set.
*
* return 0 on success, negative error code on error.
*
* vma->vm_mm->mmap_sem must be held.
*
* If @nonblocking is NULL, it may be held for read or write and will
* be unperturbed.
*
* If @nonblocking is non-NULL, it must held for read only and may be
* released. If it's released, *@nonblocking will be set to 0.
*/
long populate_vma_page_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end, int *nonblocking)
{
struct mm_struct *mm = vma->vm_mm;
unsigned long nr_pages = (end - start) / PAGE_SIZE;
int gup_flags;
VM_BUG_ON(start & ~PAGE_MASK);
VM_BUG_ON(end & ~PAGE_MASK);
VM_BUG_ON_VMA(start < vma->vm_start, vma);
VM_BUG_ON_VMA(end > vma->vm_end, vma);
VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);
gup_flags = FOLL_TOUCH | FOLL_POPULATE | FOLL_MLOCK;
if (vma->vm_flags & VM_LOCKONFAULT)
gup_flags &= ~FOLL_POPULATE;
/*
* We want to touch writable mappings with a write fault in order
* to break COW, except for shared mappings because these don't COW
* and we would not want to dirty them for nothing.
*/
if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
gup_flags |= FOLL_WRITE;
/*
* We want mlock to succeed for regions that have any permissions
* other than PROT_NONE.
*/
if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))
gup_flags |= FOLL_FORCE;
/*
* We made sure addr is within a VMA, so the following will
* not result in a stack expansion that recurses back here.
*/
return __get_user_pages(current, mm, start, nr_pages, gup_flags,
NULL, NULL, nonblocking);
}
/*
* __mm_populate - populate and/or mlock pages within a range of address space.
*
* This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap
* flags. VMAs must be already marked with the desired vm_flags, and
* mmap_sem must not be held.
*/
int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
{
struct mm_struct *mm = current->mm;
unsigned long end, nstart, nend;
struct vm_area_struct *vma = NULL;
int locked = 0;
long ret = 0;
end = start + len;
for (nstart = start; nstart < end; nstart = nend) {
/*
* We want to fault in pages for [nstart; end) address range.
* Find first corresponding VMA.
*/
if (!locked) {
locked = 1;
down_read(&mm->mmap_sem);
vma = find_vma(mm, nstart);
} else if (nstart >= vma->vm_end)
vma = vma->vm_next;
if (!vma || vma->vm_start >= end)
break;
/*
* Set [nstart; nend) to intersection of desired address
* range with the first VMA. Also, skip undesirable VMA types.
*/
nend = min(end, vma->vm_end);
if (vma->vm_flags & (VM_IO | VM_PFNMAP))
continue;
if (nstart < vma->vm_start)
nstart = vma->vm_start;
/*
* Now fault in a range of pages. populate_vma_page_range()
* double checks the vma flags, so that it won't mlock pages
* if the vma was already munlocked.
*/
ret = populate_vma_page_range(vma, nstart, nend, &locked);
if (ret < 0) {
if (ignore_errors) {
ret = 0;
continue; /* continue at next VMA */
}
break;
}
nend = nstart + ret * PAGE_SIZE;
ret = 0;
}
if (locked)
up_read(&mm->mmap_sem);
return ret; /* 0 or negative error code */
}
/**
* get_dump_page() - pin user page in memory while writing it to core dump
* @addr: user address
*
* Returns struct page pointer of user page pinned for dump,
* to be freed afterwards by put_page().
*
* Returns NULL on any kind of failure - a hole must then be inserted into
* the corefile, to preserve alignment with its headers; and also returns
* NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
* allowing a hole to be left in the corefile to save diskspace.
*
* Called without mmap_sem, but after all other threads have been killed.
*/
#ifdef CONFIG_ELF_CORE
struct page *get_dump_page(unsigned long addr)
{
struct vm_area_struct *vma;
struct page *page;
if (__get_user_pages(current, current->mm, addr, 1,
FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma,
NULL) < 1)
return NULL;
flush_cache_page(vma, addr, page_to_pfn(page));
return page;
}
#endif /* CONFIG_ELF_CORE */
/*
* Generic Fast GUP
*
* get_user_pages_fast attempts to pin user pages by walking the page
* tables directly and avoids taking locks. Thus the walker needs to be
* protected from page table pages being freed from under it, and should
* block any THP splits.
*
* One way to achieve this is to have the walker disable interrupts, and
* rely on IPIs from the TLB flushing code blocking before the page table
* pages are freed. This is unsuitable for architectures that do not need
* to broadcast an IPI when invalidating TLBs.
*
* Another way to achieve this is to batch up the page-table pages belonging
* to more than one mm_user, then schedule an rcu_sched callback to free those
* pages. Disabling interrupts will allow the fast_gup walker to both block
* the rcu_sched callback, and an IPI that we broadcast for splitting THPs
* (which is a relatively rare event). The code below adopts this strategy.
*
* Before activating this code, please be aware that the following assumptions
* are currently made:
*
* *) Either HAVE_RCU_TABLE_FREE is enabled, and tlb_remove_table() is used to
* free pages containing page tables or TLB flushing requires IPI broadcast.
*
* *) ptes can be read atomically by the architecture.
*
* *) access_ok is sufficient to validate userspace address ranges.
*
* The last two assumptions can be relaxed by the addition of helper functions.
*
* This code is based heavily on the PowerPC implementation by Nick Piggin.
*/
#ifdef CONFIG_HAVE_GENERIC_GUP
#ifndef gup_get_pte
/*
* We assume that the PTE can be read atomically. If this is not the case for
* your architecture, please provide the helper.
*/
static inline pte_t gup_get_pte(pte_t *ptep)
{
return READ_ONCE(*ptep);
}
#endif
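/*
 * Back out of a partially completed fast-gup walk: drop the references
 * (and the PG_referenced hints) taken on all pages gathered since
 * @nr_start, e.g. when a dev_pagemap lookup fails or a page-table entry
 * changed under us.
 */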
static void undo_dev_pagemap(int *nr, int nr_start, struct page **pages)
{
while ((*nr) - nr_start) {
struct page *page = pages[--(*nr)];
ClearPageReferenced(page);
put_page(page);
}
}
/*
* Return the compound head page with its refcount appropriately incremented,
* or NULL if that failed.
*/
static inline struct page *try_get_compound_head(struct page *page, int refs)
{
struct page *head = compound_head(page);
if (WARN_ON_ONCE(page_ref_count(head) < 0))
return NULL;
if (unlikely(!page_cache_add_speculative(head, refs)))
return NULL;
return head;
}
#ifdef CONFIG_ARCH_HAS_PTE_SPECIAL
static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
int write, struct page **pages, int *nr)
{
struct dev_pagemap *pgmap = NULL;
int nr_start = *nr, ret = 0;
pte_t *ptep, *ptem;
ptem = ptep = pte_offset_map(&pmd, addr);
do {
pte_t pte = gup_get_pte(ptep);
struct page *head, *page;
/*
* Similar to the PMD case below, NUMA hinting must take slow
* path using the pte_protnone check.
*/
if (pte_protnone(pte))
goto pte_unmap;
if (!pte_access_permitted(pte, write))
goto pte_unmap;
if (pte_devmap(pte)) {
pgmap = get_dev_pagemap(pte_pfn(pte), pgmap);
if (unlikely(!pgmap)) {
undo_dev_pagemap(nr, nr_start, pages);
goto pte_unmap;
}
} else if (pte_special(pte))
goto pte_unmap;
VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
page = pte_page(pte);
head = try_get_compound_head(page, 1);
if (!head)
goto pte_unmap;
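/*
 * The PTE may have changed under us (e.g. due to COW or unmap) since
 * it was read; if so, drop the reference and let the slow path retry.
 */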
if (unlikely(pte_val(pte) != pte_val(*ptep))) {
put_page(head);
goto pte_unmap;
}
VM_BUG_ON_PAGE(compound_head(page) != head, page);
SetPageReferenced(page);
pages[*nr] = page;
(*nr)++;
} while (ptep++, addr += PAGE_SIZE, addr != end);
ret = 1;
pte_unmap:
if (pgmap)
put_dev_pagemap(pgmap);
pte_unmap(ptem);
return ret;
}
#else
/*
* If we can't determine whether or not a pte is special, then fail immediately
* for ptes. Note, we can still pin HugeTLB and THP as these are guaranteed not
* to be special.
*
* For a futex to be placed on a THP tail page, get_futex_key requires a
* __get_user_pages_fast implementation that can pin pages. Thus it's still
* useful to have gup_huge_pmd even if we can't operate on ptes.
*/
static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
int write, struct page **pages, int *nr)
{
return 0;
}
#endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */
#if defined(__HAVE_ARCH_PTE_DEVMAP) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
static int __gup_device_huge(unsigned long pfn, unsigned long addr,
unsigned long end, struct page **pages, int *nr)
{
int nr_start = *nr;
struct dev_pagemap *pgmap = NULL;
do {
struct page *page = pfn_to_page(pfn);
pgmap = get_dev_pagemap(pfn, pgmap);
if (unlikely(!pgmap)) {
undo_dev_pagemap(nr, nr_start, pages);
return 0;
}
SetPageReferenced(page);
pages[*nr] = page;
get_page(page);
(*nr)++;
pfn++;
} while (addr += PAGE_SIZE, addr != end);
if (pgmap)
put_dev_pagemap(pgmap);
return 1;
}
static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
unsigned long end, struct page **pages, int *nr)
{
unsigned long fault_pfn;
int nr_start = *nr;
fault_pfn = pmd_pfn(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
if (!__gup_device_huge(fault_pfn, addr, end, pages, nr))
return 0;
if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
undo_dev_pagemap(nr, nr_start, pages);
return 0;
}
return 1;
}
static int __gup_device_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
unsigned long end, struct page **pages, int *nr)
{
unsigned long fault_pfn;
int nr_start = *nr;
fault_pfn = pud_pfn(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
if (!__gup_device_huge(fault_pfn, addr, end, pages, nr))
return 0;
if (unlikely(pud_val(orig) != pud_val(*pudp))) {
undo_dev_pagemap(nr, nr_start, pages);
return 0;
}
return 1;
}
#else
static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
unsigned long end, struct page **pages, int *nr)
{
BUILD_BUG();
return 0;
}
static int __gup_device_huge_pud(pud_t pud, pud_t *pudp, unsigned long addr,
unsigned long end, struct page **pages, int *nr)
{
BUILD_BUG();
return 0;
}
#endif
static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
unsigned long end, int write, struct page **pages, int *nr)
{
struct page *head, *page;
int refs;
if (!pmd_access_permitted(orig, write))
return 0;
if (pmd_devmap(orig))
return __gup_device_huge_pmd(orig, pmdp, addr, end, pages, nr);
refs = 0;
page = pmd_page(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
do {
pages[*nr] = page;
(*nr)++;
page++;
refs++;
} while (addr += PAGE_SIZE, addr != end);
head = try_get_compound_head(pmd_page(orig), refs);
if (!head) {
*nr -= refs;
return 0;
}
if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
*nr -= refs;
while (refs--)
put_page(head);
return 0;
}
SetPageReferenced(head);
return 1;
}
static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
unsigned long end, int write, struct page **pages, int *nr)
{
struct page *head, *page;
int refs;
if (!pud_access_permitted(orig, write))
return 0;
if (pud_devmap(orig))
return __gup_device_huge_pud(orig, pudp, addr, end, pages, nr);
refs = 0;
page = pud_page(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
do {
pages[*nr] = page;
(*nr)++;
page++;
refs++;
} while (addr += PAGE_SIZE, addr != end);
head = try_get_compound_head(pud_page(orig), refs);
if (!head) {
*nr -= refs;
return 0;
}
if (unlikely(pud_val(orig) != pud_val(*pudp))) {
*nr -= refs;
while (refs--)
put_page(head);
return 0;
}
SetPageReferenced(head);
return 1;
}
static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
unsigned long end, int write,
struct page **pages, int *nr)
{
int refs;
struct page *head, *page;
if (!pgd_access_permitted(orig, write))
return 0;
BUILD_BUG_ON(pgd_devmap(orig));
refs = 0;
page = pgd_page(orig) + ((addr & ~PGDIR_MASK) >> PAGE_SHIFT);
do {
pages[*nr] = page;
(*nr)++;
page++;
refs++;
} while (addr += PAGE_SIZE, addr != end);
head = try_get_compound_head(pgd_page(orig), refs);
if (!head) {
*nr -= refs;
return 0;
}
if (unlikely(pgd_val(orig) != pgd_val(*pgdp))) {
*nr -= refs;
while (refs--)
put_page(head);
return 0;
}
SetPageReferenced(head);
return 1;
}
static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
int write, struct page **pages, int *nr)
{
unsigned long next;
pmd_t *pmdp;
pmdp = pmd_offset(&pud, addr);
do {
pmd_t pmd = READ_ONCE(*pmdp);
next = pmd_addr_end(addr, end);
if (!pmd_present(pmd))
return 0;
if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd) ||
pmd_devmap(pmd))) {
/*
* NUMA hinting faults need to be handled in the GUP
* slowpath for accounting purposes and so that they
* can be serialised against THP migration.
*/
if (pmd_protnone(pmd))
return 0;
if (!gup_huge_pmd(pmd, pmdp, addr, next, write,
pages, nr))
return 0;
} else if (unlikely(is_hugepd(__hugepd(pmd_val(pmd))))) {
/*
* architectures may use different formats for the
* hugetlbfs pmd and the THP pmd
*/
if (!gup_huge_pd(__hugepd(pmd_val(pmd)), addr,
PMD_SHIFT, next, write, pages, nr))
return 0;
} else if (!gup_pte_range(pmd, addr, next, write, pages, nr))
return 0;
} while (pmdp++, addr = next, addr != end);
return 1;
}
static int gup_pud_range(p4d_t p4d, unsigned long addr, unsigned long end,
int write, struct page **pages, int *nr)
{
unsigned long next;
pud_t *pudp;
pudp = pud_offset(&p4d, addr);
do {
pud_t pud = READ_ONCE(*pudp);
next = pud_addr_end(addr, end);
if (pud_none(pud))
return 0;
if (unlikely(pud_huge(pud))) {
if (!gup_huge_pud(pud, pudp, addr, next, write,
pages, nr))
return 0;
} else if (unlikely(is_hugepd(__hugepd(pud_val(pud))))) {
if (!gup_huge_pd(__hugepd(pud_val(pud)), addr,
PUD_SHIFT, next, write, pages, nr))
return 0;
} else if (!gup_pmd_range(pud, addr, next, write, pages, nr))
return 0;
} while (pudp++, addr = next, addr != end);
return 1;
}
static int gup_p4d_range(pgd_t pgd, unsigned long addr, unsigned long end,
int write, struct page **pages, int *nr)
{
unsigned long next;
p4d_t *p4dp;
p4dp = p4d_offset(&pgd, addr);
do {
p4d_t p4d = READ_ONCE(*p4dp);
next = p4d_addr_end(addr, end);
if (p4d_none(p4d))
return 0;
BUILD_BUG_ON(p4d_huge(p4d));
if (unlikely(is_hugepd(__hugepd(p4d_val(p4d))))) {
if (!gup_huge_pd(__hugepd(p4d_val(p4d)), addr,
P4D_SHIFT, next, write, pages, nr))
return 0;
} else if (!gup_pud_range(p4d, addr, next, write, pages, nr))
return 0;
} while (p4dp++, addr = next, addr != end);
return 1;
}
static void gup_pgd_range(unsigned long addr, unsigned long end,
int write, struct page **pages, int *nr)
{
unsigned long next;
pgd_t *pgdp;
pgdp = pgd_offset(current->mm, addr);
do {
pgd_t pgd = READ_ONCE(*pgdp);
next = pgd_addr_end(addr, end);
if (pgd_none(pgd))
return;
if (unlikely(pgd_huge(pgd))) {
if (!gup_huge_pgd(pgd, pgdp, addr, next, write,
pages, nr))
return;
} else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) {
if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr,
PGDIR_SHIFT, next, write, pages, nr))
return;
} else if (!gup_p4d_range(pgd, addr, next, write, pages, nr))
return;
} while (pgdp++, addr = next, addr != end);
}
#ifndef gup_fast_permitted
/*
 * Check whether it's allowed to use __get_user_pages_fast() for the range,
 * or whether we need to fall back to the slow version:
*/
bool gup_fast_permitted(unsigned long start, int nr_pages)
{
unsigned long len, end;
len = (unsigned long) nr_pages << PAGE_SHIFT;
end = start + len;
return end >= start;
}
#endif
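/*
 * Illustrative sketch, not part of the original source: an architecture
 * that must also exclude kernel-half addresses could provide its own
 * gup_fast_permitted() along these lines, normally in its arch headers.
 * The TASK_SIZE bound is an assumption made for this example.
 */
#if 0
#define gup_fast_permitted gup_fast_permitted
static inline bool gup_fast_permitted(unsigned long start, int nr_pages)
{
	unsigned long len = (unsigned long)nr_pages << PAGE_SHIFT;
	unsigned long end = start + len;

	/* reject overflow and any range reaching into kernel space */
	return end >= start && end <= TASK_SIZE;
}
#endif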
/*
* Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to
* the regular GUP.
* Note a difference with get_user_pages_fast: this always returns the
* number of pages pinned, 0 if no pages were pinned.
*/
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
struct page **pages)
{
unsigned long len, end;
unsigned long flags;
int nr = 0;
start &= PAGE_MASK;
len = (unsigned long) nr_pages << PAGE_SHIFT;
end = start + len;
if (unlikely(!access_ok((void __user *)start, len)))
return 0;
/*
* Disable interrupts. We use the nested form as we can already have
* interrupts disabled by get_futex_key.
*
* With interrupts disabled, we block page table pages from being
* freed from under us. See struct mmu_table_batch comments in
* include/asm-generic/tlb.h for more details.
*
* We do not adopt an rcu_read_lock(.) here as we also want to
* block IPIs that come from THPs splitting.
*/
if (gup_fast_permitted(start, nr_pages)) {
local_irq_save(flags);
gup_pgd_range(start, end, write, pages, &nr);
local_irq_restore(flags);
}
return nr;
}
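/*
 * Illustrative sketch, not part of the original source: a caller running
 * in atomic context (the futex code is the classic user) can use the
 * IRQ-safe variant above, but must treat 0 pinned pages as "fall back
 * later". The helper name is made up for the example.
 */
#if 0
static int pin_page_atomic(unsigned long uaddr, struct page **page)
{
	/* never sleeps, never takes mmap_sem; may simply pin nothing */
	if (__get_user_pages_fast(uaddr, 1, 1, page) != 1)
		return -EFAULT;
	return 0;
}
#endif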
/**
* get_user_pages_fast() - pin user pages in memory
* @start: starting user address
* @nr_pages: number of pages from start to pin
* @write: whether pages will be written to
* @pages: array that receives pointers to the pages pinned.
* Should be at least nr_pages long.
*
* Attempt to pin user pages in memory without taking mm->mmap_sem.
* If not successful, it will fall back to taking the lock and
* calling get_user_pages().
*
* Returns number of pages pinned. This may be fewer than the number
* requested. If nr_pages is 0 or negative, returns 0. If no pages
* were pinned, returns -errno.
*/
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
struct page **pages)
{
unsigned long addr, len, end;
int nr = 0, ret = 0;
start &= PAGE_MASK;
addr = start;
len = (unsigned long) nr_pages << PAGE_SHIFT;
end = start + len;
if (nr_pages <= 0)
return 0;
if (unlikely(!access_ok((void __user *)start, len)))
return -EFAULT;
if (gup_fast_permitted(start, nr_pages)) {
local_irq_disable();
gup_pgd_range(addr, end, write, pages, &nr);
local_irq_enable();
ret = nr;
}
if (nr < nr_pages) {
/* Try to get the remaining pages with get_user_pages */
start += nr << PAGE_SHIFT;
pages += nr;
ret = get_user_pages_unlocked(start, nr_pages - nr, pages,
write ? FOLL_WRITE : 0);
/* Have to be a bit careful with return values */
if (nr > 0) {
if (ret < 0)
ret = nr;
else
ret += nr;
}
}
return ret;
}
#endif /* CONFIG_HAVE_GENERIC_GUP */
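/*
 * Illustrative sketch, not part of the original source: the usual
 * pin/use/release pattern around get_user_pages_fast(). Reading a single
 * byte through kmap() is an assumption made for this example.
 */
#if 0
static int peek_user_byte(unsigned long uaddr, char *out)
{
	struct page *page;
	char *kaddr;
	int ret;

	ret = get_user_pages_fast(uaddr, 1, 0, &page);
	if (ret != 1)
		return ret < 0 ? ret : -EFAULT;

	kaddr = kmap(page);
	*out = kaddr[uaddr & ~PAGE_MASK];
	kunmap(page);

	put_page(page);		/* every pinned page must be released */
	return 0;
}
#endif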
| ./CrossVul/dataset_final_sorted/CWE-416/c/good_820_6 |
crossvul-cpp_data_bad_822_0 | #include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/memremap.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/sched/signal.h>
#include <linux/rwsem.h>
#include <linux/hugetlb.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include "internal.h"
struct follow_page_context {
struct dev_pagemap *pgmap;
unsigned int page_mask;
};
static struct page *no_page_table(struct vm_area_struct *vma,
unsigned int flags)
{
/*
* When core dumping an enormous anonymous area that nobody
* has touched so far, we don't want to allocate unnecessary pages or
* page tables. Return error instead of NULL to skip handle_mm_fault,
* then get_dump_page() will return NULL to leave a hole in the dump.
* But we can only make this optimization where a hole would surely
* be zero-filled if handle_mm_fault() actually did handle it.
*/
if ((flags & FOLL_DUMP) && (!vma->vm_ops || !vma->vm_ops->fault))
return ERR_PTR(-EFAULT);
return NULL;
}
static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
pte_t *pte, unsigned int flags)
{
/* No page to get reference */
if (flags & FOLL_GET)
return -EFAULT;
if (flags & FOLL_TOUCH) {
pte_t entry = *pte;
if (flags & FOLL_WRITE)
entry = pte_mkdirty(entry);
entry = pte_mkyoung(entry);
if (!pte_same(*pte, entry)) {
set_pte_at(vma->vm_mm, address, pte, entry);
update_mmu_cache(vma, address, pte);
}
}
/* Proper page table entry exists, but no corresponding struct page */
return -EEXIST;
}
/*
* FOLL_FORCE can write to even unwritable pte's, but only
* after we've gone through a COW cycle and they are dirty.
*/
static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
{
return pte_write(pte) ||
((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
}
static struct page *follow_page_pte(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmd, unsigned int flags,
struct dev_pagemap **pgmap)
{
struct mm_struct *mm = vma->vm_mm;
struct page *page;
spinlock_t *ptl;
pte_t *ptep, pte;
retry:
if (unlikely(pmd_bad(*pmd)))
return no_page_table(vma, flags);
ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
pte = *ptep;
if (!pte_present(pte)) {
swp_entry_t entry;
/*
* KSM's break_ksm() relies upon recognizing a ksm page
* even while it is being migrated, so for that case we
* need migration_entry_wait().
*/
if (likely(!(flags & FOLL_MIGRATION)))
goto no_page;
if (pte_none(pte))
goto no_page;
entry = pte_to_swp_entry(pte);
if (!is_migration_entry(entry))
goto no_page;
pte_unmap_unlock(ptep, ptl);
migration_entry_wait(mm, pmd, address);
goto retry;
}
if ((flags & FOLL_NUMA) && pte_protnone(pte))
goto no_page;
if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags)) {
pte_unmap_unlock(ptep, ptl);
return NULL;
}
page = vm_normal_page(vma, address, pte);
if (!page && pte_devmap(pte) && (flags & FOLL_GET)) {
/*
* Only return device mapping pages in the FOLL_GET case since
* they are only valid while holding the pgmap reference.
*/
*pgmap = get_dev_pagemap(pte_pfn(pte), *pgmap);
if (*pgmap)
page = pte_page(pte);
else
goto no_page;
} else if (unlikely(!page)) {
if (flags & FOLL_DUMP) {
/* Avoid special (like zero) pages in core dumps */
page = ERR_PTR(-EFAULT);
goto out;
}
if (is_zero_pfn(pte_pfn(pte))) {
page = pte_page(pte);
} else {
int ret;
ret = follow_pfn_pte(vma, address, ptep, flags);
page = ERR_PTR(ret);
goto out;
}
}
if (flags & FOLL_SPLIT && PageTransCompound(page)) {
int ret;
get_page(page);
pte_unmap_unlock(ptep, ptl);
lock_page(page);
ret = split_huge_page(page);
unlock_page(page);
put_page(page);
if (ret)
return ERR_PTR(ret);
goto retry;
}
if (flags & FOLL_GET)
get_page(page);
if (flags & FOLL_TOUCH) {
if ((flags & FOLL_WRITE) &&
!pte_dirty(pte) && !PageDirty(page))
set_page_dirty(page);
/*
* pte_mkyoung() would be more correct here, but atomic care
* is needed to avoid losing the dirty bit: it is easier to use
* mark_page_accessed().
*/
mark_page_accessed(page);
}
if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
/* Do not mlock pte-mapped THP */
if (PageTransCompound(page))
goto out;
/*
* The preliminary mapping check is mainly to avoid the
* pointless overhead of lock_page on the ZERO_PAGE
* which might bounce very badly if there is contention.
*
* If the page is already locked, we don't need to
* handle it now - vmscan will handle it later if and
* when it attempts to reclaim the page.
*/
if (page->mapping && trylock_page(page)) {
lru_add_drain(); /* push cached pages to LRU */
/*
* Because we lock page here, and migration is
* blocked by the pte's page reference, and we
* know the page is still mapped, we don't even
* need to check for file-cache page truncation.
*/
mlock_vma_page(page);
unlock_page(page);
}
}
out:
pte_unmap_unlock(ptep, ptl);
return page;
no_page:
pte_unmap_unlock(ptep, ptl);
if (!pte_none(pte))
return NULL;
return no_page_table(vma, flags);
}
static struct page *follow_pmd_mask(struct vm_area_struct *vma,
unsigned long address, pud_t *pudp,
unsigned int flags,
struct follow_page_context *ctx)
{
pmd_t *pmd, pmdval;
spinlock_t *ptl;
struct page *page;
struct mm_struct *mm = vma->vm_mm;
pmd = pmd_offset(pudp, address);
/*
* The READ_ONCE() will stabilize the pmdval in a register or
* on the stack so that it will stop changing under the code.
*/
pmdval = READ_ONCE(*pmd);
if (pmd_none(pmdval))
return no_page_table(vma, flags);
if (pmd_huge(pmdval) && vma->vm_flags & VM_HUGETLB) {
page = follow_huge_pmd(mm, address, pmd, flags);
if (page)
return page;
return no_page_table(vma, flags);
}
if (is_hugepd(__hugepd(pmd_val(pmdval)))) {
page = follow_huge_pd(vma, address,
__hugepd(pmd_val(pmdval)), flags,
PMD_SHIFT);
if (page)
return page;
return no_page_table(vma, flags);
}
retry:
if (!pmd_present(pmdval)) {
if (likely(!(flags & FOLL_MIGRATION)))
return no_page_table(vma, flags);
VM_BUG_ON(thp_migration_supported() &&
!is_pmd_migration_entry(pmdval));
if (is_pmd_migration_entry(pmdval))
pmd_migration_entry_wait(mm, pmd);
pmdval = READ_ONCE(*pmd);
/*
* MADV_DONTNEED may convert the pmd to null because
* mmap_sem is held in read mode
*/
if (pmd_none(pmdval))
return no_page_table(vma, flags);
goto retry;
}
if (pmd_devmap(pmdval)) {
ptl = pmd_lock(mm, pmd);
page = follow_devmap_pmd(vma, address, pmd, flags, &ctx->pgmap);
spin_unlock(ptl);
if (page)
return page;
}
if (likely(!pmd_trans_huge(pmdval)))
return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
if ((flags & FOLL_NUMA) && pmd_protnone(pmdval))
return no_page_table(vma, flags);
retry_locked:
ptl = pmd_lock(mm, pmd);
if (unlikely(pmd_none(*pmd))) {
spin_unlock(ptl);
return no_page_table(vma, flags);
}
if (unlikely(!pmd_present(*pmd))) {
spin_unlock(ptl);
if (likely(!(flags & FOLL_MIGRATION)))
return no_page_table(vma, flags);
pmd_migration_entry_wait(mm, pmd);
goto retry_locked;
}
if (unlikely(!pmd_trans_huge(*pmd))) {
spin_unlock(ptl);
return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
}
if (flags & FOLL_SPLIT) {
int ret;
page = pmd_page(*pmd);
if (is_huge_zero_page(page)) {
spin_unlock(ptl);
ret = 0;
split_huge_pmd(vma, pmd, address);
if (pmd_trans_unstable(pmd))
ret = -EBUSY;
} else {
get_page(page);
spin_unlock(ptl);
lock_page(page);
ret = split_huge_page(page);
unlock_page(page);
put_page(page);
if (pmd_none(*pmd))
return no_page_table(vma, flags);
}
return ret ? ERR_PTR(ret) :
follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
}
page = follow_trans_huge_pmd(vma, address, pmd, flags);
spin_unlock(ptl);
ctx->page_mask = HPAGE_PMD_NR - 1;
return page;
}
static struct page *follow_pud_mask(struct vm_area_struct *vma,
unsigned long address, p4d_t *p4dp,
unsigned int flags,
struct follow_page_context *ctx)
{
pud_t *pud;
spinlock_t *ptl;
struct page *page;
struct mm_struct *mm = vma->vm_mm;
pud = pud_offset(p4dp, address);
if (pud_none(*pud))
return no_page_table(vma, flags);
if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {
page = follow_huge_pud(mm, address, pud, flags);
if (page)
return page;
return no_page_table(vma, flags);
}
if (is_hugepd(__hugepd(pud_val(*pud)))) {
page = follow_huge_pd(vma, address,
__hugepd(pud_val(*pud)), flags,
PUD_SHIFT);
if (page)
return page;
return no_page_table(vma, flags);
}
if (pud_devmap(*pud)) {
ptl = pud_lock(mm, pud);
page = follow_devmap_pud(vma, address, pud, flags, &ctx->pgmap);
spin_unlock(ptl);
if (page)
return page;
}
if (unlikely(pud_bad(*pud)))
return no_page_table(vma, flags);
return follow_pmd_mask(vma, address, pud, flags, ctx);
}
static struct page *follow_p4d_mask(struct vm_area_struct *vma,
unsigned long address, pgd_t *pgdp,
unsigned int flags,
struct follow_page_context *ctx)
{
p4d_t *p4d;
struct page *page;
p4d = p4d_offset(pgdp, address);
if (p4d_none(*p4d))
return no_page_table(vma, flags);
BUILD_BUG_ON(p4d_huge(*p4d));
if (unlikely(p4d_bad(*p4d)))
return no_page_table(vma, flags);
if (is_hugepd(__hugepd(p4d_val(*p4d)))) {
page = follow_huge_pd(vma, address,
__hugepd(p4d_val(*p4d)), flags,
P4D_SHIFT);
if (page)
return page;
return no_page_table(vma, flags);
}
return follow_pud_mask(vma, address, p4d, flags, ctx);
}
/**
* follow_page_mask - look up a page descriptor from a user-virtual address
* @vma: vm_area_struct mapping @address
* @address: virtual address to look up
* @flags: flags modifying lookup behaviour
* @ctx: contains dev_pagemap for %ZONE_DEVICE memory pinning and a
* pointer to output page_mask
*
* @flags can have FOLL_ flags set, defined in <linux/mm.h>
*
* When getting pages from ZONE_DEVICE memory, the @ctx->pgmap caches
* the device's dev_pagemap metadata to avoid repeating expensive lookups.
*
* On output, the @ctx->page_mask is set according to the size of the page.
*
* Return: the mapped (struct page *), %NULL if no mapping exists, or
* an error pointer if there is a mapping to something not represented
* by a page descriptor (see also vm_normal_page()).
*/
struct page *follow_page_mask(struct vm_area_struct *vma,
unsigned long address, unsigned int flags,
struct follow_page_context *ctx)
{
pgd_t *pgd;
struct page *page;
struct mm_struct *mm = vma->vm_mm;
ctx->page_mask = 0;
/* make this handle hugepd */
page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
if (!IS_ERR(page)) {
BUG_ON(flags & FOLL_GET);
return page;
}
pgd = pgd_offset(mm, address);
if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
return no_page_table(vma, flags);
if (pgd_huge(*pgd)) {
page = follow_huge_pgd(mm, address, pgd, flags);
if (page)
return page;
return no_page_table(vma, flags);
}
if (is_hugepd(__hugepd(pgd_val(*pgd)))) {
page = follow_huge_pd(vma, address,
__hugepd(pgd_val(*pgd)), flags,
PGDIR_SHIFT);
if (page)
return page;
return no_page_table(vma, flags);
}
return follow_p4d_mask(vma, address, pgd, flags, ctx);
}
struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
unsigned int foll_flags)
{
struct follow_page_context ctx = { NULL };
struct page *page;
page = follow_page_mask(vma, address, foll_flags, &ctx);
if (ctx.pgmap)
put_dev_pagemap(ctx.pgmap);
return page;
}
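/*
 * Illustrative sketch, not part of the original source: follow_page()
 * requires mmap_sem, and FOLL_GET hands the caller a page reference it
 * must drop with put_page(). The helper name is made up for the example.
 */
#if 0
static struct page *lookup_user_page(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;
	struct page *page = NULL;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, addr);
	if (vma && addr >= vma->vm_start)
		page = follow_page(vma, addr, FOLL_GET);
	up_read(&mm->mmap_sem);

	/* may be NULL (nothing mapped) or an ERR_PTR (see above) */
	return page;
}
#endif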
static int get_gate_page(struct mm_struct *mm, unsigned long address,
unsigned int gup_flags, struct vm_area_struct **vma,
struct page **page)
{
pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
int ret = -EFAULT;
/* user gate pages are read-only */
if (gup_flags & FOLL_WRITE)
return -EFAULT;
if (address > TASK_SIZE)
pgd = pgd_offset_k(address);
else
pgd = pgd_offset_gate(mm, address);
BUG_ON(pgd_none(*pgd));
p4d = p4d_offset(pgd, address);
BUG_ON(p4d_none(*p4d));
pud = pud_offset(p4d, address);
BUG_ON(pud_none(*pud));
pmd = pmd_offset(pud, address);
if (!pmd_present(*pmd))
return -EFAULT;
VM_BUG_ON(pmd_trans_huge(*pmd));
pte = pte_offset_map(pmd, address);
if (pte_none(*pte))
goto unmap;
*vma = get_gate_vma(mm);
if (!page)
goto out;
*page = vm_normal_page(*vma, address, *pte);
if (!*page) {
if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(*pte)))
goto unmap;
*page = pte_page(*pte);
/*
* This should never happen (a device public page in the gate
* area).
*/
if (is_device_public_page(*page))
goto unmap;
}
get_page(*page);
out:
ret = 0;
unmap:
pte_unmap(pte);
return ret;
}
/*
* mmap_sem must be held on entry. If @nonblocking != NULL and
* *@flags does not include FOLL_NOWAIT, the mmap_sem may be released.
* If it is, *@nonblocking will be set to 0 and -EBUSY returned.
*/
static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
unsigned long address, unsigned int *flags, int *nonblocking)
{
unsigned int fault_flags = 0;
vm_fault_t ret;
/* mlock all present pages, but do not fault in new pages */
if ((*flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK)
return -ENOENT;
if (*flags & FOLL_WRITE)
fault_flags |= FAULT_FLAG_WRITE;
if (*flags & FOLL_REMOTE)
fault_flags |= FAULT_FLAG_REMOTE;
if (nonblocking)
fault_flags |= FAULT_FLAG_ALLOW_RETRY;
if (*flags & FOLL_NOWAIT)
fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
if (*flags & FOLL_TRIED) {
VM_WARN_ON_ONCE(fault_flags & FAULT_FLAG_ALLOW_RETRY);
fault_flags |= FAULT_FLAG_TRIED;
}
ret = handle_mm_fault(vma, address, fault_flags);
if (ret & VM_FAULT_ERROR) {
int err = vm_fault_to_errno(ret, *flags);
if (err)
return err;
BUG();
}
if (tsk) {
if (ret & VM_FAULT_MAJOR)
tsk->maj_flt++;
else
tsk->min_flt++;
}
if (ret & VM_FAULT_RETRY) {
if (nonblocking && !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
*nonblocking = 0;
return -EBUSY;
}
/*
* The VM_FAULT_WRITE bit tells us that do_wp_page has broken COW when
* necessary, even if maybe_mkwrite decided not to set pte_write. We
* can thus safely do subsequent page lookups as if they were reads.
* But only do so when looping for pte_write is futile: in some cases
* userspace may also be wanting to write to the gotten user page,
* which a read fault here might prevent (a readonly page might get
* reCOWed by userspace write).
*/
if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE))
*flags |= FOLL_COW;
return 0;
}
static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
{
vm_flags_t vm_flags = vma->vm_flags;
int write = (gup_flags & FOLL_WRITE);
int foreign = (gup_flags & FOLL_REMOTE);
if (vm_flags & (VM_IO | VM_PFNMAP))
return -EFAULT;
if (gup_flags & FOLL_ANON && !vma_is_anonymous(vma))
return -EFAULT;
if (write) {
if (!(vm_flags & VM_WRITE)) {
if (!(gup_flags & FOLL_FORCE))
return -EFAULT;
/*
* We used to let the write,force case do COW in a
* VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could
* set a breakpoint in a read-only mapping of an
* executable, without corrupting the file (yet only
* when that file had been opened for writing!).
* Anon pages in shared mappings are surprising: now
* just reject it.
*/
if (!is_cow_mapping(vm_flags))
return -EFAULT;
}
} else if (!(vm_flags & VM_READ)) {
if (!(gup_flags & FOLL_FORCE))
return -EFAULT;
/*
* Is there actually any vma we can reach here which does not
* have VM_MAYREAD set?
*/
if (!(vm_flags & VM_MAYREAD))
return -EFAULT;
}
/*
* gups are always data accesses, not instruction
* fetches, so execute=false here
*/
if (!arch_vma_access_permitted(vma, write, false, foreign))
return -EFAULT;
return 0;
}
/**
* __get_user_pages() - pin user pages in memory
* @tsk: task_struct of target task
* @mm: mm_struct of target mm
* @start: starting user address
* @nr_pages: number of pages from start to pin
* @gup_flags: flags modifying pin behaviour
* @pages: array that receives pointers to the pages pinned.
* Should be at least nr_pages long. Or NULL, if caller
* only intends to ensure the pages are faulted in.
* @vmas: array of pointers to vmas corresponding to each page.
* Or NULL if the caller does not require them.
* @nonblocking: whether waiting for disk IO or mmap_sem contention
*
* Returns number of pages pinned. This may be fewer than the number
* requested. If nr_pages is 0 or negative, returns 0. If no pages
* were pinned, returns -errno. Each page returned must be released
* with a put_page() call when it is finished with. vmas will only
* remain valid while mmap_sem is held.
*
* Must be called with mmap_sem held. It may be released. See below.
*
* __get_user_pages walks a process's page tables and takes a reference to
* each struct page that each user address corresponds to at a given
* instant. That is, it takes the page that would be accessed if a user
* thread accesses the given user virtual address at that instant.
*
* This does not guarantee that the page exists in the user mappings when
* __get_user_pages returns, and there may even be a completely different
 * page there in some cases (e.g. if mmapped pagecache has been invalidated
 * and subsequently re-faulted). However, it does guarantee that the page
* won't be freed completely. And mostly callers simply care that the page
* contains data that was valid *at some point in time*. Typically, an IO
* or similar operation cannot guarantee anything stronger anyway because
* locks can't be held over the syscall boundary.
*
* If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If
* the page is written to, set_page_dirty (or set_page_dirty_lock, as
* appropriate) must be called after the page is finished with, and
* before put_page is called.
*
* If @nonblocking != NULL, __get_user_pages will not wait for disk IO
* or mmap_sem contention, and if waiting is needed to pin all pages,
* *@nonblocking will be set to 0. Further, if @gup_flags does not
* include FOLL_NOWAIT, the mmap_sem will be released via up_read() in
* this case.
*
* A caller using such a combination of @nonblocking and @gup_flags
* must therefore hold the mmap_sem for reading only, and recognize
* when it's been released. Otherwise, it must be held for either
* reading or writing and will not be released.
*
* In most cases, get_user_pages or get_user_pages_fast should be used
* instead of __get_user_pages. __get_user_pages should be used only if
* you need some special @gup_flags.
*/
static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
unsigned int gup_flags, struct page **pages,
struct vm_area_struct **vmas, int *nonblocking)
{
long ret = 0, i = 0;
struct vm_area_struct *vma = NULL;
struct follow_page_context ctx = { NULL };
if (!nr_pages)
return 0;
VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));
/*
* If FOLL_FORCE is set then do not force a full fault as the hinting
* fault information is unrelated to the reference behaviour of a task
* using the address space
*/
if (!(gup_flags & FOLL_FORCE))
gup_flags |= FOLL_NUMA;
do {
struct page *page;
unsigned int foll_flags = gup_flags;
unsigned int page_increm;
		/* first iteration, or start has crossed into the next vma */
if (!vma || start >= vma->vm_end) {
vma = find_extend_vma(mm, start);
if (!vma && in_gate_area(mm, start)) {
ret = get_gate_page(mm, start & PAGE_MASK,
gup_flags, &vma,
pages ? &pages[i] : NULL);
if (ret)
goto out;
ctx.page_mask = 0;
goto next_page;
}
if (!vma || check_vma_flags(vma, gup_flags)) {
ret = -EFAULT;
goto out;
}
if (is_vm_hugetlb_page(vma)) {
i = follow_hugetlb_page(mm, vma, pages, vmas,
&start, &nr_pages, i,
gup_flags, nonblocking);
continue;
}
}
retry:
/*
* If we have a pending SIGKILL, don't keep faulting pages and
* potentially allocating memory.
*/
if (fatal_signal_pending(current)) {
ret = -ERESTARTSYS;
goto out;
}
cond_resched();
page = follow_page_mask(vma, start, foll_flags, &ctx);
if (!page) {
ret = faultin_page(tsk, vma, start, &foll_flags,
nonblocking);
switch (ret) {
case 0:
goto retry;
case -EBUSY:
ret = 0;
/* FALLTHRU */
case -EFAULT:
case -ENOMEM:
case -EHWPOISON:
goto out;
case -ENOENT:
goto next_page;
}
BUG();
} else if (PTR_ERR(page) == -EEXIST) {
/*
* Proper page table entry exists, but no corresponding
* struct page.
*/
goto next_page;
} else if (IS_ERR(page)) {
ret = PTR_ERR(page);
goto out;
}
if (pages) {
pages[i] = page;
flush_anon_page(vma, page, start);
flush_dcache_page(page);
ctx.page_mask = 0;
}
next_page:
if (vmas) {
vmas[i] = vma;
ctx.page_mask = 0;
}
page_increm = 1 + (~(start >> PAGE_SHIFT) & ctx.page_mask);
if (page_increm > nr_pages)
page_increm = nr_pages;
i += page_increm;
start += page_increm * PAGE_SIZE;
nr_pages -= page_increm;
} while (nr_pages);
out:
if (ctx.pgmap)
put_dev_pagemap(ctx.pgmap);
return i ? i : ret;
}
static bool vma_permits_fault(struct vm_area_struct *vma,
unsigned int fault_flags)
{
bool write = !!(fault_flags & FAULT_FLAG_WRITE);
bool foreign = !!(fault_flags & FAULT_FLAG_REMOTE);
vm_flags_t vm_flags = write ? VM_WRITE : VM_READ;
if (!(vm_flags & vma->vm_flags))
return false;
/*
* The architecture might have a hardware protection
* mechanism other than read/write that can deny access.
*
* gup always represents data access, not instruction
* fetches, so execute=false here:
*/
if (!arch_vma_access_permitted(vma, write, false, foreign))
return false;
return true;
}
/*
* fixup_user_fault() - manually resolve a user page fault
* @tsk: the task_struct to use for page fault accounting, or
* NULL if faults are not to be recorded.
* @mm: mm_struct of target mm
* @address: user address
* @fault_flags:flags to pass down to handle_mm_fault()
* @unlocked: did we unlock the mmap_sem while retrying, maybe NULL if caller
* does not allow retry
*
 * This is meant for the specific scenario where, for locking reasons, we try
 * to access user memory in atomic context (within a pagefault_disable()
 * section); that access returns -EFAULT, and we want to resolve the user
 * fault before trying again.
*
* Typically this is meant to be used by the futex code.
*
* The main difference with get_user_pages() is that this function will
* unconditionally call handle_mm_fault() which will in turn perform all the
* necessary SW fixup of the dirty and young bits in the PTE, while
* get_user_pages() only guarantees to update these in the struct page.
*
* This is important for some architectures where those bits also gate the
* access permission to the page because they are maintained in software. On
* such architectures, gup() will not be enough to make a subsequent access
* succeed.
*
 * This function will not return with an unlocked mmap_sem. So it does not
 * have the same semantics wrt the @mm->mmap_sem as filemap_fault() does.
*/
int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
unsigned long address, unsigned int fault_flags,
bool *unlocked)
{
struct vm_area_struct *vma;
vm_fault_t ret, major = 0;
if (unlocked)
fault_flags |= FAULT_FLAG_ALLOW_RETRY;
retry:
vma = find_extend_vma(mm, address);
if (!vma || address < vma->vm_start)
return -EFAULT;
if (!vma_permits_fault(vma, fault_flags))
return -EFAULT;
ret = handle_mm_fault(vma, address, fault_flags);
major |= ret & VM_FAULT_MAJOR;
if (ret & VM_FAULT_ERROR) {
int err = vm_fault_to_errno(ret, 0);
if (err)
return err;
BUG();
}
if (ret & VM_FAULT_RETRY) {
down_read(&mm->mmap_sem);
if (!(fault_flags & FAULT_FLAG_TRIED)) {
*unlocked = true;
fault_flags &= ~FAULT_FLAG_ALLOW_RETRY;
fault_flags |= FAULT_FLAG_TRIED;
goto retry;
}
}
if (tsk) {
if (major)
tsk->maj_flt++;
else
tsk->min_flt++;
}
return 0;
}
EXPORT_SYMBOL_GPL(fixup_user_fault);
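/*
 * Illustrative sketch, not part of the original source: the futex-style
 * retry loop described above - an atomic access fails, the fault is
 * resolved manually, and the access is retried. The helper name is made
 * up for the example.
 */
#if 0
static int read_user_u32(u32 __user *uaddr, u32 *val)
{
	int ret;

	do {
		pagefault_disable();
		ret = __copy_from_user_inatomic(val, uaddr, sizeof(*val));
		pagefault_enable();
		if (!ret)
			return 0;

		down_read(&current->mm->mmap_sem);
		ret = fixup_user_fault(current, current->mm,
				       (unsigned long)uaddr, 0, NULL);
		up_read(&current->mm->mmap_sem);
	} while (!ret);

	return -EFAULT;
}
#endif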
static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
struct mm_struct *mm,
unsigned long start,
unsigned long nr_pages,
struct page **pages,
struct vm_area_struct **vmas,
int *locked,
unsigned int flags)
{
long ret, pages_done;
bool lock_dropped;
if (locked) {
/* if VM_FAULT_RETRY can be returned, vmas become invalid */
BUG_ON(vmas);
/* check caller initialized locked */
BUG_ON(*locked != 1);
}
if (pages)
flags |= FOLL_GET;
pages_done = 0;
lock_dropped = false;
for (;;) {
ret = __get_user_pages(tsk, mm, start, nr_pages, flags, pages,
vmas, locked);
if (!locked)
/* VM_FAULT_RETRY couldn't trigger, bypass */
return ret;
/* VM_FAULT_RETRY cannot return errors */
if (!*locked) {
BUG_ON(ret < 0);
BUG_ON(ret >= nr_pages);
}
if (!pages)
/* If it's a prefault don't insist harder */
return ret;
if (ret > 0) {
nr_pages -= ret;
pages_done += ret;
if (!nr_pages)
break;
}
if (*locked) {
/*
* VM_FAULT_RETRY didn't trigger or it was a
* FOLL_NOWAIT.
*/
if (!pages_done)
pages_done = ret;
break;
}
/* VM_FAULT_RETRY triggered, so seek to the faulting offset */
pages += ret;
start += ret << PAGE_SHIFT;
/*
* Repeat on the address that fired VM_FAULT_RETRY
* without FAULT_FLAG_ALLOW_RETRY but with
* FAULT_FLAG_TRIED.
*/
*locked = 1;
lock_dropped = true;
down_read(&mm->mmap_sem);
ret = __get_user_pages(tsk, mm, start, 1, flags | FOLL_TRIED,
pages, NULL, NULL);
if (ret != 1) {
BUG_ON(ret > 1);
if (!pages_done)
pages_done = ret;
break;
}
nr_pages--;
pages_done++;
if (!nr_pages)
break;
pages++;
start += PAGE_SIZE;
}
if (lock_dropped && *locked) {
/*
* We must let the caller know we temporarily dropped the lock
* and so the critical section protected by it was lost.
*/
up_read(&mm->mmap_sem);
*locked = 0;
}
return pages_done;
}
/*
* We can leverage the VM_FAULT_RETRY functionality in the page fault
* paths better by using either get_user_pages_locked() or
* get_user_pages_unlocked().
*
* get_user_pages_locked() is suitable to replace the form:
*
* down_read(&mm->mmap_sem);
* do_something()
* get_user_pages(tsk, mm, ..., pages, NULL);
* up_read(&mm->mmap_sem);
*
* to:
*
* int locked = 1;
* down_read(&mm->mmap_sem);
* do_something()
* get_user_pages_locked(tsk, mm, ..., pages, &locked);
* if (locked)
* up_read(&mm->mmap_sem);
*/
long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
unsigned int gup_flags, struct page **pages,
int *locked)
{
return __get_user_pages_locked(current, current->mm, start, nr_pages,
pages, NULL, locked,
gup_flags | FOLL_TOUCH);
}
EXPORT_SYMBOL(get_user_pages_locked);
/*
* get_user_pages_unlocked() is suitable to replace the form:
*
* down_read(&mm->mmap_sem);
* get_user_pages(tsk, mm, ..., pages, NULL);
* up_read(&mm->mmap_sem);
*
* with:
*
* get_user_pages_unlocked(tsk, mm, ..., pages);
*
* It is functionally equivalent to get_user_pages_fast so
* get_user_pages_fast should be used instead if specific gup_flags
* (e.g. FOLL_FORCE) are not required.
*/
long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
struct page **pages, unsigned int gup_flags)
{
struct mm_struct *mm = current->mm;
int locked = 1;
long ret;
down_read(&mm->mmap_sem);
ret = __get_user_pages_locked(current, mm, start, nr_pages, pages, NULL,
&locked, gup_flags | FOLL_TOUCH);
if (locked)
up_read(&mm->mmap_sem);
return ret;
}
EXPORT_SYMBOL(get_user_pages_unlocked);
/*
* get_user_pages_remote() - pin user pages in memory
* @tsk: the task_struct to use for page fault accounting, or
* NULL if faults are not to be recorded.
* @mm: mm_struct of target mm
* @start: starting user address
* @nr_pages: number of pages from start to pin
* @gup_flags: flags modifying lookup behaviour
* @pages: array that receives pointers to the pages pinned.
* Should be at least nr_pages long. Or NULL, if caller
* only intends to ensure the pages are faulted in.
* @vmas: array of pointers to vmas corresponding to each page.
* Or NULL if the caller does not require them.
* @locked: pointer to lock flag indicating whether lock is held and
* subsequently whether VM_FAULT_RETRY functionality can be
* utilised. Lock must initially be held.
*
* Returns number of pages pinned. This may be fewer than the number
* requested. If nr_pages is 0 or negative, returns 0. If no pages
* were pinned, returns -errno. Each page returned must be released
* with a put_page() call when it is finished with. vmas will only
* remain valid while mmap_sem is held.
*
* Must be called with mmap_sem held for read or write.
*
* get_user_pages walks a process's page tables and takes a reference to
* each struct page that each user address corresponds to at a given
* instant. That is, it takes the page that would be accessed if a user
* thread accesses the given user virtual address at that instant.
*
* This does not guarantee that the page exists in the user mappings when
* get_user_pages returns, and there may even be a completely different
 * page there in some cases (e.g. if mmapped pagecache has been invalidated
 * and subsequently re-faulted). However, it does guarantee that the page
* won't be freed completely. And mostly callers simply care that the page
* contains data that was valid *at some point in time*. Typically, an IO
* or similar operation cannot guarantee anything stronger anyway because
* locks can't be held over the syscall boundary.
*
* If gup_flags & FOLL_WRITE == 0, the page must not be written to. If the page
* is written to, set_page_dirty (or set_page_dirty_lock, as appropriate) must
* be called after the page is finished with, and before put_page is called.
*
* get_user_pages is typically used for fewer-copy IO operations, to get a
* handle on the memory by some means other than accesses via the user virtual
* addresses. The pages may be submitted for DMA to devices or accessed via
* their kernel linear mapping (via the kmap APIs). Care should be taken to
* use the correct cache flushing APIs.
*
* See also get_user_pages_fast, for performance critical applications.
*
* get_user_pages should be phased out in favor of
* get_user_pages_locked|unlocked or get_user_pages_fast. Nothing
* should use get_user_pages because it cannot pass
* FAULT_FLAG_ALLOW_RETRY to handle_mm_fault.
*/
long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
unsigned int gup_flags, struct page **pages,
struct vm_area_struct **vmas, int *locked)
{
return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas,
locked,
gup_flags | FOLL_TOUCH | FOLL_REMOTE);
}
EXPORT_SYMBOL(get_user_pages_remote);
/*
* This is the same as get_user_pages_remote(), just with a
* less-flexible calling convention where we assume that the task
* and mm being operated on are the current task's and don't allow
* passing of a locked parameter. We also obviously don't pass
* FOLL_REMOTE in here.
*/
long get_user_pages(unsigned long start, unsigned long nr_pages,
unsigned int gup_flags, struct page **pages,
struct vm_area_struct **vmas)
{
return __get_user_pages_locked(current, current->mm, start, nr_pages,
pages, vmas, NULL,
gup_flags | FOLL_TOUCH);
}
EXPORT_SYMBOL(get_user_pages);
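/*
 * Illustrative sketch, not part of the original source: slow-path GUP for
 * writing. Pages that were written to must be dirtied before release, per
 * the FOLL_WRITE rules documented above; the helper names are made up.
 */
#if 0
static long pin_user_range_for_write(unsigned long start, long nr,
				     struct page **pages)
{
	long got;

	down_read(&current->mm->mmap_sem);
	got = get_user_pages(start, nr, FOLL_WRITE, pages, NULL);
	up_read(&current->mm->mmap_sem);

	return got;	/* <= nr; negative errno if nothing was pinned */
}

static void release_written_pages(struct page **pages, long nr)
{
	long i;

	for (i = 0; i < nr; i++) {
		set_page_dirty_lock(pages[i]);
		put_page(pages[i]);
	}
}
#endif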
#ifdef CONFIG_FS_DAX
/*
* This is the same as get_user_pages() in that it assumes we are
* operating on the current task's mm, but it goes further to validate
* that the vmas associated with the address range are suitable for
* longterm elevated page reference counts. For example, filesystem-dax
* mappings are subject to the lifetime enforced by the filesystem and
* we need guarantees that longterm users like RDMA and V4L2 only
* establish mappings that have a kernel enforced revocation mechanism.
*
* "longterm" == userspace controlled elevated page count lifetime.
* Contrast this to iov_iter_get_pages() usages which are transient.
*/
long get_user_pages_longterm(unsigned long start, unsigned long nr_pages,
unsigned int gup_flags, struct page **pages,
struct vm_area_struct **vmas_arg)
{
struct vm_area_struct **vmas = vmas_arg;
struct vm_area_struct *vma_prev = NULL;
long rc, i;
if (!pages)
return -EINVAL;
if (!vmas) {
vmas = kcalloc(nr_pages, sizeof(struct vm_area_struct *),
GFP_KERNEL);
if (!vmas)
return -ENOMEM;
}
rc = get_user_pages(start, nr_pages, gup_flags, pages, vmas);
for (i = 0; i < rc; i++) {
struct vm_area_struct *vma = vmas[i];
if (vma == vma_prev)
continue;
vma_prev = vma;
if (vma_is_fsdax(vma))
break;
}
/*
* Either get_user_pages() failed, or the vma validation
* succeeded, in either case we don't need to put_page() before
* returning.
*/
if (i >= rc)
goto out;
for (i = 0; i < rc; i++)
put_page(pages[i]);
rc = -EOPNOTSUPP;
out:
if (vmas != vmas_arg)
kfree(vmas);
return rc;
}
EXPORT_SYMBOL(get_user_pages_longterm);
#endif /* CONFIG_FS_DAX */
/**
* populate_vma_page_range() - populate a range of pages in the vma.
* @vma: target vma
* @start: start address
* @end: end address
 * @nonblocking: whether waiting for disk IO or mmap_sem contention
*
* This takes care of mlocking the pages too if VM_LOCKED is set.
*
* return 0 on success, negative error code on error.
*
* vma->vm_mm->mmap_sem must be held.
*
* If @nonblocking is NULL, it may be held for read or write and will
* be unperturbed.
*
 * If @nonblocking is non-NULL, it must be held for read only and may be
* released. If it's released, *@nonblocking will be set to 0.
*/
long populate_vma_page_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end, int *nonblocking)
{
struct mm_struct *mm = vma->vm_mm;
unsigned long nr_pages = (end - start) / PAGE_SIZE;
int gup_flags;
VM_BUG_ON(start & ~PAGE_MASK);
VM_BUG_ON(end & ~PAGE_MASK);
VM_BUG_ON_VMA(start < vma->vm_start, vma);
VM_BUG_ON_VMA(end > vma->vm_end, vma);
VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);
gup_flags = FOLL_TOUCH | FOLL_POPULATE | FOLL_MLOCK;
if (vma->vm_flags & VM_LOCKONFAULT)
gup_flags &= ~FOLL_POPULATE;
/*
* We want to touch writable mappings with a write fault in order
* to break COW, except for shared mappings because these don't COW
* and we would not want to dirty them for nothing.
*/
if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
gup_flags |= FOLL_WRITE;
/*
* We want mlock to succeed for regions that have any permissions
* other than PROT_NONE.
*/
if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))
gup_flags |= FOLL_FORCE;
/*
* We made sure addr is within a VMA, so the following will
* not result in a stack expansion that recurses back here.
*/
return __get_user_pages(current, mm, start, nr_pages, gup_flags,
NULL, NULL, nonblocking);
}
/*
* __mm_populate - populate and/or mlock pages within a range of address space.
*
* This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap
* flags. VMAs must be already marked with the desired vm_flags, and
* mmap_sem must not be held.
*/
int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
{
struct mm_struct *mm = current->mm;
unsigned long end, nstart, nend;
struct vm_area_struct *vma = NULL;
int locked = 0;
long ret = 0;
end = start + len;
for (nstart = start; nstart < end; nstart = nend) {
/*
* We want to fault in pages for [nstart; end) address range.
* Find first corresponding VMA.
*/
if (!locked) {
locked = 1;
down_read(&mm->mmap_sem);
vma = find_vma(mm, nstart);
} else if (nstart >= vma->vm_end)
vma = vma->vm_next;
if (!vma || vma->vm_start >= end)
break;
/*
* Set [nstart; nend) to intersection of desired address
* range with the first VMA. Also, skip undesirable VMA types.
*/
nend = min(end, vma->vm_end);
if (vma->vm_flags & (VM_IO | VM_PFNMAP))
continue;
if (nstart < vma->vm_start)
nstart = vma->vm_start;
/*
* Now fault in a range of pages. populate_vma_page_range()
* double checks the vma flags, so that it won't mlock pages
* if the vma was already munlocked.
*/
ret = populate_vma_page_range(vma, nstart, nend, &locked);
if (ret < 0) {
if (ignore_errors) {
ret = 0;
continue; /* continue at next VMA */
}
break;
}
nend = nstart + ret * PAGE_SIZE;
ret = 0;
}
if (locked)
up_read(&mm->mmap_sem);
return ret; /* 0 or negative error code */
}
/**
* get_dump_page() - pin user page in memory while writing it to core dump
* @addr: user address
*
* Returns struct page pointer of user page pinned for dump,
* to be freed afterwards by put_page().
*
* Returns NULL on any kind of failure - a hole must then be inserted into
* the corefile, to preserve alignment with its headers; and also returns
* NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
* allowing a hole to be left in the corefile to save diskspace.
*
* Called without mmap_sem, but after all other threads have been killed.
*/
#ifdef CONFIG_ELF_CORE
struct page *get_dump_page(unsigned long addr)
{
struct vm_area_struct *vma;
struct page *page;
if (__get_user_pages(current, current->mm, addr, 1,
FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma,
NULL) < 1)
return NULL;
flush_cache_page(vma, addr, page_to_pfn(page));
return page;
}
#endif /* CONFIG_ELF_CORE */
/*
* Generic Fast GUP
*
* get_user_pages_fast attempts to pin user pages by walking the page
* tables directly and avoids taking locks. Thus the walker needs to be
* protected from page table pages being freed from under it, and should
* block any THP splits.
*
* One way to achieve this is to have the walker disable interrupts, and
* rely on IPIs from the TLB flushing code blocking before the page table
* pages are freed. This is unsuitable for architectures that do not need
* to broadcast an IPI when invalidating TLBs.
*
 * Another way to achieve this is to batch up the pages containing page tables
 * that belong to more than one mm_user, then rcu_sched a callback to free those
* pages. Disabling interrupts will allow the fast_gup walker to both block
* the rcu_sched callback, and an IPI that we broadcast for splitting THPs
* (which is a relatively rare event). The code below adopts this strategy.
*
* Before activating this code, please be aware that the following assumptions
* are currently made:
*
* *) Either HAVE_RCU_TABLE_FREE is enabled, and tlb_remove_table() is used to
 * free pages containing page tables, or TLB flushing requires IPI broadcast.
*
* *) ptes can be read atomically by the architecture.
*
* *) access_ok is sufficient to validate userspace address ranges.
*
* The last two assumptions can be relaxed by the addition of helper functions.
*
* This code is based heavily on the PowerPC implementation by Nick Piggin.
*/
#ifdef CONFIG_HAVE_GENERIC_GUP
#ifndef gup_get_pte
/*
* We assume that the PTE can be read atomically. If this is not the case for
* your architecture, please provide the helper.
*/
static inline pte_t gup_get_pte(pte_t *ptep)
{
return READ_ONCE(*ptep);
}
#endif
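/*
 * Illustrative sketch, not part of the original source: an architecture
 * whose ptes are wider than its native word (x86 PAE is the real-world
 * case) could provide a tearing-free reader roughly like this; the
 * pte_low/pte_high fields are assumptions borrowed from that layout.
 */
#if 0
static inline pte_t gup_get_pte(pte_t *ptep)
{
	pte_t pte;

	do {
		pte.pte_low = READ_ONCE(ptep->pte_low);
		smp_rmb();
		pte.pte_high = READ_ONCE(ptep->pte_high);
		smp_rmb();
	} while (unlikely(pte.pte_low != READ_ONCE(ptep->pte_low)));

	return pte;
}
#endif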
static void undo_dev_pagemap(int *nr, int nr_start, struct page **pages)
{
while ((*nr) - nr_start) {
struct page *page = pages[--(*nr)];
ClearPageReferenced(page);
put_page(page);
}
}
#ifdef CONFIG_ARCH_HAS_PTE_SPECIAL
static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
int write, struct page **pages, int *nr)
{
struct dev_pagemap *pgmap = NULL;
int nr_start = *nr, ret = 0;
pte_t *ptep, *ptem;
ptem = ptep = pte_offset_map(&pmd, addr);
do {
pte_t pte = gup_get_pte(ptep);
struct page *head, *page;
/*
* Similar to the PMD case below, NUMA hinting must take slow
* path using the pte_protnone check.
*/
if (pte_protnone(pte))
goto pte_unmap;
if (!pte_access_permitted(pte, write))
goto pte_unmap;
if (pte_devmap(pte)) {
pgmap = get_dev_pagemap(pte_pfn(pte), pgmap);
if (unlikely(!pgmap)) {
undo_dev_pagemap(nr, nr_start, pages);
goto pte_unmap;
}
} else if (pte_special(pte))
goto pte_unmap;
VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
page = pte_page(pte);
head = compound_head(page);
if (!page_cache_get_speculative(head))
goto pte_unmap;
if (unlikely(pte_val(pte) != pte_val(*ptep))) {
put_page(head);
goto pte_unmap;
}
VM_BUG_ON_PAGE(compound_head(page) != head, page);
SetPageReferenced(page);
pages[*nr] = page;
(*nr)++;
} while (ptep++, addr += PAGE_SIZE, addr != end);
ret = 1;
pte_unmap:
if (pgmap)
put_dev_pagemap(pgmap);
pte_unmap(ptem);
return ret;
}
#else
/*
* If we can't determine whether or not a pte is special, then fail immediately
* for ptes. Note, we can still pin HugeTLB and THP as these are guaranteed not
* to be special.
*
* For a futex to be placed on a THP tail page, get_futex_key requires a
* __get_user_pages_fast implementation that can pin pages. Thus it's still
* useful to have gup_huge_pmd even if we can't operate on ptes.
*/
static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
int write, struct page **pages, int *nr)
{
return 0;
}
#endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */
#if defined(__HAVE_ARCH_PTE_DEVMAP) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
static int __gup_device_huge(unsigned long pfn, unsigned long addr,
unsigned long end, struct page **pages, int *nr)
{
int nr_start = *nr;
struct dev_pagemap *pgmap = NULL;
do {
struct page *page = pfn_to_page(pfn);
pgmap = get_dev_pagemap(pfn, pgmap);
if (unlikely(!pgmap)) {
undo_dev_pagemap(nr, nr_start, pages);
return 0;
}
SetPageReferenced(page);
pages[*nr] = page;
get_page(page);
(*nr)++;
pfn++;
} while (addr += PAGE_SIZE, addr != end);
if (pgmap)
put_dev_pagemap(pgmap);
return 1;
}
static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
unsigned long end, struct page **pages, int *nr)
{
unsigned long fault_pfn;
int nr_start = *nr;
fault_pfn = pmd_pfn(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
if (!__gup_device_huge(fault_pfn, addr, end, pages, nr))
return 0;
if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
undo_dev_pagemap(nr, nr_start, pages);
return 0;
}
return 1;
}
static int __gup_device_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
unsigned long end, struct page **pages, int *nr)
{
unsigned long fault_pfn;
int nr_start = *nr;
fault_pfn = pud_pfn(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
if (!__gup_device_huge(fault_pfn, addr, end, pages, nr))
return 0;
if (unlikely(pud_val(orig) != pud_val(*pudp))) {
undo_dev_pagemap(nr, nr_start, pages);
return 0;
}
return 1;
}
#else
static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
unsigned long end, struct page **pages, int *nr)
{
BUILD_BUG();
return 0;
}
static int __gup_device_huge_pud(pud_t pud, pud_t *pudp, unsigned long addr,
unsigned long end, struct page **pages, int *nr)
{
BUILD_BUG();
return 0;
}
#endif
static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
unsigned long end, int write, struct page **pages, int *nr)
{
struct page *head, *page;
int refs;
if (!pmd_access_permitted(orig, write))
return 0;
if (pmd_devmap(orig))
return __gup_device_huge_pmd(orig, pmdp, addr, end, pages, nr);
refs = 0;
page = pmd_page(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
do {
pages[*nr] = page;
(*nr)++;
page++;
refs++;
} while (addr += PAGE_SIZE, addr != end);
head = compound_head(pmd_page(orig));
if (!page_cache_add_speculative(head, refs)) {
*nr -= refs;
return 0;
}
if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
*nr -= refs;
while (refs--)
put_page(head);
return 0;
}
SetPageReferenced(head);
return 1;
}
static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
unsigned long end, int write, struct page **pages, int *nr)
{
struct page *head, *page;
int refs;
if (!pud_access_permitted(orig, write))
return 0;
if (pud_devmap(orig))
return __gup_device_huge_pud(orig, pudp, addr, end, pages, nr);
refs = 0;
page = pud_page(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
do {
pages[*nr] = page;
(*nr)++;
page++;
refs++;
} while (addr += PAGE_SIZE, addr != end);
head = compound_head(pud_page(orig));
if (!page_cache_add_speculative(head, refs)) {
*nr -= refs;
return 0;
}
if (unlikely(pud_val(orig) != pud_val(*pudp))) {
*nr -= refs;
while (refs--)
put_page(head);
return 0;
}
SetPageReferenced(head);
return 1;
}
static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
unsigned long end, int write,
struct page **pages, int *nr)
{
int refs;
struct page *head, *page;
if (!pgd_access_permitted(orig, write))
return 0;
BUILD_BUG_ON(pgd_devmap(orig));
refs = 0;
page = pgd_page(orig) + ((addr & ~PGDIR_MASK) >> PAGE_SHIFT);
do {
pages[*nr] = page;
(*nr)++;
page++;
refs++;
} while (addr += PAGE_SIZE, addr != end);
head = compound_head(pgd_page(orig));
if (!page_cache_add_speculative(head, refs)) {
*nr -= refs;
return 0;
}
if (unlikely(pgd_val(orig) != pgd_val(*pgdp))) {
*nr -= refs;
while (refs--)
put_page(head);
return 0;
}
SetPageReferenced(head);
return 1;
}
static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
int write, struct page **pages, int *nr)
{
unsigned long next;
pmd_t *pmdp;
pmdp = pmd_offset(&pud, addr);
do {
pmd_t pmd = READ_ONCE(*pmdp);
next = pmd_addr_end(addr, end);
if (!pmd_present(pmd))
return 0;
if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd) ||
pmd_devmap(pmd))) {
/*
* NUMA hinting faults need to be handled in the GUP
* slowpath for accounting purposes and so that they
* can be serialised against THP migration.
*/
if (pmd_protnone(pmd))
return 0;
if (!gup_huge_pmd(pmd, pmdp, addr, next, write,
pages, nr))
return 0;
} else if (unlikely(is_hugepd(__hugepd(pmd_val(pmd))))) {
/*
 * architectures may use a different format for hugetlbfs
 * pmds than for THP pmds
*/
if (!gup_huge_pd(__hugepd(pmd_val(pmd)), addr,
PMD_SHIFT, next, write, pages, nr))
return 0;
} else if (!gup_pte_range(pmd, addr, next, write, pages, nr))
return 0;
} while (pmdp++, addr = next, addr != end);
return 1;
}
static int gup_pud_range(p4d_t p4d, unsigned long addr, unsigned long end,
int write, struct page **pages, int *nr)
{
unsigned long next;
pud_t *pudp;
pudp = pud_offset(&p4d, addr);
do {
pud_t pud = READ_ONCE(*pudp);
next = pud_addr_end(addr, end);
if (pud_none(pud))
return 0;
if (unlikely(pud_huge(pud))) {
if (!gup_huge_pud(pud, pudp, addr, next, write,
pages, nr))
return 0;
} else if (unlikely(is_hugepd(__hugepd(pud_val(pud))))) {
if (!gup_huge_pd(__hugepd(pud_val(pud)), addr,
PUD_SHIFT, next, write, pages, nr))
return 0;
} else if (!gup_pmd_range(pud, addr, next, write, pages, nr))
return 0;
} while (pudp++, addr = next, addr != end);
return 1;
}
static int gup_p4d_range(pgd_t pgd, unsigned long addr, unsigned long end,
int write, struct page **pages, int *nr)
{
unsigned long next;
p4d_t *p4dp;
p4dp = p4d_offset(&pgd, addr);
do {
p4d_t p4d = READ_ONCE(*p4dp);
next = p4d_addr_end(addr, end);
if (p4d_none(p4d))
return 0;
BUILD_BUG_ON(p4d_huge(p4d));
if (unlikely(is_hugepd(__hugepd(p4d_val(p4d))))) {
if (!gup_huge_pd(__hugepd(p4d_val(p4d)), addr,
P4D_SHIFT, next, write, pages, nr))
return 0;
} else if (!gup_pud_range(p4d, addr, next, write, pages, nr))
return 0;
} while (p4dp++, addr = next, addr != end);
return 1;
}
static void gup_pgd_range(unsigned long addr, unsigned long end,
int write, struct page **pages, int *nr)
{
unsigned long next;
pgd_t *pgdp;
pgdp = pgd_offset(current->mm, addr);
do {
pgd_t pgd = READ_ONCE(*pgdp);
next = pgd_addr_end(addr, end);
if (pgd_none(pgd))
return;
if (unlikely(pgd_huge(pgd))) {
if (!gup_huge_pgd(pgd, pgdp, addr, next, write,
pages, nr))
return;
} else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) {
if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr,
PGDIR_SHIFT, next, write, pages, nr))
return;
} else if (!gup_p4d_range(pgd, addr, next, write, pages, nr))
return;
} while (pgdp++, addr = next, addr != end);
}
#ifndef gup_fast_permitted
/*
 * Check whether it's allowed to use __get_user_pages_fast() for the range,
 * or whether we need to fall back to the slow version:
*/
bool gup_fast_permitted(unsigned long start, int nr_pages, int write)
{
unsigned long len, end;
len = (unsigned long) nr_pages << PAGE_SHIFT;
end = start + len;
return end >= start;
}
#endif
/*
* Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to
* the regular GUP.
* Note a difference with get_user_pages_fast: this always returns the
* number of pages pinned, 0 if no pages were pinned.
*/
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
struct page **pages)
{
unsigned long len, end;
unsigned long flags;
int nr = 0;
start &= PAGE_MASK;
len = (unsigned long) nr_pages << PAGE_SHIFT;
end = start + len;
if (unlikely(!access_ok((void __user *)start, len)))
return 0;
/*
* Disable interrupts. We use the nested form as we can already have
* interrupts disabled by get_futex_key.
*
* With interrupts disabled, we block page table pages from being
* freed from under us. See struct mmu_table_batch comments in
* include/asm-generic/tlb.h for more details.
*
* We do not adopt an rcu_read_lock(.) here as we also want to
* block IPIs that come from THPs splitting.
*/
if (gup_fast_permitted(start, nr_pages, write)) {
local_irq_save(flags);
gup_pgd_range(start, end, write, pages, &nr);
local_irq_restore(flags);
}
return nr;
}
/**
* get_user_pages_fast() - pin user pages in memory
* @start: starting user address
* @nr_pages: number of pages from start to pin
* @write: whether pages will be written to
* @pages: array that receives pointers to the pages pinned.
* Should be at least nr_pages long.
*
* Attempt to pin user pages in memory without taking mm->mmap_sem.
* If not successful, it will fall back to taking the lock and
* calling get_user_pages().
*
* Returns number of pages pinned. This may be fewer than the number
* requested. If nr_pages is 0 or negative, returns 0. If no pages
* were pinned, returns -errno.
*/
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
struct page **pages)
{
unsigned long addr, len, end;
int nr = 0, ret = 0;
start &= PAGE_MASK;
addr = start;
len = (unsigned long) nr_pages << PAGE_SHIFT;
end = start + len;
if (nr_pages <= 0)
return 0;
if (unlikely(!access_ok((void __user *)start, len)))
return -EFAULT;
if (gup_fast_permitted(start, nr_pages, write)) {
local_irq_disable();
gup_pgd_range(addr, end, write, pages, &nr);
local_irq_enable();
ret = nr;
}
if (nr < nr_pages) {
/* Try to get the remaining pages with get_user_pages */
start += nr << PAGE_SHIFT;
pages += nr;
ret = get_user_pages_unlocked(start, nr_pages - nr, pages,
write ? FOLL_WRITE : 0);
/* Have to be a bit careful with return values */
if (nr > 0) {
if (ret < 0)
ret = nr;
else
ret += nr;
}
}
return ret;
}
#endif /* CONFIG_HAVE_GENERIC_GUP */
| ./CrossVul/dataset_final_sorted/CWE-416/c/bad_822_0 |